From ea8adc8c7c31a3b6c3a55a80f1403d1a6ad61b6a Mon Sep 17 00:00:00 2001
From: Ximin Luo
Date: Sat, 25 Nov 2017 15:21:16 +0100
Subject: [PATCH] New upstream version 1.22.1+dfsg1

---
 CONTRIBUTING.md | 108 +- README.md | 12 + RELEASES.md | 106 +- config.toml.example | 379 + configure | 782 +- git-commit-hash | 1 + src/Cargo.lock | 910 +- src/Cargo.toml | 9 + src/bootstrap/Cargo.toml | 2 +- src/bootstrap/README.md | 7 +- src/bootstrap/bin/rustc.rs | 6 + src/bootstrap/bin/sccache-plus-cl.rs | 5 +- src/bootstrap/bootstrap.py | 345 +- src/bootstrap/bootstrap_test.py | 6 +- src/bootstrap/builder.rs | 38 +- src/bootstrap/{cc.rs => cc_detect.rs} | 49 +- src/bootstrap/channel.rs | 4 +- src/bootstrap/check.rs | 242 +- src/bootstrap/clean.rs | 35 +- src/bootstrap/config.rs | 324 +- src/bootstrap/configure.py | 417 + src/bootstrap/dist.rs | 77 +- src/bootstrap/doc.rs | 5 - src/bootstrap/flags.rs | 23 +- src/bootstrap/lib.rs | 70 +- src/bootstrap/mk/Makefile.in | 4 +- src/bootstrap/native.rs | 24 +- src/bootstrap/sanity.rs | 5 +- src/bootstrap/tool.rs | 172 +- src/bootstrap/toolstate.rs | 51 + src/bootstrap/util.rs | 11 +- src/build_helper/lib.rs | 80 +- src/ci/docker/README.md | 2 +- src/ci/docker/arm-android/Dockerfile | 14 +- src/ci/docker/cross/Dockerfile | 5 +- src/ci/docker/cross/build-arm-musl.sh | 27 + src/ci/docker/cross2/Dockerfile | 52 + .../docker/cross2/build-fuchsia-toolchain.sh | 65 + .../docker/cross2/build-solaris-toolchain.sh | 107 + .../docker/{dist-fuchsia => cross2}/shared.sh | 0 src/ci/docker/disabled/aarch64-gnu/Dockerfile | 2 +- .../disabled/dist-aarch64-android/Dockerfile | 2 +- .../disabled/dist-armv7-android/Dockerfile | 16 +- .../disabled/dist-i686-android/Dockerfile | 16 +- .../disabled/dist-x86_64-android/Dockerfile | 2 +- .../disabled/dist-x86_64-haiku/Dockerfile | 49 + .../dist-x86_64-haiku/build-toolchain.sh | 74 + .../dist-x86_64-haiku/fetch-packages.sh | 18 + .../disabled/dist-x86_64-haiku/llvm-config.sh | 67 + src/ci/docker/disabled/wasm32-exp/Dockerfile | 2 +- src/ci/docker/dist-android/Dockerfile | 12 +- src/ci/docker/dist-fuchsia/Dockerfile | 42 - src/ci/docker/dist-fuchsia/build-toolchain.sh | 126 - .../dist-fuchsia/compiler-rt-dso-handle.patch | 41 - .../build-powerpc64le-toolchain.sh | 2 +- src/ci/docker/run.sh | 8 +- src/ci/docker/scripts/android-sdk.sh | 52 +- src/ci/docker/x86_64-gnu-aux/Dockerfile | 2 +- src/ci/docker/x86_64-gnu-distcheck/Dockerfile | 2 +- src/ci/run.sh | 6 +- src/doc/book/.travis.yml | 1 + src/doc/book/README.md | 6 +- src/doc/book/second-edition/dictionary.txt | 32 + .../book/second-edition/nostarch/chapter02.md | 176 +- .../book/second-edition/nostarch/chapter03.md | 296 +- .../book/second-edition/nostarch/chapter05.md | 210 +- .../book/second-edition/nostarch/chapter08.md | 829 +- .../book/second-edition/nostarch/chapter09.md | 812 +- .../book/second-edition/nostarch/chapter13.md | 1097 +- .../book/second-edition/nostarch/chapter14.md | 292 +- .../book/second-edition/nostarch/chapter15.md | 2598 ++- .../book/second-edition/nostarch/chapter16.md | 1133 +- .../src/appendix-02-operators.md | 44 +- .../second-edition/src/ch01-02-hello-world.md | 10 +- .../src/ch02-00-guessing-game-tutorial.md | 41 +- .../src/ch03-01-variables-and-mutability.md | 2 + .../second-edition/src/ch03-02-data-types.md | 31 +- .../src/ch03-03-how-functions-work.md | 32 +- .../second-edition/src/ch03-04-comments.md | 3 +- .../src/ch03-05-control-flow.md | 14 +- .../second-edition/src/ch05-00-structs.md | 8 +- .../src/ch05-01-defining-structs.md | 106 +- 
.../src/ch05-02-example-structs.md | 62 +- .../src/ch05-03-method-syntax.md | 36 +- .../src/ch06-01-defining-an-enum.md | 2 +- .../book/second-edition/src/ch06-02-match.md | 8 +- .../second-edition/src/ch07-00-modules.md | 2 +- .../src/ch07-01-mod-and-the-filesystem.md | 4 +- .../src/ch07-03-importing-names-with-use.md | 2 +- .../src/ch08-00-common-collections.md | 18 +- .../second-edition/src/ch08-01-vectors.md | 262 +- .../second-edition/src/ch08-02-strings.md | 349 +- .../second-edition/src/ch08-03-hash-maps.md | 207 +- .../src/ch09-00-error-handling.md | 27 +- ...ch09-01-unrecoverable-errors-with-panic.md | 149 +- .../ch09-02-recoverable-errors-with-result.md | 269 +- .../src/ch09-03-to-panic-or-not-to-panic.md | 188 +- .../book/second-edition/src/ch10-02-traits.md | 24 +- .../src/ch10-03-lifetime-syntax.md | 10 +- .../src/ch11-01-writing-tests.md | 11 +- .../src/ch11-02-running-tests.md | 2 +- ...2-04-testing-the-librarys-functionality.md | 2 +- ...-06-writing-to-stderr-instead-of-stdout.md | 2 +- .../src/ch13-00-functional-features.md | 17 +- .../second-edition/src/ch13-01-closures.md | 661 +- .../second-edition/src/ch13-02-iterators.md | 330 +- .../src/ch13-03-improving-our-io-project.md | 108 +- .../second-edition/src/ch13-04-performance.md | 43 +- .../src/ch14-00-more-about-cargo.md | 5 +- .../src/ch14-01-release-profiles.md | 70 +- .../src/ch14-02-publishing-to-crates-io.md | 152 +- .../src/ch14-03-cargo-workspaces.md | 56 +- .../src/ch14-04-installing-binaries.md | 7 +- .../src/ch15-00-smart-pointers.md | 118 +- .../book/second-edition/src/ch15-01-box.md | 318 +- .../book/second-edition/src/ch15-02-deref.md | 537 +- .../book/second-edition/src/ch15-03-drop.md | 271 +- src/doc/book/second-edition/src/ch15-04-rc.md | 231 +- .../src/ch15-05-interior-mutability.md | 586 +- .../src/ch15-06-reference-cycles.md | 539 +- .../second-edition/src/ch16-00-concurrency.md | 88 +- .../second-edition/src/ch16-01-threads.md | 297 +- .../src/ch16-02-message-passing.md | 249 +- .../src/ch16-03-shared-state.md | 371 +- ...04-extensible-concurrency-sync-and-send.md | 125 +- .../ch18-01-all-the-places-for-patterns.md | 2 +- .../src/ch18-02-refutability.md | 4 +- .../src/ch19-02-advanced-lifetimes.md | 2 +- .../src/ch19-03-advanced-traits.md | 329 +- .../src/ch19-04-advanced-types.md | 14 +- ...ch19-05-advanced-functions-and-closures.md | 4 +- .../src/ch20-01-single-threaded.md | 2 +- .../ch20-05-sending-requests-via-channels.md | 2 +- .../ch20-06-graceful-shutdown-and-cleanup.md | 36 +- src/doc/book/second-edition/theme/index.hbs | 79 +- .../book/second-edition/tools/docx-to-md.xsl | 7 +- {man => src/doc/man}/rustc.1 | 0 {man => src/doc/man}/rustdoc.1 | 0 src/doc/nomicon/src/other-reprs.md | 2 - src/doc/reference/src/SUMMARY.md | 32 + src/doc/reference/src/attributes.md | 19 +- .../src/behavior-considered-undefined.md | 59 +- src/doc/reference/src/comments.md | 8 +- .../reference/src/crates-and-source-files.md | 2 +- .../reference/src/dynamically-sized-types.md | 32 + src/doc/reference/src/expressions.md | 1381 +- .../reference/src/expressions/array-expr.md | 53 + .../reference/src/expressions/block-expr.md | 34 + .../reference/src/expressions/call-expr.md | 89 + .../reference/src/expressions/closure-expr.md | 58 + .../src/expressions/enum-variant-expr.md | 15 + .../reference/src/expressions/field-expr.md | 43 + src/doc/reference/src/expressions/if-expr.md | 59 + .../reference/src/expressions/literal-expr.md | 11 + .../reference/src/expressions/loop-expr.md | 167 + 
.../reference/src/expressions/match-expr.md | 128 + .../src/expressions/method-call-expr.md | 51 + .../src/expressions/operator-expr.md | 359 + .../reference/src/expressions/path-expr.md | 21 + .../reference/src/expressions/range-expr.md | 20 + .../reference/src/expressions/return-expr.md | 17 + .../reference/src/expressions/struct-expr.md | 80 + .../reference/src/expressions/tuple-expr.md | 38 + src/doc/reference/src/interior-mutability.md | 30 + src/doc/reference/src/introduction.md | 4 + src/doc/reference/src/items.md | 1320 +- src/doc/reference/src/items/constant-items.md | 42 + src/doc/reference/src/items/enumerations.md | 69 + src/doc/reference/src/items/extern-crates.md | 37 + .../reference/src/items/external-blocks.md | 83 + src/doc/reference/src/items/functions.md | 167 + .../reference/src/items/implementations.md | 76 + src/doc/reference/src/items/modules.md | 63 + src/doc/reference/src/items/static-items.md | 105 + src/doc/reference/src/items/structs.md | 45 + src/doc/reference/src/items/traits.md | 308 + src/doc/reference/src/items/type-aliases.md | 25 + src/doc/reference/src/items/unions.md | 141 + .../reference/src/items/use-declarations.md | 113 + src/doc/reference/src/keywords.md | 6 +- src/doc/reference/src/macros-by-example.md | 4 +- src/doc/reference/src/macros.md | 6 +- src/doc/reference/src/statements.md | 4 +- .../reference/src/string-table-productions.md | 6 +- src/doc/reference/src/tokens.md | 185 +- src/doc/reference/src/types.md | 382 +- src/doc/reference/src/unsafety.md | 5 +- src/doc/reference/theme/index.hbs | 227 + src/doc/rustdoc/src/documentation-tests.md | 41 +- .../src/language-features/doc-masked.md | 24 + .../src/language-features/fn-must-use.md | 30 + .../src/language-features/generators.md | 245 + .../inclusive-range-syntax.md | 4 +- .../language-features/match-beginning-vert.md | 23 + .../match_default_bindings.md | 58 + .../src/language-features/trace-macros.md | 39 + .../src/library-features/entry-and-modify.md | 77 + .../src/library-features/entry-or-default.md | 13 + .../src/library-features/splice.md | 3 +- src/etc/htmldocck.py | 18 + src/etc/platform-intrinsics/powerpc.json | 133 + src/liballoc/arc.rs | 62 +- src/liballoc/binary_heap.rs | 2 +- src/liballoc/boxed.rs | 65 +- src/liballoc/btree/map.rs | 61 + src/liballoc/btree/set.rs | 14 +- src/liballoc/lib.rs | 4 +- src/liballoc/linked_list.rs | 2 +- src/liballoc/macros.rs | 48 +- src/liballoc/rc.rs | 105 +- src/liballoc/slice.rs | 45 +- src/liballoc/str.rs | 94 +- src/liballoc/string.rs | 143 +- src/liballoc/tests/btree/map.rs | 28 +- src/liballoc/tests/lib.rs | 16 + src/liballoc/tests/str.rs | 16 +- src/liballoc/tests/string.rs | 24 +- src/liballoc/tests/vec.rs | 28 +- src/liballoc/vec.rs | 24 +- src/liballoc/vec_deque.rs | 24 +- src/liballoc_jemalloc/Cargo.toml | 2 +- src/liballoc_jemalloc/build.rs | 20 +- src/liballoc_jemalloc/lib.rs | 3 + src/liballoc_system/lib.rs | 7 +- src/libbacktrace/configure | 9 + src/libbacktrace/configure.ac | 9 + src/libbacktrace/filetype.awk | 6 + src/libbacktrace/macho.c | 1416 ++ src/libcompiler_builtins/Cargo.toml | 7 +- src/libcompiler_builtins/README.md | 6 +- src/libcompiler_builtins/appveyor.yml | 4 +- src/libcompiler_builtins/build.rs | 24 +- src/libcompiler_builtins/src/float/add.rs | 318 +- src/libcompiler_builtins/src/float/conv.rs | 18 +- src/libcompiler_builtins/src/float/mod.rs | 193 +- src/libcompiler_builtins/src/float/pow.rs | 22 +- src/libcompiler_builtins/src/float/sub.rs | 4 +- src/libcompiler_builtins/src/int/mod.rs | 139 +- 
src/libcompiler_builtins/src/int/mul.rs | 14 +- src/libcompiler_builtins/src/int/sdiv.rs | 10 +- src/libcompiler_builtins/src/int/shift.rs | 16 +- src/libcompiler_builtins/src/int/udiv.rs | 38 +- src/libcompiler_builtins/src/lib.rs | 3 + src/libcompiler_builtins/src/probestack.rs | 8 +- src/libcompiler_builtins/src/x86.rs | 71 + src/libcompiler_builtins/src/x86_64.rs | 86 +- src/libcompiler_builtins/tests/floattidf.rs | 1 + src/libcore/array.rs | 8 - src/libcore/benches/iter.rs | 129 + src/libcore/cell.rs | 3 + src/libcore/clone.rs | 44 +- src/libcore/cmp.rs | 4 +- src/libcore/convert.rs | 41 +- src/libcore/fmt/builders.rs | 10 +- src/libcore/fmt/mod.rs | 43 +- src/libcore/fmt/num.rs | 3 +- src/libcore/hash/mod.rs | 46 + src/libcore/internal_macros.rs | 19 + src/libcore/intrinsics.rs | 52 +- src/libcore/iter/iterator.rs | 2 +- src/libcore/iter/mod.rs | 243 +- src/libcore/iter/range.rs | 2 + src/libcore/iter/traits.rs | 81 + src/libcore/lib.rs | 18 + src/libcore/macros.rs | 77 +- src/libcore/marker.rs | 2 +- src/libcore/mem.rs | 234 +- src/libcore/nonzero.rs | 4 + src/libcore/num/dec2flt/mod.rs | 1 - src/libcore/num/f32.rs | 2 - src/libcore/num/mod.rs | 54 +- src/libcore/num/wrapping.rs | 12 + src/libcore/ops/arith.rs | 10 + src/libcore/ops/bit.rs | 10 + src/libcore/ops/deref.rs | 4 +- src/libcore/ops/generator.rs | 131 + src/libcore/ops/mod.rs | 6 +- src/libcore/ops/range.rs | 50 +- src/libcore/ops/try.rs | 20 +- src/libcore/ops/unsize.rs | 2 +- src/libcore/option.rs | 48 +- src/libcore/ptr.rs | 1310 +- src/libcore/slice/mod.rs | 46 +- src/libcore/str/mod.rs | 81 +- src/libcore/sync/atomic.rs | 58 +- src/libcore/tests/char.rs | 1 - src/libcore/tests/hash/mod.rs | 20 +- src/libcore/tests/iter.rs | 224 +- src/libcore/tests/lib.rs | 7 +- src/libcore/tests/num/mod.rs | 10 +- src/libcore/tests/option.rs | 27 + src/libcore/tests/result.rs | 29 + src/libcore/tuple.rs | 8 - src/libgetopts/lib.rs | 11 +- src/liblibc/Cargo.lock | 12 +- src/liblibc/Cargo.toml | 2 +- src/liblibc/README.md | 3 - src/liblibc/ci/README.md | 2 +- src/liblibc/ci/android-install-sdk.sh | 2 +- .../aarch64-unknown-linux-musl/Dockerfile | 24 - src/liblibc/libc-test/build.rs | 10 - src/liblibc/src/{redox/mod.rs => redox.rs} | 68 +- src/liblibc/src/redox/net.rs | 110 - src/liblibc/src/unix/bsd/apple/mod.rs | 8 - .../src/unix/bsd/freebsdlike/freebsd/mod.rs | 2 - src/liblibc/src/unix/bsd/mod.rs | 7 - src/liblibc/src/unix/bsd/netbsdlike/mod.rs | 2 - .../src/unix/bsd/netbsdlike/netbsd/mod.rs | 59 - src/liblibc/src/unix/haiku/mod.rs | 345 +- src/liblibc/src/unix/mod.rs | 9 + src/liblibc/src/unix/newlib/mod.rs | 9 - src/liblibc/src/unix/notbsd/android/mod.rs | 23 - src/liblibc/src/unix/notbsd/linux/mips/mod.rs | 28 - src/liblibc/src/unix/notbsd/linux/mod.rs | 10 +- .../src/unix/notbsd/linux/musl/b32/mod.rs | 21 - .../src/unix/notbsd/linux/musl/b64/aarch64.rs | 74 - .../src/unix/notbsd/linux/musl/b64/mod.rs | 49 + .../unix/notbsd/linux/musl/b64/powerpc64.rs | 73 - .../src/unix/notbsd/linux/musl/b64/x86_64.rs | 71 - src/liblibc/src/unix/notbsd/linux/musl/mod.rs | 18 +- .../src/unix/notbsd/linux/other/mod.rs | 28 - src/liblibc/src/unix/notbsd/mod.rs | 17 - src/liblibc/src/unix/solaris/mod.rs | 13 - src/liblibc/src/unix/uclibc/mod.rs | 8 - src/libproc_macro/Cargo.toml | 1 + src/libproc_macro/diagnostic.rs | 134 + src/libproc_macro/lib.rs | 246 +- src/libproc_macro/quote.rs | 4 +- src/libprofiler_builtins/Cargo.toml | 2 +- src/libprofiler_builtins/build.rs | 4 +- src/librand/isaac.rs | 3 +- src/librand/lib.rs | 3 +- src/librustc/Cargo.toml 
| 2 +- src/librustc/README.md | 341 +- .../mem.rs => librustc/benches/dispatch.rs} | 26 - src/librustc/benches/lib.rs | 19 + src/librustc/benches/pattern.rs | 35 + src/librustc/cfg/construct.rs | 100 +- src/librustc/cfg/graphviz.rs | 64 +- src/librustc/cfg/mod.rs | 13 +- src/librustc/dep_graph/dep_node.rs | 277 +- src/librustc/dep_graph/edges.rs | 265 - src/librustc/dep_graph/graph.rs | 680 +- src/librustc/dep_graph/mod.rs | 15 +- src/librustc/dep_graph/prev.rs | 60 + src/librustc/dep_graph/query.rs | 5 +- src/librustc/dep_graph/raii.rs | 32 +- src/librustc/dep_graph/safe.rs | 7 + src/librustc/dep_graph/serialized.rs | 50 + src/librustc/diagnostics.rs | 254 +- src/librustc/hir/README.md | 119 + src/librustc/hir/def.rs | 13 +- src/librustc/hir/def_id.rs | 26 +- src/librustc/hir/intravisit.rs | 43 +- src/librustc/hir/lowering.rs | 306 +- src/librustc/hir/map/README.md | 4 + src/librustc/hir/map/blocks.rs | 2 +- src/librustc/hir/map/collector.rs | 191 +- src/librustc/hir/map/def_collector.rs | 43 +- src/librustc/hir/map/definitions.rs | 60 +- src/librustc/hir/map/mod.rs | 66 +- src/librustc/hir/mod.rs | 143 +- src/librustc/hir/pat_util.rs | 14 +- src/librustc/hir/print.rs | 320 +- src/librustc/ich/caching_codemap_view.rs | 11 +- src/librustc/ich/hcx.rs | 354 +- src/librustc/ich/impls_cstore.rs | 41 + src/librustc/ich/impls_hir.rs | 370 +- src/librustc/ich/impls_mir.rs | 149 +- src/librustc/ich/impls_misc.rs | 26 + src/librustc/ich/impls_syntax.rs | 103 +- src/librustc/ich/impls_ty.rs | 394 +- src/librustc/ich/mod.rs | 6 +- .../error_reporting/anon_anon_conflict.rs | 302 - .../error_reporting/different_lifetimes.rs | 394 + src/librustc/infer/error_reporting/mod.rs | 184 +- .../error_reporting/named_anon_conflict.rs | 80 +- src/librustc/infer/error_reporting/note.rs | 128 +- src/librustc/infer/error_reporting/util.rs | 193 +- src/librustc/infer/freshen.rs | 184 +- src/librustc/infer/higher_ranked/mod.rs | 56 +- src/librustc/infer/mod.rs | 33 +- .../infer/region_inference/graphviz.rs | 10 +- src/librustc/infer/region_inference/mod.rs | 8 +- src/librustc/lib.rs | 33 +- src/librustc/lint/builtin.rs | 9 +- src/librustc/lint/levels.rs | 87 +- src/librustc/lint/mod.rs | 17 + src/librustc/macros.rs | 16 +- src/librustc/middle/const_val.rs | 103 +- src/librustc/middle/cstore.rs | 230 +- src/librustc/middle/dataflow.rs | 96 +- src/librustc/middle/dead.rs | 33 +- src/librustc/middle/dependency_format.rs | 159 +- src/librustc/middle/effect.rs | 271 - src/librustc/middle/exported_symbols.rs | 36 + src/librustc/middle/expr_use_visitor.rs | 51 +- src/librustc/middle/free_region.rs | 25 +- src/librustc/middle/lang_items.rs | 137 +- src/librustc/middle/liveness.rs | 40 +- src/librustc/middle/mem_categorization.rs | 102 +- src/librustc/middle/reachable.rs | 63 +- src/librustc/middle/region.rs | 786 +- src/librustc/middle/resolve_lifetime.rs | 181 +- src/librustc/middle/stability.rs | 265 +- src/librustc/middle/trans.rs | 189 + src/librustc/middle/weak_lang_items.rs | 40 +- src/librustc/mir/cache.rs | 4 +- src/librustc/mir/mod.rs | 383 +- src/librustc/mir/tcx.rs | 16 +- src/librustc/mir/transform.rs | 6 +- src/librustc/mir/visit.rs | 93 +- src/librustc/session/config.rs | 240 +- src/librustc/session/mod.rs | 64 +- src/librustc/traits/coherence.rs | 34 +- src/librustc/traits/error_reporting.rs | 295 +- src/librustc/traits/fulfill.rs | 25 +- src/librustc/traits/mod.rs | 40 +- src/librustc/traits/object_safety.rs | 8 +- src/librustc/traits/on_unimplemented.rs | 307 + src/librustc/traits/project.rs | 148 +- 
src/librustc/traits/select.rs | 383 +- src/librustc/traits/specialize/mod.rs | 26 +- .../traits/specialize/specialization_graph.rs | 43 +- src/librustc/traits/structural_impls.rs | 46 + src/librustc/traits/trans/mod.rs | 15 +- src/librustc/traits/util.rs | 22 +- src/librustc/ty/README.md | 165 + src/librustc/ty/adjustment.rs | 4 +- src/librustc/ty/context.rs | 636 +- src/librustc/ty/error.rs | 59 +- src/librustc/ty/fast_reject.rs | 96 +- src/librustc/ty/flags.rs | 49 +- src/librustc/ty/fold.rs | 22 +- src/librustc/ty/inhabitedness/mod.rs | 2 +- src/librustc/ty/instance.rs | 221 +- src/librustc/ty/item_path.rs | 15 +- src/librustc/ty/layout.rs | 189 +- src/librustc/ty/maps.rs | 1174 -- src/librustc/ty/maps/README.md | 302 + src/librustc/ty/maps/config.rs | 504 + src/librustc/ty/maps/keys.rs | 162 + src/librustc/ty/maps/mod.rs | 464 + src/librustc/ty/maps/plumbing.rs | 865 + src/librustc/ty/maps/values.rs | 49 + src/librustc/ty/mod.rs | 289 +- src/librustc/ty/outlives.rs | 10 + src/librustc/ty/relate.rs | 70 +- src/librustc/ty/structural_impls.rs | 318 +- src/librustc/ty/sty.rs | 130 +- src/librustc/ty/trait_def.rs | 34 +- src/librustc/ty/util.rs | 42 +- src/librustc/ty/walk.rs | 50 +- src/librustc/ty/wf.rs | 62 +- src/librustc/util/common.rs | 5 +- src/librustc/util/ppaux.rs | 111 +- src/librustc_allocator/expand.rs | 5 +- src/librustc_apfloat/Cargo.toml | 3 +- src/librustc_apfloat/lib.rs | 34 +- src/librustc_back/README.md | 6 + src/librustc_back/target/aarch64_apple_ios.rs | 1 + .../target/aarch64_linux_android.rs | 1 + .../target/aarch64_unknown_freebsd.rs | 1 + .../target/aarch64_unknown_fuchsia.rs | 1 + .../target/aarch64_unknown_linux_gnu.rs | 1 + .../target/aarch64_unknown_linux_musl.rs | 37 + .../target/arm_linux_androideabi.rs | 3 +- .../target/arm_unknown_linux_gnueabi.rs | 3 +- .../target/arm_unknown_linux_gnueabihf.rs | 3 +- .../target/arm_unknown_linux_musleabi.rs | 3 +- .../target/arm_unknown_linux_musleabihf.rs | 3 +- .../target/armv5te_unknown_linux_gnueabi.rs | 9 +- src/librustc_back/target/armv7_apple_ios.rs | 1 + .../target/armv7_linux_androideabi.rs | 3 +- .../target/armv7_unknown_linux_gnueabihf.rs | 1 + .../target/armv7_unknown_linux_musleabihf.rs | 1 + src/librustc_back/target/armv7s_apple_ios.rs | 1 + .../target/asmjs_unknown_emscripten.rs | 1 + src/librustc_back/target/i386_apple_ios.rs | 1 + src/librustc_back/target/i686_apple_darwin.rs | 1 + .../target/i686_linux_android.rs | 1 + .../target/i686_pc_windows_gnu.rs | 1 + .../target/i686_pc_windows_msvc.rs | 1 + .../target/i686_unknown_dragonfly.rs | 1 + .../target/i686_unknown_freebsd.rs | 1 + .../target/i686_unknown_haiku.rs | 1 + .../target/i686_unknown_linux_gnu.rs | 1 + .../target/i686_unknown_linux_musl.rs | 1 + .../target/i686_unknown_netbsd.rs | 1 + .../target/i686_unknown_openbsd.rs | 1 + src/librustc_back/target/le32_unknown_nacl.rs | 50 - .../target/mips64_unknown_linux_gnuabi64.rs | 1 + .../target/mips64el_unknown_linux_gnuabi64.rs | 1 + .../target/mips_unknown_linux_gnu.rs | 1 + .../target/mips_unknown_linux_musl.rs | 1 + .../target/mips_unknown_linux_uclibc.rs | 1 + .../target/mipsel_unknown_linux_gnu.rs | 1 + .../target/mipsel_unknown_linux_musl.rs | 1 + .../target/mipsel_unknown_linux_uclibc.rs | 1 + src/librustc_back/target/mod.rs | 12 +- src/librustc_back/target/msp430_none_elf.rs | 1 + .../target/powerpc64_unknown_linux_gnu.rs | 1 + .../target/powerpc64le_unknown_linux_gnu.rs | 1 + .../target/powerpc_unknown_linux_gnu.rs | 1 + .../target/s390x_unknown_linux_gnu.rs | 2 + 
.../target/sparc64_unknown_linux_gnu.rs | 1 + .../target/sparc64_unknown_netbsd.rs | 1 + .../target/sparcv9_sun_solaris.rs | 2 + .../target/thumbv6m_none_eabi.rs | 1 + .../target/thumbv7em_none_eabi.rs | 1 + .../target/thumbv7em_none_eabihf.rs | 1 + .../target/thumbv7m_none_eabi.rs | 1 + .../target/wasm32_experimental_emscripten.rs | 1 + .../target/wasm32_unknown_emscripten.rs | 1 + .../target/x86_64_apple_darwin.rs | 1 + src/librustc_back/target/x86_64_apple_ios.rs | 1 + .../target/x86_64_linux_android.rs | 1 + .../target/x86_64_pc_windows_gnu.rs | 1 + .../target/x86_64_pc_windows_msvc.rs | 1 + .../target/x86_64_rumprun_netbsd.rs | 1 + .../target/x86_64_sun_solaris.rs | 1 + .../target/x86_64_unknown_bitrig.rs | 1 + .../target/x86_64_unknown_dragonfly.rs | 1 + .../target/x86_64_unknown_freebsd.rs | 1 + .../target/x86_64_unknown_fuchsia.rs | 1 + .../target/x86_64_unknown_haiku.rs | 1 + .../target/x86_64_unknown_l4re_uclibc.rs | 1 + .../target/x86_64_unknown_linux_gnu.rs | 1 + .../target/x86_64_unknown_linux_musl.rs | 1 + .../target/x86_64_unknown_netbsd.rs | 1 + .../target/x86_64_unknown_openbsd.rs | 1 + .../target/x86_64_unknown_redox.rs | 1 + src/librustc_bitflags/Cargo.toml | 9 - src/librustc_bitflags/lib.rs | 494 - src/librustc_borrowck/borrowck/check_loans.rs | 193 +- .../borrowck/gather_loans/gather_moves.rs | 15 +- .../borrowck/gather_loans/lifetime.rs | 7 +- .../borrowck/gather_loans/mod.rs | 55 +- .../borrowck/gather_loans/move_error.rs | 41 +- src/librustc_borrowck/borrowck/mod.rs | 472 +- src/librustc_borrowck/borrowck/move_data.rs | 53 +- src/librustc_borrowck/diagnostics.rs | 353 +- src/librustc_borrowck/graphviz.rs | 2 +- src/librustc_const_eval/_match.rs | 120 +- src/librustc_const_eval/check_match.rs | 12 +- src/librustc_const_eval/diagnostics.rs | 2 +- src/librustc_const_eval/eval.rs | 215 +- src/librustc_const_eval/lib.rs | 4 +- src/librustc_const_eval/pattern.rs | 112 +- src/librustc_const_math/int.rs | 8 +- src/librustc_const_math/is.rs | 26 +- src/librustc_const_math/lib.rs | 5 +- src/librustc_const_math/us.rs | 26 +- src/librustc_cratesio_shim/Cargo.toml | 23 + src/librustc_cratesio_shim/src/lib.rs | 14 + src/librustc_data_structures/bitslice.rs | 5 + src/librustc_data_structures/bitvec.rs | 2 +- src/librustc_data_structures/graph/mod.rs | 7 + src/librustc_data_structures/indexed_set.rs | 53 +- src/librustc_data_structures/indexed_vec.rs | 39 +- src/librustc_data_structures/snapshot_vec.rs | 7 + src/librustc_data_structures/stable_hasher.rs | 226 +- .../transitive_relation.rs | 6 +- src/librustc_data_structures/unify/mod.rs | 20 +- src/librustc_driver/README.md | 12 + src/librustc_driver/driver.rs | 270 +- src/librustc_driver/lib.rs | 131 +- src/librustc_driver/pretty.rs | 46 +- src/librustc_driver/profile/trace.rs | 4 +- src/librustc_driver/test.rs | 104 +- src/librustc_errors/diagnostic.rs | 6 +- src/librustc_errors/diagnostic_builder.rs | 13 + src/librustc_errors/emitter.rs | 14 +- src/librustc_errors/lib.rs | 24 +- src/librustc_errors/styled_buffer.rs | 17 +- src/librustc_incremental/assert_dep_graph.rs | 2 +- src/librustc_incremental/calculate_svh/mod.rs | 321 - src/librustc_incremental/lib.rs | 7 +- src/librustc_incremental/persist/data.rs | 77 +- .../persist/dirty_clean.rs | 162 +- src/librustc_incremental/persist/fs.rs | 171 +- src/librustc_incremental/persist/hash.rs | 225 - src/librustc_incremental/persist/load.rs | 412 +- src/librustc_incremental/persist/mod.rs | 5 +- .../persist/preds/compress/README.md | 48 - .../persist/preds/compress/classify/mod.rs | 
151 - .../persist/preds/compress/classify/test.rs | 94 - .../persist/preds/compress/construct.rs | 223 - .../persist/preds/compress/dag_id.rs | 43 - .../persist/preds/compress/mod.rs | 125 - .../persist/preds/compress/test.rs | 259 - .../persist/preds/compress/test_macro.rs | 39 - src/librustc_incremental/persist/preds/mod.rs | 110 - src/librustc_incremental/persist/save.rs | 172 +- .../persist/work_product.rs | 10 +- src/librustc_lint/bad_style.rs | 9 +- src/librustc_lint/builtin.rs | 61 +- src/librustc_lint/lib.rs | 1 - src/librustc_lint/types.rs | 33 +- src/librustc_lint/unused.rs | 174 +- src/librustc_llvm/Cargo.toml | 5 +- src/librustc_llvm/build.rs | 8 +- src/librustc_llvm/ffi.rs | 103 +- src/librustc_llvm/lib.rs | 13 +- src/librustc_metadata/astencode.rs | 29 +- src/librustc_metadata/creader.rs | 391 +- src/librustc_metadata/cstore.rs | 174 +- src/librustc_metadata/cstore_impl.rs | 479 +- src/librustc_metadata/decoder.rs | 156 +- src/librustc_metadata/diagnostics.rs | 6 +- src/librustc_metadata/encoder.rs | 71 +- src/librustc_metadata/isolated_encoder.rs | 16 +- src/librustc_metadata/lib.rs | 2 + src/librustc_metadata/link_args.rs | 65 + src/librustc_metadata/native_libs.rs | 217 + src/librustc_metadata/schema.rs | 89 +- src/librustc_mir/Cargo.toml | 3 +- src/librustc_mir/borrow_check.rs | 852 +- src/librustc_mir/build/block.rs | 105 +- src/librustc_mir/build/cfg.rs | 22 +- src/librustc_mir/build/expr/as_constant.rs | 2 +- src/librustc_mir/build/expr/as_lvalue.rs | 18 +- src/librustc_mir/build/expr/as_operand.rs | 14 +- src/librustc_mir/build/expr/as_rvalue.rs | 95 +- src/librustc_mir/build/expr/as_temp.rs | 28 +- src/librustc_mir/build/expr/category.rs | 1 + src/librustc_mir/build/expr/into.rs | 29 +- src/librustc_mir/build/expr/stmt.rs | 22 +- src/librustc_mir/build/matches/mod.rs | 42 +- src/librustc_mir/build/matches/test.rs | 42 +- src/librustc_mir/build/misc.rs | 27 +- src/librustc_mir/build/mod.rs | 167 +- src/librustc_mir/build/scope.rs | 335 +- src/librustc_mir/dataflow/impls/borrows.rs | 45 +- src/librustc_mir/dataflow/impls/mod.rs | 4 + .../dataflow/impls/storage_liveness.rs | 82 + src/librustc_mir/dataflow/mod.rs | 30 + .../dataflow/move_paths/abs_domain.rs | 7 +- .../dataflow/move_paths/builder.rs | 168 +- src/librustc_mir/dataflow/move_paths/mod.rs | 31 +- src/librustc_mir/diagnostics.rs | 329 +- src/librustc_mir/hair/cx/block.rs | 59 +- src/librustc_mir/hair/cx/expr.rs | 212 +- src/librustc_mir/hair/cx/mod.rs | 101 +- src/librustc_mir/hair/mod.rs | 62 +- src/librustc_mir/lib.rs | 10 +- src/librustc_mir/shim.rs | 147 +- src/librustc_mir/transform/add_validation.rs | 4 +- src/librustc_mir/transform/check_unsafety.rs | 422 + .../transform/clean_end_regions.rs | 14 +- src/librustc_mir/transform/copy_prop.rs | 4 +- src/librustc_mir/transform/elaborate_drops.rs | 76 +- src/librustc_mir/transform/erase_regions.rs | 35 +- src/librustc_mir/transform/generator.rs | 842 + src/librustc_mir/transform/inline.rs | 97 +- src/librustc_mir/transform/mod.rs | 27 +- .../transform/{nll.rs => nll/mod.rs} | 16 +- src/librustc_mir/transform/no_landing_pads.rs | 2 + src/librustc_mir/transform/promote_consts.rs | 105 +- src/librustc_mir/transform/qualify_consts.rs | 420 +- src/librustc_mir/transform/rustc_peek.rs | 2 +- src/librustc_mir/transform/simplify.rs | 27 +- .../transform/simplify_branches.rs | 6 +- src/librustc_mir/transform/type_check.rs | 78 +- src/librustc_mir/util/borrowck_errors.rs | 172 +- src/librustc_mir/util/def_use.rs | 54 +- src/librustc_mir/util/elaborate_drops.rs | 45 +- 
src/librustc_mir/util/liveness.rs | 245 + src/librustc_mir/util/mod.rs | 1 + src/librustc_mir/util/patch.rs | 7 + src/librustc_mir/util/pretty.rs | 46 +- src/librustc_passes/ast_validation.rs | 8 - src/librustc_passes/consts.rs | 92 +- src/librustc_passes/loops.rs | 2 +- src/librustc_passes/mir_stats.rs | 23 +- src/librustc_platform_intrinsics/powerpc.rs | 110 + src/librustc_privacy/Cargo.toml | 1 + src/librustc_privacy/lib.rs | 165 +- src/librustc_resolve/build_reduced_graph.rs | 31 +- src/librustc_resolve/lib.rs | 47 +- src/librustc_resolve/macros.rs | 5 +- src/librustc_resolve/resolve_imports.rs | 11 +- src/librustc_save_analysis/dump_visitor.rs | 3 +- src/librustc_save_analysis/lib.rs | 45 +- src/librustc_save_analysis/span_utils.rs | 4 +- src/librustc_trans/Cargo.toml | 4 +- src/librustc_trans/README.md | 8 +- src/librustc_trans/abi.rs | 54 +- src/librustc_trans/adt.rs | 12 +- src/librustc_trans/assert_module_sources.rs | 72 +- src/librustc_trans/back/archive.rs | 8 +- src/librustc_trans/back/bytecode.rs | 160 + src/librustc_trans/back/command.rs | 114 + src/librustc_trans/back/link.rs | 499 +- src/librustc_trans/back/linker.rs | 63 +- src/librustc_trans/back/lto.rs | 694 +- src/librustc_trans/back/rpath.rs | 4 +- src/librustc_trans/back/symbol_export.rs | 305 +- src/librustc_trans/back/symbol_names.rs | 84 +- src/librustc_trans/back/write.rs | 754 +- src/librustc_trans/base.rs | 868 +- src/librustc_trans/builder.rs | 9 +- src/librustc_trans/cabi_powerpc64.rs | 64 +- src/librustc_trans/cabi_x86.rs | 41 +- src/librustc_trans/callee.rs | 107 +- src/librustc_trans/collector.rs | 294 +- src/librustc_trans/common.rs | 113 +- src/librustc_trans/consts.rs | 39 +- src/librustc_trans/context.rs | 148 +- src/librustc_trans/debuginfo/metadata.rs | 23 +- src/librustc_trans/debuginfo/mod.rs | 2 +- src/librustc_trans/debuginfo/type_names.rs | 7 +- src/librustc_trans/debuginfo/utils.rs | 5 +- src/librustc_trans/declare.rs | 8 +- src/librustc_trans/diagnostics.rs | 24 + src/librustc_trans/glue.rs | 63 +- src/librustc_trans/intrinsic.rs | 30 +- src/librustc_trans/lib.rs | 172 +- src/librustc_trans/machine.rs | 2 +- src/librustc_trans/meth.rs | 12 +- src/librustc_trans/mir/analyze.rs | 86 +- src/librustc_trans/mir/block.rs | 41 +- src/librustc_trans/mir/constant.rs | 107 +- src/librustc_trans/mir/lvalue.rs | 29 +- src/librustc_trans/mir/mod.rs | 26 +- src/librustc_trans/mir/rvalue.rs | 24 +- src/librustc_trans/mir/statement.rs | 16 +- src/librustc_trans/monomorphize.rs | 140 +- src/librustc_trans/partitioning.rs | 308 +- src/librustc_trans/time_graph.rs | 169 +- src/librustc_trans/trans_item.rs | 256 +- src/librustc_trans/tvec.rs | 4 +- src/librustc_trans/type_.rs | 17 +- src/librustc_trans/type_of.rs | 9 +- src/librustc_trans_utils/Cargo.toml | 8 +- src/librustc_trans_utils/lib.rs | 84 +- src/librustc_trans_utils/link.rs | 47 +- src/librustc_trans_utils/trans_crate.rs | 249 + src/librustc_typeck/README.md | 48 + src/librustc_typeck/astconv.rs | 285 +- src/librustc_typeck/check/_match.rs | 193 +- src/librustc_typeck/check/autoderef.rs | 2 +- src/librustc_typeck/check/callee.rs | 20 +- src/librustc_typeck/check/closure.rs | 26 +- src/librustc_typeck/check/coercion.rs | 10 +- src/librustc_typeck/check/compare_method.rs | 8 +- src/librustc_typeck/check/demand.rs | 57 +- src/librustc_typeck/check/dropck.rs | 12 +- .../check/generator_interior.rs | 133 + src/librustc_typeck/check/intrinsic.rs | 11 +- src/librustc_typeck/check/method/confirm.rs | 57 +- src/librustc_typeck/check/method/mod.rs | 34 +- 
src/librustc_typeck/check/method/probe.rs | 919 +- src/librustc_typeck/check/method/suggest.rs | 31 +- src/librustc_typeck/check/mod.rs | 346 +- src/librustc_typeck/check/op.rs | 2 +- src/librustc_typeck/check/regionck.rs | 45 +- src/librustc_typeck/check/upvar.rs | 57 +- src/librustc_typeck/check/wfcheck.rs | 6 +- src/librustc_typeck/check/writeback.rs | 59 +- src/librustc_typeck/check_unused.rs | 32 +- src/librustc_typeck/coherence/builtin.rs | 16 +- .../coherence/inherent_impls.rs | 39 +- .../coherence/inherent_impls_overlap.rs | 34 +- src/librustc_typeck/coherence/mod.rs | 2 +- src/librustc_typeck/collect.rs | 110 +- .../constrained_type_params.rs | 6 +- src/librustc_typeck/diagnostics.rs | 48 +- src/librustc_typeck/impl_wf_check.rs | 2 +- src/librustc_typeck/lib.rs | 1 - src/librustc_typeck/variance/constraints.rs | 39 +- src/librustc_typeck/variance/terms.rs | 14 +- src/librustdoc/Cargo.toml | 5 +- src/librustdoc/build.rs | 6 +- src/librustdoc/clean/cfg.rs | 3 +- src/librustdoc/clean/inline.rs | 59 +- src/librustdoc/clean/mod.rs | 207 +- src/librustdoc/clean/simplify.rs | 9 +- src/librustdoc/core.rs | 24 +- src/librustdoc/html/format.rs | 4 +- src/librustdoc/html/highlight.rs | 72 +- src/librustdoc/html/markdown.rs | 55 +- src/librustdoc/html/render.rs | 391 +- src/librustdoc/html/static/main.js | 18 + src/librustdoc/html/static/rustdoc.css | 89 +- src/librustdoc/html/static/styles/main.css | 34 +- src/librustdoc/lib.rs | 2 + src/librustdoc/test.rs | 45 +- src/librustdoc/visit_ast.rs | 20 +- src/librustdoc/visit_lib.rs | 7 +- src/libstd/Cargo.toml | 2 +- src/libstd/build.rs | 19 +- src/libstd/collections/hash/map.rs | 112 +- src/libstd/collections/hash/set.rs | 3 + src/libstd/collections/hash/table.rs | 4 +- src/libstd/error.rs | 23 + src/libstd/ffi/c_str.rs | 19 +- src/libstd/ffi/os_str.rs | 18 +- src/libstd/fs.rs | 76 +- src/libstd/io/impls.rs | 8 + src/libstd/io/util.rs | 13 +- src/libstd/lib.rs | 35 +- src/libstd/macros.rs | 150 +- src/libstd/net/addr.rs | 88 +- src/libstd/net/ip.rs | 90 +- src/libstd/net/tcp.rs | 61 +- src/libstd/net/udp.rs | 97 +- src/libstd/os/mod.rs | 3 +- src/libstd/os/nacl/fs.rs | 128 - src/libstd/os/nacl/raw.rs | 56 - src/libstd/os/raw.rs | 2 + src/libstd/panicking.rs | 34 - src/libstd/path.rs | 16 +- src/libstd/primitive_docs.rs | 8 + src/libstd/process.rs | 47 +- src/libstd/rt.rs | 2 - src/libstd/sync/mutex.rs | 24 +- src/libstd/sync/once.rs | 1 + src/libstd/sync/rwlock.rs | 133 +- src/libstd/sys/redox/net/netc.rs | 4 +- src/libstd/sys/redox/syscall/call.rs | 65 +- src/libstd/sys/redox/syscall/data.rs | 120 +- src/libstd/sys/redox/syscall/flag.rs | 17 + src/libstd/sys/redox/syscall/number.rs | 6 + src/libstd/sys/redox/thread.rs | 2 + src/libstd/sys/redox/time.rs | 22 +- src/libstd/sys/unix/args.rs | 1 + src/libstd/sys/unix/backtrace/mod.rs | 16 +- .../sys/unix/backtrace/printing/dladdr.rs | 11 +- src/libstd/sys/unix/backtrace/printing/mod.rs | 41 +- src/libstd/sys/unix/condvar.rs | 10 +- src/libstd/sys/unix/env.rs | 32 +- src/libstd/sys/unix/ext/fs.rs | 4 +- src/libstd/sys/unix/fd.rs | 2 + src/libstd/sys/unix/fs.rs | 8 +- src/libstd/sys/unix/l4re.rs | 445 + src/libstd/sys/unix/mod.rs | 11 +- src/libstd/sys/unix/net.rs | 79 + src/libstd/sys/unix/os.rs | 11 +- src/libstd/sys/unix/process/mod.rs | 2 +- src/libstd/sys/unix/process/process_common.rs | 1 - .../sys/unix/process/process_fuchsia.rs | 78 +- src/libstd/sys/unix/process/process_unix.rs | 32 +- .../unix/process/{magenta.rs => zircon.rs} | 197 +- src/libstd/sys/unix/rand.rs | 8 +- 
src/libstd/sys/unix/thread.rs | 11 +- src/libstd/sys/unix/time.rs | 22 +- src/libstd/sys/windows/c.rs | 22 +- src/libstd/sys/windows/fs.rs | 8 +- src/libstd/sys/windows/mod.rs | 2 + src/libstd/sys/windows/thread.rs | 15 +- src/libstd/sys/windows/time.rs | 9 +- src/libstd/sys_common/backtrace.rs | 2 - src/libstd/sys_common/gnu/libbacktrace.rs | 38 +- src/libstd/sys_common/memchr.rs | 9 +- src/libstd/sys_common/mod.rs | 6 +- src/libstd/sys_common/net.rs | 21 +- src/libstd/sys_common/poison.rs | 59 +- src/libstd/sys_common/thread.rs | 18 + src/libstd/sys_common/util.rs | 16 - src/libstd/thread/local.rs | 25 +- src/libstd/thread/mod.rs | 11 +- src/libstd/time/duration.rs | 23 + src/libstd/time/mod.rs | 22 +- src/libstd_unicode/char.rs | 2 +- src/libstd_unicode/u_str.rs | 2 +- src/libsyntax/Cargo.toml | 3 +- src/libsyntax/README.md | 7 + src/libsyntax/ast.rs | 33 +- src/libsyntax/attr.rs | 160 +- src/libsyntax/codemap.rs | 82 +- src/libsyntax/config.rs | 5 +- src/libsyntax/diagnostic_list.rs | 30 +- src/libsyntax/diagnostics/metadata.rs | 2 +- src/libsyntax/ext/base.rs | 12 +- src/libsyntax/ext/build.rs | 5 +- src/libsyntax/ext/derive.rs | 2 +- src/libsyntax/ext/expand.rs | 98 +- src/libsyntax/ext/quote.rs | 2 + src/libsyntax/ext/source_util.rs | 8 +- src/libsyntax/ext/tt/macro_parser.rs | 7 +- src/libsyntax/ext/tt/macro_rules.rs | 8 +- src/libsyntax/ext/tt/quoted.rs | 8 +- src/libsyntax/ext/tt/transcribe.rs | 6 +- src/libsyntax/feature_gate.rs | 150 +- src/libsyntax/fold.rs | 5 +- src/libsyntax/json.rs | 8 +- src/libsyntax/lib.rs | 6 +- src/libsyntax/parse/lexer/comments.rs | 2 +- src/libsyntax/parse/lexer/mod.rs | 118 +- src/libsyntax/parse/lexer/tokentrees.rs | 7 +- src/libsyntax/parse/lexer/unicode_chars.rs | 2 +- src/libsyntax/parse/mod.rs | 31 +- src/libsyntax/parse/parser.rs | 237 +- src/libsyntax/parse/token.rs | 121 +- src/libsyntax/print/pprust.rs | 214 +- src/libsyntax/std_inject.rs | 2 +- src/libsyntax/test.rs | 2 +- src/libsyntax/test_snippet.rs | 6 +- src/libsyntax/tokenstream.rs | 10 +- src/libsyntax/util/parser.rs | 121 +- src/libsyntax/visit.rs | 3 + src/libsyntax_ext/cfg.rs | 1 + src/libsyntax_ext/concat.rs | 1 + src/libsyntax_ext/concat_idents.rs | 2 +- src/libsyntax_ext/deriving/clone.rs | 2 +- src/libsyntax_ext/deriving/cmp/eq.rs | 2 +- src/libsyntax_ext/deriving/debug.rs | 2 +- src/libsyntax_ext/deriving/generic/mod.rs | 23 +- src/libsyntax_ext/deriving/mod.rs | 4 +- src/libsyntax_ext/env.rs | 1 + src/libsyntax_ext/format.rs | 51 +- src/libsyntax_ext/proc_macro_registrar.rs | 2 +- src/libsyntax_pos/hygiene.rs | 2 +- src/libsyntax_pos/lib.rs | 165 +- src/libsyntax_pos/span_encoding.rs | 143 + src/libsyntax_pos/symbol.rs | 6 + src/libtest/lib.rs | 6 +- src/rtstartup/rsbegin.rs | 2 +- src/rtstartup/rsend.rs | 2 +- src/rustc/compiler_builtins_shim/Cargo.toml | 21 +- src/rustc/compiler_builtins_shim/build.rs | 13 +- src/rustc/libc_shim/Cargo.toml | 27 +- src/rustllvm/PassWrapper.cpp | 468 + src/rustllvm/RustWrapper.cpp | 62 + src/stage0.txt | 6 +- .../drop_in_place_intrinsic.rs | 7 +- .../item-collection/generic-drop-glue.rs | 13 +- .../instantiation-through-vtable.rs | 5 +- .../item-collection/non-generic-drop-glue.rs | 5 +- .../item-collection/transitive-drop-glue.rs | 19 +- .../item-collection/tuple-drop-glue.rs | 9 +- .../codegen-units/item-collection/unsizing.rs | 9 +- .../partitioning/extern-drop-glue.rs | 1 + .../inlining-from-extern-crate.rs | 1 + .../partitioning/local-drop-glue.rs | 1 + .../local-inlining-but-not-all.rs | 54 + .../partitioning/local-inlining.rs | 1 + 
.../partitioning/local-transitive-inlining.rs | 1 + .../partitioning/vtable-through-const.rs | 1 + .../codegen/abi-main-signature-16bit-c-int.rs | 32 + .../codegen/abi-main-signature-32bit-c-int.rs | 20 + src/test/codegen/issue-34947-pow-i32.rs | 23 + src/test/codegen/remap_path_prefix/aux_mod.rs | 16 + src/test/codegen/remap_path_prefix/main.rs | 7 + .../dropck_tarena_cycle_checked.rs | 3 +- .../gated-macro-reexports.rs | 2 +- src/test/compile-fail/E0232.rs | 2 +- src/test/compile-fail/E0506.rs | 7 +- src/test/compile-fail/E0586.rs | 2 +- src/test/compile-fail/E0594.rs | 20 + src/test/compile-fail/E0637.rs | 20 + .../associated-types-subtyping-1.rs | 4 +- .../cache/project-fn-ret-contravariant.rs | 24 +- .../cache/project-fn-ret-invariant.rs | 46 +- .../borrowck/borrowck-assign-comp.rs | 15 +- .../borrowck/borrowck-assign-to-constants.rs | 7 +- .../borrowck/borrowck-closures-mut-and-imm.rs | 36 +- .../borrowck/borrowck-fn-in-const-a.rs | 7 +- ...k-imm-ref-to-mut-rec-field-issue-3162-c.rs | 7 +- .../borrowck/borrowck-lend-flow-match.rs | 7 +- .../borrowck-match-binding-is-assignment.rs | 23 +- .../borrowck/borrowck-move-from-unsafe-ptr.rs | 4 +- .../borrowck/borrowck-move-in-irrefut-pat.rs | 15 +- ...rowck-move-out-of-overloaded-auto-deref.rs | 7 +- .../borrowck-move-out-of-static-item.rs | 7 +- .../borrowck-move-out-of-struct-with-dtor.rs | 15 +- ...k-overloaded-index-and-overloaded-deref.rs | 7 +- .../borrowck-overloaded-index-ref-index.rs | 15 +- .../borrowck/borrowck-pat-reassign-binding.rs | 7 +- .../borrowck-struct-update-with-dtor.rs | 11 +- .../borrowck/borrowck-union-borrow.rs | 50 +- .../borrowck-vec-pattern-move-tail.rs | 7 +- .../move-in-static-initializer-issue-38520.rs | 11 +- .../cdylib-deps-must-be-static.rs | 2 +- .../check-static-values-constraints.rs | 26 +- .../closure-expected-type/issue-38714.rs | 26 - .../coerce-overloaded-autoderef.rs | 19 +- .../coherence-overlap-downstream-inherent.rs | 32 + .../coherence-overlap-downstream.rs | 32 + .../coherence-overlap-issue-23516-inherent.rs | 26 + .../coherence-overlap-issue-23516.rs | 7 +- .../coherence-overlap-upstream-inherent.rs | 28 + .../coherence-overlap-upstream.rs | 28 + .../const-block-non-item-statement-3.rs | 15 + .../const-block-non-item-statement.rs | 4 - src/test/compile-fail/const-call.rs | 2 - src/test/compile-fail/const-fn-error.rs | 2 +- .../compile-fail/const-fn-feature-flags.rs | 24 + .../const-len-underflow-separate-spans.rs | 2 +- src/test/compile-fail/const-size_of-cycle.rs | 3 +- .../dropck_trait_cycle_checked.rs | 2 +- src/test/compile-fail/empty-linkname.rs | 2 + .../feature-gate-clone-closures.rs | 21 + .../feature-gate-copy-closures.rs | 19 + .../compile-fail/feature-gate-doc_masked.rs | 14 + .../feature-gate-dotdoteq_in_patterns.rs | 16 + .../compile-fail/feature-gate-fn_must_use.rs | 8 +- .../compile-fail/feature-gate-generators.rs | 13 + .../feature-gate-match_beginning_vert.rs | 36 + .../feature-gate-match_default_bindings.rs | 16 + .../compile-fail/feature-gate-no-debug.rs | 2 +- .../compile-fail/feature-gate-repr-simd.rs | 4 +- src/test/compile-fail/feature-gate-repr128.rs | 17 + .../compile-fail/feature-gate-repr_align.rs | 4 +- .../feature-gate-rustc_const_unstable.rs} | 17 +- src/test/compile-fail/feature-gate-simd.rs | 3 +- .../feature-gate-static-nobundle.rs | 2 + .../feature-gate-underscore-lifetimes.rs | 20 + .../issue-43106-gating-of-builtin-attrs.rs | 6 +- .../issue-43106-gating-of-derive-2.rs | 24 +- .../issue-43106-gating-of-derive.rs | 5 +- 
...nctional-struct-update-respects-privacy.rs | 2 - src/test/compile-fail/hr-subtype.rs | 3 - .../compile-fail/hrtb-identity-fn-borrows.rs | 7 +- .../compile-fail/hygiene/assoc_item_ctxt.rs | 52 + .../compile-fail/hygiene/assoc_ty_bindings.rs | 49 + src/test/compile-fail/impossible_range.rs | 8 +- .../compile-fail/issue-17718-const-borrow.rs | 2 +- src/test/compile-fail/issue-1962.rs | 2 +- src/test/compile-fail/issue-20261.rs | 3 +- src/test/compile-fail/issue-22706.rs | 13 + src/test/compile-fail/issue-22933-2.rs | 3 - src/test/compile-fail/issue-22933-3.rs | 14 + src/test/compile-fail/issue-30355.rs | 21 + .../compile-fail/issue-31924-non-snake-ffi.rs | 18 + src/test/compile-fail/issue-33241.rs | 23 + src/test/compile-fail/issue-37887.rs | 14 + src/test/compile-fail/issue-39559.rs | 3 +- src/test/compile-fail/issue-41229-ref-str.rs | 16 + src/test/compile-fail/issue-43023.rs | 6 +- src/test/compile-fail/issue-43733-2.rs | 2 +- src/test/compile-fail/issue-43733.rs | 6 +- src/test/compile-fail/issue-44021.rs | 16 + src/test/compile-fail/issue-44373.rs | 16 + src/test/compile-fail/issue-44578.rs | 34 + .../issue-45087-unreachable-unsafe.rs | 15 + .../issue-45729-unsafe-in-generator.rs | 19 + src/test/compile-fail/issue-45801.rs | 35 + src/test/compile-fail/issue-5500-1.rs | 7 +- src/test/compile-fail/issue-7364.rs | 5 +- src/test/compile-fail/issue-7867.rs | 13 - ...time-underscore.rs => label-underscore.rs} | 6 - src/test/compile-fail/lint-ctypes.rs | 4 +- src/test/compile-fail/lint-impl-fn.rs | 5 + src/test/compile-fail/lint-output-format-2.rs | 8 +- .../compile-fail/lint-stability-deprecated.rs | 1 + .../lint-unknown-feature-default.rs | 8 +- src/test/compile-fail/lint-unknown-feature.rs | 10 +- .../compile-fail/lint-unused-mut-variables.rs | 8 + .../compile-fail/macro-local-data-key-priv.rs | 2 +- src/test/compile-fail/match-vec-mismatch.rs | 3 +- .../mut-pattern-internal-mutability.rs | 11 +- src/test/compile-fail/not-clone-closure.rs | 24 + src/test/compile-fail/not-copy-closure.rs | 24 + .../object-lifetime-default-mybox.rs | 2 +- src/test/compile-fail/pat-slice-old-style.rs | 10 +- .../compile-fail/private-inferred-type.rs | 5 +- .../compile-fail/private-type-in-interface.rs | 1 - src/test/compile-fail/range_inclusive_gate.rs | 2 +- ...ion-lifetime-bounds-on-fns-where-clause.rs | 2 +- ...ple-lifetime-bounds-on-fns-where-clause.rs | 4 +- .../regions-free-region-ordering-caller.rs | 10 +- ...egions-infer-contravariance-due-to-decl.rs | 2 +- .../regions-infer-covariance-due-to-decl.rs | 2 +- .../regions-lifetime-bounds-on-fns.rs | 2 +- .../regions-pattern-typing-issue-19997.rs | 7 +- ...ariant-use-covariant-in-second-position.rs | 2 +- ...ns-variance-contravariant-use-covariant.rs | 2 +- ...ns-variance-covariant-use-contravariant.rs | 2 +- ...ns-variance-invariant-use-contravariant.rs | 2 +- src/test/compile-fail/rmeta_lib.rs | 2 +- .../stability-attribute-sanity.rs | 7 +- src/test/compile-fail/stable-features.rs | 5 +- src/test/compile-fail/static-drop-scope.rs | 31 + .../compile-fail/static-mut-not-constant.rs | 3 - src/test/compile-fail/synthetic-param.rs | 38 + .../compile-fail/type-path-err-node-types.rs | 36 + .../underscore-lifetime-binders.rs | 39 + .../underscore-lifetime-elison-mismatch.rs | 15 + src/test/compile-fail/union/union-unsafe.rs | 4 +- .../unsafe-fn-assign-deref-ptr.rs | 3 +- src/test/compile-fail/unsafe-move-val-init.rs | 20 + .../variance-cell-is-invariant.rs | 2 +- src/test/compile-fail/weak-lang-item.rs | 2 + src/test/debuginfo/constant-debug-locs.rs | 2 +- 
src/test/incremental/cache_file_headers.rs | 1 + .../change_symbol_export_status.rs | 9 +- src/test/incremental/commandline-args.rs | 1 + src/test/incremental/dirty_clean.rs | 4 +- src/test/incremental/hashes/enum_defs.rs | 2 +- .../hashes/indexing_expressions.rs | 2 +- src/test/incremental/hashes/struct_defs.rs | 181 +- .../ich_method_call_trait_scope.rs | 5 +- src/test/incremental/issue-35593.rs | 1 + src/test/incremental/issue-38222.rs | 2 + .../incremental/remapped_paths_cc/main.rs | 1 - .../remove-private-item-cross-crate/main.rs | 1 + src/test/incremental/spike.rs | 1 + src/test/incremental/string_constant.rs | 5 +- src/test/mir-opt/end_region_1.rs | 8 +- src/test/mir-opt/end_region_2.rs | 16 +- src/test/mir-opt/end_region_3.rs | 16 +- src/test/mir-opt/end_region_4.rs | 16 +- src/test/mir-opt/end_region_5.rs | 16 +- src/test/mir-opt/end_region_6.rs | 20 +- src/test/mir-opt/end_region_7.rs | 8 +- src/test/mir-opt/end_region_8.rs | 18 +- src/test/mir-opt/end_region_9.rs | 9 +- src/test/mir-opt/end_region_cyclic.rs | 132 + .../end_region_destruction_extents_1.rs | 161 + src/test/mir-opt/issue-43457.rs | 10 +- src/test/mir-opt/validate_1.rs | 22 +- src/test/mir-opt/validate_3.rs | 14 +- src/test/mir-opt/validate_4.rs | 12 +- src/test/mir-opt/validate_5.rs | 10 +- .../parse-fail/issue-23620-invalid-escapes.rs | 9 +- src/test/parse-fail/issue-43692.rs | 15 + src/test/parse-fail/new-unicode-escapes-2.rs | 2 +- src/test/parse-fail/new-unicode-escapes-3.rs | 3 +- src/test/parse-fail/new-unicode-escapes-4.rs | 2 - src/test/parse-fail/pat-ranges-1.rs | 2 +- src/test/parse-fail/pat-ranges-2.rs | 2 +- src/test/parse-fail/pat-ranges-3.rs | 2 +- src/test/parse-fail/pat-ranges-4.rs | 3 +- src/test/parse-fail/range_inclusive.rs | 2 +- src/test/parse-fail/range_inclusive_gate.rs | 14 +- .../struct-literal-restrictions-in-lamda.rs | 29 + src/test/pretty/issue-4264.pp | 22 +- src/test/run-fail/issue-44216-add-instant.rs | 18 + .../run-fail/issue-44216-add-system-time.rs | 18 + src/test/run-fail/issue-44216-sub-instant.rs | 18 + .../run-fail/issue-44216-sub-system-time.rs | 18 + .../run-make/codegen-options-parsing/Makefile | 30 +- .../extern-fn-struct-passing-abi/test.c | 32 +- .../extern-fn-struct-passing-abi/test.rs | 27 + .../extern-fn-with-packed-struct/test.c | 5 + .../extern-fn-with-packed-struct/test.rs | 26 +- .../extra-filename-with-temp-outputs/Makefile | 2 +- src/test/run-make/issue-19371/foo.rs | 5 +- src/test/run-make/llvm-phase/test.rs | 1 + .../long-linker-command-lines/Makefile | 5 + .../run-make/long-linker-command-lines/foo.rs | 88 + src/test/run-make/min-global-align/Makefile | 22 + .../min-global-align/min_global_align.rs | 38 + src/test/run-make/sanitizer-address/Makefile | 15 +- .../run-make/sanitizer-cdylib-link/Makefile | 14 +- .../run-make/sanitizer-dylib-link/Makefile | 14 +- src/test/run-make/sepcomp-cci-copies/Makefile | 5 +- src/test/run-make/sepcomp-inlining/Makefile | 11 +- src/test/run-make/sepcomp-separate/Makefile | 2 +- .../sysroot-crates-are-unstable/Makefile | 37 +- .../sysroot-crates-are-unstable/test.py | 71 + .../target-specs/my-awesome-platform.json | 1 + .../target-specs/my-incomplete-platform.json | 1 + .../x86_64-unknown-linux-gnu.json | 1 + src/test/run-make/tools.mk | 1 + src/test/run-pass-fulldeps/compiler-calls.rs | 2 + .../pprust-expr-roundtrip.rs | 245 + .../proc-macro/attr-cfg.rs} | 38 +- .../proc-macro/attr-on-trait.rs | 29 + .../proc-macro/auxiliary/attr-cfg.rs | 32 + .../proc-macro/auxiliary/attr-on-trait.rs | 25 + 
.../proc-macro/auxiliary/derive-attr-cfg.rs | 23 + .../proc-macro/auxiliary/span-api-tests.rs | 45 + .../proc-macro/auxiliary/span-test-macros.rs | 19 + .../proc-macro/derive-attr-cfg.rs | 27 + .../proc-macro/span-api-tests.rs | 43 + .../run-pass-valgrind/cast-enum-with-dtor.rs | 2 +- ...om-type-param-via-bound-in-where-clause.rs | 2 +- src/test/run-pass/auxiliary/const_fn_lib.rs | 2 +- .../run-pass/auxiliary/issue-17718-aux.rs | 2 +- .../auxiliary/thin-lto-inlines-aux.rs | 17 + .../auxiliary/thread-local-extern-static.rs | 1 + src/test/run-pass/backtrace-debuginfo.rs | 4 +- src/test/run-pass/box-of-array-of-drop-1.rs | 2 +- src/test/run-pass/box-of-array-of-drop-2.rs | 2 +- src/test/run-pass/char_unicode.rs | 22 + src/test/run-pass/clone-closure.rs | 29 + .../closure-returning-closure.rs} | 5 +- src/test/run-pass/const-fn-feature-flags.rs | 25 + .../const-fn-stability-calls.rs | 8 +- src/test/run-pass/const-size_of-align_of.rs | 2 +- src/test/run-pass/copy-closure.rs | 28 + ...unconstrained-element-type-i32-fallback.rs | 4 +- .../generator/auxiliary/xcrate-reachable.rs | 24 + .../run-pass/generator/auxiliary/xcrate.rs | 27 + .../run-pass/generator/borrow-in-tail-expr.rs | 19 + .../run-pass/generator/conditional-drop.rs | 65 + src/test/run-pass/generator/control-flow.rs | 56 + src/test/run-pass/generator/drop-env.rs | 70 + src/test/run-pass/generator/iterator-count.rs | 48 + src/test/run-pass/generator/match-bindings.rs | 30 + src/test/run-pass/generator/panic-drops.rs | 62 + src/test/run-pass/generator/panic-safe.rs | 35 + .../run-pass/generator/resume-after-return.rs | 33 + src/test/run-pass/generator/smoke.rs | 181 + .../run-pass/generator/xcrate-reachable.rs | 21 + src/test/run-pass/generator/xcrate.rs | 37 + .../run-pass/generator/yield-in-args-rev.rs | 26 + src/test/run-pass/generator/yield-in-box.rs | 26 + src/test/run-pass/generator/yield-subtype.rs | 23 + .../hygiene/specialization.rs} | 31 +- src/test/run-pass/ifmt.rs | 12 + .../import-crate-with-invalid-spans/main.rs | 2 +- src/test/run-pass/inc-range-pat.rs | 22 + .../issue-17718-const-destructors.rs | 1 - .../issue-17718-static-unsafe-interior.rs | 3 +- src/test/run-pass/issue-17718.rs | 2 +- src/test/run-pass/issue-21486.rs | 2 +- .../issue-23338-ensure-param-drop-order.rs | 5 +- src/test/run-pass/issue-26655.rs | 4 +- src/test/run-pass/issue-27997.rs | 2 +- src/test/run-pass/issue-33185.rs | 26 + src/test/run-pass/issue-34053.rs | 22 +- src/test/run-pass/issue-35376.rs | 51 + src/test/run-pass/issue-43692.rs | 14 + src/test/run-pass/issue-44373.rs | 18 + src/test/run-pass/issue-44730.rs | 24 + src/test/run-pass/issue-45731.rs | 34 + .../{compile-fail => run-pass}/issue-9243.rs | 1 - src/test/run-pass/lto-many-codegen-units.rs | 15 + ...thod-argument-inference-associated-type.rs | 37 + .../no-trait-method-issue-40473.rs | 25 + src/test/run-pass/nested-vec-3.rs | 2 +- src/test/run-pass/num-wrapping.rs | 9 + .../run-pass/op-assign-builtins-by-ref.rs | 84 + .../run-pass/overloaded-autoderef-order.rs | 3 +- src/test/run-pass/panic-handler-chain.rs | 3 +- src/test/run-pass/panic-handler-set-twice.rs | 3 +- src/test/run-pass/range_inclusive.rs | 68 +- src/test/run-pass/range_inclusive_gate.rs | 2 +- .../rfc-2005-default-binding-mode/box.rs | 27 + .../rfc-2005-default-binding-mode/constref.rs | 51 + .../rfc-2005-default-binding-mode/enum.rs | 56 + .../rfc-2005-default-binding-mode/for.rs | 31 + .../rfc-2005-default-binding-mode/general.rs | 259 + .../rfc-2005-default-binding-mode/lit.rs | 44 + 
.../rfc-2005-default-binding-mode/range.rs | 20 + .../ref-region.rs | 27 + .../rfc-2005-default-binding-mode/slice.rs | 36 + .../rfc-2005-default-binding-mode/struct.rs | 33 + .../tuple-struct.rs | 29 + .../rfc-2005-default-binding-mode/tuple.rs | 23 + src/test/run-pass/rvalue-static-promotion.rs | 18 +- src/test/run-pass/semistatement-in-lambda.rs | 19 + src/test/run-pass/struct-order-of-eval-3.rs | 2 +- src/test/run-pass/struct-order-of-eval-4.rs | 2 +- src/test/run-pass/thin-lto-inlines.rs | 39 + src/test/run-pass/thin-lto-inlines2.rs | 38 + ...oxed-closures-infer-explicit-call-early.rs | 17 + src/test/run-pass/underscore-lifetimes.rs | 47 + src/test/run-pass/union/union-nodrop.rs | 12 +- src/test/run-pass/vector-sort-panic-safe.rs | 2 +- src/test/rustdoc/codeblock-title.rs | 31 + src/test/rustdoc/const-fn.rs | 27 + src/test/rustdoc/fn-pointer-arg-name.rs | 15 + src/test/rustdoc/inline_local/glob-private.rs | 3 + src/test/rustdoc/issue-41783.rs | 8 +- src/test/rustdoc/issue-43869.rs | 51 + src/test/rustdoc/issue-43893.rs | 24 + src/test/rustdoc/pub-method.rs | 31 + .../lint-plugin-cmdline-allow.stderr | 2 +- .../proc-macro/auxiliary/three-equals.rs | 56 + .../ui-fulldeps/proc-macro/three-equals.rs | 38 + .../proc-macro/three-equals.stderr | 48 + src/test/ui/block-result/issue-3563.stderr | 2 + .../overlapping_inherent_impls.stderr | 2 + src/test/ui/codemap_tests/tab.rs | 4 + src/test/ui/codemap_tests/tab.stderr | 17 +- src/test/ui/codemap_tests/tab_2.rs | 15 + src/test/ui/codemap_tests/tab_2.stderr | 10 + src/test/ui/codemap_tests/tab_3.rs | 19 + src/test/ui/codemap_tests/tab_3.stderr | 13 + src/test/ui/const-expr-addr-operator.rs | 20 + src/test/ui/const-expr-addr-operator.stderr | 14 + src/test/ui/deref-suggestion.rs | 34 + src/test/ui/deref-suggestion.stderr | 59 + src/test/ui/generator/borrowing.rs | 30 + src/test/ui/generator/borrowing.stderr | 29 + .../generator/no-arguments-on-generators.rs | 17 + .../no-arguments-on-generators.stderr | 8 + src/test/ui/generator/not-send-sync.rs | 31 + src/test/ui/generator/not-send-sync.stderr | 24 + .../ref-escapes-but-not-over-yield.rs | 28 + .../ref-escapes-but-not-over-yield.stderr | 12 + src/test/ui/generator/yield-in-args.rs | 20 + src/test/ui/generator/yield-in-args.stderr | 8 + src/test/ui/generator/yield-in-const.rs | 14 + src/test/ui/generator/yield-in-const.stderr | 10 + src/test/ui/generator/yield-in-function.rs | 14 + .../ui/generator/yield-in-function.stderr | 8 + src/test/ui/generator/yield-in-static.rs | 14 + src/test/ui/generator/yield-in-static.stderr | 10 + .../ui/generator/yield-while-iterating.rs | 84 + .../ui/generator/yield-while-iterating.stderr | 24 + .../generator/yield-while-local-borrowed.rs | 56 + .../yield-while-local-borrowed.stderr | 10 + .../generator/yield-while-ref-reborrowed.rs | 49 + .../yield-while-ref-reborrowed.stderr | 16 + .../no-method-suggested-traits.stderr | 4 + src/test/ui/issue-36400.rs | 16 + src/test/ui/issue-36400.stderr | 10 + src/test/ui/issue-38875/issue_38875.stderr | 2 +- ...sue-45107-unnecessary-unsafe-in-closure.rs | 35 + ...45107-unnecessary-unsafe-in-closure.stderr | 72 + ...ne-existing-name-if-else-using-impl.stderr | 28 +- ...e-existing-name-return-type-is-anon.stderr | 28 +- ...turn-one-existing-name-self-is-anon.stderr | 28 +- .../ex3-both-anon-regions-4.stderr | 18 + ...oth-anon-regions-both-are-structs-3.stderr | 2 +- ...x3-both-anon-regions-both-are-structs-4.rs | 19 + ...oth-anon-regions-both-are-structs-4.stderr | 12 + ...ons-both-are-structs-earlybound-regions.rs | 21 + 
...both-are-structs-earlybound-regions.stderr | 11 + ...ions-both-are-structs-latebound-regions.rs | 18 + ...-both-are-structs-latebound-regions.stderr | 10 + ...x3-both-anon-regions-earlybound-regions.rs | 21 + ...oth-anon-regions-earlybound-regions.stderr | 11 + ...ex3-both-anon-regions-latebound-regions.rs | 15 + ...both-anon-regions-latebound-regions.stderr | 10 + .../ex3-both-anon-regions-one-is-struct-4.rs | 17 + ...3-both-anon-regions-one-is-struct-4.stderr | 10 + ...th-anon-regions-return-type-is-anon.stderr | 23 +- .../ex3-both-anon-regions-self-is-anon.stderr | 23 +- .../ex3-both-anon-regions-using-fn-items.rs | 14 + ...x3-both-anon-regions-using-fn-items.stderr | 10 + ...3-both-anon-regions-using-trait-objects.rs | 14 + ...th-anon-regions-using-trait-objects.stderr | 10 + .../lifetime-doesnt-live-long-enough.rs} | 22 +- .../lifetime-doesnt-live-long-enough.stderr | 90 + src/test/ui/lint/fn_must_use.stderr | 18 - src/test/ui/lint/not_found.rs | 21 + src/test/ui/lint/not_found.stderr | 20 + src/test/ui/lint/suggestions.rs | 20 + src/test/ui/lint/suggestions.stderr | 45 + .../ui/lint/unused_parens_json_suggestion.rs | 25 + .../lint/unused_parens_json_suggestion.stderr | 1 + .../ui/macros/assert_eq_trailing_comma.stderr | 2 + .../ui/macros/assert_ne_trailing_comma.stderr | 2 + src/test/ui/macros/trace_faulty_macros.rs | 55 + src/test/ui/macros/trace_faulty_macros.stderr | 47 + src/test/ui/mismatched_types/E0281.stderr | 13 - src/test/ui/mismatched_types/E0631.rs | 21 + src/test/ui/mismatched_types/E0631.stderr | 44 + .../ui/mismatched_types/closure-arg-count.rs | 4 + .../mismatched_types/closure-arg-count.stderr | 28 +- .../closure-arg-type-mismatch.rs | 21 + .../closure-arg-type-mismatch.stderr | 45 + .../mismatched_types/closure-mismatch.stderr | 8 +- .../ui/mismatched_types/fn-variance-1.stderr | 14 +- .../ui/mismatched_types/issue-36053-2.stderr | 9 +- src/test/ui/mismatched_types/issue-38371.rs | 2 + .../ui/mismatched_types/issue-38371.stderr | 13 +- .../unboxed-closures-vtable-mismatch.stderr | 9 +- .../ui/on-unimplemented/bad-annotation.rs | 24 + .../ui/on-unimplemented/bad-annotation.stderr | 60 +- src/test/ui/path-lookahead.stderr | 14 +- .../regions-fn-subtyping-return-static.stderr | 2 - .../ui/resolve/token-error-correct.stderr | 4 +- .../ui/rfc-2005-default-binding-mode/const.rs | 29 + .../const.stderr | 11 + .../ui/rfc-2005-default-binding-mode/enum.rs | 34 + .../rfc-2005-default-binding-mode/enum.stderr | 26 + .../explicit-mut.rs | 40 + .../explicit-mut.stderr | 26 + .../ui/rfc-2005-default-binding-mode/for.rs | 20 + .../rfc-2005-default-binding-mode/for.stderr | 10 + .../issue-44912-or.rs | 21 + .../issue-44912-or.stderr | 8 + .../ui/rfc-2005-default-binding-mode/lit.rs | 36 + .../rfc-2005-default-binding-mode/lit.stderr | 20 + .../no-double-error.rs | 21 + .../no-double-error.stderr | 8 + .../ui/rfc-2005-default-binding-mode/slice.rs | 20 + .../slice.stderr | 8 + .../fn_must_use.rs | 76 + .../fn_must_use.stderr | 48 + src/test/ui/span/E0493.rs | 2 +- src/test/ui/span/E0493.stderr | 9 +- src/test/ui/span/dropck_arr_cycle_checked.rs | 2 +- src/test/ui/span/dropck_vec_cycle_checked.rs | 2 +- src/test/ui/span/gated-features-attr-spans.rs | 39 + .../ui/span/gated-features-attr-spans.stderr | 34 + src/test/ui/span/issue-24690.stderr | 2 +- .../ui/span/issue-43927-non-ADT-derive.rs | 16 + .../ui/span/issue-43927-non-ADT-derive.stderr | 8 + src/test/ui/span/missing-unit-argument.rs | 19 + src/test/ui/span/missing-unit-argument.stderr | 45 + 
.../span/unused-warning-point-at-signature.rs | 40 + .../unused-warning-point-at-signature.stderr | 36 + .../vec-must-not-hide-type-from-dropck.rs | 2 +- src/test/ui/str-lit-type-mismatch.rs | 16 + src/test/ui/str-lit-type-mismatch.stderr | 32 + .../issue-43420-no-over-suggest.rs | 19 + .../issue-43420-no-over-suggest.stderr | 11 + src/test/ui/suggestions/suggest-methods.rs | 40 + .../ui/suggestions/suggest-methods.stderr | 32 + src/test/ui/suggestions/try-on-option.rs | 25 + src/test/ui/suggestions/try-on-option.stderr | 22 + .../ui/suggestions/try-operator-on-main.rs | 20 + .../suggestions/try-operator-on-main.stderr | 42 +- src/test/ui/type-check/issue-41314.rs | 19 + src/test/ui/type-check/issue-41314.stderr | 16 + src/tools/build-manifest/src/main.rs | 100 +- src/tools/cargotest/main.rs | 2 +- src/tools/clippy/.editorconfig | 16 + src/tools/clippy/.github/ISSUE_TEMPLATE.md | 8 + src/tools/clippy/.github/deploy.sh | 83 + src/tools/clippy/.github/deploy_key.enc | Bin 0 -> 1680 bytes src/tools/clippy/.remarkrc.json | 12 + src/tools/clippy/.travis.yml | 61 + src/tools/clippy/CHANGELOG.md | 640 + src/tools/clippy/CONTRIBUTING.md | 104 + src/tools/clippy/Cargo.toml | 50 + src/tools/clippy/LICENSE | 373 + src/tools/clippy/PUBLISH.md | 14 + src/tools/clippy/README.md | 186 + src/tools/clippy/appveyor.yml | 35 + src/tools/clippy/clippy_lints/Cargo.toml | 33 + src/tools/clippy/clippy_lints/README.md | 3 + .../clippy/clippy_lints/src/approx_const.rs | 121 + .../clippy/clippy_lints/src/arithmetic.rs | 90 + .../clippy/clippy_lints/src/array_indexing.rs | 156 + .../clippy/clippy_lints/src/assign_ops.rs | 216 + src/tools/clippy/clippy_lints/src/attrs.rs | 273 + src/tools/clippy/clippy_lints/src/bit_mask.rs | 338 + .../clippy_lints/src/blacklisted_name.rs | 53 + .../clippy_lints/src/block_in_if_condition.rs | 132 + src/tools/clippy/clippy_lints/src/booleans.rs | 427 + .../clippy/clippy_lints/src/bytecount.rs | 121 + .../clippy/clippy_lints/src/collapsible_if.rs | 157 + src/tools/clippy/clippy_lints/src/consts.rs | 380 + src/tools/clippy/clippy_lints/src/copies.rs | 352 + .../clippy_lints/src/cyclomatic_complexity.rs | 224 + .../clippy_lints/src/deprecated_lints.rs | 65 + src/tools/clippy/clippy_lints/src/derive.rs | 171 + src/tools/clippy/clippy_lints/src/doc.rs | 283 + .../clippy/clippy_lints/src/double_parens.rs | 62 + .../clippy_lints/src/drop_forget_ref.rs | 164 + .../clippy/clippy_lints/src/empty_enum.rs | 49 + src/tools/clippy/clippy_lints/src/entry.rs | 159 + .../clippy/clippy_lints/src/enum_clike.rs | 73 + .../clippy/clippy_lints/src/enum_glob_use.rs | 63 + .../clippy/clippy_lints/src/enum_variants.rs | 282 + src/tools/clippy/clippy_lints/src/eq_op.rs | 163 + src/tools/clippy/clippy_lints/src/escape.rs | 173 + .../clippy/clippy_lints/src/eta_reduction.rs | 102 + .../clippy_lints/src/eval_order_dependence.rs | 359 + src/tools/clippy/clippy_lints/src/format.rs | 141 + .../clippy/clippy_lints/src/formatting.rs | 222 + .../clippy/clippy_lints/src/functions.rs | 228 + .../clippy/clippy_lints/src/identity_op.rs | 87 + .../src/if_let_redundant_pattern_matching.rs | 83 + .../clippy/clippy_lints/src/if_not_else.rs | 76 + .../clippy/clippy_lints/src/infinite_iter.rs | 243 + .../clippy/clippy_lints/src/is_unit_expr.rs | 149 + .../src/items_after_statements.rs | 74 + .../clippy_lints/src/large_enum_variant.rs | 131 + src/tools/clippy/clippy_lints/src/len_zero.rs | 263 + .../clippy/clippy_lints/src/let_if_seq.rs | 194 + src/tools/clippy/clippy_lints/src/lib.rs | 556 + 
.../clippy/clippy_lints/src/lifetimes.rs | 444 + .../src/literal_digit_grouping.rs | 351 + src/tools/clippy/clippy_lints/src/loops.rs | 1525 ++ .../clippy/clippy_lints/src/map_clone.rs | 143 + src/tools/clippy/clippy_lints/src/matches.rs | 608 + .../clippy/clippy_lints/src/mem_forget.rs | 50 + src/tools/clippy/clippy_lints/src/methods.rs | 1548 ++ src/tools/clippy/clippy_lints/src/minmax.rs | 98 + src/tools/clippy/clippy_lints/src/misc.rs | 604 + .../clippy/clippy_lints/src/misc_early.rs | 423 + .../clippy/clippy_lints/src/missing_doc.rs | 193 + src/tools/clippy/clippy_lints/src/mut_mut.rs | 107 + .../clippy/clippy_lints/src/mut_reference.rs | 83 + .../clippy/clippy_lints/src/mutex_atomic.rs | 90 + .../clippy/clippy_lints/src/needless_bool.rs | 222 + .../clippy_lints/src/needless_borrow.rs | 76 + .../clippy_lints/src/needless_borrowed_ref.rs | 82 + .../clippy_lints/src/needless_continue.rs | 427 + .../src/needless_pass_by_value.rs | 309 + .../clippy_lints/src/needless_update.rs | 49 + .../clippy/clippy_lints/src/neg_multiply.rs | 60 + .../clippy_lints/src/new_without_default.rs | 173 + .../clippy/clippy_lints/src/no_effect.rs | 196 + .../clippy_lints/src/non_expressive_names.rs | 325 + .../clippy/clippy_lints/src/ok_if_let.rs | 63 + .../clippy/clippy_lints/src/open_options.rs | 196 + .../src/overflow_check_conditional.rs | 82 + src/tools/clippy/clippy_lints/src/panic.rs | 55 + .../clippy_lints/src/partialeq_ne_impl.rs | 56 + .../clippy/clippy_lints/src/precedence.rs | 134 + src/tools/clippy/clippy_lints/src/print.rs | 153 + src/tools/clippy/clippy_lints/src/ptr.rs | 198 + src/tools/clippy/clippy_lints/src/ranges.rs | 104 + .../clippy/clippy_lints/src/reference.rs | 54 + src/tools/clippy/clippy_lints/src/regex.rs | 252 + src/tools/clippy/clippy_lints/src/returns.rs | 156 + .../clippy/clippy_lints/src/serde_api.rs | 59 + src/tools/clippy/clippy_lints/src/shadow.rs | 392 + .../clippy_lints/src/should_assert_eq.rs | 61 + src/tools/clippy/clippy_lints/src/strings.rs | 170 + src/tools/clippy/clippy_lints/src/swap.rs | 178 + .../clippy_lints/src/temporary_assignment.rs | 54 + .../clippy/clippy_lints/src/transmute.rs | 219 + src/tools/clippy/clippy_lints/src/types.rs | 1465 ++ src/tools/clippy/clippy_lints/src/unicode.rs | 135 + .../src/unsafe_removed_from_name.rs | 80 + .../clippy_lints/src/unused_io_amount.rs | 94 + .../clippy/clippy_lints/src/unused_label.rs | 90 + src/tools/clippy/clippy_lints/src/use_self.rs | 94 + .../clippy/clippy_lints/src/utils/author.rs | 486 + .../clippy_lints/src/utils/comparisons.rs | 32 + .../clippy/clippy_lints/src/utils/conf.rs | 263 + .../clippy_lints/src/utils/constants.rs | 26 + .../clippy/clippy_lints/src/utils/higher.rs | 209 + .../clippy_lints/src/utils/hir_utils.rs | 580 + .../clippy_lints/src/utils/inspector.rs | 522 + .../clippy_lints/src/utils/internal_lints.rs | 203 + .../clippy/clippy_lints/src/utils/mod.rs | 1042 ++ .../clippy/clippy_lints/src/utils/paths.rs | 83 + .../clippy/clippy_lints/src/utils/sugg.rs | 470 + src/tools/clippy/clippy_lints/src/vec.rs | 103 + .../clippy/clippy_lints/src/zero_div_zero.rs | 57 + .../clippy/clippy_workspace_tests/Cargo.toml | 6 + .../clippy/clippy_workspace_tests/src/main.rs | 2 + .../subcrate/Cargo.toml | 3 + .../subcrate/src/lib.rs} | 0 src/tools/clippy/mini-macro/Cargo.toml | 19 + src/tools/clippy/mini-macro/src/lib.rs | 22 + src/tools/clippy/pre_publish.sh | 23 + src/tools/clippy/publish.files | 3 + src/tools/clippy/rls.toml | 1 + src/tools/clippy/rustfmt.toml | 8 + src/tools/clippy/src/lib.rs | 33 + 
src/tools/clippy/src/main.rs | 391 + .../conf_french_blacklisted_name.toml | 1 + .../tests/auxiliary/conf_unknown_key.toml | 6 + .../tests/auxiliary/conf_whitelisted.toml | 3 + src/tools/clippy/tests/camel_case.rs | 50 + src/tools/clippy/tests/cc_seme.rs | 28 + src/tools/clippy/tests/compile-test.rs | 34 + src/tools/clippy/tests/dogfood.rs | 51 + src/tools/clippy/tests/ice_exacte_size.rs | 17 + src/tools/clippy/tests/issue-825.rs | 17 + src/tools/clippy/tests/matches.rs | 46 + .../clippy/tests/needless_continue_helpers.rs | 88 + .../tests/run-pass/associated-constant-ice.rs | 14 + .../clippy/tests/run-pass/conf_whitelisted.rs | 4 + .../tests/run-pass/enum-glob-import-crate.rs | 8 + src/tools/clippy/tests/run-pass/ice-1588.rs | 13 + src/tools/clippy/tests/run-pass/ice-1969.rs | 13 + src/tools/clippy/tests/run-pass/ice-700.rs | 9 + .../clippy/tests/run-pass/mut_mut_macro.rs | 31 + .../run-pass/needless_lifetimes_impl_trait.rs | 23 + .../clippy/tests/run-pass/procedural_macro.rs | 7 + .../clippy/tests/run-pass/regressions.rs | 9 + src/tools/clippy/tests/run-pass/returns.rs | 19 + .../tests/run-pass/single-match-else.rs | 11 + src/tools/clippy/tests/trim_multiline.rs | 55 + .../tests/ui-posix/conf_non_existant.rs | 6 + .../tests/ui-posix/conf_non_existant.stderr | 4 + .../tests/ui-posix/update-all-references.sh | 28 + .../tests/ui-posix/update-references.sh | 50 + .../tests/ui-windows/conf_non_existant.rs | 6 + .../tests/ui-windows/conf_non_existant.stderr | 4 + .../tests/ui-windows/update-all-references.sh | 28 + .../tests/ui-windows/update-references.sh | 50 + .../tests/ui/absurd-extreme-comparisons.rs | 52 + .../ui/absurd-extreme-comparisons.stderr | 147 + src/tools/clippy/tests/ui/approx_const.rs | 57 + src/tools/clippy/tests/ui/approx_const.stderr | 118 + src/tools/clippy/tests/ui/arithmetic.rs | 30 + src/tools/clippy/tests/ui/arithmetic.stderr | 73 + src/tools/clippy/tests/ui/array_indexing.rs | 45 + .../clippy/tests/ui/array_indexing.stderr | 120 + src/tools/clippy/tests/ui/assign_ops.rs | 41 + src/tools/clippy/tests/ui/assign_ops.stderr | 138 + src/tools/clippy/tests/ui/assign_ops2.rs | 41 + src/tools/clippy/tests/ui/assign_ops2.stderr | 52 + src/tools/clippy/tests/ui/attrs.rs | 41 + src/tools/clippy/tests/ui/attrs.stderr | 24 + src/tools/clippy/tests/ui/bit_masks.rs | 61 + src/tools/clippy/tests/ui/bit_masks.stderr | 110 + src/tools/clippy/tests/ui/blacklisted_name.rs | 36 + .../clippy/tests/ui/blacklisted_name.stderr | 88 + .../clippy/tests/ui/block_in_if_condition.rs | 90 + .../tests/ui/block_in_if_condition.stderr | 54 + src/tools/clippy/tests/ui/bool_comparison.rs | 11 + .../clippy/tests/ui/bool_comparison.stderr | 28 + src/tools/clippy/tests/ui/booleans.rs | 40 + src/tools/clippy/tests/ui/booleans.stderr | 146 + src/tools/clippy/tests/ui/borrow_box.rs | 84 + src/tools/clippy/tests/ui/borrow_box.stderr | 32 + src/tools/clippy/tests/ui/box_vec.rs | 34 + src/tools/clippy/tests/ui/box_vec.stderr | 11 + .../clippy/tests/ui/builtin-type-shadow.rs | 11 + .../tests/ui/builtin-type-shadow.stderr | 21 + src/tools/clippy/tests/ui/bytecount.rs | 27 + src/tools/clippy/tests/ui/bytecount.stderr | 26 + src/tools/clippy/tests/ui/cast.rs | 93 + src/tools/clippy/tests/ui/cast.stderr | 458 + src/tools/clippy/tests/ui/char_lit_as_u8.rs | 8 + .../clippy/tests/ui/char_lit_as_u8.stderr | 12 + src/tools/clippy/tests/ui/cmp_nan.rs | 22 + src/tools/clippy/tests/ui/cmp_nan.stderr | 76 + src/tools/clippy/tests/ui/cmp_null.rs | 19 + src/tools/clippy/tests/ui/cmp_null.stderr | 16 + 
src/tools/clippy/tests/ui/cmp_owned.rs | 55 + src/tools/clippy/tests/ui/cmp_owned.stderr | 40 + src/tools/clippy/tests/ui/collapsible_if.rs | 144 + .../clippy/tests/ui/collapsible_if.stderr | 256 + src/tools/clippy/tests/ui/complex_types.rs | 44 + .../clippy/tests/ui/complex_types.stderr | 94 + src/tools/clippy/tests/ui/conf_bad_arg.rs | 6 + src/tools/clippy/tests/ui/conf_bad_arg.stderr | 14 + src/tools/clippy/tests/ui/conf_bad_toml.rs | 6 + .../clippy/tests/ui/conf_bad_toml.stderr | 4 + src/tools/clippy/tests/ui/conf_bad_toml.toml | 2 + src/tools/clippy/tests/ui/conf_bad_type.rs | 6 + .../clippy/tests/ui/conf_bad_type.stderr | 4 + src/tools/clippy/tests/ui/conf_bad_type.toml | 1 + .../tests/ui/conf_french_blacklisted_name.rs | 23 + .../ui/conf_french_blacklisted_name.stderr | 46 + .../clippy/tests/ui/conf_path_non_string.rs | 5 + .../tests/ui/conf_path_non_string.stderr | 14 + src/tools/clippy/tests/ui/conf_unknown_key.rs | 6 + .../clippy/tests/ui/conf_unknown_key.stderr | 4 + src/tools/clippy/tests/ui/copies.rs | 399 + src/tools/clippy/tests/ui/copies.stderr | 37 + .../clippy/tests/ui/cyclomatic_complexity.rs | 373 + .../tests/ui/cyclomatic_complexity.stderr | 273 + .../ui/cyclomatic_complexity_attr_used.rs | 17 + .../ui/cyclomatic_complexity_attr_used.stderr | 17 + src/tools/clippy/tests/ui/deprecated.rs | 12 + src/tools/clippy/tests/ui/deprecated.stderr | 28 + src/tools/clippy/tests/ui/derive.rs | 99 + src/tools/clippy/tests/ui/derive.stderr | 78 + .../tests/ui/diverging_sub_expression.rs | 40 + .../tests/ui/diverging_sub_expression.stderr | 40 + src/tools/clippy/tests/ui/dlist.rs | 43 + src/tools/clippy/tests/ui/dlist.stderr | 51 + src/tools/clippy/tests/ui/doc.rs | 161 + src/tools/clippy/tests/ui/doc.stderr | 160 + src/tools/clippy/tests/ui/double_neg.rs | 10 + src/tools/clippy/tests/ui/double_neg.stderr | 10 + src/tools/clippy/tests/ui/double_parens.rs | 51 + .../clippy/tests/ui/double_parens.stderr | 34 + src/tools/clippy/tests/ui/drop_forget_copy.rs | 62 + .../clippy/tests/ui/drop_forget_copy.stderr | 76 + src/tools/clippy/tests/ui/drop_forget_ref.rs | 60 + .../clippy/tests/ui/drop_forget_ref.stderr | 220 + .../tests/ui/duplicate_underscore_argument.rs | 13 + .../ui/duplicate_underscore_argument.stderr | 10 + src/tools/clippy/tests/ui/empty_enum.rs | 10 + src/tools/clippy/tests/ui/empty_enum.stderr | 15 + src/tools/clippy/tests/ui/entry.rs | 45 + src/tools/clippy/tests/ui/entry.stderr | 46 + src/tools/clippy/tests/ui/enum_glob_use.rs | 26 + .../clippy/tests/ui/enum_glob_use.stderr | 16 + src/tools/clippy/tests/ui/enum_variants.rs | 105 + .../clippy/tests/ui/enum_variants.stderr | 101 + src/tools/clippy/tests/ui/enums_clike.rs | 54 + src/tools/clippy/tests/ui/enums_clike.stderr | 52 + src/tools/clippy/tests/ui/eq_op.rs | 90 + src/tools/clippy/tests/ui/eq_op.stderr | 208 + src/tools/clippy/tests/ui/escape_analysis.rs | 131 + .../clippy/tests/ui/escape_analysis.stderr} | 0 src/tools/clippy/tests/ui/eta.rs | 51 + src/tools/clippy/tests/ui/eta.stderr | 36 + .../clippy/tests/ui/eval_order_dependence.rs | 48 + .../tests/ui/eval_order_dependence.stderr | 51 + src/tools/clippy/tests/ui/filter_methods.rs | 27 + .../clippy/tests/ui/filter_methods.stderr | 40 + src/tools/clippy/tests/ui/float_cmp.rs | 69 + src/tools/clippy/tests/ui/float_cmp.stderr | 99 + src/tools/clippy/tests/ui/for_loop.rs | 386 + src/tools/clippy/tests/ui/for_loop.stderr | 502 + src/tools/clippy/tests/ui/format.rs | 34 + src/tools/clippy/tests/ui/format.stderr | 22 + src/tools/clippy/tests/ui/formatting.rs | 105 + 
src/tools/clippy/tests/ui/formatting.stderr | 90 + src/tools/clippy/tests/ui/functions.rs | 78 + src/tools/clippy/tests/ui/functions.stderr | 79 + src/tools/clippy/tests/ui/ices.rs | 5 + src/tools/clippy/tests/ui/ices.stderr | 8 + src/tools/clippy/tests/ui/identity_op.rs | 30 + src/tools/clippy/tests/ui/identity_op.stderr | 46 + .../ui/if_let_redundant_pattern_matching.rs | 35 + .../if_let_redundant_pattern_matching.stderr | 28 + src/tools/clippy/tests/ui/if_not_else.rs | 19 + src/tools/clippy/tests/ui/if_not_else.stderr | 27 + .../tests/ui/inconsistent_digit_grouping.rs | 8 + .../ui/inconsistent_digit_grouping.stderr | 43 + src/tools/clippy/tests/ui/infinite_iter.rs | 40 + .../clippy/tests/ui/infinite_iter.stderr | 100 + .../tests/ui/invalid_upcast_comparisons.rs | 81 + .../ui/invalid_upcast_comparisons.stderr | 166 + src/tools/clippy/tests/ui/is_unit_expr.rs | 48 + src/tools/clippy/tests/ui/is_unit_expr.stderr | 55 + .../clippy/tests/ui/item_after_statement.rs | 30 + .../tests/ui/item_after_statement.stderr | 16 + .../clippy/tests/ui/large_digit_groups.rs | 8 + .../clippy/tests/ui/large_digit_groups.stderr | 51 + .../clippy/tests/ui/large_enum_variant.rs | 57 + .../clippy/tests/ui/large_enum_variant.stderr | 72 + src/tools/clippy/tests/ui/len_zero.rs | 184 + src/tools/clippy/tests/ui/len_zero.stderr | 90 + src/tools/clippy/tests/ui/let_if_seq.rs | 106 + src/tools/clippy/tests/ui/let_if_seq.stderr | 50 + src/tools/clippy/tests/ui/let_return.rs | 46 + src/tools/clippy/tests/ui/let_return.stderr | 27 + src/tools/clippy/tests/ui/let_unit.rs | 49 + src/tools/clippy/tests/ui/let_unit.stderr | 16 + src/tools/clippy/tests/ui/lifetimes.rs | 148 + src/tools/clippy/tests/ui/lifetimes.stderr | 90 + src/tools/clippy/tests/ui/lint_pass.rs | 24 + src/tools/clippy/tests/ui/lint_pass.stderr | 10 + src/tools/clippy/tests/ui/literals.rs | 34 + src/tools/clippy/tests/ui/literals.stderr | 91 + src/tools/clippy/tests/ui/map_clone.rs | 105 + src/tools/clippy/tests/ui/map_clone.stderr | 102 + src/tools/clippy/tests/ui/matches.rs | 283 + src/tools/clippy/tests/ui/matches.stderr | 269 + src/tools/clippy/tests/ui/mem_forget.rs | 27 + src/tools/clippy/tests/ui/mem_forget.stderr | 22 + src/tools/clippy/tests/ui/methods.rs | 525 + src/tools/clippy/tests/ui/methods.stderr | 716 + src/tools/clippy/tests/ui/min_max.rs | 33 + src/tools/clippy/tests/ui/min_max.stderr | 46 + src/tools/clippy/tests/ui/missing-doc.rs | 202 + src/tools/clippy/tests/ui/missing-doc.stderr | 274 + src/tools/clippy/tests/ui/module_inception.rs | 24 + .../clippy/tests/ui/module_inception.stderr | 20 + src/tools/clippy/tests/ui/modulo_one.rs | 9 + src/tools/clippy/tests/ui/modulo_one.stderr | 10 + src/tools/clippy/tests/ui/mut_from_ref.rs | 48 + src/tools/clippy/tests/ui/mut_from_ref.stderr | 63 + src/tools/clippy/tests/ui/mut_mut.rs | 53 + src/tools/clippy/tests/ui/mut_mut.stderr | 85 + src/tools/clippy/tests/ui/mut_reference.rs | 49 + .../clippy/tests/ui/mut_reference.stderr | 22 + src/tools/clippy/tests/ui/mutex_atomic.rs | 18 + src/tools/clippy/tests/ui/mutex_atomic.stderr | 48 + src/tools/clippy/tests/ui/needless_bool.rs | 51 + .../clippy/tests/ui/needless_bool.stderr | 70 + src/tools/clippy/tests/ui/needless_borrow.rs | 53 + .../clippy/tests/ui/needless_borrow.stderr | 42 + .../clippy/tests/ui/needless_borrowed_ref.rs | 48 + .../tests/ui/needless_borrowed_ref.stderr | 28 + .../clippy/tests/ui/needless_continue.rs | 50 + .../clippy/tests/ui/needless_continue.stderr | 59 + .../clippy/tests/ui/needless_pass_by_value.rs | 62 + 
.../tests/ui/needless_pass_by_value.stderr | 60 + .../ui/needless_pass_by_value_proc_macro.rs | 11 + src/tools/clippy/tests/ui/needless_return.rs | 48 + .../clippy/tests/ui/needless_return.stderr | 52 + src/tools/clippy/tests/ui/needless_update.rs | 17 + .../clippy/tests/ui/needless_update.stderr | 10 + src/tools/clippy/tests/ui/neg_multiply.rs | 38 + src/tools/clippy/tests/ui/neg_multiply.stderr | 16 + src/tools/clippy/tests/ui/never_loop.rs | 128 + src/tools/clippy/tests/ui/never_loop.stderr | 84 + .../clippy/tests/ui/new_without_default.rs | 86 + .../tests/ui/new_without_default.stderr | 42 + src/tools/clippy/tests/ui/no_effect.rs | 84 + src/tools/clippy/tests/ui/no_effect.stderr | 270 + .../clippy/tests/ui/non_expressive_names.rs | 136 + .../tests/ui/non_expressive_names.stderr | 133 + src/tools/clippy/tests/ui/ok_if_let.rs | 26 + src/tools/clippy/tests/ui/ok_if_let.stderr | 15 + src/tools/clippy/tests/ui/op_ref.rs | 24 + src/tools/clippy/tests/ui/op_ref.stderr | 14 + src/tools/clippy/tests/ui/open_options.rs | 16 + src/tools/clippy/tests/ui/open_options.stderr | 46 + .../tests/ui/overflow_check_conditional.rs | 61 + .../ui/overflow_check_conditional.stderr | 52 + src/tools/clippy/tests/ui/panic.rs | 51 + src/tools/clippy/tests/ui/panic.stderr | 22 + .../clippy/tests/ui/partialeq_ne_impl.rs | 13 + .../clippy/tests/ui/partialeq_ne_impl.stderr | 10 + src/tools/clippy/tests/ui/patterns.rs | 16 + src/tools/clippy/tests/ui/patterns.stderr | 10 + src/tools/clippy/tests/ui/precedence.rs | 25 + src/tools/clippy/tests/ui/precedence.stderr | 58 + src/tools/clippy/tests/ui/print.rs | 36 + src/tools/clippy/tests/ui/print.stderr | 54 + .../clippy/tests/ui/print_with_newline.rs | 20 + .../clippy/tests/ui/print_with_newline.stderr | 28 + src/tools/clippy/tests/ui/ptr_arg.rs | 38 + src/tools/clippy/tests/ui/ptr_arg.stderr | 22 + src/tools/clippy/tests/ui/range.rs | 34 + src/tools/clippy/tests/ui/range.stderr | 42 + .../clippy/tests/ui/redundant_closure_call.rs | 21 + .../tests/ui/redundant_closure_call.stderr | 34 + src/tools/clippy/tests/ui/reference.rs | 55 + src/tools/clippy/tests/ui/reference.stderr | 70 + src/tools/clippy/tests/ui/regex.rs | 87 + src/tools/clippy/tests/ui/regex.stderr | 153 + src/tools/clippy/tests/ui/serde.rs | 47 + src/tools/clippy/tests/ui/serde.stderr | 14 + src/tools/clippy/tests/ui/shadow.rs | 38 + src/tools/clippy/tests/ui/shadow.stderr | 138 + .../tests/ui/short_circuit_statement.rs | 18 + .../tests/ui/short_circuit_statement.stderr | 22 + src/tools/clippy/tests/ui/should_assert_eq.rs | 32 + .../clippy/tests/ui/should_assert_eq.stderr | 59 + src/tools/clippy/tests/ui/strings.rs | 67 + src/tools/clippy/tests/ui/strings.stderr | 76 + src/tools/clippy/tests/ui/stutter.rs | 14 + src/tools/clippy/tests/ui/stutter.stderr | 28 + src/tools/clippy/tests/ui/swap.rs | 59 + src/tools/clippy/tests/ui/swap.stderr | 69 + .../clippy/tests/ui/temporary_assignment.rs | 36 + .../tests/ui/temporary_assignment.stderr | 16 + src/tools/clippy/tests/ui/toplevel_ref_arg.rs | 28 + .../clippy/tests/ui/toplevel_ref_arg.stderr | 34 + src/tools/clippy/tests/ui/trailing_zeros.rs | 10 + .../clippy/tests/ui/trailing_zeros.stderr | 16 + .../clippy/tests/ui/trailing_zeros.stdout | 14 + src/tools/clippy/tests/ui/transmute.rs | 121 + src/tools/clippy/tests/ui/transmute.stderr | 158 + src/tools/clippy/tests/ui/transmute_32bit.rs | 16 + src/tools/clippy/tests/ui/transmute_64bit.rs | 13 + .../clippy/tests/ui/transmute_64bit.stderr | 16 + src/tools/clippy/tests/ui/unicode.rs | 26 + 
src/tools/clippy/tests/ui/unicode.stderr | 32 + src/tools/clippy/tests/ui/unit_cmp.rs | 21 + src/tools/clippy/tests/ui/unit_cmp.stderr | 16 + .../clippy/tests/ui/unneeded_field_pattern.rs | 26 + .../tests/ui/unneeded_field_pattern.stderr | 19 + .../clippy/tests/ui/unreadable_literal.rs | 8 + .../clippy/tests/ui/unreadable_literal.stderr | 35 + .../tests/ui/unsafe_removed_from_name.rs | 29 + .../tests/ui/unsafe_removed_from_name.stderr | 22 + src/tools/clippy/tests/ui/unused_io_amount.rs | 31 + .../clippy/tests/ui/unused_io_amount.stderr | 43 + src/tools/clippy/tests/ui/unused_labels.rs | 35 + .../clippy/tests/ui/unused_labels.stderr | 26 + src/tools/clippy/tests/ui/unused_lt.rs | 70 + src/tools/clippy/tests/ui/unused_lt.stderr | 22 + .../clippy/tests/ui/update-all-references.sh | 28 + .../clippy/tests/ui/update-references.sh | 50 + src/tools/clippy/tests/ui/use_self.rs | 68 + src/tools/clippy/tests/ui/use_self.stderr | 40 + .../tests/ui/used_underscore_binding.rs | 94 + .../tests/ui/used_underscore_binding.stderr | 34 + .../clippy/tests/ui/useless_attribute.rs | 17 + .../clippy/tests/ui/useless_attribute.stderr | 10 + src/tools/clippy/tests/ui/vec.rs | 56 + src/tools/clippy/tests/ui/vec.stderr | 40 + src/tools/clippy/tests/ui/while_loop.rs | 186 + src/tools/clippy/tests/ui/while_loop.stderr | 114 + .../clippy/tests/ui/wrong_self_convention.rs | 62 + .../tests/ui/wrong_self_convention.stderr | 76 + src/tools/clippy/tests/ui/zero_div_zero.rs | 16 + .../clippy/tests/ui/zero_div_zero.stderr | 61 + src/tools/clippy/tests/ui/zero_ptr.rs | 11 + src/tools/clippy/tests/ui/zero_ptr.stderr | 16 + .../tests/used_underscore_binding_macro.rs | 18 + src/tools/clippy/tests/versioncheck.rs | 15 + src/tools/clippy/util/cov.sh | 37 + src/tools/clippy/util/dogfood.sh | 5 + src/tools/clippy/util/export.py | 64 + src/tools/clippy/util/gh-pages/index.html | 206 + src/tools/clippy/util/gh-pages/versions.html | 86 + src/tools/clippy/util/lintlib.py | 95 + src/tools/clippy/util/update_lints.py | 227 + src/tools/compiletest/src/common.rs | 78 +- src/tools/compiletest/src/main.rs | 2 +- src/tools/compiletest/src/runtest.rs | 2 - src/tools/miri/.editorconfig | 25 + src/tools/miri/.travis.yml | 45 + src/tools/miri/Cargo.lock | 388 + src/tools/miri/Cargo.toml | 39 + src/tools/miri/LICENSE-APACHE | 201 + .../miri}/LICENSE-MIT | 2 +- src/tools/miri/README.md | 103 + src/tools/miri/appveyor.yml | 35 + src/tools/miri/benches/fibonacci.rs | 26 + .../miri/benches/helpers/fibonacci_helper.rs | 8 + .../helpers/fibonacci_helper_iterative.rs | 15 + src/tools/miri/benches/helpers/miri_helper.rs | 75 + src/tools/miri/benches/helpers/mod.rs | 7 + src/tools/miri/benches/helpers/repeat.rs | 4 + .../miri/benches/helpers/repeat_manual.rs | 7 + .../miri/benches/helpers/smoke_helper.rs | 2 + src/tools/miri/benches/repeat.rs | 16 + src/tools/miri/benches/smoke.rs | 35 + src/tools/miri/build.rs | 8 + src/tools/miri/cargo-miri-test/Cargo.lock | 14 + src/tools/miri/cargo-miri-test/Cargo.toml | 7 + src/tools/miri/cargo-miri-test/src/main.rs | 9 + src/tools/miri/cargo-miri-test/tests/foo.rs | 4 + src/tools/miri/miri/bin/cargo-miri.rs | 212 + src/tools/miri/miri/bin/miri.rs | 265 + src/tools/miri/miri/fn_call.rs | 653 + src/tools/miri/miri/helpers.rs | 73 + src/tools/miri/miri/intrinsic.rs | 685 + src/tools/miri/miri/lib.rs | 270 + src/tools/miri/miri/memory.rs | 16 + src/tools/miri/miri/operator.rs | 175 + src/tools/miri/miri/tls.rs | 142 + src/tools/miri/rustc_tests/Cargo.lock | 217 + src/tools/miri/rustc_tests/Cargo.toml | 7 + 
src/tools/miri/rustc_tests/src/main.rs | 292 +
src/tools/miri/src/librustc_mir/Cargo.toml | 19 +
.../miri/src/librustc_mir/interpret/cast.rs | 122 +
.../src/librustc_mir/interpret/const_eval.rs | 258 +
.../miri/src/librustc_mir/interpret/error.rs | 313 +
.../librustc_mir/interpret/eval_context.rs | 2539 +++
.../miri/src/librustc_mir/interpret/lvalue.rs | 506 +
.../src/librustc_mir/interpret/machine.rs | 81 +
.../miri/src/librustc_mir/interpret/memory.rs | 1700 ++
.../miri/src/librustc_mir/interpret/mod.rs | 42 +
.../src/librustc_mir/interpret/operator.rs | 268 +
.../src/librustc_mir/interpret/range_map.rs | 250 +
.../miri/src/librustc_mir/interpret/step.rs | 397 +
.../librustc_mir/interpret/terminator/drop.rs | 83 +
.../librustc_mir/interpret/terminator/mod.rs | 414 +
.../miri/src/librustc_mir/interpret/traits.rs | 137 +
.../src/librustc_mir/interpret/validation.rs | 727 +
.../miri/src/librustc_mir/interpret/value.rs | 405 +
src/tools/miri/src/librustc_mir/lib.rs | 26 +
.../undefined_byte_read.rs | 9 +
.../miri/tests/compile-fail/alignment.rs | 11 +
src/tools/miri/tests/compile-fail/assume.rs | 10 +
.../compile-fail/bitop-beyond-alignment.rs | 37 +
.../compile-fail/cast_box_int_to_fn_ptr.rs | 11 +
.../miri/tests/compile-fail/cast_fn_ptr.rs | 9 +
.../miri/tests/compile-fail/cast_fn_ptr2.rs | 9 +
.../tests/compile-fail/cast_fn_ptr_unsafe.rs | 10 +
.../tests/compile-fail/cast_fn_ptr_unsafe2.rs | 10 +
.../tests/compile-fail/cast_int_to_fn_ptr.rs | 10 +
.../tests/compile-fail/copy_nonoverlapping.rs | 24 +
.../miri/tests/compile-fail/ctlz_nonzero.rs | 15 +
.../miri/tests/compile-fail/cttz_nonzero.rs | 15 +
.../compile-fail/dangling_pointer_deref.rs | 8 +
.../compile-fail/deallocate-bad-alignment.rs | 15 +
.../tests/compile-fail/deallocate-bad-size.rs | 15 +
.../tests/compile-fail/deallocate-twice.rs | 16 +
.../miri/tests/compile-fail/deref_fn_ptr.rs | 8 +
.../miri/tests/compile-fail/div-by-zero-2.rs} | 9 +-
.../miri/tests/compile-fail/div-by-zero.rs | 21 +
.../miri/tests/compile-fail/execute_memory.rs | 12 +
.../miri/tests/compile-fail/fn_ptr_offset.rs | 14 +
.../miri/tests/compile-fail/invalid_bool.rs | 4 +
.../compile-fail/invalid_enum_discriminant.rs | 17 +
.../miri/tests/compile-fail/match_char.rs | 8 +
src/tools/miri/tests/compile-fail/memleak.rs | 5 +
.../miri/tests/compile-fail/memleak_rc.rs | 12 +
.../tests/compile-fail/modifying_constants.rs | 6 +
.../tests/compile-fail/never_say_never.rs | 12 +
.../compile-fail/never_transmute_humans.rs | 14 +
.../compile-fail/never_transmute_void.rs | 16 +
.../tests/compile-fail/null_pointer_deref.rs | 4 +
src/tools/miri/tests/compile-fail/oom.rs | 7 +
src/tools/miri/tests/compile-fail/oom2.rs | 10 +
.../tests/compile-fail/out_of_bounds_ptr_1.rs | 8 +
.../tests/compile-fail/out_of_bounds_ptr_2.rs | 7 +
.../tests/compile-fail/out_of_bounds_read.rs | 5 +
.../tests/compile-fail/out_of_bounds_read2.rs | 5 +
.../tests/compile-fail/overflowing-lsh-neg.rs | 16 +
.../tests/compile-fail/overflowing-rsh-2.rs | 16 +
.../tests/compile-fail/overflowing-rsh.rs | 15 +
.../compile-fail/overflowing-unchecked-rsh.rs | 21 +
..._of_relocation_makes_the_rest_undefined.rs | 11 +
src/tools/miri/tests/compile-fail/panic.rs | 7 +
.../tests/compile-fail/pointer_byte_read_1.rs | 7 +
.../tests/compile-fail/pointer_byte_read_2.rs | 7 +
...o_different_allocations_are_unorderable.rs | 7 +
.../miri/tests/compile-fail/ptr_bitops.rs | 7 +
.../miri/tests/compile-fail/ptr_int_cast.rs | 8 +
.../tests/compile-fail/ptr_offset_overflow.rs | 6 +
.../compile-fail/reading_half_a_pointer.rs | 29 +
.../reallocate-bad-alignment-2.rs | 16 +
.../compile-fail/reallocate-bad-alignment.rs | 16 +
.../tests/compile-fail/reallocate-bad-size.rs | 15 +
.../compile-fail/reallocate-change-alloc.rs | 14 +
.../tests/compile-fail/reallocate-dangling.rs | 16 +
.../tests/compile-fail/reference_to_packed.rs | 19 +
src/tools/miri/tests/compile-fail/repeat.rs | 5 +
src/tools/miri/tests/compile-fail/repeat2.rs | 5 +
.../miri/tests/compile-fail/stack_free.rs | 7 +
.../miri/tests/compile-fail/stack_limit.rs | 20 +
.../static_memory_modification.rs | 9 +
.../static_memory_modification2.rs | 12 +
.../static_memory_modification3.rs | 9 +
src/tools/miri/tests/compile-fail/timeout.rs | 9 +
.../compile-fail/transmute-pair-undef.rs | 20 +
.../miri/tests/compile-fail/transmute_fat.rs | 15 +
.../miri/tests/compile-fail/transmute_fat2.rs | 13 +
.../tests/compile-fail/unaligned_ptr_cast.rs | 6 +
.../tests/compile-fail/unaligned_ptr_cast2.rs | 7 +
.../compile-fail/unaligned_ptr_cast_zst.rs | 6 +
.../compile-fail/validation_aliasing_mut1.rs | 10 +
.../compile-fail/validation_aliasing_mut2.rs | 10 +
.../compile-fail/validation_aliasing_mut3.rs | 10 +
.../compile-fail/validation_aliasing_mut4.rs | 13 +
.../validation_buggy_as_mut_slice.rs | 20 +
.../validation_buggy_split_at_mut.rs | 22 +
.../compile-fail/validation_illegal_write.rs | 15 +
.../compile-fail/validation_lock_confusion.rs | 24 +
.../validation_pointer_smuggling.rs | 20 +
.../tests/compile-fail/validation_recover1.rs | 16 +
.../tests/compile-fail/validation_recover2.rs | 14 +
.../tests/compile-fail/validation_recover3.rs | 15 +
.../tests/compile-fail/validation_undef.rs | 14 +
.../tests/compile-fail/wild_pointer_deref.rs | 5 +
src/tools/miri/tests/compile-fail/zst.rs | 4 +
src/tools/miri/tests/compiletest.rs | 213 +
.../miri/tests/run-pass-fullmir/catch.rs | 8 +
.../miri/tests/run-pass-fullmir/catch.stdout | 1 +
.../run-pass-fullmir/foreign-fn-linkname.rs | 37 +
.../miri/tests/run-pass-fullmir/format.rs | 4 +
.../miri/tests/run-pass-fullmir/format.stdout | 1 +
.../miri/tests/run-pass-fullmir/from_utf8.rs | 4 +
.../miri/tests/run-pass-fullmir/hashmap.rs | 26 +
src/tools/miri/tests/run-pass-fullmir/heap.rs | 35 +
.../miri/tests/run-pass-fullmir/hello.rs | 4 +
.../miri/tests/run-pass-fullmir/hello.stdout | 1 +
.../tests/run-pass-fullmir/integer-ops.rs | 175 +
.../tests/run-pass-fullmir/issue-15080.rs | 34 +
.../miri/tests/run-pass-fullmir/issue-3794.rs | 42 +
.../tests/run-pass-fullmir/issue-3794.stdout | 2 +
.../run-pass-fullmir/loop-break-value.rs | 143 +
.../run-pass-fullmir/move-arg-2-unique.rs | 22 +
.../run-pass-fullmir/regions-mock-trans.rs | 66 +
src/tools/miri/tests/run-pass-fullmir/u128.rs | 79 +
.../run-pass-fullmir/unsized-tuple-impls.rs | 25 +
src/tools/miri/tests/run-pass-fullmir/vecs.rs | 52 +
src/tools/miri/tests/run-pass/arrays.rs | 45 +
.../miri/tests/run-pass/associated-const.rs | 21 +
src/tools/miri/tests/run-pass/assume_bug.rs | 3 +
.../miri/tests/run-pass/atomic-access-bool.rs | 30 +
.../tests/run-pass/atomic-compare_exchange.rs | 36 +
src/tools/miri/tests/run-pass/aux_test.rs | 9 +
.../miri/tests/run-pass/auxiliary/dep.rs | 1 +
src/tools/miri/tests/run-pass/bad_substs.rs | 4 +
src/tools/miri/tests/run-pass/binops.rs | 91 +
src/tools/miri/tests/run-pass/bools.rs | 28 +
.../miri/tests/run-pass/box_box_trait.rs | 29 +
src/tools/miri/tests/run-pass/btreemap.rs | 17 +
src/tools/miri/tests/run-pass/c_enums.rs | 32 +
.../run-pass/call_drop_on_array_elements.rs | 22 +
.../call_drop_on_fat_ptr_array_elements.rs | 20 +
.../call_drop_on_zst_array_elements.rs | 21 +
.../run-pass/call_drop_through_owned_slice.rs | 16 +
.../call_drop_through_trait_object.rs | 20 +
.../call_drop_through_trait_object_rc.rs | 22 +
src/tools/miri/tests/run-pass/calls.rs | 45 +
.../run-pass/cast-rfc0401-vtable-kinds.rs | 59 +
src/tools/miri/tests/run-pass/cast_fn_ptr.rs | 9 +
.../miri/tests/run-pass/cast_fn_ptr_unsafe.rs | 8 +
src/tools/miri/tests/run-pass/char.rs | 9 +
src/tools/miri/tests/run-pass/closure-drop.rs | 25 +
.../miri/tests/run-pass/closure-field-ty.rs | 10 +
src/tools/miri/tests/run-pass/closures.rs | 48 +
.../miri/tests/run-pass/const-vec-of-fns.rs | 29 +
src/tools/miri/tests/run-pass/constants.rs | 9 +
.../run-pass/deriving-associated-types.rs | 208 +
.../miri/tests/run-pass/drop_empty_slice.rs | 7 +
.../miri/tests/run-pass/dst-field-align.rs | 77 +
.../tests/run-pass/dst-irrefutable-bind.rs | 24 +
src/tools/miri/tests/run-pass/dst-raw.rs | 113 +
.../miri/tests/run-pass/dst-struct-sole.rs | 85 +
src/tools/miri/tests/run-pass/dst-struct.rs | 134 +
.../enum-nullable-const-null-with-fields.rs | 22 +
src/tools/miri/tests/run-pass/enums.rs | 34 +
.../miri/tests/run-pass/float_fast_math.rs | 30 +
src/tools/miri/tests/run-pass/floats.rs | 11 +
.../fn_item_as_closure_trait_object.rs | 6 +
..._item_with_args_as_closure_trait_object.rs | 8 +
...h_multiple_args_as_closure_trait_object.rs | 18 +
.../fn_ptr_as_closure_trait_object.rs | 15 +
.../miri/tests/run-pass/function_pointers.rs | 46 +
.../tests/run-pass/generator_control_flow.rs | 65 +
.../miri/tests/run-pass/intrinsics-integer.rs | 142 +
.../miri/tests/run-pass/intrinsics-math.rs | 67 +
src/tools/miri/tests/run-pass/intrinsics.rs | 10 +
src/tools/miri/tests/run-pass/ints.rs | 58 +
src/tools/miri/tests/run-pass/issue-15063.rs | 20 +
.../miri/tests/run-pass/issue-15523-big.rs | 48 +
src/tools/miri/tests/run-pass/issue-17877.rs | 25 +
src/tools/miri/tests/run-pass/issue-20575.rs | 19 +
src/tools/miri/tests/run-pass/issue-23261.rs | 70 +
src/tools/miri/tests/run-pass/issue-26709.rs | 26 +
src/tools/miri/tests/run-pass/issue-27901.rs | 20 +
src/tools/miri/tests/run-pass/issue-29746.rs | 45 +
src/tools/miri/tests/run-pass/issue-30530.rs | 35 +
.../tests/run-pass/issue-31267-additional.rs | 29 +
src/tools/miri/tests/run-pass/issue-33387.rs | 19 +
src/tools/miri/tests/run-pass/issue-34571.rs | 20 +
src/tools/miri/tests/run-pass/issue-35815.rs | 25 +
.../run-pass/issue-36278-prefix-nesting.rs | 28 +
src/tools/miri/tests/run-pass/issue-5917.rs | 17 +
.../miri/tests/run-pass/issue-miri-184.rs | 4 +
src/tools/miri/tests/run-pass/iter_slice.rs | 12 +
.../tests/run-pass/last-use-in-cap-clause.rs | 25 +
src/tools/miri/tests/run-pass/loops.rs | 35 +
src/tools/miri/tests/run-pass/main_fn.rs | 5 +
src/tools/miri/tests/run-pass/many_shr_bor.rs | 36 +
src/tools/miri/tests/run-pass/match_slice.rs | 8 +
.../miri/tests/run-pass/mir_coercions.rs | 80 +
src/tools/miri/tests/run-pass/mir_fat_ptr.rs | 61 +
.../miri/tests/run-pass/miri-issue-133.rs | 30 +
.../miri/tests/run-pass/move-arg-3-unique.rs | 18 +
.../miri/tests/run-pass/move-undef-primval.rs | 15 +
.../miri/tests/run-pass/multi_arg_closure.rs | 8 +
.../tests/run-pass/negative_discriminant.rs | 13 +
.../run-pass/non_capture_closure_to_fn_ptr.rs | 14 +
.../miri/tests/run-pass/observed_local_mut.rs | 21 +
.../run-pass/option_box_transmute_ptr.rs | 15 +
src/tools/miri/tests/run-pass/option_eq.rs | 3 +
.../tests/run-pass/overloaded-calls-simple.rs | 33 +
.../miri/tests/run-pass/packed_static.rs | 10 +
.../miri/tests/run-pass/packed_struct.rs | 69 +
src/tools/miri/tests/run-pass/pointers.rs | 60 +
src/tools/miri/tests/run-pass/products.rs | 32 +
.../miri/tests/run-pass/ptr_arith_offset.rs | 6 +
.../run-pass/ptr_arith_offset_overflow.rs | 9 +
.../miri/tests/run-pass/ptr_int_casts.rs | 35 +
src/tools/miri/tests/run-pass/ptr_offset.rs | 6 +
src/tools/miri/tests/run-pass/rc.rs | 39 +
.../miri/tests/run-pass/recursive_static.rs | 12 +
.../miri/tests/run-pass/ref-invalid-ptr.rs | 7 +
.../regions-lifetime-nonfree-late-bound.rs | 45 +
src/tools/miri/tests/run-pass/rfc1623.rs | 81 +
.../miri/tests/run-pass/rust-lang-org.rs | 21 +
.../run-pass/send-is-not-static-par-for.rs | 43 +
.../miri/tests/run-pass/sendable-class.rs | 34 +
.../simd-intrinsic-generic-elements.rs | 42 +
.../run-pass/slice-of-zero-size-elements.rs | 58 +
.../tests/run-pass/small_enum_size_bug.rs | 14 +
.../miri/tests/run-pass/specialization.rs | 21 +
.../run-pass/static_memory_modification.rs | 8 +
src/tools/miri/tests/run-pass/static_mut.rs | 17 +
src/tools/miri/tests/run-pass/std.rs | 33 +
src/tools/miri/tests/run-pass/strings.rs | 27 +
.../miri/tests/run-pass/subslice_array.rs | 14 +
src/tools/miri/tests/run-pass/sums.rs | 59 +
.../miri/tests/run-pass/tag-align-dyn-u64.rs | 37 +
src/tools/miri/tests/run-pass/thread-local.rs | 67 +
.../too-large-primval-write-problem.rs | 23 +
src/tools/miri/tests/run-pass/traits.rs | 30 +
src/tools/miri/tests/run-pass/trivial.rs | 11 +
.../tests/run-pass/try-operator-custom.rs | 13 +
.../tuple_like_enum_variant_constructor.rs | 3 +
...ke_enum_variant_constructor_pointer_opt.rs | 4 +
..._variant_constructor_struct_pointer_opt.rs | 33 +
.../run-pass/tuple_like_struct_constructor.rs | 5 +
.../miri/tests/run-pass/union-overwrite.rs | 81 +
src/tools/miri/tests/run-pass/union.rs | 88 +
src/tools/miri/tests/run-pass/unique-send.rs | 20 +
.../validation_lifetime_resolution.rs | 30 +
.../miri/tests/run-pass/vec-matching-fold.rs | 58 +
src/tools/miri/tests/run-pass/write-bytes.rs | 45 +
.../run-pass/zero-sized-binary-heap-push.rs | 28 +
src/tools/miri/tests/run-pass/zst.rs | 18 +
src/tools/miri/tests/run-pass/zst2.rs | 12 +
src/tools/miri/tests/run-pass/zst_box.rs | 8 +
.../miri/tests/run-pass/zst_variant_drop.rs | 23 +
.../miri/tex/final-presentation/latexmkrc | 12 +
.../final-presentation/rust-logo-512x512.png | Bin 0 -> 96029 bytes
.../miri/tex/final-presentation/slides.tex | 444 +
src/tools/miri/tex/report/latexmkrc | 12 +
src/tools/miri/tex/report/miri-report.tex | 663 +
src/tools/miri/xargo/Cargo.lock | 4 +
src/tools/miri/xargo/Cargo.toml | 6 +
src/tools/miri/xargo/Xargo.toml | 2 +
src/tools/miri/xargo/build.sh | 3 +
.../.cargo-ok => tools/miri/xargo/src/lib.rs} | 0
src/tools/rust-installer/Cargo.toml | 4 +-
.../rust-installer/combine-installers.sh | 2 +-
.../rust-installer/gen-install-script.sh | 2 +-
src/tools/rust-installer/gen-installer.sh | 2 +-
src/tools/rust-installer/install-template.sh | 76 +-
src/tools/rust-installer/src/tarballer.rs | 47 +-
src/tools/rust-installer/test.sh | 2 +-
src/tools/rustbook/Cargo.toml | 2 +-
src/tools/rustbook/src/main.rs | 36 +-
src/tools/tidy/src/deps.rs | 9 +
src/tools/tidy/src/lib.rs | 3 +
src/tools/tidy/src/pal.rs | 4 +-
src/tools/toolstate.toml | 36 +
src/vendor/backtrace-sys/.cargo-checksum.json | 2 +-
src/vendor/backtrace-sys/Cargo.toml | 6 +-
src/vendor/backtrace-sys/Cargo.toml.orig | 4 +-
src/vendor/backtrace-sys/build.rs | 10 +-
src/vendor/backtrace/.cargo-checksum.json | 2 +-
src/vendor/backtrace/.travis.yml | 21 +
src/vendor/backtrace/Cargo.toml | 142 +-
src/vendor/backtrace/Cargo.toml.orig | 98 + src/vendor/backtrace/src/backtrace/mod.rs | 3 +- .../backtrace/src/backtrace/unix_backtrace.rs | 2 +- src/vendor/backtrace/src/capture.rs | 77 +- src/vendor/backtrace/src/lib.rs | 15 +- src/vendor/backtrace/src/symbolize/dbghelp.rs | 9 +- src/vendor/backtrace/src/symbolize/gimli.rs | 170 + src/vendor/backtrace/src/symbolize/mod.rs | 10 +- src/vendor/backtrace/tests/long_fn_name.rs | 56 + src/vendor/backtrace/tests/smoke.rs | 5 +- .../bitflags-0.7.0/.cargo-checksum.json | 1 + src/vendor/bitflags-0.7.0/.cargo-ok | 0 src/vendor/bitflags-0.7.0/.gitignore | 2 + src/vendor/bitflags-0.7.0/.travis.yml | 24 + src/vendor/bitflags-0.7.0/Cargo.toml | 13 + .../{gcc => bitflags-0.7.0}/LICENSE-APACHE | 0 src/vendor/bitflags-0.7.0/LICENSE-MIT | 25 + src/vendor/bitflags-0.7.0/README.md | 24 + src/vendor/bitflags-0.7.0/src/lib.rs | 808 + src/vendor/bitflags-0.7.0/tests/external.rs | 21 + .../bitflags-0.7.0/tests/external_no_std.rs | 22 + .../bitflags-0.9.1/.cargo-checksum.json | 1 + src/vendor/bitflags-0.9.1/.cargo-ok | 0 src/vendor/bitflags-0.9.1/.gitignore | 2 + src/vendor/bitflags-0.9.1/.travis.yml | 29 + src/vendor/bitflags-0.9.1/Cargo.toml | 31 + src/vendor/bitflags-0.9.1/Cargo.toml.orig | 26 + .../LICENSE-APACHE | 0 src/vendor/bitflags-0.9.1/LICENSE-MIT | 25 + src/vendor/bitflags-0.9.1/README.md | 24 + .../bitflags-0.9.1/src/example_generated.rs | 16 + src/vendor/bitflags-0.9.1/src/lib.rs | 990 ++ .../tests/conflicting_trait_impls.rs | 20 + src/vendor/bitflags-0.9.1/tests/external.rs | 21 + .../bitflags-0.9.1/tests/external_no_std.rs | 22 + .../bitflags-0.9.1/tests/i128_bitflags.rs | 30 + src/vendor/bitflags/.cargo-checksum.json | 2 +- src/vendor/bitflags/.travis.yml | 2 + src/vendor/bitflags/Cargo.toml | 4 +- src/vendor/bitflags/Cargo.toml.orig | 2 +- src/vendor/bitflags/README.md | 11 +- src/vendor/bitflags/src/example_generated.rs | 6 +- src/vendor/bitflags/src/lib.rs | 391 +- .../bitflags/tests/conflicting_trait_impls.rs | 1 - src/vendor/bitflags/tests/external.rs | 6 +- src/vendor/bitflags/tests/external_no_std.rs | 5 +- src/vendor/bitflags/tests/i128_bitflags.rs | 21 +- src/vendor/cc/.cargo-checksum.json | 1 + src/vendor/cc/.cargo-ok | 0 src/vendor/{gcc => cc}/.gitignore | 0 src/vendor/{gcc => cc}/.travis.yml | 2 +- src/vendor/cc/Cargo.toml | 36 + .../{gcc/Cargo.toml => cc/Cargo.toml.orig} | 12 +- src/vendor/cc/LICENSE-APACHE | 201 + src/vendor/{gcc => cc}/LICENSE-MIT | 0 src/vendor/{gcc => cc}/README.md | 64 +- src/vendor/{gcc => cc}/appveyor.yml | 20 + src/vendor/{gcc => cc}/src/bin/gcc-shim.rs | 0 src/vendor/{gcc => cc}/src/com.rs | 0 src/vendor/{gcc => cc}/src/lib.rs | 877 +- src/vendor/{gcc => cc}/src/registry.rs | 0 src/vendor/{gcc => cc}/src/setup_config.rs | 0 src/vendor/{gcc => cc}/src/winapi.rs | 4 +- .../{gcc => cc}/src/windows_registry.rs | 1 + src/vendor/{gcc => cc}/tests/cc_env.rs | 2 +- src/vendor/{gcc => cc}/tests/support/mod.rs | 8 +- src/vendor/{gcc => cc}/tests/test.rs | 106 +- src/vendor/clap/.cargo-checksum.json | 2 +- src/vendor/clap/CHANGELOG.md | 32 + src/vendor/clap/CONTRIBUTORS.md | 70 +- src/vendor/clap/Cargo.toml | 51 +- src/vendor/clap/Cargo.toml.orig | 5 +- src/vendor/clap/README.md | 20 +- src/vendor/clap/SPONSORS.md | 7 + src/vendor/clap/src/app/help.rs | 25 +- src/vendor/clap/src/app/parser.rs | 37 +- src/vendor/clap/src/app/validator.rs | 9 +- src/vendor/clap/src/args/arg.rs | 6 +- src/vendor/clap/src/completions/fish.rs | 11 +- src/vendor/clap/src/completions/zsh.rs | 20 +- src/vendor/clap/src/lib.rs | 1 - 
src/vendor/clap/src/suggestions.rs | 7 +- src/vendor/cmake/.cargo-checksum.json | 2 +- src/vendor/cmake/Cargo.toml | 30 +- src/vendor/cmake/Cargo.toml.orig | 16 + src/vendor/cmake/src/lib.rs | 16 +- src/vendor/conv/.cargo-checksum.json | 1 + src/vendor/conv/.cargo-ok | 0 src/vendor/conv/.gitignore | 2 + src/vendor/conv/.travis.yml | 17 + src/vendor/conv/Cargo.toml | 22 + src/vendor/conv/LICENSE | 25 + src/vendor/conv/README.md | 129 + src/vendor/conv/src/errors.rs | 606 + src/vendor/conv/src/impls.rs | 591 + src/vendor/conv/src/lib.rs | 525 + src/vendor/conv/src/macros.rs | 148 + src/vendor/conv/src/misc.rs | 71 + src/vendor/conv/tests/conv_utils.rs | 40 + src/vendor/conv/tests/derive_try_from.rs | 45 + src/vendor/conv/tests/lang_char.rs | 121 + src/vendor/conv/tests/lang_floats.rs | 57 + src/vendor/conv/tests/lang_ints.rs | 395 + src/vendor/conv/tests/unwraps.rs | 31 + src/vendor/conv/tests/use_in_generics.rs | 14 + src/vendor/conv/tests/util/mod.rs | 509 + .../cssparser-macros/.cargo-checksum.json | 1 + src/vendor/cssparser-macros/.cargo-ok | 0 src/vendor/cssparser-macros/Cargo.toml | 18 + src/vendor/cssparser-macros/lib.rs | 97 + src/vendor/cssparser/.cargo-checksum.json | 1 + src/vendor/cssparser/.cargo-ok | 0 src/vendor/cssparser/.gitignore | 3 + src/vendor/cssparser/.travis.yml | 17 + src/vendor/cssparser/Cargo.toml | 60 + src/vendor/cssparser/Cargo.toml.orig | 39 + src/vendor/cssparser/LICENSE | 373 + src/vendor/cssparser/README.md | 64 + src/vendor/cssparser/build.rs | 40 + src/vendor/cssparser/build/match_byte.rs | 271 + src/vendor/cssparser/docs/.nojekyll | 0 src/vendor/cssparser/docs/404.html | 3 + src/vendor/cssparser/docs/index.html | 3 + src/vendor/cssparser/src/big-data-url.css | 1 + src/vendor/cssparser/src/color.rs | 555 + .../cssparser/src/css-parsing-tests/An+B.json | 156 + .../cssparser/src/css-parsing-tests/LICENSE | 8 + .../src/css-parsing-tests/README.rst | 304 + .../src/css-parsing-tests/color3.json | 260 + .../src/css-parsing-tests/color3_hsl.json | 14210 +++++++++++++++ .../css-parsing-tests/color3_keywords.json | 803 + .../component_value_list.json | 359 + .../css-parsing-tests/declaration_list.json | 44 + .../src/css-parsing-tests/make_color3_hsl.py | 50 + .../css-parsing-tests/make_color3_keywords.py | 191 + .../one_component_value.json | 27 + .../css-parsing-tests/one_declaration.json | 46 + .../src/css-parsing-tests/one_rule.json | 36 + .../src/css-parsing-tests/rule_list.json | 53 + .../src/css-parsing-tests/stylesheet.json | 51 + .../css-parsing-tests/stylesheet_bytes.json | 134 + .../src/css-parsing-tests/urange.json | 89 + src/vendor/cssparser/src/from_bytes.rs | 64 + src/vendor/cssparser/src/lib.rs | 121 + src/vendor/cssparser/src/macros.rs | 163 + src/vendor/cssparser/src/nth.rs | 88 + src/vendor/cssparser/src/parser.rs | 710 + .../cssparser/src/rules_and_declarations.rs | 439 + src/vendor/cssparser/src/serializer.rs | 411 + src/vendor/cssparser/src/tests.rs | 833 + src/vendor/cssparser/src/tokenizer.rs | 1080 ++ src/vendor/cssparser/src/unicode_range.rs | 194 + src/vendor/custom_derive/.cargo-checksum.json | 1 + src/vendor/custom_derive/.cargo-ok | 0 src/vendor/custom_derive/.gitignore | 4 + src/vendor/custom_derive/Cargo.toml | 33 + src/vendor/custom_derive/LICENSE | 237 + src/vendor/custom_derive/README.md | 24 + src/vendor/custom_derive/src/lib.rs | 462 + .../custom_derive/tests/empty_bi_derives.rs | 24 + .../custom_derive/tests/enum_iterator.rs | 73 + .../custom_derive/tests/enum_try_from.rs | 77 + .../custom_derive/tests/passthru_derive.rs | 19 + 
.../custom_derive/tests/stable_encodable.rs | 378 + .../custom_derive/tests/trailing_comma.rs | 24 + .../debug_unreachable/.cargo-checksum.json | 1 + src/vendor/debug_unreachable/.cargo-ok | 0 src/vendor/debug_unreachable/.gitignore | 19 + src/vendor/debug_unreachable/.travis.yml | 1 + src/vendor/debug_unreachable/Cargo.toml | 18 + src/vendor/debug_unreachable/README.md | 24 + .../debug_unreachable/examples/simple.rs | 11 + src/vendor/debug_unreachable/src/lib.rs | 22 + src/vendor/debug_unreachable/tests/check.rs | 9 + src/vendor/dtoa/.cargo-checksum.json | 2 +- src/vendor/dtoa/Cargo.toml | 20 +- src/vendor/dtoa/Cargo.toml.orig | 11 + src/vendor/dtoa/README.md | 2 +- src/vendor/dtoa/performance.png | Bin 70571 -> 0 bytes src/vendor/dtoa/src/lib.rs | 2 + .../error-chain-0.10.0/.cargo-checksum.json | 1 - src/vendor/error-chain-0.10.0/CHANGELOG.md | 100 - src/vendor/error-chain/.cargo-checksum.json | 1 + src/vendor/error-chain/.cargo-ok | 0 .../.gitignore | 0 .../.travis.yml | 3 +- src/vendor/error-chain/CHANGELOG.md | 120 + src/vendor/error-chain/Cargo.toml | 32 + .../Cargo.toml.orig} | 10 +- src/vendor/error-chain/LICENSE-APACHE | 201 + src/vendor/error-chain/LICENSE-MIT | 26 + .../README.md | 8 +- .../examples/all.rs | 4 +- src/vendor/error-chain/examples/chain_err.rs | 69 + .../examples/doc.rs | 3 +- .../examples/quickstart.rs | 15 +- .../examples/size.rs | 4 +- .../error-chain/src/bin/has_backtrace.rs | 18 + .../src/error_chain.rs | 58 +- .../src/example_generated.rs | 2 +- .../src/impl_error_chain_kind.rs} | 112 +- .../src/lib.rs | 352 +- .../src/quick_main.rs | 12 +- .../tests/quick_main.rs | 4 +- .../tests/tests.rs | 167 +- src/vendor/filetime/.cargo-checksum.json | 2 +- src/vendor/filetime/.travis.yml | 20 +- src/vendor/filetime/Cargo.toml | 36 +- src/vendor/filetime/Cargo.toml.orig | 22 + src/vendor/filetime/appveyor.yml | 2 +- src/vendor/filetime/src/lib.rs | 259 +- src/vendor/filetime/src/redox.rs | 57 + src/vendor/filetime/src/unix.rs | 94 + src/vendor/filetime/src/windows.rs | 87 + src/vendor/flate2/.cargo-checksum.json | 2 +- src/vendor/flate2/.travis.yml | 2 +- src/vendor/flate2/Cargo.toml | 73 +- src/vendor/flate2/Cargo.toml.orig | 40 + src/vendor/flate2/README.md | 4 +- src/vendor/flate2/appveyor.yml | 8 +- .../flate2/examples/deflatedecoder-bufread.rs | 24 + .../flate2/examples/deflatedecoder-read.rs | 24 + .../flate2/examples/deflatedecoder-write.rs | 26 + .../flate2/examples/deflateencoder-bufread.rs | 24 + .../flate2/examples/deflateencoder-read.rs | 20 + .../flate2/examples/deflateencoder-write.rs | 12 + src/vendor/flate2/examples/flatereadext.rs | 22 + src/vendor/flate2/examples/gzbuilder.rs | 24 + .../flate2/examples/gzdecoder-bufread.rs | 24 + src/vendor/flate2/examples/gzdecoder-read.rs | 24 + .../flate2/examples/gzencoder-bufread.rs | 24 + src/vendor/flate2/examples/gzencoder-read.rs | 20 + src/vendor/flate2/examples/gzencoder-write.rs | 12 + .../flate2/examples/gzmultidecoder-bufread.rs | 24 + .../flate2/examples/gzmultidecoder-read.rs | 24 + src/vendor/flate2/examples/hello_world.txt | 1 + .../flate2/examples/zlibdecoder-bufread.rs | 24 + .../flate2/examples/zlibdecoder-read.rs | 24 + .../flate2/examples/zlibdecoder-write.rs | 26 + .../flate2/examples/zlibencoder-bufread.rs | 24 + .../flate2/examples/zlibencoder-read.rs | 21 + .../flate2/examples/zlibencoder-write.rs | 12 + src/vendor/flate2/src/bufreader.rs | 11 + src/vendor/flate2/src/crc.rs | 12 +- src/vendor/flate2/src/deflate.rs | 857 - src/vendor/flate2/src/deflate/bufread.rs | 269 + 
src/vendor/flate2/src/deflate/mod.rs | 198 +
src/vendor/flate2/src/deflate/read.rs | 268 +
src/vendor/flate2/src/deflate/write.rs | 350 +
src/vendor/flate2/src/ffi.rs | 12 +
src/vendor/flate2/src/gz.rs | 1049 --
src/vendor/flate2/src/gz/bufread.rs | 547 +
src/vendor/flate2/src/gz/mod.rs | 344 +
src/vendor/flate2/src/gz/read.rs | 286 +
src/vendor/flate2/src/gz/write.rs | 182 +
src/vendor/flate2/src/lib.rs | 156 +-
src/vendor/flate2/src/mem.rs | 29 +-
src/vendor/flate2/src/zio.rs | 4 +
src/vendor/flate2/src/zlib.rs | 810 -
src/vendor/flate2/src/zlib/bufread.rs | 259 +
src/vendor/flate2/src/zlib/mod.rs | 164 +
src/vendor/flate2/src/zlib/read.rs | 266 +
src/vendor/flate2/src/zlib/write.rs | 351 +
src/vendor/flate2/tests/early-flush.rs | 20 +
src/vendor/flate2/tests/zero-write.rs | 8 +
src/vendor/fnv/.cargo-checksum.json | 1 +
src/vendor/fnv/.cargo-ok | 0
src/vendor/fnv/.gitignore | 2 +
src/vendor/fnv/.travis.yml | 8 +
src/vendor/fnv/Cargo.toml | 13 +
src/vendor/fnv/README.md | 81 +
src/vendor/fnv/lib.rs | 349 +
src/vendor/futf/.cargo-checksum.json | 1 +
src/vendor/futf/.cargo-ok | 0
src/vendor/futf/.gitignore | 2 +
src/vendor/futf/.travis.yml | 11 +
src/vendor/futf/Cargo.toml | 12 +
src/vendor/futf/LICENSE-APACHE | 201 +
src/vendor/futf/LICENSE-MIT | 25 +
src/vendor/futf/README.md | 18 +
src/vendor/futf/src/lib.rs | 248 +
src/vendor/futf/src/test.rs | 270 +
src/vendor/gcc/.cargo-checksum.json | 1 -
src/vendor/getopts/.cargo-checksum.json | 2 +-
src/vendor/getopts/.travis.yml | 5 +-
src/vendor/getopts/Cargo.toml | 31 +-
src/vendor/getopts/Cargo.toml.orig | 17 +
src/vendor/getopts/README.md | 4 +-
src/vendor/getopts/src/lib.rs | 270 +-
src/vendor/handlebars/.cargo-checksum.json | 2 +-
src/vendor/handlebars/.travis.yml | 4 -
src/vendor/handlebars/CHANGELOG.md | 37 +
src/vendor/handlebars/Cargo.toml | 74 +-
src/vendor/handlebars/Cargo.toml.orig | 38 +
src/vendor/handlebars/README.md | 44 +-
src/vendor/handlebars/benches/bench.rs | 51 +-
src/vendor/handlebars/examples/decorator.rs | 137 +-
src/vendor/handlebars/examples/error.rs | 96 +-
src/vendor/handlebars/examples/partials.rs | 67 +-
.../examples/partials_legacy/base0.hbs | 7 -
.../examples/partials_legacy/base1.hbs | 7 -
.../examples/partials_legacy/template2.hbs | 5 -
src/vendor/handlebars/examples/quick.rs | 20 +
src/vendor/handlebars/examples/render-cli.rs | 14 +-
src/vendor/handlebars/examples/render.rs | 91 +-
src/vendor/handlebars/examples/render_file.rs | 83 +-
src/vendor/handlebars/src/context.rs | 259 +-
.../handlebars/src/directives/inline.rs | 33 +-
src/vendor/handlebars/src/directives/mod.rs | 258 +-
src/vendor/handlebars/src/error.rs | 94 +-
src/vendor/handlebars/src/grammar.rs | 346 +-
.../handlebars/src/helpers/helper_each.rs | 192 +-
.../handlebars/src/helpers/helper_if.rs | 38 +-
.../handlebars/src/helpers/helper_log.rs | 16 +-
.../handlebars/src/helpers/helper_lookup.rs | 36 +-
.../handlebars/src/helpers/helper_partial.rs | 198 -
.../handlebars/src/helpers/helper_raw.rs | 9 +-
.../handlebars/src/helpers/helper_with.rs | 99 +-
src/vendor/handlebars/src/helpers/mod.rs | 96 +-
src/vendor/handlebars/src/lib.rs | 5 +-
src/vendor/handlebars/src/partial.rs | 161 +-
src/vendor/handlebars/src/registry.rs | 235 +-
src/vendor/handlebars/src/render.rs | 414 +-
src/vendor/handlebars/src/template.rs | 313 +-
src/vendor/html-diff/.cargo-checksum.json | 1 +
src/vendor/html-diff/.cargo-ok | 0
src/vendor/html-diff/.travis.yml | 19 +
src/vendor/html-diff/Cargo.toml | 25 +
src/vendor/html-diff/Cargo.toml.orig | 16 +
src/vendor/html-diff/LICENSE | 21 +
src/vendor/html-diff/README.md | 11 +
src/vendor/html-diff/src/lib.rs | 442 +
src/vendor/html-diff/src/main.rs | 43 +
src/vendor/html-diff/test_files/basic.html | 7 +
src/vendor/html-diff/test_files/basic.stdout | 3 +
.../html-diff/test_files/basic_compare.html | 8 +
src/vendor/html-diff/tests/test_files.rs | 61 +
src/vendor/html5ever/.cargo-checksum.json | 1 +
src/vendor/html5ever/.cargo-ok | 0
src/vendor/html5ever/Cargo.toml | 47 +
src/vendor/html5ever/benches/tokenizer.rs | 162 +
src/vendor/html5ever/build.rs | 28 +
.../html5ever/data/bench/lipsum-zh.html | 19 +
src/vendor/html5ever/data/bench/lipsum.html | 40 +
.../html5ever/data/bench/medium-fragment.html | 24 +
.../html5ever/data/bench/small-fragment.html | 7 +
src/vendor/html5ever/data/bench/strong.html | 1 +
.../html5ever/data/bench/tiny-fragment.html | 1 +
src/vendor/html5ever/data/test/ignore | 1 +
src/vendor/html5ever/examples/capi/tokenize.c | 74 +
src/vendor/html5ever/examples/html2html.rs | 49 +
.../html5ever/examples/noop-tokenize.rs | 43 +
.../html5ever/examples/noop-tree-builder.rs | 112 +
src/vendor/html5ever/examples/print-rcdom.rs | 77 +
.../html5ever/examples/print-tree-actions.rs | 169 +
src/vendor/html5ever/examples/tokenize.rs | 98 +
src/vendor/html5ever/macros/match_token.rs | 482 +
src/vendor/html5ever/src/driver.rs | 134 +
src/vendor/html5ever/src/lib.rs | 35 +
src/vendor/html5ever/src/macros.rs | 33 +
src/vendor/html5ever/src/serialize/mod.rs | 221 +
.../html5ever/src/tokenizer/char_ref/mod.rs | 436 +
.../html5ever/src/tokenizer/interface.rs | 110 +
src/vendor/html5ever/src/tokenizer/mod.rs | 1565 ++
src/vendor/html5ever/src/tokenizer/states.rs | 93 +
.../html5ever/src/tree_builder/actions.rs | 1164 ++
src/vendor/html5ever/src/tree_builder/data.rs | 158 +
src/vendor/html5ever/src/tree_builder/mod.rs | 641 +
.../html5ever/src/tree_builder/rules.rs | 1462 ++
.../html5ever/src/tree_builder/tag_sets.rs | 102 +
.../html5ever/src/tree_builder/types.rs | 90 +
src/vendor/html5ever/src/util/str.rs | 58 +
.../tests/foreach_html5lib_test/mod.rs | 41 +
src/vendor/html5ever/tests/serializer.rs | 107 +
src/vendor/html5ever/tests/tokenizer.rs | 426 +
src/vendor/html5ever/tests/tree_builder.rs | 282 +
src/vendor/itoa/.cargo-checksum.json | 2 +-
src/vendor/itoa/.travis.yml | 17 +-
src/vendor/itoa/Cargo.toml | 23 +-
src/vendor/itoa/Cargo.toml.orig | 14 +
src/vendor/itoa/README.md | 7 +-
src/vendor/itoa/benches/bench.rs | 36 +-
src/vendor/itoa/performance.png | Bin 74625 -> 0 bytes
src/vendor/itoa/src/lib.rs | 203 +-
src/vendor/itoa/src/udiv128.rs | 62 +
src/vendor/itoa/tests/test.rs | 36 +-
src/vendor/kuchiki/.cargo-checksum.json | 1 +
src/vendor/kuchiki/.cargo-ok | 0
src/vendor/kuchiki/.gitignore | 3 +
src/vendor/kuchiki/.travis.yml | 6 +
src/vendor/kuchiki/Cargo.toml | 36 +
src/vendor/kuchiki/Cargo.toml.orig | 20 +
src/vendor/kuchiki/README.md | 10 +
src/vendor/kuchiki/docs/.nojekyll | 0
src/vendor/kuchiki/docs/404.html | 3 +
src/vendor/kuchiki/docs/index.html | 3 +
src/vendor/kuchiki/examples/find_matches.rs | 48 +
src/vendor/kuchiki/examples/stack-overflow.rs | 23 +
src/vendor/kuchiki/src/attributes.rs | 47 +
src/vendor/kuchiki/src/iter.rs | 422 +
src/vendor/kuchiki/src/lib.rs | 41 +
src/vendor/kuchiki/src/move_cell.rs | 127 +
src/vendor/kuchiki/src/node_data_ref.rs | 89 +
src/vendor/kuchiki/src/parser.rs | 170 +
src/vendor/kuchiki/src/select.rs | 302 +
src/vendor/kuchiki/src/serializer.rs | 79 +
src/vendor/kuchiki/src/tests.rs | 119 +
src/vendor/kuchiki/src/tree.rs | 460 +
src/vendor/kuchiki/test_data/foo.html | 9 +
src/vendor/libc/.cargo-checksum.json | 2 +- src/vendor/libc/.travis.yml | 19 +- src/vendor/libc/Cargo.toml | 10 +- src/vendor/libc/Cargo.toml.orig | 4 +- src/vendor/libc/README.md | 2 +- src/vendor/libc/appveyor.yml | 2 +- .../docker/aarch64-linux-android/Dockerfile | 13 + .../aarch64-unknown-linux-gnu/Dockerfile | 4 +- .../docker/arm-linux-androideabi/Dockerfile | 13 + .../arm-unknown-linux-gnueabihf/Dockerfile | 4 +- .../asmjs-unknown-emscripten/Dockerfile | 20 + .../ci/docker/i686-linux-android/Dockerfile | 13 + .../docker/i686-unknown-linux-gnu/Dockerfile | 2 +- .../docker/mips-unknown-linux-gnu/Dockerfile | 4 +- .../docker/mips-unknown-linux-musl/Dockerfile | 8 +- .../mips64-unknown-linux-gnuabi64/Dockerfile | 4 +- .../mipsel-unknown-linux-musl/Dockerfile | 8 +- .../powerpc-unknown-linux-gnu/Dockerfile | 4 +- .../powerpc64-unknown-linux-gnu/Dockerfile | 4 +- .../docker/s390x-unknown-linux-gnu/Dockerfile | 3 + .../wasm32-unknown-emscripten/Dockerfile | 21 + .../wasm32-unknown-emscripten/node-wrapper.sh | 11 + .../docker/x86_64-rumprun-netbsd/Dockerfile | 6 +- .../docker/x86_64-rumprun-netbsd/runtest.rs | 54 + .../docker/x86_64-unknown-freebsd/Dockerfile | 4 +- .../x86_64-unknown-linux-gnu/Dockerfile | 2 +- .../docker/x86_64-unknown-openbsd/Dockerfile | 8 - src/vendor/libc/ci/emscripten-entry.sh | 19 + src/vendor/libc/ci/emscripten.sh | 54 + src/vendor/libc/ci/run-docker.sh | 1 + src/vendor/libc/ci/run.sh | 132 +- src/vendor/libc/ci/runtest-android.rs | 41 + src/vendor/libc/src/lib.rs | 23 +- src/vendor/libc/src/macros.rs | 37 - src/vendor/libc/src/redox.rs | 31 + src/vendor/libc/src/unix/bsd/apple/mod.rs | 331 +- .../src/unix/bsd/freebsdlike/dragonfly/mod.rs | 259 +- .../unix/bsd/freebsdlike/freebsd/aarch64.rs | 2 + .../src/unix/bsd/freebsdlike/freebsd/mod.rs | 273 +- .../unix/bsd/freebsdlike/freebsd/x86_64.rs | 2 + .../libc/src/unix/bsd/freebsdlike/mod.rs | 21 +- src/vendor/libc/src/unix/bsd/mod.rs | 78 +- .../libc/src/unix/bsd/netbsdlike/mod.rs | 21 +- .../src/unix/bsd/netbsdlike/netbsd/mod.rs | 88 +- .../unix/bsd/netbsdlike/openbsdlike/mod.rs | 82 + src/vendor/libc/src/unix/haiku/b32.rs | 1 + src/vendor/libc/src/unix/haiku/b64.rs | 1 + src/vendor/libc/src/unix/haiku/mod.rs | 211 +- src/vendor/libc/src/unix/mod.rs | 53 +- src/vendor/libc/src/unix/newlib/mod.rs | 58 + .../libc/src/unix/notbsd/android/b32/arm.rs | 5 + .../libc/src/unix/notbsd/android/b32/mod.rs | 30 +- .../libc/src/unix/notbsd/android/b32/x86.rs | 406 + .../src/unix/notbsd/android/b64/aarch64.rs | 3 + .../libc/src/unix/notbsd/android/b64/mod.rs | 30 + .../src/unix/notbsd/android/b64/x86_64.rs | 368 +- .../libc/src/unix/notbsd/android/mod.rs | 170 +- src/vendor/libc/src/unix/notbsd/emscripten.rs | 1667 ++ .../libc/src/unix/notbsd/linux/mips/mips32.rs | 40 +- .../libc/src/unix/notbsd/linux/mips/mips64.rs | 35 +- src/vendor/libc/src/unix/notbsd/linux/mod.rs | 251 +- .../src/unix/notbsd/linux/musl/b32/arm.rs | 33 + .../src/unix/notbsd/linux/musl/b32/asmjs.rs | 348 - .../src/unix/notbsd/linux/musl/b32/mips.rs | 41 +- .../src/unix/notbsd/linux/musl/b32/mod.rs | 5 - .../src/unix/notbsd/linux/musl/b32/x86.rs | 433 +- .../src/unix/notbsd/linux/musl/b64/aarch64.rs | 3 + .../src/unix/notbsd/linux/musl/b64/mod.rs | 30 + .../unix/notbsd/linux/musl/b64/powerpc64.rs | 3 + .../src/unix/notbsd/linux/musl/b64/x86_64.rs | 361 +- .../libc/src/unix/notbsd/linux/musl/mod.rs | 35 +- .../src/unix/notbsd/linux/other/b32/arm.rs | 33 + .../unix/notbsd/linux/other/b32/powerpc.rs | 33 + .../src/unix/notbsd/linux/other/b32/x86.rs | 31 + 
.../unix/notbsd/linux/other/b64/aarch64.rs | 32 + .../unix/notbsd/linux/other/b64/powerpc64.rs | 32 + .../unix/notbsd/linux/other/b64/sparc64.rs | 30 + .../src/unix/notbsd/linux/other/b64/x86_64.rs | 40 +- .../libc/src/unix/notbsd/linux/other/mod.rs | 7 - .../libc/src/unix/notbsd/linux/s390x.rs | 39 +- src/vendor/libc/src/unix/notbsd/mod.rs | 212 +- src/vendor/libc/src/unix/solaris/mod.rs | 94 +- src/vendor/libc/src/unix/uclibc/mod.rs | 159 +- .../libc/src/unix/uclibc/x86_64/l4re.rs | 46 + src/vendor/libc/src/unix/uclibc/x86_64/mod.rs | 176 +- src/vendor/libc/src/windows.rs | 3 +- src/vendor/lzma-sys/.cargo-checksum.json | 2 +- src/vendor/lzma-sys/Cargo.toml | 9 +- src/vendor/lzma-sys/Cargo.toml.orig | 5 +- src/vendor/lzma-sys/build.rs | 13 +- src/vendor/mac/.cargo-checksum.json | 1 + src/vendor/mac/.cargo-ok | 0 src/vendor/mac/.gitignore | 19 + src/vendor/mac/.travis.yml | 19 + src/vendor/mac/Cargo.toml | 10 + src/vendor/mac/README.md | 27 + src/vendor/mac/src/cfg.rs | 90 + src/vendor/mac/src/format.rs | 50 + src/vendor/mac/src/inspect.rs | 93 + src/vendor/mac/src/lib.rs | 71 + src/vendor/mac/src/matches.rs | 44 + src/vendor/mac/src/mem.rs | 44 + src/vendor/mac/src/syntax_ext.rs | 31 + src/vendor/mac/src/test.rs | 24 + src/vendor/magenta-sys/.cargo-checksum.json | 1 + src/vendor/magenta-sys/.cargo-ok | 0 src/vendor/magenta-sys/BUILD.gn | 11 + src/vendor/magenta-sys/Cargo.toml | 10 + src/vendor/magenta-sys/examples/hello.rs | 14 + src/vendor/magenta-sys/src/definitions.rs | 882 + src/vendor/magenta-sys/src/lib.rs | 330 + src/vendor/magenta/.cargo-checksum.json | 1 + src/vendor/magenta/.cargo-ok | 0 src/vendor/magenta/.gitignore | 6 + src/vendor/magenta/AUTHORS | 8 + src/vendor/magenta/BUILD.gn | 14 + src/vendor/magenta/CONTRIBUTING.md | 9 + src/vendor/magenta/Cargo.toml | 11 + src/vendor/magenta/GETTING_STARTED.md | 175 + src/vendor/magenta/LICENSE | 27 + src/vendor/magenta/PATENTS | 22 + src/vendor/magenta/README.md | 8 + src/vendor/magenta/examples/BUILD.gn | 9 + src/vendor/magenta/src/channel.rs | 392 + src/vendor/magenta/src/event.rs | 53 + src/vendor/magenta/src/eventpair.rs | 88 + src/vendor/magenta/src/fifo.rs | 116 + src/vendor/magenta/src/job.rs | 23 + src/vendor/magenta/src/lib.rs | 758 + src/vendor/magenta/src/port.rs | 362 + src/vendor/magenta/src/process.rs | 23 + src/vendor/magenta/src/socket.rs | 173 + src/vendor/magenta/src/thread.rs | 23 + src/vendor/magenta/src/timer.rs | 101 + src/vendor/magenta/src/vmo.rs | 284 + src/vendor/magenta/tools/BUILD.gn | 51 + src/vendor/magenta/tools/README.md | 26 + src/vendor/magenta/tools/clang_wrapper.cc | 197 + src/vendor/magenta/tools/gen_status.py | 49 + src/vendor/markup5ever/.cargo-checksum.json | 1 + src/vendor/markup5ever/.cargo-ok | 0 src/vendor/markup5ever/Cargo.toml | 52 + src/vendor/markup5ever/Cargo.toml.orig | 29 + src/vendor/markup5ever/build.rs | 103 + src/vendor/markup5ever/data/entities.json | 2233 +++ src/vendor/markup5ever/data/mod.rs | 25 + src/vendor/markup5ever/interface/mod.rs | 159 + .../markup5ever/interface/tree_builder.rs | 221 + src/vendor/markup5ever/lib.rs | 106 + src/vendor/markup5ever/local_names.txt | 988 ++ src/vendor/markup5ever/rcdom.rs | 372 + src/vendor/markup5ever/serialize.rs | 40 + src/vendor/markup5ever/util/buffer_queue.rs | 162 + src/vendor/markup5ever/util/smallcharset.rs | 59 + src/vendor/matches/.cargo-checksum.json | 1 + src/vendor/matches/.cargo-ok | 0 src/vendor/matches/Cargo.toml | 12 + src/vendor/matches/LICENSE | 25 + src/vendor/matches/lib.rs | 130 + 
src/vendor/mdbook/.cargo-checksum.json | 2 +- src/vendor/mdbook/.gitignore | 15 +- src/vendor/mdbook/.travis.yml | 82 +- src/vendor/mdbook/CONTRIBUTING.md | 4 +- src/vendor/mdbook/Cargo.toml | 134 +- src/vendor/mdbook/Cargo.toml.orig | 57 + src/vendor/mdbook/README.md | 10 +- src/vendor/mdbook/appveyor.yml | 129 +- src/vendor/mdbook/build.rs | 94 +- src/vendor/mdbook/ci/before_deploy.sh | 64 +- src/vendor/mdbook/{ => ci}/deploy.sh | 84 +- src/vendor/mdbook/ci/install.sh | 58 - src/vendor/mdbook/ci/script.sh | 45 - src/vendor/mdbook/src/bin/build.rs | 42 + src/vendor/mdbook/src/bin/init.rs | 77 + src/vendor/mdbook/src/bin/mdbook.rs | 365 +- src/vendor/mdbook/src/bin/serve.rs | 115 + src/vendor/mdbook/src/bin/test.rs | 22 + src/vendor/mdbook/src/bin/watch.rs | 113 + src/vendor/mdbook/src/book/bookconfig.rs | 230 - src/vendor/mdbook/src/book/bookconfig_test.rs | 371 - src/vendor/mdbook/src/book/bookitem.rs | 3 +- src/vendor/mdbook/src/book/mod.rs | 345 +- src/vendor/mdbook/src/config/bookconfig.rs | 229 + src/vendor/mdbook/src/config/htmlconfig.rs | 173 + src/vendor/mdbook/src/config/jsonconfig.rs | 42 + src/vendor/mdbook/src/config/mod.rs | 11 + src/vendor/mdbook/src/config/playpenconfig.rs | 68 + src/vendor/mdbook/src/config/tomlconfig.rs | 61 + src/vendor/mdbook/src/lib.rs | 67 +- src/vendor/mdbook/src/preprocess/links.rs | 240 + src/vendor/mdbook/src/preprocess/mod.rs | 1 + .../renderer/html_handlebars/hbs_renderer.rs | 688 +- .../renderer/html_handlebars/helpers/mod.rs | 5 +- .../html_handlebars/helpers/navigation.rs | 178 +- .../html_handlebars/helpers/playpen.rs | 200 - .../renderer/html_handlebars/helpers/toc.rs | 65 +- src/vendor/mdbook/src/renderer/mod.rs | 18 +- src/vendor/mdbook/src/theme/ayu-highlight.css | 71 + src/vendor/mdbook/src/theme/book.css | 266 +- src/vendor/mdbook/src/theme/book.js | 262 +- src/vendor/mdbook/src/theme/index.hbs | 62 +- src/vendor/mdbook/src/theme/mod.rs | 176 +- src/vendor/mdbook/src/theme/stylus/book.styl | 10 - .../mdbook/src/theme/stylus/general.styl | 40 - src/vendor/mdbook/src/theme/stylus/menu.styl | 39 - .../mdbook/src/theme/stylus/nav-icons.styl | 23 - src/vendor/mdbook/src/theme/stylus/page.styl | 55 - src/vendor/mdbook/src/theme/stylus/print.styl | 52 - .../mdbook/src/theme/stylus/sidebar.styl | 64 - .../mdbook/src/theme/stylus/theme-popup.styl | 56 - .../mdbook/src/theme/stylus/themes/base.styl | 128 - .../mdbook/src/theme/stylus/themes/coal.styl | 28 - .../mdbook/src/theme/stylus/themes/index.styl | 4 - .../mdbook/src/theme/stylus/themes/light.styl | 28 - .../mdbook/src/theme/stylus/themes/navy.styl | 28 - .../mdbook/src/theme/stylus/themes/rust.styl | 28 - .../mdbook/src/theme/stylus/variables.styl | 1 - src/vendor/mdbook/src/utils/fs.rs | 46 +- src/vendor/mdbook/src/utils/mod.rs | 228 +- src/vendor/mdbook/tests/config.rs | 46 + src/vendor/mdbook/tests/dummy/book/SUMMARY.md | 9 + .../mdbook/tests/dummy/book/conclusion.md | 1 + .../mdbook/tests/dummy/book/first/index.md | 5 + .../mdbook/tests/dummy/book/first/nested.md | 9 + src/vendor/mdbook/tests/dummy/book/intro.md | 3 + src/vendor/mdbook/tests/dummy/book/second.md | 1 + src/vendor/mdbook/tests/dummy/mod.rs | 90 + src/vendor/mdbook/tests/helpers/mod.rs | 24 + src/vendor/mdbook/tests/init.rs | 47 + src/vendor/mdbook/tests/jsonconfig.rs | 87 + src/vendor/mdbook/tests/rendered_output.rs | 127 + src/vendor/mdbook/tests/testing.rs | 28 + src/vendor/mdbook/tests/tomlconfig.rs | 200 + src/vendor/miniz-sys/.cargo-checksum.json | 2 +- src/vendor/miniz-sys/Cargo.toml | 35 +- 
src/vendor/miniz-sys/Cargo.toml.orig | 24 + src/vendor/miniz-sys/build.rs | 4 +- src/vendor/miniz-sys/miniz.c | 9972 +++++------ src/vendor/open/.cargo-checksum.json | 2 +- src/vendor/open/Cargo.toml | 23 +- src/vendor/open/Cargo.toml.orig | 15 + src/vendor/open/src/lib.rs | 31 +- src/vendor/phf/.cargo-checksum.json | 1 + src/vendor/phf/.cargo-ok | 0 src/vendor/phf/Cargo.toml | 20 + src/vendor/phf/src/lib.rs | 53 + src/vendor/phf/src/map.rs | 198 + src/vendor/phf/src/ordered_map.rs | 228 + src/vendor/phf/src/ordered_set.rs | 137 + src/vendor/phf/src/set.rs | 115 + src/vendor/phf_codegen/.cargo-checksum.json | 1 + src/vendor/phf_codegen/.cargo-ok | 0 src/vendor/phf_codegen/Cargo.toml | 12 + src/vendor/phf_codegen/src/lib.rs | 341 + src/vendor/phf_generator/.cargo-checksum.json | 1 + src/vendor/phf_generator/.cargo-ok | 0 src/vendor/phf_generator/Cargo.toml | 12 + src/vendor/phf_generator/src/lib.rs | 123 + src/vendor/phf_shared/.cargo-checksum.json | 1 + src/vendor/phf_shared/.cargo-ok | 0 src/vendor/phf_shared/Cargo.toml | 20 + src/vendor/phf_shared/src/lib.rs | 200 + src/vendor/pkg-config/.cargo-checksum.json | 1 + src/vendor/pkg-config/.cargo-ok | 0 src/vendor/pkg-config/.gitignore | 2 + src/vendor/pkg-config/.travis.yml | 23 + src/vendor/pkg-config/Cargo.toml | 16 + src/vendor/pkg-config/LICENSE-APACHE | 201 + src/vendor/pkg-config/LICENSE-MIT | 25 + src/vendor/pkg-config/README.md | 44 + src/vendor/pkg-config/src/lib.rs | 510 + src/vendor/pkg-config/tests/foo.pc | 16 + src/vendor/pkg-config/tests/framework.pc | 16 + src/vendor/pkg-config/tests/test.rs | 99 + .../precomputed-hash/.cargo-checksum.json | 1 + src/vendor/precomputed-hash/.cargo-ok | 0 src/vendor/precomputed-hash/.gitignore | 3 + src/vendor/precomputed-hash/Cargo.toml | 9 + src/vendor/precomputed-hash/LICENSE | 21 + src/vendor/precomputed-hash/src/lib.rs | 9 + .../.cargo-checksum.json | 1 + src/vendor/procedural-masquerade/.cargo-ok | 0 src/vendor/procedural-masquerade/Cargo.toml | 12 + src/vendor/procedural-masquerade/lib.rs | 252 + .../.cargo-checksum.json | 1 + src/vendor/pulldown-cmark-0.0.14/.cargo-ok | 0 src/vendor/pulldown-cmark-0.0.14/.gitignore | 3 + .../pulldown-cmark-0.0.14/CONTRIBUTING.md | 24 + src/vendor/pulldown-cmark-0.0.14/Cargo.toml | 23 + src/vendor/pulldown-cmark-0.0.14/LICENSE | 21 + src/vendor/pulldown-cmark-0.0.14/README.md | 124 + src/vendor/pulldown-cmark-0.0.14/build.rs | 149 + .../pulldown-cmark-0.0.14/specs/footnotes.txt | 153 + .../pulldown-cmark-0.0.14/specs/table.txt | 214 + .../pulldown-cmark-0.0.14/src/entities.rs | 4284 +++++ .../pulldown-cmark-0.0.14/src/escape.rs | 120 + src/vendor/pulldown-cmark-0.0.14/src/html.rs | 257 + src/vendor/pulldown-cmark-0.0.14/src/lib.rs | 42 + src/vendor/pulldown-cmark-0.0.14/src/main.rs | 258 + src/vendor/pulldown-cmark-0.0.14/src/parse.rs | 1721 ++ .../pulldown-cmark-0.0.14/src/passes.rs | 102 + .../pulldown-cmark-0.0.14/src/puncttable.rs | 348 + .../pulldown-cmark-0.0.14/src/scanners.rs | 695 + src/vendor/pulldown-cmark-0.0.14/src/utils.rs | 53 + .../pulldown-cmark-0.0.14/tests/footnotes.rs | 247 + .../pulldown-cmark-0.0.14/tests/html.rs | 208 + .../pulldown-cmark-0.0.14/tests/spec.rs | 14280 ++++++++++++++++ .../pulldown-cmark-0.0.14/tests/table.rs | 355 + .../third_party/CommonMark/LICENSE | 105 + .../third_party/CommonMark/README.google | 12 + .../third_party/CommonMark/spec.txt | 9353 ++++++++++ .../tools/mk_entities.py | 72 + .../tools/mk_puncttable.py | 130 + .../pulldown-cmark/.cargo-checksum.json | 2 +- src/vendor/pulldown-cmark/Cargo.toml | 33 +- 
src/vendor/pulldown-cmark/Cargo.toml.orig | 23 + src/vendor/pulldown-cmark/build.rs | 14 +- src/vendor/pulldown-cmark/specs/footnotes.txt | 8 +- src/vendor/pulldown-cmark/src/html.rs | 27 + src/vendor/pulldown-cmark/src/lib.rs | 2 +- src/vendor/pulldown-cmark/src/main.rs | 36 +- src/vendor/pulldown-cmark/src/parse.rs | 60 +- src/vendor/pulldown-cmark/src/scanners.rs | 11 +- src/vendor/pulldown-cmark/tests/errors.rs | 38 + src/vendor/pulldown-cmark/tests/footnotes.rs | 29 +- src/vendor/pulldown-cmark/tests/spec.rs | 2254 ++- src/vendor/pulldown-cmark/tests/table.rs | 39 +- .../third_party/CommonMark/spec.txt | 27 +- src/vendor/quick-error/.cargo-checksum.json | 2 +- src/vendor/quick-error/.travis.yml | 53 +- src/vendor/quick-error/Cargo.toml | 26 +- src/vendor/quick-error/Cargo.toml.orig | 13 + src/vendor/quick-error/bulk.yaml | 8 + src/vendor/quick-error/src/lib.rs | 8 + src/vendor/quick-error/vagga.yaml | 11 +- src/vendor/rand/.cargo-checksum.json | 2 +- src/vendor/rand/.travis.yml | 26 +- src/vendor/rand/Cargo.toml | 42 +- src/vendor/rand/Cargo.toml.orig | 30 + src/vendor/rand/README.md | 38 +- src/vendor/rand/appveyor.yml | 32 +- src/vendor/rand/benches/bench.rs | 11 +- src/vendor/rand/src/chacha.rs | 2 +- .../rand/src/distributions/exponential.rs | 4 +- src/vendor/rand/src/distributions/gamma.rs | 26 +- src/vendor/rand/src/distributions/mod.rs | 5 +- src/vendor/rand/src/distributions/normal.rs | 6 +- src/vendor/rand/src/distributions/range.rs | 4 +- src/vendor/rand/src/isaac.rs | 12 + src/vendor/rand/src/lib.rs | 58 +- src/vendor/rand/src/os.rs | 124 +- src/vendor/rand/src/rand_impls.rs | 17 + src/vendor/rand/src/read.rs | 1 + src/vendor/rand/src/reseeding.rs | 3 +- src/vendor/redox_syscall/.cargo-checksum.json | 1 + src/vendor/redox_syscall/.cargo-ok | 0 src/vendor/redox_syscall/.gitignore | 2 + src/vendor/redox_syscall/Cargo.toml | 23 + src/vendor/redox_syscall/Cargo.toml.orig | 11 + src/vendor/redox_syscall/LICENSE | 22 + src/vendor/redox_syscall/README.md | 7 + src/vendor/redox_syscall/src/arch/arm.rs | 73 + src/vendor/redox_syscall/src/arch/x86.rs | 73 + src/vendor/redox_syscall/src/arch/x86_64.rs | 74 + src/vendor/redox_syscall/src/call.rs | 334 + src/vendor/redox_syscall/src/data.rs | 167 + src/vendor/redox_syscall/src/error.rs | 315 + src/vendor/redox_syscall/src/flag.rs | 106 + src/vendor/redox_syscall/src/io/dma.rs | 76 + src/vendor/redox_syscall/src/io/io.rs | 67 + src/vendor/redox_syscall/src/io/mmio.rs | 31 + src/vendor/redox_syscall/src/io/mod.rs | 11 + src/vendor/redox_syscall/src/io/pio.rs | 89 + src/vendor/redox_syscall/src/lib.rs | 47 + src/vendor/redox_syscall/src/number.rs | 70 + src/vendor/redox_syscall/src/scheme.rs | 266 + src/vendor/selectors/.cargo-checksum.json | 1 + src/vendor/selectors/.cargo-ok | 0 src/vendor/selectors/Cargo.toml | 37 + src/vendor/selectors/README.md | 25 + src/vendor/selectors/arcslice.rs | 326 + src/vendor/selectors/attr.rs | 190 + src/vendor/selectors/bloom.rs | 310 + src/vendor/selectors/build.rs | 75 + src/vendor/selectors/gecko_like_types.rs | 26 + src/vendor/selectors/lib.rs | 25 + src/vendor/selectors/matching.rs | 596 + src/vendor/selectors/parser.rs | 2165 +++ src/vendor/selectors/size_of_tests.rs | 70 + src/vendor/selectors/tree.rs | 77 + src/vendor/selectors/visitor.rs | 47 + src/vendor/serde/.cargo-checksum.json | 2 +- src/vendor/serde/Cargo.toml | 14 +- src/vendor/serde/Cargo.toml.orig | 2 +- src/vendor/serde/src/de/impls.rs | 29 +- src/vendor/serde/src/de/mod.rs | 4 +- src/vendor/serde/src/lib.rs | 40 +- 
src/vendor/serde/src/private/de.rs | 42 +- src/vendor/serde/src/ser/impls.rs | 4 +- src/vendor/serde/src/ser/mod.rs | 14 + src/vendor/serde_derive/.cargo-checksum.json | 2 +- src/vendor/serde_derive/Cargo.toml | 10 +- src/vendor/serde_derive/Cargo.toml.orig | 4 +- src/vendor/serde_derive/src/bound.rs | 30 +- src/vendor/serde_derive/src/de.rs | 355 +- src/vendor/serde_derive/src/lib.rs | 2 +- src/vendor/serde_derive/src/ser.rs | 212 +- .../.cargo-checksum.json | 2 +- src/vendor/serde_derive_internals/Cargo.toml | 36 +- .../serde_derive_internals/Cargo.toml.orig | 19 + src/vendor/serde_derive_internals/src/attr.rs | 40 + src/vendor/serde_derive_internals/src/case.rs | 27 +- .../serde_derive_internals/src/check.rs | 56 + src/vendor/serde_derive_internals/src/lib.rs | 2 +- src/vendor/serde_json/.cargo-checksum.json | 2 +- src/vendor/serde_json/Cargo.toml | 63 +- src/vendor/serde_json/Cargo.toml.orig | 39 + src/vendor/serde_json/README.md | 10 + src/vendor/serde_json/src/de.rs | 219 +- src/vendor/serde_json/src/error.rs | 8 + src/vendor/serde_json/src/lib.rs | 5 +- src/vendor/serde_json/src/read.rs | 108 +- src/vendor/serde_json/src/ser.rs | 27 +- src/vendor/serde_json/src/value/partial_eq.rs | 1 + src/vendor/siphasher/.cargo-checksum.json | 1 + src/vendor/siphasher/.cargo-ok | 0 src/vendor/siphasher/.gitignore | 7 + src/vendor/siphasher/.travis.yml | 10 + src/vendor/siphasher/COPYING | 6 + src/vendor/siphasher/Cargo.toml | 23 + src/vendor/siphasher/README.md | 37 + src/vendor/siphasher/src/lib.rs | 15 + src/vendor/siphasher/src/sip.rs | 407 + src/vendor/siphasher/src/sip128.rs | 478 + src/vendor/siphasher/src/tests.rs | 323 + src/vendor/siphasher/src/tests128.rs | 111 + src/vendor/smallvec/.cargo-checksum.json | 1 + src/vendor/smallvec/.cargo-ok | 0 src/vendor/smallvec/.gitignore | 2 + src/vendor/smallvec/.travis.yml | 13 + src/vendor/smallvec/Cargo.toml | 20 + src/vendor/smallvec/README.md | 6 + src/vendor/smallvec/benches/bench.rs | 111 + src/vendor/smallvec/lib.rs | 1424 ++ src/vendor/string_cache/.cargo-checksum.json | 1 + src/vendor/string_cache/.cargo-ok | 0 src/vendor/string_cache/.gitignore | 4 + src/vendor/string_cache/.travis.yml | 19 + src/vendor/string_cache/Cargo.toml | 53 + src/vendor/string_cache/Cargo.toml.orig | 43 + src/vendor/string_cache/LICENSE-APACHE | 201 + src/vendor/string_cache/LICENSE-MIT | 25 + src/vendor/string_cache/README.md | 78 + src/vendor/string_cache/build.rs | 13 + src/vendor/string_cache/src/atom.rs | 913 + src/vendor/string_cache/src/bench.rs | 216 + src/vendor/string_cache/src/event.rs | 28 + src/vendor/string_cache/src/lib.rs | 38 + .../string_cache_codegen/.cargo-checksum.json | 1 + src/vendor/string_cache_codegen/.cargo-ok | 0 src/vendor/string_cache_codegen/Cargo.toml | 19 + src/vendor/string_cache_codegen/lib.rs | 153 + .../string_cache_shared/.cargo-checksum.json | 1 + src/vendor/string_cache_shared/.cargo-ok | 0 src/vendor/string_cache_shared/Cargo.toml | 11 + src/vendor/string_cache_shared/lib.rs | 23 + src/vendor/tempdir/.cargo-checksum.json | 1 + src/vendor/tempdir/.cargo-ok | 0 src/vendor/tempdir/.gitignore | 2 + src/vendor/tempdir/.travis.yml | 24 + src/vendor/tempdir/Cargo.toml | 17 + src/vendor/tempdir/LICENSE-APACHE | 201 + src/vendor/tempdir/LICENSE-MIT | 25 + src/vendor/tempdir/README.md | 53 + src/vendor/tempdir/src/lib.rs | 330 + src/vendor/tempdir/tests/smoke.rs | 229 + src/vendor/tendril/.cargo-checksum.json | 1 + src/vendor/tendril/.cargo-ok | 0 src/vendor/tendril/.gitignore | 2 + src/vendor/tendril/.travis.yml | 13 + 
src/vendor/tendril/Cargo.toml | 37 + src/vendor/tendril/Cargo.toml.orig | 22 + src/vendor/tendril/LICENSE-APACHE | 201 + src/vendor/tendril/LICENSE-MIT | 25 + src/vendor/tendril/README.md | 115 + src/vendor/tendril/examples/fuzz.rs | 152 + src/vendor/tendril/src/bench.rs | 162 + src/vendor/tendril/src/buf32.rs | 117 + src/vendor/tendril/src/fmt.rs | 516 + src/vendor/tendril/src/lib.rs | 32 + src/vendor/tendril/src/stream.rs | 522 + src/vendor/tendril/src/tendril.rs | 2254 +++ src/vendor/tendril/src/utf8_decode.rs | 79 + src/vendor/tendril/src/util.rs | 57 + src/vendor/textwrap/.appveyor.yml | 21 - src/vendor/textwrap/.cargo-checksum.json | 2 +- src/vendor/textwrap/Cargo.toml | 17 +- src/vendor/textwrap/Cargo.toml.orig | 5 +- src/vendor/textwrap/README.md | 24 +- src/vendor/textwrap/benches/linear.rs | 18 +- src/vendor/textwrap/examples/layout.rs | 6 +- src/vendor/textwrap/examples/termwidth.rs | 8 +- src/vendor/textwrap/src/lib.rs | 562 +- .../unicode-segmentation/.cargo-checksum.json | 1 - src/vendor/unicode-segmentation/.gitignore | 5 - src/vendor/unicode-segmentation/.travis.yml | 21 - src/vendor/unicode-segmentation/COPYRIGHT | 7 - src/vendor/unicode-segmentation/Cargo.toml | 25 - src/vendor/unicode-segmentation/README.md | 62 - .../unicode-segmentation/scripts/unicode.py | 353 - .../scripts/unicode_gen_breaktests.py | 197 - .../unicode-segmentation/src/grapheme.rs | 661 - src/vendor/unicode-segmentation/src/lib.rs | 204 - src/vendor/unicode-segmentation/src/tables.rs | 1325 -- src/vendor/unicode-segmentation/src/test.rs | 176 - .../unicode-segmentation/src/testdata.rs | 2058 --- src/vendor/unicode-segmentation/src/word.rs | 649 - .../unreachable-0.1.1/.cargo-checksum.json | 1 + src/vendor/unreachable-0.1.1/.cargo-ok | 0 src/vendor/unreachable-0.1.1/.gitignore | 19 + src/vendor/unreachable-0.1.1/.travis.yml | 19 + src/vendor/unreachable-0.1.1/Cargo.toml | 13 + src/vendor/unreachable-0.1.1/README.md | 24 + src/vendor/unreachable-0.1.1/src/lib.rs | 77 + src/vendor/utf-8/.cargo-checksum.json | 1 + src/vendor/utf-8/.cargo-ok | 0 src/vendor/utf-8/.gitignore | 2 + src/vendor/utf-8/Cargo.toml | 21 + src/vendor/utf-8/README.md | 5 + src/vendor/utf-8/benches/from_utf8_lossy.rs | 30 + src/vendor/utf-8/src/lib.rs | 137 + src/vendor/utf-8/src/lossy.rs | 92 + src/vendor/utf-8/src/polyfill.rs | 106 + src/vendor/utf-8/tests/decoder.rs | 34 + src/vendor/utf-8/tests/shared/data.rs | 16 + .../tests/shared/string_from_utf8_lossy.rs | 29 + src/vendor/utf-8/tests/str_from_utf8.rs | 64 + .../utf-8/tests/string_from_utf8_lossy.rs | 14 + version | 2 +- 3140 files changed, 260758 insertions(+), 54532 deletions(-) create mode 100644 config.toml.example create mode 100644 git-commit-hash rename src/bootstrap/{cc.rs => cc_detect.rs} (79%) create mode 100755 src/bootstrap/configure.py create mode 100644 src/bootstrap/toolstate.rs create mode 100644 src/ci/docker/cross2/Dockerfile create mode 100755 src/ci/docker/cross2/build-fuchsia-toolchain.sh create mode 100755 src/ci/docker/cross2/build-solaris-toolchain.sh rename src/ci/docker/{dist-fuchsia => cross2}/shared.sh (100%) create mode 100644 src/ci/docker/disabled/dist-x86_64-haiku/Dockerfile create mode 100755 src/ci/docker/disabled/dist-x86_64-haiku/build-toolchain.sh create mode 100755 src/ci/docker/disabled/dist-x86_64-haiku/fetch-packages.sh create mode 100755 src/ci/docker/disabled/dist-x86_64-haiku/llvm-config.sh delete mode 100644 src/ci/docker/dist-fuchsia/Dockerfile delete mode 100755 src/ci/docker/dist-fuchsia/build-toolchain.sh delete mode 100644 
src/ci/docker/dist-fuchsia/compiler-rt-dso-handle.patch rename {man => src/doc/man}/rustc.1 (100%) rename {man => src/doc/man}/rustdoc.1 (100%) create mode 100644 src/doc/reference/src/dynamically-sized-types.md create mode 100644 src/doc/reference/src/expressions/array-expr.md create mode 100644 src/doc/reference/src/expressions/block-expr.md create mode 100644 src/doc/reference/src/expressions/call-expr.md create mode 100644 src/doc/reference/src/expressions/closure-expr.md create mode 100644 src/doc/reference/src/expressions/enum-variant-expr.md create mode 100644 src/doc/reference/src/expressions/field-expr.md create mode 100644 src/doc/reference/src/expressions/if-expr.md create mode 100644 src/doc/reference/src/expressions/literal-expr.md create mode 100644 src/doc/reference/src/expressions/loop-expr.md create mode 100644 src/doc/reference/src/expressions/match-expr.md create mode 100644 src/doc/reference/src/expressions/method-call-expr.md create mode 100644 src/doc/reference/src/expressions/operator-expr.md create mode 100644 src/doc/reference/src/expressions/path-expr.md create mode 100644 src/doc/reference/src/expressions/range-expr.md create mode 100644 src/doc/reference/src/expressions/return-expr.md create mode 100644 src/doc/reference/src/expressions/struct-expr.md create mode 100644 src/doc/reference/src/expressions/tuple-expr.md create mode 100644 src/doc/reference/src/interior-mutability.md create mode 100644 src/doc/reference/src/items/constant-items.md create mode 100644 src/doc/reference/src/items/enumerations.md create mode 100644 src/doc/reference/src/items/extern-crates.md create mode 100644 src/doc/reference/src/items/external-blocks.md create mode 100644 src/doc/reference/src/items/functions.md create mode 100644 src/doc/reference/src/items/implementations.md create mode 100644 src/doc/reference/src/items/modules.md create mode 100644 src/doc/reference/src/items/static-items.md create mode 100644 src/doc/reference/src/items/structs.md create mode 100644 src/doc/reference/src/items/traits.md create mode 100644 src/doc/reference/src/items/type-aliases.md create mode 100644 src/doc/reference/src/items/unions.md create mode 100644 src/doc/reference/src/items/use-declarations.md create mode 100644 src/doc/reference/theme/index.hbs create mode 100644 src/doc/unstable-book/src/language-features/doc-masked.md create mode 100644 src/doc/unstable-book/src/language-features/fn-must-use.md create mode 100644 src/doc/unstable-book/src/language-features/generators.md create mode 100644 src/doc/unstable-book/src/language-features/match-beginning-vert.md create mode 100644 src/doc/unstable-book/src/language-features/match_default_bindings.md create mode 100644 src/doc/unstable-book/src/language-features/trace-macros.md create mode 100644 src/doc/unstable-book/src/library-features/entry-and-modify.md create mode 100644 src/doc/unstable-book/src/library-features/entry-or-default.md create mode 100644 src/libbacktrace/macho.c create mode 100644 src/libcompiler_builtins/src/x86.rs create mode 100644 src/libcore/ops/generator.rs delete mode 100644 src/liblibc/ci/docker/aarch64-unknown-linux-musl/Dockerfile rename src/liblibc/src/{redox/mod.rs => redox.rs} (55%) delete mode 100644 src/liblibc/src/redox/net.rs create mode 100644 src/libproc_macro/diagnostic.rs rename src/{libcore/benches/mem.rs => librustc/benches/dispatch.rs} (65%) create mode 100644 src/librustc/benches/lib.rs create mode 100644 src/librustc/benches/pattern.rs delete mode 100644 src/librustc/dep_graph/edges.rs create 
mode 100644 src/librustc/dep_graph/prev.rs create mode 100644 src/librustc/dep_graph/serialized.rs create mode 100644 src/librustc/hir/README.md create mode 100644 src/librustc/hir/map/README.md create mode 100644 src/librustc/ich/impls_misc.rs delete mode 100644 src/librustc/infer/error_reporting/anon_anon_conflict.rs create mode 100644 src/librustc/infer/error_reporting/different_lifetimes.rs delete mode 100644 src/librustc/middle/effect.rs create mode 100644 src/librustc/middle/exported_symbols.rs create mode 100644 src/librustc/middle/trans.rs create mode 100644 src/librustc/traits/on_unimplemented.rs create mode 100644 src/librustc/ty/README.md delete mode 100644 src/librustc/ty/maps.rs create mode 100644 src/librustc/ty/maps/README.md create mode 100644 src/librustc/ty/maps/config.rs create mode 100644 src/librustc/ty/maps/keys.rs create mode 100644 src/librustc/ty/maps/mod.rs create mode 100644 src/librustc/ty/maps/plumbing.rs create mode 100644 src/librustc/ty/maps/values.rs create mode 100644 src/librustc_back/README.md create mode 100644 src/librustc_back/target/aarch64_unknown_linux_musl.rs delete mode 100644 src/librustc_back/target/le32_unknown_nacl.rs delete mode 100644 src/librustc_bitflags/Cargo.toml delete mode 100644 src/librustc_bitflags/lib.rs create mode 100644 src/librustc_cratesio_shim/Cargo.toml create mode 100644 src/librustc_cratesio_shim/src/lib.rs create mode 100644 src/librustc_driver/README.md delete mode 100644 src/librustc_incremental/calculate_svh/mod.rs delete mode 100644 src/librustc_incremental/persist/hash.rs delete mode 100644 src/librustc_incremental/persist/preds/compress/README.md delete mode 100644 src/librustc_incremental/persist/preds/compress/classify/mod.rs delete mode 100644 src/librustc_incremental/persist/preds/compress/classify/test.rs delete mode 100644 src/librustc_incremental/persist/preds/compress/construct.rs delete mode 100644 src/librustc_incremental/persist/preds/compress/dag_id.rs delete mode 100644 src/librustc_incremental/persist/preds/compress/mod.rs delete mode 100644 src/librustc_incremental/persist/preds/compress/test.rs delete mode 100644 src/librustc_incremental/persist/preds/compress/test_macro.rs delete mode 100644 src/librustc_incremental/persist/preds/mod.rs create mode 100644 src/librustc_metadata/link_args.rs create mode 100644 src/librustc_metadata/native_libs.rs create mode 100644 src/librustc_mir/dataflow/impls/storage_liveness.rs create mode 100644 src/librustc_mir/transform/check_unsafety.rs create mode 100644 src/librustc_mir/transform/generator.rs rename src/librustc_mir/transform/{nll.rs => nll/mod.rs} (91%) create mode 100644 src/librustc_mir/util/liveness.rs create mode 100644 src/librustc_trans/back/bytecode.rs create mode 100644 src/librustc_trans/back/command.rs create mode 100644 src/librustc_trans_utils/trans_crate.rs create mode 100644 src/librustc_typeck/README.md create mode 100644 src/librustc_typeck/check/generator_interior.rs delete mode 100644 src/libstd/os/nacl/fs.rs delete mode 100644 src/libstd/os/nacl/raw.rs create mode 100644 src/libstd/sys/unix/l4re.rs rename src/libstd/sys/unix/process/{magenta.rs => zircon.rs} (54%) create mode 100644 src/libsyntax/README.md create mode 100644 src/libsyntax_pos/span_encoding.rs create mode 100644 src/test/codegen-units/partitioning/local-inlining-but-not-all.rs create mode 100644 src/test/codegen/abi-main-signature-16bit-c-int.rs create mode 100644 src/test/codegen/abi-main-signature-32bit-c-int.rs create mode 100644 
src/test/codegen/issue-34947-pow-i32.rs create mode 100644 src/test/codegen/remap_path_prefix/aux_mod.rs create mode 100644 src/test/compile-fail/E0594.rs create mode 100644 src/test/compile-fail/E0637.rs delete mode 100644 src/test/compile-fail/closure-expected-type/issue-38714.rs create mode 100644 src/test/compile-fail/coherence-overlap-downstream-inherent.rs create mode 100644 src/test/compile-fail/coherence-overlap-downstream.rs create mode 100644 src/test/compile-fail/coherence-overlap-issue-23516-inherent.rs create mode 100644 src/test/compile-fail/coherence-overlap-upstream-inherent.rs create mode 100644 src/test/compile-fail/coherence-overlap-upstream.rs create mode 100644 src/test/compile-fail/const-block-non-item-statement-3.rs create mode 100644 src/test/compile-fail/const-fn-feature-flags.rs create mode 100644 src/test/compile-fail/feature-gate-clone-closures.rs create mode 100644 src/test/compile-fail/feature-gate-copy-closures.rs create mode 100644 src/test/compile-fail/feature-gate-doc_masked.rs create mode 100644 src/test/compile-fail/feature-gate-dotdoteq_in_patterns.rs create mode 100644 src/test/compile-fail/feature-gate-generators.rs create mode 100644 src/test/compile-fail/feature-gate-match_beginning_vert.rs create mode 100644 src/test/compile-fail/feature-gate-match_default_bindings.rs create mode 100644 src/test/compile-fail/feature-gate-repr128.rs rename src/test/{run-pass/const-fn-cross-crate.rs => compile-fail/feature-gate-rustc_const_unstable.rs} (68%) create mode 100644 src/test/compile-fail/feature-gate-underscore-lifetimes.rs create mode 100644 src/test/compile-fail/hygiene/assoc_item_ctxt.rs create mode 100644 src/test/compile-fail/hygiene/assoc_ty_bindings.rs create mode 100644 src/test/compile-fail/issue-22706.rs create mode 100644 src/test/compile-fail/issue-22933-3.rs create mode 100644 src/test/compile-fail/issue-30355.rs create mode 100644 src/test/compile-fail/issue-31924-non-snake-ffi.rs create mode 100644 src/test/compile-fail/issue-33241.rs create mode 100644 src/test/compile-fail/issue-37887.rs create mode 100644 src/test/compile-fail/issue-41229-ref-str.rs create mode 100644 src/test/compile-fail/issue-44021.rs create mode 100644 src/test/compile-fail/issue-44373.rs create mode 100644 src/test/compile-fail/issue-44578.rs create mode 100644 src/test/compile-fail/issue-45087-unreachable-unsafe.rs create mode 100644 src/test/compile-fail/issue-45729-unsafe-in-generator.rs create mode 100644 src/test/compile-fail/issue-45801.rs rename src/test/compile-fail/{lifetime-underscore.rs => label-underscore.rs} (83%) create mode 100644 src/test/compile-fail/not-clone-closure.rs create mode 100644 src/test/compile-fail/not-copy-closure.rs create mode 100644 src/test/compile-fail/static-drop-scope.rs create mode 100644 src/test/compile-fail/synthetic-param.rs create mode 100644 src/test/compile-fail/type-path-err-node-types.rs create mode 100644 src/test/compile-fail/underscore-lifetime-binders.rs create mode 100644 src/test/compile-fail/underscore-lifetime-elison-mismatch.rs create mode 100644 src/test/compile-fail/unsafe-move-val-init.rs create mode 100644 src/test/mir-opt/end_region_cyclic.rs create mode 100644 src/test/mir-opt/end_region_destruction_extents_1.rs create mode 100644 src/test/parse-fail/issue-43692.rs create mode 100644 src/test/parse-fail/struct-literal-restrictions-in-lamda.rs create mode 100644 src/test/run-fail/issue-44216-add-instant.rs create mode 100644 src/test/run-fail/issue-44216-add-system-time.rs create mode 100644 
src/test/run-fail/issue-44216-sub-instant.rs create mode 100644 src/test/run-fail/issue-44216-sub-system-time.rs create mode 100644 src/test/run-make/long-linker-command-lines/Makefile create mode 100644 src/test/run-make/long-linker-command-lines/foo.rs create mode 100644 src/test/run-make/min-global-align/Makefile create mode 100644 src/test/run-make/min-global-align/min_global_align.rs create mode 100644 src/test/run-make/sysroot-crates-are-unstable/test.py create mode 100644 src/test/run-pass-fulldeps/pprust-expr-roundtrip.rs rename src/test/{ui/mismatched_types/E0281.rs => run-pass-fulldeps/proc-macro/attr-cfg.rs} (55%) create mode 100644 src/test/run-pass-fulldeps/proc-macro/attr-on-trait.rs create mode 100644 src/test/run-pass-fulldeps/proc-macro/auxiliary/attr-cfg.rs create mode 100644 src/test/run-pass-fulldeps/proc-macro/auxiliary/attr-on-trait.rs create mode 100644 src/test/run-pass-fulldeps/proc-macro/auxiliary/derive-attr-cfg.rs create mode 100644 src/test/run-pass-fulldeps/proc-macro/auxiliary/span-api-tests.rs create mode 100644 src/test/run-pass-fulldeps/proc-macro/auxiliary/span-test-macros.rs create mode 100644 src/test/run-pass-fulldeps/proc-macro/derive-attr-cfg.rs create mode 100644 src/test/run-pass-fulldeps/proc-macro/span-api-tests.rs create mode 100644 src/test/run-pass/auxiliary/thin-lto-inlines-aux.rs create mode 100644 src/test/run-pass/char_unicode.rs create mode 100644 src/test/run-pass/clone-closure.rs rename src/test/{compile-fail/unboxed-closures-infer-explicit-call-too-early.rs => run-pass/closure-returning-closure.rs} (79%) create mode 100644 src/test/run-pass/const-fn-feature-flags.rs rename src/test/{compile-fail => run-pass}/const-fn-stability-calls.rs (75%) create mode 100644 src/test/run-pass/copy-closure.rs create mode 100644 src/test/run-pass/generator/auxiliary/xcrate-reachable.rs create mode 100644 src/test/run-pass/generator/auxiliary/xcrate.rs create mode 100644 src/test/run-pass/generator/borrow-in-tail-expr.rs create mode 100644 src/test/run-pass/generator/conditional-drop.rs create mode 100644 src/test/run-pass/generator/control-flow.rs create mode 100644 src/test/run-pass/generator/drop-env.rs create mode 100644 src/test/run-pass/generator/iterator-count.rs create mode 100644 src/test/run-pass/generator/match-bindings.rs create mode 100644 src/test/run-pass/generator/panic-drops.rs create mode 100644 src/test/run-pass/generator/panic-safe.rs create mode 100644 src/test/run-pass/generator/resume-after-return.rs create mode 100644 src/test/run-pass/generator/smoke.rs create mode 100644 src/test/run-pass/generator/xcrate-reachable.rs create mode 100644 src/test/run-pass/generator/xcrate.rs create mode 100644 src/test/run-pass/generator/yield-in-args-rev.rs create mode 100644 src/test/run-pass/generator/yield-in-box.rs create mode 100644 src/test/run-pass/generator/yield-subtype.rs rename src/test/{ui/lint/fn_must_use.rs => run-pass/hygiene/specialization.rs} (57%) create mode 100644 src/test/run-pass/inc-range-pat.rs rename src/test/{compile-fail => run-pass}/issue-17718-const-destructors.rs (90%) create mode 100644 src/test/run-pass/issue-33185.rs create mode 100644 src/test/run-pass/issue-35376.rs create mode 100644 src/test/run-pass/issue-43692.rs create mode 100644 src/test/run-pass/issue-44373.rs create mode 100644 src/test/run-pass/issue-44730.rs create mode 100644 src/test/run-pass/issue-45731.rs rename src/test/{compile-fail => run-pass}/issue-9243.rs (91%) create mode 100644 src/test/run-pass/lto-many-codegen-units.rs create mode 
100644 src/test/run-pass/method-argument-inference-associated-type.rs create mode 100644 src/test/run-pass/mir-inlining/no-trait-method-issue-40473.rs create mode 100644 src/test/run-pass/op-assign-builtins-by-ref.rs create mode 100644 src/test/run-pass/rfc-2005-default-binding-mode/box.rs create mode 100644 src/test/run-pass/rfc-2005-default-binding-mode/constref.rs create mode 100644 src/test/run-pass/rfc-2005-default-binding-mode/enum.rs create mode 100644 src/test/run-pass/rfc-2005-default-binding-mode/for.rs create mode 100644 src/test/run-pass/rfc-2005-default-binding-mode/general.rs create mode 100644 src/test/run-pass/rfc-2005-default-binding-mode/lit.rs create mode 100644 src/test/run-pass/rfc-2005-default-binding-mode/range.rs create mode 100644 src/test/run-pass/rfc-2005-default-binding-mode/ref-region.rs create mode 100644 src/test/run-pass/rfc-2005-default-binding-mode/slice.rs create mode 100644 src/test/run-pass/rfc-2005-default-binding-mode/struct.rs create mode 100644 src/test/run-pass/rfc-2005-default-binding-mode/tuple-struct.rs create mode 100644 src/test/run-pass/rfc-2005-default-binding-mode/tuple.rs create mode 100644 src/test/run-pass/semistatement-in-lambda.rs create mode 100644 src/test/run-pass/thin-lto-inlines.rs create mode 100644 src/test/run-pass/thin-lto-inlines2.rs create mode 100644 src/test/run-pass/unboxed-closures-infer-explicit-call-early.rs create mode 100644 src/test/run-pass/underscore-lifetimes.rs create mode 100644 src/test/rustdoc/codeblock-title.rs create mode 100644 src/test/rustdoc/const-fn.rs create mode 100644 src/test/rustdoc/fn-pointer-arg-name.rs create mode 100644 src/test/rustdoc/issue-43893.rs create mode 100644 src/test/rustdoc/pub-method.rs create mode 100644 src/test/ui-fulldeps/proc-macro/auxiliary/three-equals.rs create mode 100644 src/test/ui-fulldeps/proc-macro/three-equals.rs create mode 100644 src/test/ui-fulldeps/proc-macro/three-equals.stderr create mode 100644 src/test/ui/codemap_tests/tab_2.rs create mode 100644 src/test/ui/codemap_tests/tab_2.stderr create mode 100644 src/test/ui/codemap_tests/tab_3.rs create mode 100644 src/test/ui/codemap_tests/tab_3.stderr create mode 100644 src/test/ui/const-expr-addr-operator.rs create mode 100644 src/test/ui/const-expr-addr-operator.stderr create mode 100644 src/test/ui/deref-suggestion.rs create mode 100644 src/test/ui/deref-suggestion.stderr create mode 100644 src/test/ui/generator/borrowing.rs create mode 100644 src/test/ui/generator/borrowing.stderr create mode 100644 src/test/ui/generator/no-arguments-on-generators.rs create mode 100644 src/test/ui/generator/no-arguments-on-generators.stderr create mode 100644 src/test/ui/generator/not-send-sync.rs create mode 100644 src/test/ui/generator/not-send-sync.stderr create mode 100644 src/test/ui/generator/ref-escapes-but-not-over-yield.rs create mode 100644 src/test/ui/generator/ref-escapes-but-not-over-yield.stderr create mode 100644 src/test/ui/generator/yield-in-args.rs create mode 100644 src/test/ui/generator/yield-in-args.stderr create mode 100644 src/test/ui/generator/yield-in-const.rs create mode 100644 src/test/ui/generator/yield-in-const.stderr create mode 100644 src/test/ui/generator/yield-in-function.rs create mode 100644 src/test/ui/generator/yield-in-function.stderr create mode 100644 src/test/ui/generator/yield-in-static.rs create mode 100644 src/test/ui/generator/yield-in-static.stderr create mode 100644 src/test/ui/generator/yield-while-iterating.rs create mode 100644 src/test/ui/generator/yield-while-iterating.stderr 
create mode 100644 src/test/ui/generator/yield-while-local-borrowed.rs create mode 100644 src/test/ui/generator/yield-while-local-borrowed.stderr create mode 100644 src/test/ui/generator/yield-while-ref-reborrowed.rs create mode 100644 src/test/ui/generator/yield-while-ref-reborrowed.stderr create mode 100644 src/test/ui/issue-36400.rs create mode 100644 src/test/ui/issue-36400.stderr create mode 100644 src/test/ui/issue-45107-unnecessary-unsafe-in-closure.rs create mode 100644 src/test/ui/issue-45107-unnecessary-unsafe-in-closure.stderr create mode 100644 src/test/ui/lifetime-errors/ex3-both-anon-regions-4.stderr create mode 100644 src/test/ui/lifetime-errors/ex3-both-anon-regions-both-are-structs-4.rs create mode 100644 src/test/ui/lifetime-errors/ex3-both-anon-regions-both-are-structs-4.stderr create mode 100644 src/test/ui/lifetime-errors/ex3-both-anon-regions-both-are-structs-earlybound-regions.rs create mode 100644 src/test/ui/lifetime-errors/ex3-both-anon-regions-both-are-structs-earlybound-regions.stderr create mode 100644 src/test/ui/lifetime-errors/ex3-both-anon-regions-both-are-structs-latebound-regions.rs create mode 100644 src/test/ui/lifetime-errors/ex3-both-anon-regions-both-are-structs-latebound-regions.stderr create mode 100644 src/test/ui/lifetime-errors/ex3-both-anon-regions-earlybound-regions.rs create mode 100644 src/test/ui/lifetime-errors/ex3-both-anon-regions-earlybound-regions.stderr create mode 100644 src/test/ui/lifetime-errors/ex3-both-anon-regions-latebound-regions.rs create mode 100644 src/test/ui/lifetime-errors/ex3-both-anon-regions-latebound-regions.stderr create mode 100644 src/test/ui/lifetime-errors/ex3-both-anon-regions-one-is-struct-4.rs create mode 100644 src/test/ui/lifetime-errors/ex3-both-anon-regions-one-is-struct-4.stderr create mode 100644 src/test/ui/lifetime-errors/ex3-both-anon-regions-using-fn-items.rs create mode 100644 src/test/ui/lifetime-errors/ex3-both-anon-regions-using-fn-items.stderr create mode 100644 src/test/ui/lifetime-errors/ex3-both-anon-regions-using-trait-objects.rs create mode 100644 src/test/ui/lifetime-errors/ex3-both-anon-regions-using-trait-objects.stderr rename src/test/{compile-fail/issue-16747.rs => ui/lifetimes/lifetime-doesnt-live-long-enough.rs} (66%) create mode 100644 src/test/ui/lifetimes/lifetime-doesnt-live-long-enough.stderr delete mode 100644 src/test/ui/lint/fn_must_use.stderr create mode 100644 src/test/ui/lint/not_found.rs create mode 100644 src/test/ui/lint/not_found.stderr create mode 100644 src/test/ui/lint/suggestions.rs create mode 100644 src/test/ui/lint/suggestions.stderr create mode 100644 src/test/ui/lint/unused_parens_json_suggestion.rs create mode 100644 src/test/ui/lint/unused_parens_json_suggestion.stderr create mode 100644 src/test/ui/macros/trace_faulty_macros.rs create mode 100644 src/test/ui/macros/trace_faulty_macros.stderr delete mode 100644 src/test/ui/mismatched_types/E0281.stderr create mode 100644 src/test/ui/mismatched_types/E0631.rs create mode 100644 src/test/ui/mismatched_types/E0631.stderr create mode 100644 src/test/ui/mismatched_types/closure-arg-type-mismatch.rs create mode 100644 src/test/ui/mismatched_types/closure-arg-type-mismatch.stderr create mode 100644 src/test/ui/rfc-2005-default-binding-mode/const.rs create mode 100644 src/test/ui/rfc-2005-default-binding-mode/const.stderr create mode 100644 src/test/ui/rfc-2005-default-binding-mode/enum.rs create mode 100644 src/test/ui/rfc-2005-default-binding-mode/enum.stderr create mode 100644 
src/test/ui/rfc-2005-default-binding-mode/explicit-mut.rs create mode 100644 src/test/ui/rfc-2005-default-binding-mode/explicit-mut.stderr create mode 100644 src/test/ui/rfc-2005-default-binding-mode/for.rs create mode 100644 src/test/ui/rfc-2005-default-binding-mode/for.stderr create mode 100644 src/test/ui/rfc-2005-default-binding-mode/issue-44912-or.rs create mode 100644 src/test/ui/rfc-2005-default-binding-mode/issue-44912-or.stderr create mode 100644 src/test/ui/rfc-2005-default-binding-mode/lit.rs create mode 100644 src/test/ui/rfc-2005-default-binding-mode/lit.stderr create mode 100644 src/test/ui/rfc-2005-default-binding-mode/no-double-error.rs create mode 100644 src/test/ui/rfc-2005-default-binding-mode/no-double-error.stderr create mode 100644 src/test/ui/rfc-2005-default-binding-mode/slice.rs create mode 100644 src/test/ui/rfc-2005-default-binding-mode/slice.stderr create mode 100644 src/test/ui/rfc_1940-must_use_on_functions/fn_must_use.rs create mode 100644 src/test/ui/rfc_1940-must_use_on_functions/fn_must_use.stderr create mode 100644 src/test/ui/span/gated-features-attr-spans.rs create mode 100644 src/test/ui/span/gated-features-attr-spans.stderr create mode 100644 src/test/ui/span/issue-43927-non-ADT-derive.rs create mode 100644 src/test/ui/span/issue-43927-non-ADT-derive.stderr create mode 100644 src/test/ui/span/missing-unit-argument.rs create mode 100644 src/test/ui/span/missing-unit-argument.stderr create mode 100644 src/test/ui/span/unused-warning-point-at-signature.rs create mode 100644 src/test/ui/span/unused-warning-point-at-signature.stderr create mode 100644 src/test/ui/str-lit-type-mismatch.rs create mode 100644 src/test/ui/str-lit-type-mismatch.stderr create mode 100644 src/test/ui/suggestions/issue-43420-no-over-suggest.rs create mode 100644 src/test/ui/suggestions/issue-43420-no-over-suggest.stderr create mode 100644 src/test/ui/suggestions/suggest-methods.rs create mode 100644 src/test/ui/suggestions/suggest-methods.stderr create mode 100644 src/test/ui/suggestions/try-on-option.rs create mode 100644 src/test/ui/suggestions/try-on-option.stderr create mode 100644 src/test/ui/type-check/issue-41314.rs create mode 100644 src/test/ui/type-check/issue-41314.stderr create mode 100644 src/tools/clippy/.editorconfig create mode 100644 src/tools/clippy/.github/ISSUE_TEMPLATE.md create mode 100755 src/tools/clippy/.github/deploy.sh create mode 100644 src/tools/clippy/.github/deploy_key.enc create mode 100644 src/tools/clippy/.remarkrc.json create mode 100644 src/tools/clippy/.travis.yml create mode 100644 src/tools/clippy/CHANGELOG.md create mode 100644 src/tools/clippy/CONTRIBUTING.md create mode 100644 src/tools/clippy/Cargo.toml create mode 100644 src/tools/clippy/LICENSE create mode 100644 src/tools/clippy/PUBLISH.md create mode 100644 src/tools/clippy/README.md create mode 100644 src/tools/clippy/appveyor.yml create mode 100644 src/tools/clippy/clippy_lints/Cargo.toml create mode 100644 src/tools/clippy/clippy_lints/README.md create mode 100644 src/tools/clippy/clippy_lints/src/approx_const.rs create mode 100644 src/tools/clippy/clippy_lints/src/arithmetic.rs create mode 100644 src/tools/clippy/clippy_lints/src/array_indexing.rs create mode 100644 src/tools/clippy/clippy_lints/src/assign_ops.rs create mode 100644 src/tools/clippy/clippy_lints/src/attrs.rs create mode 100644 src/tools/clippy/clippy_lints/src/bit_mask.rs create mode 100644 src/tools/clippy/clippy_lints/src/blacklisted_name.rs create mode 100644 
src/tools/clippy/clippy_lints/src/block_in_if_condition.rs create mode 100644 src/tools/clippy/clippy_lints/src/booleans.rs create mode 100644 src/tools/clippy/clippy_lints/src/bytecount.rs create mode 100644 src/tools/clippy/clippy_lints/src/collapsible_if.rs create mode 100644 src/tools/clippy/clippy_lints/src/consts.rs create mode 100644 src/tools/clippy/clippy_lints/src/copies.rs create mode 100644 src/tools/clippy/clippy_lints/src/cyclomatic_complexity.rs create mode 100644 src/tools/clippy/clippy_lints/src/deprecated_lints.rs create mode 100644 src/tools/clippy/clippy_lints/src/derive.rs create mode 100644 src/tools/clippy/clippy_lints/src/doc.rs create mode 100644 src/tools/clippy/clippy_lints/src/double_parens.rs create mode 100644 src/tools/clippy/clippy_lints/src/drop_forget_ref.rs create mode 100644 src/tools/clippy/clippy_lints/src/empty_enum.rs create mode 100644 src/tools/clippy/clippy_lints/src/entry.rs create mode 100644 src/tools/clippy/clippy_lints/src/enum_clike.rs create mode 100644 src/tools/clippy/clippy_lints/src/enum_glob_use.rs create mode 100644 src/tools/clippy/clippy_lints/src/enum_variants.rs create mode 100644 src/tools/clippy/clippy_lints/src/eq_op.rs create mode 100644 src/tools/clippy/clippy_lints/src/escape.rs create mode 100644 src/tools/clippy/clippy_lints/src/eta_reduction.rs create mode 100644 src/tools/clippy/clippy_lints/src/eval_order_dependence.rs create mode 100644 src/tools/clippy/clippy_lints/src/format.rs create mode 100644 src/tools/clippy/clippy_lints/src/formatting.rs create mode 100644 src/tools/clippy/clippy_lints/src/functions.rs create mode 100644 src/tools/clippy/clippy_lints/src/identity_op.rs create mode 100644 src/tools/clippy/clippy_lints/src/if_let_redundant_pattern_matching.rs create mode 100644 src/tools/clippy/clippy_lints/src/if_not_else.rs create mode 100644 src/tools/clippy/clippy_lints/src/infinite_iter.rs create mode 100644 src/tools/clippy/clippy_lints/src/is_unit_expr.rs create mode 100644 src/tools/clippy/clippy_lints/src/items_after_statements.rs create mode 100644 src/tools/clippy/clippy_lints/src/large_enum_variant.rs create mode 100644 src/tools/clippy/clippy_lints/src/len_zero.rs create mode 100644 src/tools/clippy/clippy_lints/src/let_if_seq.rs create mode 100644 src/tools/clippy/clippy_lints/src/lib.rs create mode 100644 src/tools/clippy/clippy_lints/src/lifetimes.rs create mode 100644 src/tools/clippy/clippy_lints/src/literal_digit_grouping.rs create mode 100644 src/tools/clippy/clippy_lints/src/loops.rs create mode 100644 src/tools/clippy/clippy_lints/src/map_clone.rs create mode 100644 src/tools/clippy/clippy_lints/src/matches.rs create mode 100644 src/tools/clippy/clippy_lints/src/mem_forget.rs create mode 100644 src/tools/clippy/clippy_lints/src/methods.rs create mode 100644 src/tools/clippy/clippy_lints/src/minmax.rs create mode 100644 src/tools/clippy/clippy_lints/src/misc.rs create mode 100644 src/tools/clippy/clippy_lints/src/misc_early.rs create mode 100644 src/tools/clippy/clippy_lints/src/missing_doc.rs create mode 100644 src/tools/clippy/clippy_lints/src/mut_mut.rs create mode 100644 src/tools/clippy/clippy_lints/src/mut_reference.rs create mode 100644 src/tools/clippy/clippy_lints/src/mutex_atomic.rs create mode 100644 src/tools/clippy/clippy_lints/src/needless_bool.rs create mode 100644 src/tools/clippy/clippy_lints/src/needless_borrow.rs create mode 100644 src/tools/clippy/clippy_lints/src/needless_borrowed_ref.rs create mode 100644 src/tools/clippy/clippy_lints/src/needless_continue.rs create mode 
100644 src/tools/clippy/clippy_lints/src/needless_pass_by_value.rs create mode 100644 src/tools/clippy/clippy_lints/src/needless_update.rs create mode 100644 src/tools/clippy/clippy_lints/src/neg_multiply.rs create mode 100644 src/tools/clippy/clippy_lints/src/new_without_default.rs create mode 100644 src/tools/clippy/clippy_lints/src/no_effect.rs create mode 100644 src/tools/clippy/clippy_lints/src/non_expressive_names.rs create mode 100644 src/tools/clippy/clippy_lints/src/ok_if_let.rs create mode 100644 src/tools/clippy/clippy_lints/src/open_options.rs create mode 100644 src/tools/clippy/clippy_lints/src/overflow_check_conditional.rs create mode 100644 src/tools/clippy/clippy_lints/src/panic.rs create mode 100644 src/tools/clippy/clippy_lints/src/partialeq_ne_impl.rs create mode 100644 src/tools/clippy/clippy_lints/src/precedence.rs create mode 100644 src/tools/clippy/clippy_lints/src/print.rs create mode 100644 src/tools/clippy/clippy_lints/src/ptr.rs create mode 100644 src/tools/clippy/clippy_lints/src/ranges.rs create mode 100644 src/tools/clippy/clippy_lints/src/reference.rs create mode 100644 src/tools/clippy/clippy_lints/src/regex.rs create mode 100644 src/tools/clippy/clippy_lints/src/returns.rs create mode 100644 src/tools/clippy/clippy_lints/src/serde_api.rs create mode 100644 src/tools/clippy/clippy_lints/src/shadow.rs create mode 100644 src/tools/clippy/clippy_lints/src/should_assert_eq.rs create mode 100644 src/tools/clippy/clippy_lints/src/strings.rs create mode 100644 src/tools/clippy/clippy_lints/src/swap.rs create mode 100644 src/tools/clippy/clippy_lints/src/temporary_assignment.rs create mode 100644 src/tools/clippy/clippy_lints/src/transmute.rs create mode 100644 src/tools/clippy/clippy_lints/src/types.rs create mode 100644 src/tools/clippy/clippy_lints/src/unicode.rs create mode 100644 src/tools/clippy/clippy_lints/src/unsafe_removed_from_name.rs create mode 100644 src/tools/clippy/clippy_lints/src/unused_io_amount.rs create mode 100644 src/tools/clippy/clippy_lints/src/unused_label.rs create mode 100644 src/tools/clippy/clippy_lints/src/use_self.rs create mode 100644 src/tools/clippy/clippy_lints/src/utils/author.rs create mode 100644 src/tools/clippy/clippy_lints/src/utils/comparisons.rs create mode 100644 src/tools/clippy/clippy_lints/src/utils/conf.rs create mode 100644 src/tools/clippy/clippy_lints/src/utils/constants.rs create mode 100644 src/tools/clippy/clippy_lints/src/utils/higher.rs create mode 100644 src/tools/clippy/clippy_lints/src/utils/hir_utils.rs create mode 100644 src/tools/clippy/clippy_lints/src/utils/inspector.rs create mode 100644 src/tools/clippy/clippy_lints/src/utils/internal_lints.rs create mode 100644 src/tools/clippy/clippy_lints/src/utils/mod.rs create mode 100644 src/tools/clippy/clippy_lints/src/utils/paths.rs create mode 100644 src/tools/clippy/clippy_lints/src/utils/sugg.rs create mode 100644 src/tools/clippy/clippy_lints/src/vec.rs create mode 100644 src/tools/clippy/clippy_lints/src/zero_div_zero.rs create mode 100644 src/tools/clippy/clippy_workspace_tests/Cargo.toml create mode 100644 src/tools/clippy/clippy_workspace_tests/src/main.rs create mode 100644 src/tools/clippy/clippy_workspace_tests/subcrate/Cargo.toml rename src/{vendor/error-chain-0.10.0/.cargo-ok => tools/clippy/clippy_workspace_tests/subcrate/src/lib.rs} (100%) create mode 100644 src/tools/clippy/mini-macro/Cargo.toml create mode 100644 src/tools/clippy/mini-macro/src/lib.rs create mode 100755 src/tools/clippy/pre_publish.sh create mode 100644 
src/tools/clippy/publish.files create mode 100644 src/tools/clippy/rls.toml create mode 100644 src/tools/clippy/rustfmt.toml create mode 100644 src/tools/clippy/src/lib.rs create mode 100644 src/tools/clippy/src/main.rs create mode 100644 src/tools/clippy/tests/auxiliary/conf_french_blacklisted_name.toml create mode 100644 src/tools/clippy/tests/auxiliary/conf_unknown_key.toml create mode 100644 src/tools/clippy/tests/auxiliary/conf_whitelisted.toml create mode 100644 src/tools/clippy/tests/camel_case.rs create mode 100644 src/tools/clippy/tests/cc_seme.rs create mode 100644 src/tools/clippy/tests/compile-test.rs create mode 100644 src/tools/clippy/tests/dogfood.rs create mode 100644 src/tools/clippy/tests/ice_exacte_size.rs create mode 100644 src/tools/clippy/tests/issue-825.rs create mode 100644 src/tools/clippy/tests/matches.rs create mode 100644 src/tools/clippy/tests/needless_continue_helpers.rs create mode 100644 src/tools/clippy/tests/run-pass/associated-constant-ice.rs create mode 100644 src/tools/clippy/tests/run-pass/conf_whitelisted.rs create mode 100644 src/tools/clippy/tests/run-pass/enum-glob-import-crate.rs create mode 100644 src/tools/clippy/tests/run-pass/ice-1588.rs create mode 100644 src/tools/clippy/tests/run-pass/ice-1969.rs create mode 100644 src/tools/clippy/tests/run-pass/ice-700.rs create mode 100644 src/tools/clippy/tests/run-pass/mut_mut_macro.rs create mode 100644 src/tools/clippy/tests/run-pass/needless_lifetimes_impl_trait.rs create mode 100644 src/tools/clippy/tests/run-pass/procedural_macro.rs create mode 100644 src/tools/clippy/tests/run-pass/regressions.rs create mode 100644 src/tools/clippy/tests/run-pass/returns.rs create mode 100644 src/tools/clippy/tests/run-pass/single-match-else.rs create mode 100644 src/tools/clippy/tests/trim_multiline.rs create mode 100644 src/tools/clippy/tests/ui-posix/conf_non_existant.rs create mode 100644 src/tools/clippy/tests/ui-posix/conf_non_existant.stderr create mode 100755 src/tools/clippy/tests/ui-posix/update-all-references.sh create mode 100755 src/tools/clippy/tests/ui-posix/update-references.sh create mode 100644 src/tools/clippy/tests/ui-windows/conf_non_existant.rs create mode 100644 src/tools/clippy/tests/ui-windows/conf_non_existant.stderr create mode 100755 src/tools/clippy/tests/ui-windows/update-all-references.sh create mode 100755 src/tools/clippy/tests/ui-windows/update-references.sh create mode 100644 src/tools/clippy/tests/ui/absurd-extreme-comparisons.rs create mode 100644 src/tools/clippy/tests/ui/absurd-extreme-comparisons.stderr create mode 100644 src/tools/clippy/tests/ui/approx_const.rs create mode 100644 src/tools/clippy/tests/ui/approx_const.stderr create mode 100644 src/tools/clippy/tests/ui/arithmetic.rs create mode 100644 src/tools/clippy/tests/ui/arithmetic.stderr create mode 100644 src/tools/clippy/tests/ui/array_indexing.rs create mode 100644 src/tools/clippy/tests/ui/array_indexing.stderr create mode 100644 src/tools/clippy/tests/ui/assign_ops.rs create mode 100644 src/tools/clippy/tests/ui/assign_ops.stderr create mode 100644 src/tools/clippy/tests/ui/assign_ops2.rs create mode 100644 src/tools/clippy/tests/ui/assign_ops2.stderr create mode 100644 src/tools/clippy/tests/ui/attrs.rs create mode 100644 src/tools/clippy/tests/ui/attrs.stderr create mode 100644 src/tools/clippy/tests/ui/bit_masks.rs create mode 100644 src/tools/clippy/tests/ui/bit_masks.stderr create mode 100644 src/tools/clippy/tests/ui/blacklisted_name.rs create mode 100644 src/tools/clippy/tests/ui/blacklisted_name.stderr 
create mode 100644 src/tools/clippy/tests/ui/block_in_if_condition.rs create mode 100644 src/tools/clippy/tests/ui/block_in_if_condition.stderr create mode 100644 src/tools/clippy/tests/ui/bool_comparison.rs create mode 100644 src/tools/clippy/tests/ui/bool_comparison.stderr create mode 100644 src/tools/clippy/tests/ui/booleans.rs create mode 100644 src/tools/clippy/tests/ui/booleans.stderr create mode 100644 src/tools/clippy/tests/ui/borrow_box.rs create mode 100644 src/tools/clippy/tests/ui/borrow_box.stderr create mode 100644 src/tools/clippy/tests/ui/box_vec.rs create mode 100644 src/tools/clippy/tests/ui/box_vec.stderr create mode 100644 src/tools/clippy/tests/ui/builtin-type-shadow.rs create mode 100644 src/tools/clippy/tests/ui/builtin-type-shadow.stderr create mode 100644 src/tools/clippy/tests/ui/bytecount.rs create mode 100644 src/tools/clippy/tests/ui/bytecount.stderr create mode 100644 src/tools/clippy/tests/ui/cast.rs create mode 100644 src/tools/clippy/tests/ui/cast.stderr create mode 100644 src/tools/clippy/tests/ui/char_lit_as_u8.rs create mode 100644 src/tools/clippy/tests/ui/char_lit_as_u8.stderr create mode 100644 src/tools/clippy/tests/ui/cmp_nan.rs create mode 100644 src/tools/clippy/tests/ui/cmp_nan.stderr create mode 100644 src/tools/clippy/tests/ui/cmp_null.rs create mode 100644 src/tools/clippy/tests/ui/cmp_null.stderr create mode 100644 src/tools/clippy/tests/ui/cmp_owned.rs create mode 100644 src/tools/clippy/tests/ui/cmp_owned.stderr create mode 100644 src/tools/clippy/tests/ui/collapsible_if.rs create mode 100644 src/tools/clippy/tests/ui/collapsible_if.stderr create mode 100644 src/tools/clippy/tests/ui/complex_types.rs create mode 100644 src/tools/clippy/tests/ui/complex_types.stderr create mode 100644 src/tools/clippy/tests/ui/conf_bad_arg.rs create mode 100644 src/tools/clippy/tests/ui/conf_bad_arg.stderr create mode 100644 src/tools/clippy/tests/ui/conf_bad_toml.rs create mode 100644 src/tools/clippy/tests/ui/conf_bad_toml.stderr create mode 100644 src/tools/clippy/tests/ui/conf_bad_toml.toml create mode 100644 src/tools/clippy/tests/ui/conf_bad_type.rs create mode 100644 src/tools/clippy/tests/ui/conf_bad_type.stderr create mode 100644 src/tools/clippy/tests/ui/conf_bad_type.toml create mode 100644 src/tools/clippy/tests/ui/conf_french_blacklisted_name.rs create mode 100644 src/tools/clippy/tests/ui/conf_french_blacklisted_name.stderr create mode 100644 src/tools/clippy/tests/ui/conf_path_non_string.rs create mode 100644 src/tools/clippy/tests/ui/conf_path_non_string.stderr create mode 100644 src/tools/clippy/tests/ui/conf_unknown_key.rs create mode 100644 src/tools/clippy/tests/ui/conf_unknown_key.stderr create mode 100644 src/tools/clippy/tests/ui/copies.rs create mode 100644 src/tools/clippy/tests/ui/copies.stderr create mode 100644 src/tools/clippy/tests/ui/cyclomatic_complexity.rs create mode 100644 src/tools/clippy/tests/ui/cyclomatic_complexity.stderr create mode 100644 src/tools/clippy/tests/ui/cyclomatic_complexity_attr_used.rs create mode 100644 src/tools/clippy/tests/ui/cyclomatic_complexity_attr_used.stderr create mode 100644 src/tools/clippy/tests/ui/deprecated.rs create mode 100644 src/tools/clippy/tests/ui/deprecated.stderr create mode 100644 src/tools/clippy/tests/ui/derive.rs create mode 100644 src/tools/clippy/tests/ui/derive.stderr create mode 100644 src/tools/clippy/tests/ui/diverging_sub_expression.rs create mode 100644 src/tools/clippy/tests/ui/diverging_sub_expression.stderr create mode 100644 src/tools/clippy/tests/ui/dlist.rs 
create mode 100644 src/tools/clippy/tests/ui/dlist.stderr create mode 100644 src/tools/clippy/tests/ui/doc.rs create mode 100644 src/tools/clippy/tests/ui/doc.stderr create mode 100644 src/tools/clippy/tests/ui/double_neg.rs create mode 100644 src/tools/clippy/tests/ui/double_neg.stderr create mode 100644 src/tools/clippy/tests/ui/double_parens.rs create mode 100644 src/tools/clippy/tests/ui/double_parens.stderr create mode 100644 src/tools/clippy/tests/ui/drop_forget_copy.rs create mode 100644 src/tools/clippy/tests/ui/drop_forget_copy.stderr create mode 100644 src/tools/clippy/tests/ui/drop_forget_ref.rs create mode 100644 src/tools/clippy/tests/ui/drop_forget_ref.stderr create mode 100644 src/tools/clippy/tests/ui/duplicate_underscore_argument.rs create mode 100644 src/tools/clippy/tests/ui/duplicate_underscore_argument.stderr create mode 100644 src/tools/clippy/tests/ui/empty_enum.rs create mode 100644 src/tools/clippy/tests/ui/empty_enum.stderr create mode 100644 src/tools/clippy/tests/ui/entry.rs create mode 100644 src/tools/clippy/tests/ui/entry.stderr create mode 100644 src/tools/clippy/tests/ui/enum_glob_use.rs create mode 100644 src/tools/clippy/tests/ui/enum_glob_use.stderr create mode 100644 src/tools/clippy/tests/ui/enum_variants.rs create mode 100644 src/tools/clippy/tests/ui/enum_variants.stderr create mode 100644 src/tools/clippy/tests/ui/enums_clike.rs create mode 100644 src/tools/clippy/tests/ui/enums_clike.stderr create mode 100644 src/tools/clippy/tests/ui/eq_op.rs create mode 100644 src/tools/clippy/tests/ui/eq_op.stderr create mode 100644 src/tools/clippy/tests/ui/escape_analysis.rs rename src/{vendor/gcc/.cargo-ok => tools/clippy/tests/ui/escape_analysis.stderr} (100%) create mode 100644 src/tools/clippy/tests/ui/eta.rs create mode 100644 src/tools/clippy/tests/ui/eta.stderr create mode 100644 src/tools/clippy/tests/ui/eval_order_dependence.rs create mode 100644 src/tools/clippy/tests/ui/eval_order_dependence.stderr create mode 100644 src/tools/clippy/tests/ui/filter_methods.rs create mode 100644 src/tools/clippy/tests/ui/filter_methods.stderr create mode 100644 src/tools/clippy/tests/ui/float_cmp.rs create mode 100644 src/tools/clippy/tests/ui/float_cmp.stderr create mode 100644 src/tools/clippy/tests/ui/for_loop.rs create mode 100644 src/tools/clippy/tests/ui/for_loop.stderr create mode 100644 src/tools/clippy/tests/ui/format.rs create mode 100644 src/tools/clippy/tests/ui/format.stderr create mode 100644 src/tools/clippy/tests/ui/formatting.rs create mode 100644 src/tools/clippy/tests/ui/formatting.stderr create mode 100644 src/tools/clippy/tests/ui/functions.rs create mode 100644 src/tools/clippy/tests/ui/functions.stderr create mode 100644 src/tools/clippy/tests/ui/ices.rs create mode 100644 src/tools/clippy/tests/ui/ices.stderr create mode 100644 src/tools/clippy/tests/ui/identity_op.rs create mode 100644 src/tools/clippy/tests/ui/identity_op.stderr create mode 100644 src/tools/clippy/tests/ui/if_let_redundant_pattern_matching.rs create mode 100644 src/tools/clippy/tests/ui/if_let_redundant_pattern_matching.stderr create mode 100644 src/tools/clippy/tests/ui/if_not_else.rs create mode 100644 src/tools/clippy/tests/ui/if_not_else.stderr create mode 100644 src/tools/clippy/tests/ui/inconsistent_digit_grouping.rs create mode 100644 src/tools/clippy/tests/ui/inconsistent_digit_grouping.stderr create mode 100644 src/tools/clippy/tests/ui/infinite_iter.rs create mode 100644 src/tools/clippy/tests/ui/infinite_iter.stderr create mode 100644 
src/tools/clippy/tests/ui/invalid_upcast_comparisons.rs create mode 100644 src/tools/clippy/tests/ui/invalid_upcast_comparisons.stderr create mode 100644 src/tools/clippy/tests/ui/is_unit_expr.rs create mode 100644 src/tools/clippy/tests/ui/is_unit_expr.stderr create mode 100644 src/tools/clippy/tests/ui/item_after_statement.rs create mode 100644 src/tools/clippy/tests/ui/item_after_statement.stderr create mode 100644 src/tools/clippy/tests/ui/large_digit_groups.rs create mode 100644 src/tools/clippy/tests/ui/large_digit_groups.stderr create mode 100644 src/tools/clippy/tests/ui/large_enum_variant.rs create mode 100644 src/tools/clippy/tests/ui/large_enum_variant.stderr create mode 100644 src/tools/clippy/tests/ui/len_zero.rs create mode 100644 src/tools/clippy/tests/ui/len_zero.stderr create mode 100644 src/tools/clippy/tests/ui/let_if_seq.rs create mode 100644 src/tools/clippy/tests/ui/let_if_seq.stderr create mode 100644 src/tools/clippy/tests/ui/let_return.rs create mode 100644 src/tools/clippy/tests/ui/let_return.stderr create mode 100644 src/tools/clippy/tests/ui/let_unit.rs create mode 100644 src/tools/clippy/tests/ui/let_unit.stderr create mode 100644 src/tools/clippy/tests/ui/lifetimes.rs create mode 100644 src/tools/clippy/tests/ui/lifetimes.stderr create mode 100644 src/tools/clippy/tests/ui/lint_pass.rs create mode 100644 src/tools/clippy/tests/ui/lint_pass.stderr create mode 100644 src/tools/clippy/tests/ui/literals.rs create mode 100644 src/tools/clippy/tests/ui/literals.stderr create mode 100644 src/tools/clippy/tests/ui/map_clone.rs create mode 100644 src/tools/clippy/tests/ui/map_clone.stderr create mode 100644 src/tools/clippy/tests/ui/matches.rs create mode 100644 src/tools/clippy/tests/ui/matches.stderr create mode 100644 src/tools/clippy/tests/ui/mem_forget.rs create mode 100644 src/tools/clippy/tests/ui/mem_forget.stderr create mode 100644 src/tools/clippy/tests/ui/methods.rs create mode 100644 src/tools/clippy/tests/ui/methods.stderr create mode 100644 src/tools/clippy/tests/ui/min_max.rs create mode 100644 src/tools/clippy/tests/ui/min_max.stderr create mode 100644 src/tools/clippy/tests/ui/missing-doc.rs create mode 100644 src/tools/clippy/tests/ui/missing-doc.stderr create mode 100644 src/tools/clippy/tests/ui/module_inception.rs create mode 100644 src/tools/clippy/tests/ui/module_inception.stderr create mode 100644 src/tools/clippy/tests/ui/modulo_one.rs create mode 100644 src/tools/clippy/tests/ui/modulo_one.stderr create mode 100644 src/tools/clippy/tests/ui/mut_from_ref.rs create mode 100644 src/tools/clippy/tests/ui/mut_from_ref.stderr create mode 100644 src/tools/clippy/tests/ui/mut_mut.rs create mode 100644 src/tools/clippy/tests/ui/mut_mut.stderr create mode 100644 src/tools/clippy/tests/ui/mut_reference.rs create mode 100644 src/tools/clippy/tests/ui/mut_reference.stderr create mode 100644 src/tools/clippy/tests/ui/mutex_atomic.rs create mode 100644 src/tools/clippy/tests/ui/mutex_atomic.stderr create mode 100644 src/tools/clippy/tests/ui/needless_bool.rs create mode 100644 src/tools/clippy/tests/ui/needless_bool.stderr create mode 100644 src/tools/clippy/tests/ui/needless_borrow.rs create mode 100644 src/tools/clippy/tests/ui/needless_borrow.stderr create mode 100644 src/tools/clippy/tests/ui/needless_borrowed_ref.rs create mode 100644 src/tools/clippy/tests/ui/needless_borrowed_ref.stderr create mode 100644 src/tools/clippy/tests/ui/needless_continue.rs create mode 100644 src/tools/clippy/tests/ui/needless_continue.stderr create mode 100644 
src/tools/clippy/tests/ui/needless_pass_by_value.rs create mode 100644 src/tools/clippy/tests/ui/needless_pass_by_value.stderr create mode 100644 src/tools/clippy/tests/ui/needless_pass_by_value_proc_macro.rs create mode 100644 src/tools/clippy/tests/ui/needless_return.rs create mode 100644 src/tools/clippy/tests/ui/needless_return.stderr create mode 100644 src/tools/clippy/tests/ui/needless_update.rs create mode 100644 src/tools/clippy/tests/ui/needless_update.stderr create mode 100644 src/tools/clippy/tests/ui/neg_multiply.rs create mode 100644 src/tools/clippy/tests/ui/neg_multiply.stderr create mode 100644 src/tools/clippy/tests/ui/never_loop.rs create mode 100644 src/tools/clippy/tests/ui/never_loop.stderr create mode 100644 src/tools/clippy/tests/ui/new_without_default.rs create mode 100644 src/tools/clippy/tests/ui/new_without_default.stderr create mode 100644 src/tools/clippy/tests/ui/no_effect.rs create mode 100644 src/tools/clippy/tests/ui/no_effect.stderr create mode 100644 src/tools/clippy/tests/ui/non_expressive_names.rs create mode 100644 src/tools/clippy/tests/ui/non_expressive_names.stderr create mode 100644 src/tools/clippy/tests/ui/ok_if_let.rs create mode 100644 src/tools/clippy/tests/ui/ok_if_let.stderr create mode 100644 src/tools/clippy/tests/ui/op_ref.rs create mode 100644 src/tools/clippy/tests/ui/op_ref.stderr create mode 100644 src/tools/clippy/tests/ui/open_options.rs create mode 100644 src/tools/clippy/tests/ui/open_options.stderr create mode 100644 src/tools/clippy/tests/ui/overflow_check_conditional.rs create mode 100644 src/tools/clippy/tests/ui/overflow_check_conditional.stderr create mode 100644 src/tools/clippy/tests/ui/panic.rs create mode 100644 src/tools/clippy/tests/ui/panic.stderr create mode 100644 src/tools/clippy/tests/ui/partialeq_ne_impl.rs create mode 100644 src/tools/clippy/tests/ui/partialeq_ne_impl.stderr create mode 100644 src/tools/clippy/tests/ui/patterns.rs create mode 100644 src/tools/clippy/tests/ui/patterns.stderr create mode 100644 src/tools/clippy/tests/ui/precedence.rs create mode 100644 src/tools/clippy/tests/ui/precedence.stderr create mode 100644 src/tools/clippy/tests/ui/print.rs create mode 100644 src/tools/clippy/tests/ui/print.stderr create mode 100644 src/tools/clippy/tests/ui/print_with_newline.rs create mode 100644 src/tools/clippy/tests/ui/print_with_newline.stderr create mode 100644 src/tools/clippy/tests/ui/ptr_arg.rs create mode 100644 src/tools/clippy/tests/ui/ptr_arg.stderr create mode 100644 src/tools/clippy/tests/ui/range.rs create mode 100644 src/tools/clippy/tests/ui/range.stderr create mode 100644 src/tools/clippy/tests/ui/redundant_closure_call.rs create mode 100644 src/tools/clippy/tests/ui/redundant_closure_call.stderr create mode 100644 src/tools/clippy/tests/ui/reference.rs create mode 100644 src/tools/clippy/tests/ui/reference.stderr create mode 100644 src/tools/clippy/tests/ui/regex.rs create mode 100644 src/tools/clippy/tests/ui/regex.stderr create mode 100644 src/tools/clippy/tests/ui/serde.rs create mode 100644 src/tools/clippy/tests/ui/serde.stderr create mode 100644 src/tools/clippy/tests/ui/shadow.rs create mode 100644 src/tools/clippy/tests/ui/shadow.stderr create mode 100644 src/tools/clippy/tests/ui/short_circuit_statement.rs create mode 100644 src/tools/clippy/tests/ui/short_circuit_statement.stderr create mode 100644 src/tools/clippy/tests/ui/should_assert_eq.rs create mode 100644 src/tools/clippy/tests/ui/should_assert_eq.stderr create mode 100644 src/tools/clippy/tests/ui/strings.rs create 
mode 100644 src/tools/clippy/tests/ui/strings.stderr create mode 100644 src/tools/clippy/tests/ui/stutter.rs create mode 100644 src/tools/clippy/tests/ui/stutter.stderr create mode 100644 src/tools/clippy/tests/ui/swap.rs create mode 100644 src/tools/clippy/tests/ui/swap.stderr create mode 100644 src/tools/clippy/tests/ui/temporary_assignment.rs create mode 100644 src/tools/clippy/tests/ui/temporary_assignment.stderr create mode 100644 src/tools/clippy/tests/ui/toplevel_ref_arg.rs create mode 100644 src/tools/clippy/tests/ui/toplevel_ref_arg.stderr create mode 100644 src/tools/clippy/tests/ui/trailing_zeros.rs create mode 100644 src/tools/clippy/tests/ui/trailing_zeros.stderr create mode 100644 src/tools/clippy/tests/ui/trailing_zeros.stdout create mode 100644 src/tools/clippy/tests/ui/transmute.rs create mode 100644 src/tools/clippy/tests/ui/transmute.stderr create mode 100644 src/tools/clippy/tests/ui/transmute_32bit.rs create mode 100644 src/tools/clippy/tests/ui/transmute_64bit.rs create mode 100644 src/tools/clippy/tests/ui/transmute_64bit.stderr create mode 100644 src/tools/clippy/tests/ui/unicode.rs create mode 100644 src/tools/clippy/tests/ui/unicode.stderr create mode 100644 src/tools/clippy/tests/ui/unit_cmp.rs create mode 100644 src/tools/clippy/tests/ui/unit_cmp.stderr create mode 100644 src/tools/clippy/tests/ui/unneeded_field_pattern.rs create mode 100644 src/tools/clippy/tests/ui/unneeded_field_pattern.stderr create mode 100644 src/tools/clippy/tests/ui/unreadable_literal.rs create mode 100644 src/tools/clippy/tests/ui/unreadable_literal.stderr create mode 100644 src/tools/clippy/tests/ui/unsafe_removed_from_name.rs create mode 100644 src/tools/clippy/tests/ui/unsafe_removed_from_name.stderr create mode 100644 src/tools/clippy/tests/ui/unused_io_amount.rs create mode 100644 src/tools/clippy/tests/ui/unused_io_amount.stderr create mode 100644 src/tools/clippy/tests/ui/unused_labels.rs create mode 100644 src/tools/clippy/tests/ui/unused_labels.stderr create mode 100644 src/tools/clippy/tests/ui/unused_lt.rs create mode 100644 src/tools/clippy/tests/ui/unused_lt.stderr create mode 100755 src/tools/clippy/tests/ui/update-all-references.sh create mode 100755 src/tools/clippy/tests/ui/update-references.sh create mode 100644 src/tools/clippy/tests/ui/use_self.rs create mode 100644 src/tools/clippy/tests/ui/use_self.stderr create mode 100644 src/tools/clippy/tests/ui/used_underscore_binding.rs create mode 100644 src/tools/clippy/tests/ui/used_underscore_binding.stderr create mode 100644 src/tools/clippy/tests/ui/useless_attribute.rs create mode 100644 src/tools/clippy/tests/ui/useless_attribute.stderr create mode 100644 src/tools/clippy/tests/ui/vec.rs create mode 100644 src/tools/clippy/tests/ui/vec.stderr create mode 100644 src/tools/clippy/tests/ui/while_loop.rs create mode 100644 src/tools/clippy/tests/ui/while_loop.stderr create mode 100644 src/tools/clippy/tests/ui/wrong_self_convention.rs create mode 100644 src/tools/clippy/tests/ui/wrong_self_convention.stderr create mode 100644 src/tools/clippy/tests/ui/zero_div_zero.rs create mode 100644 src/tools/clippy/tests/ui/zero_div_zero.stderr create mode 100644 src/tools/clippy/tests/ui/zero_ptr.rs create mode 100644 src/tools/clippy/tests/ui/zero_ptr.stderr create mode 100644 src/tools/clippy/tests/used_underscore_binding_macro.rs create mode 100644 src/tools/clippy/tests/versioncheck.rs create mode 100755 src/tools/clippy/util/cov.sh create mode 100755 src/tools/clippy/util/dogfood.sh create mode 100755 
src/tools/clippy/util/export.py create mode 100644 src/tools/clippy/util/gh-pages/index.html create mode 100644 src/tools/clippy/util/gh-pages/versions.html create mode 100644 src/tools/clippy/util/lintlib.py create mode 100755 src/tools/clippy/util/update_lints.py create mode 100644 src/tools/miri/.editorconfig create mode 100644 src/tools/miri/.travis.yml create mode 100644 src/tools/miri/Cargo.lock create mode 100644 src/tools/miri/Cargo.toml create mode 100644 src/tools/miri/LICENSE-APACHE rename src/{vendor/unicode-segmentation => tools/miri}/LICENSE-MIT (95%) create mode 100644 src/tools/miri/README.md create mode 100644 src/tools/miri/appveyor.yml create mode 100644 src/tools/miri/benches/fibonacci.rs create mode 100644 src/tools/miri/benches/helpers/fibonacci_helper.rs create mode 100644 src/tools/miri/benches/helpers/fibonacci_helper_iterative.rs create mode 100644 src/tools/miri/benches/helpers/miri_helper.rs create mode 100644 src/tools/miri/benches/helpers/mod.rs create mode 100644 src/tools/miri/benches/helpers/repeat.rs create mode 100644 src/tools/miri/benches/helpers/repeat_manual.rs create mode 100644 src/tools/miri/benches/helpers/smoke_helper.rs create mode 100644 src/tools/miri/benches/repeat.rs create mode 100644 src/tools/miri/benches/smoke.rs create mode 100644 src/tools/miri/build.rs create mode 100644 src/tools/miri/cargo-miri-test/Cargo.lock create mode 100644 src/tools/miri/cargo-miri-test/Cargo.toml create mode 100644 src/tools/miri/cargo-miri-test/src/main.rs create mode 100644 src/tools/miri/cargo-miri-test/tests/foo.rs create mode 100644 src/tools/miri/miri/bin/cargo-miri.rs create mode 100644 src/tools/miri/miri/bin/miri.rs create mode 100644 src/tools/miri/miri/fn_call.rs create mode 100644 src/tools/miri/miri/helpers.rs create mode 100644 src/tools/miri/miri/intrinsic.rs create mode 100644 src/tools/miri/miri/lib.rs create mode 100644 src/tools/miri/miri/memory.rs create mode 100644 src/tools/miri/miri/operator.rs create mode 100644 src/tools/miri/miri/tls.rs create mode 100644 src/tools/miri/rustc_tests/Cargo.lock create mode 100644 src/tools/miri/rustc_tests/Cargo.toml create mode 100644 src/tools/miri/rustc_tests/src/main.rs create mode 100644 src/tools/miri/src/librustc_mir/Cargo.toml create mode 100644 src/tools/miri/src/librustc_mir/interpret/cast.rs create mode 100644 src/tools/miri/src/librustc_mir/interpret/const_eval.rs create mode 100644 src/tools/miri/src/librustc_mir/interpret/error.rs create mode 100644 src/tools/miri/src/librustc_mir/interpret/eval_context.rs create mode 100644 src/tools/miri/src/librustc_mir/interpret/lvalue.rs create mode 100644 src/tools/miri/src/librustc_mir/interpret/machine.rs create mode 100644 src/tools/miri/src/librustc_mir/interpret/memory.rs create mode 100644 src/tools/miri/src/librustc_mir/interpret/mod.rs create mode 100644 src/tools/miri/src/librustc_mir/interpret/operator.rs create mode 100644 src/tools/miri/src/librustc_mir/interpret/range_map.rs create mode 100644 src/tools/miri/src/librustc_mir/interpret/step.rs create mode 100644 src/tools/miri/src/librustc_mir/interpret/terminator/drop.rs create mode 100644 src/tools/miri/src/librustc_mir/interpret/terminator/mod.rs create mode 100644 src/tools/miri/src/librustc_mir/interpret/traits.rs create mode 100644 src/tools/miri/src/librustc_mir/interpret/validation.rs create mode 100644 src/tools/miri/src/librustc_mir/interpret/value.rs create mode 100644 src/tools/miri/src/librustc_mir/lib.rs create mode 100644 
src/tools/miri/tests/compile-fail-fullmir/undefined_byte_read.rs create mode 100644 src/tools/miri/tests/compile-fail/alignment.rs create mode 100644 src/tools/miri/tests/compile-fail/assume.rs create mode 100644 src/tools/miri/tests/compile-fail/bitop-beyond-alignment.rs create mode 100644 src/tools/miri/tests/compile-fail/cast_box_int_to_fn_ptr.rs create mode 100644 src/tools/miri/tests/compile-fail/cast_fn_ptr.rs create mode 100644 src/tools/miri/tests/compile-fail/cast_fn_ptr2.rs create mode 100644 src/tools/miri/tests/compile-fail/cast_fn_ptr_unsafe.rs create mode 100644 src/tools/miri/tests/compile-fail/cast_fn_ptr_unsafe2.rs create mode 100644 src/tools/miri/tests/compile-fail/cast_int_to_fn_ptr.rs create mode 100644 src/tools/miri/tests/compile-fail/copy_nonoverlapping.rs create mode 100644 src/tools/miri/tests/compile-fail/ctlz_nonzero.rs create mode 100644 src/tools/miri/tests/compile-fail/cttz_nonzero.rs create mode 100644 src/tools/miri/tests/compile-fail/dangling_pointer_deref.rs create mode 100644 src/tools/miri/tests/compile-fail/deallocate-bad-alignment.rs create mode 100644 src/tools/miri/tests/compile-fail/deallocate-bad-size.rs create mode 100644 src/tools/miri/tests/compile-fail/deallocate-twice.rs create mode 100644 src/tools/miri/tests/compile-fail/deref_fn_ptr.rs rename src/{libstd/os/nacl/mod.rs => tools/miri/tests/compile-fail/div-by-zero-2.rs} (81%) create mode 100644 src/tools/miri/tests/compile-fail/div-by-zero.rs create mode 100644 src/tools/miri/tests/compile-fail/execute_memory.rs create mode 100644 src/tools/miri/tests/compile-fail/fn_ptr_offset.rs create mode 100644 src/tools/miri/tests/compile-fail/invalid_bool.rs create mode 100644 src/tools/miri/tests/compile-fail/invalid_enum_discriminant.rs create mode 100644 src/tools/miri/tests/compile-fail/match_char.rs create mode 100644 src/tools/miri/tests/compile-fail/memleak.rs create mode 100644 src/tools/miri/tests/compile-fail/memleak_rc.rs create mode 100644 src/tools/miri/tests/compile-fail/modifying_constants.rs create mode 100644 src/tools/miri/tests/compile-fail/never_say_never.rs create mode 100644 src/tools/miri/tests/compile-fail/never_transmute_humans.rs create mode 100644 src/tools/miri/tests/compile-fail/never_transmute_void.rs create mode 100644 src/tools/miri/tests/compile-fail/null_pointer_deref.rs create mode 100644 src/tools/miri/tests/compile-fail/oom.rs create mode 100644 src/tools/miri/tests/compile-fail/oom2.rs create mode 100644 src/tools/miri/tests/compile-fail/out_of_bounds_ptr_1.rs create mode 100644 src/tools/miri/tests/compile-fail/out_of_bounds_ptr_2.rs create mode 100644 src/tools/miri/tests/compile-fail/out_of_bounds_read.rs create mode 100644 src/tools/miri/tests/compile-fail/out_of_bounds_read2.rs create mode 100644 src/tools/miri/tests/compile-fail/overflowing-lsh-neg.rs create mode 100644 src/tools/miri/tests/compile-fail/overflowing-rsh-2.rs create mode 100644 src/tools/miri/tests/compile-fail/overflowing-rsh.rs create mode 100644 src/tools/miri/tests/compile-fail/overflowing-unchecked-rsh.rs create mode 100644 src/tools/miri/tests/compile-fail/overwriting_part_of_relocation_makes_the_rest_undefined.rs create mode 100644 src/tools/miri/tests/compile-fail/panic.rs create mode 100644 src/tools/miri/tests/compile-fail/pointer_byte_read_1.rs create mode 100644 src/tools/miri/tests/compile-fail/pointer_byte_read_2.rs create mode 100644 src/tools/miri/tests/compile-fail/pointers_to_different_allocations_are_unorderable.rs create mode 100644 
src/tools/miri/tests/compile-fail/ptr_bitops.rs create mode 100644 src/tools/miri/tests/compile-fail/ptr_int_cast.rs create mode 100644 src/tools/miri/tests/compile-fail/ptr_offset_overflow.rs create mode 100644 src/tools/miri/tests/compile-fail/reading_half_a_pointer.rs create mode 100644 src/tools/miri/tests/compile-fail/reallocate-bad-alignment-2.rs create mode 100644 src/tools/miri/tests/compile-fail/reallocate-bad-alignment.rs create mode 100644 src/tools/miri/tests/compile-fail/reallocate-bad-size.rs create mode 100644 src/tools/miri/tests/compile-fail/reallocate-change-alloc.rs create mode 100644 src/tools/miri/tests/compile-fail/reallocate-dangling.rs create mode 100644 src/tools/miri/tests/compile-fail/reference_to_packed.rs create mode 100644 src/tools/miri/tests/compile-fail/repeat.rs create mode 100644 src/tools/miri/tests/compile-fail/repeat2.rs create mode 100644 src/tools/miri/tests/compile-fail/stack_free.rs create mode 100644 src/tools/miri/tests/compile-fail/stack_limit.rs create mode 100644 src/tools/miri/tests/compile-fail/static_memory_modification.rs create mode 100644 src/tools/miri/tests/compile-fail/static_memory_modification2.rs create mode 100644 src/tools/miri/tests/compile-fail/static_memory_modification3.rs create mode 100644 src/tools/miri/tests/compile-fail/timeout.rs create mode 100644 src/tools/miri/tests/compile-fail/transmute-pair-undef.rs create mode 100644 src/tools/miri/tests/compile-fail/transmute_fat.rs create mode 100644 src/tools/miri/tests/compile-fail/transmute_fat2.rs create mode 100644 src/tools/miri/tests/compile-fail/unaligned_ptr_cast.rs create mode 100644 src/tools/miri/tests/compile-fail/unaligned_ptr_cast2.rs create mode 100644 src/tools/miri/tests/compile-fail/unaligned_ptr_cast_zst.rs create mode 100644 src/tools/miri/tests/compile-fail/validation_aliasing_mut1.rs create mode 100644 src/tools/miri/tests/compile-fail/validation_aliasing_mut2.rs create mode 100644 src/tools/miri/tests/compile-fail/validation_aliasing_mut3.rs create mode 100644 src/tools/miri/tests/compile-fail/validation_aliasing_mut4.rs create mode 100644 src/tools/miri/tests/compile-fail/validation_buggy_as_mut_slice.rs create mode 100644 src/tools/miri/tests/compile-fail/validation_buggy_split_at_mut.rs create mode 100644 src/tools/miri/tests/compile-fail/validation_illegal_write.rs create mode 100644 src/tools/miri/tests/compile-fail/validation_lock_confusion.rs create mode 100644 src/tools/miri/tests/compile-fail/validation_pointer_smuggling.rs create mode 100644 src/tools/miri/tests/compile-fail/validation_recover1.rs create mode 100644 src/tools/miri/tests/compile-fail/validation_recover2.rs create mode 100644 src/tools/miri/tests/compile-fail/validation_recover3.rs create mode 100644 src/tools/miri/tests/compile-fail/validation_undef.rs create mode 100644 src/tools/miri/tests/compile-fail/wild_pointer_deref.rs create mode 100644 src/tools/miri/tests/compile-fail/zst.rs create mode 100644 src/tools/miri/tests/compiletest.rs create mode 100644 src/tools/miri/tests/run-pass-fullmir/catch.rs create mode 100644 src/tools/miri/tests/run-pass-fullmir/catch.stdout create mode 100644 src/tools/miri/tests/run-pass-fullmir/foreign-fn-linkname.rs create mode 100644 src/tools/miri/tests/run-pass-fullmir/format.rs create mode 100644 src/tools/miri/tests/run-pass-fullmir/format.stdout create mode 100644 src/tools/miri/tests/run-pass-fullmir/from_utf8.rs create mode 100644 src/tools/miri/tests/run-pass-fullmir/hashmap.rs create mode 100644 
src/tools/miri/tests/run-pass-fullmir/heap.rs create mode 100644 src/tools/miri/tests/run-pass-fullmir/hello.rs create mode 100644 src/tools/miri/tests/run-pass-fullmir/hello.stdout create mode 100644 src/tools/miri/tests/run-pass-fullmir/integer-ops.rs create mode 100644 src/tools/miri/tests/run-pass-fullmir/issue-15080.rs create mode 100644 src/tools/miri/tests/run-pass-fullmir/issue-3794.rs create mode 100644 src/tools/miri/tests/run-pass-fullmir/issue-3794.stdout create mode 100644 src/tools/miri/tests/run-pass-fullmir/loop-break-value.rs create mode 100644 src/tools/miri/tests/run-pass-fullmir/move-arg-2-unique.rs create mode 100644 src/tools/miri/tests/run-pass-fullmir/regions-mock-trans.rs create mode 100644 src/tools/miri/tests/run-pass-fullmir/u128.rs create mode 100644 src/tools/miri/tests/run-pass-fullmir/unsized-tuple-impls.rs create mode 100644 src/tools/miri/tests/run-pass-fullmir/vecs.rs create mode 100644 src/tools/miri/tests/run-pass/arrays.rs create mode 100644 src/tools/miri/tests/run-pass/associated-const.rs create mode 100644 src/tools/miri/tests/run-pass/assume_bug.rs create mode 100644 src/tools/miri/tests/run-pass/atomic-access-bool.rs create mode 100644 src/tools/miri/tests/run-pass/atomic-compare_exchange.rs create mode 100644 src/tools/miri/tests/run-pass/aux_test.rs create mode 100644 src/tools/miri/tests/run-pass/auxiliary/dep.rs create mode 100644 src/tools/miri/tests/run-pass/bad_substs.rs create mode 100644 src/tools/miri/tests/run-pass/binops.rs create mode 100644 src/tools/miri/tests/run-pass/bools.rs create mode 100644 src/tools/miri/tests/run-pass/box_box_trait.rs create mode 100644 src/tools/miri/tests/run-pass/btreemap.rs create mode 100644 src/tools/miri/tests/run-pass/c_enums.rs create mode 100644 src/tools/miri/tests/run-pass/call_drop_on_array_elements.rs create mode 100644 src/tools/miri/tests/run-pass/call_drop_on_fat_ptr_array_elements.rs create mode 100644 src/tools/miri/tests/run-pass/call_drop_on_zst_array_elements.rs create mode 100644 src/tools/miri/tests/run-pass/call_drop_through_owned_slice.rs create mode 100644 src/tools/miri/tests/run-pass/call_drop_through_trait_object.rs create mode 100644 src/tools/miri/tests/run-pass/call_drop_through_trait_object_rc.rs create mode 100644 src/tools/miri/tests/run-pass/calls.rs create mode 100644 src/tools/miri/tests/run-pass/cast-rfc0401-vtable-kinds.rs create mode 100644 src/tools/miri/tests/run-pass/cast_fn_ptr.rs create mode 100644 src/tools/miri/tests/run-pass/cast_fn_ptr_unsafe.rs create mode 100644 src/tools/miri/tests/run-pass/char.rs create mode 100644 src/tools/miri/tests/run-pass/closure-drop.rs create mode 100644 src/tools/miri/tests/run-pass/closure-field-ty.rs create mode 100644 src/tools/miri/tests/run-pass/closures.rs create mode 100644 src/tools/miri/tests/run-pass/const-vec-of-fns.rs create mode 100644 src/tools/miri/tests/run-pass/constants.rs create mode 100644 src/tools/miri/tests/run-pass/deriving-associated-types.rs create mode 100644 src/tools/miri/tests/run-pass/drop_empty_slice.rs create mode 100644 src/tools/miri/tests/run-pass/dst-field-align.rs create mode 100644 src/tools/miri/tests/run-pass/dst-irrefutable-bind.rs create mode 100644 src/tools/miri/tests/run-pass/dst-raw.rs create mode 100644 src/tools/miri/tests/run-pass/dst-struct-sole.rs create mode 100644 src/tools/miri/tests/run-pass/dst-struct.rs create mode 100644 src/tools/miri/tests/run-pass/enum-nullable-const-null-with-fields.rs create mode 100644 src/tools/miri/tests/run-pass/enums.rs create mode 100644 
src/tools/miri/tests/run-pass/float_fast_math.rs create mode 100644 src/tools/miri/tests/run-pass/floats.rs create mode 100644 src/tools/miri/tests/run-pass/fn_item_as_closure_trait_object.rs create mode 100644 src/tools/miri/tests/run-pass/fn_item_with_args_as_closure_trait_object.rs create mode 100644 src/tools/miri/tests/run-pass/fn_item_with_multiple_args_as_closure_trait_object.rs create mode 100644 src/tools/miri/tests/run-pass/fn_ptr_as_closure_trait_object.rs create mode 100644 src/tools/miri/tests/run-pass/function_pointers.rs create mode 100644 src/tools/miri/tests/run-pass/generator_control_flow.rs create mode 100644 src/tools/miri/tests/run-pass/intrinsics-integer.rs create mode 100644 src/tools/miri/tests/run-pass/intrinsics-math.rs create mode 100755 src/tools/miri/tests/run-pass/intrinsics.rs create mode 100644 src/tools/miri/tests/run-pass/ints.rs create mode 100644 src/tools/miri/tests/run-pass/issue-15063.rs create mode 100644 src/tools/miri/tests/run-pass/issue-15523-big.rs create mode 100644 src/tools/miri/tests/run-pass/issue-17877.rs create mode 100644 src/tools/miri/tests/run-pass/issue-20575.rs create mode 100644 src/tools/miri/tests/run-pass/issue-23261.rs create mode 100644 src/tools/miri/tests/run-pass/issue-26709.rs create mode 100644 src/tools/miri/tests/run-pass/issue-27901.rs create mode 100644 src/tools/miri/tests/run-pass/issue-29746.rs create mode 100644 src/tools/miri/tests/run-pass/issue-30530.rs create mode 100644 src/tools/miri/tests/run-pass/issue-31267-additional.rs create mode 100644 src/tools/miri/tests/run-pass/issue-33387.rs create mode 100644 src/tools/miri/tests/run-pass/issue-34571.rs create mode 100644 src/tools/miri/tests/run-pass/issue-35815.rs create mode 100644 src/tools/miri/tests/run-pass/issue-36278-prefix-nesting.rs create mode 100644 src/tools/miri/tests/run-pass/issue-5917.rs create mode 100644 src/tools/miri/tests/run-pass/issue-miri-184.rs create mode 100644 src/tools/miri/tests/run-pass/iter_slice.rs create mode 100644 src/tools/miri/tests/run-pass/last-use-in-cap-clause.rs create mode 100644 src/tools/miri/tests/run-pass/loops.rs create mode 100644 src/tools/miri/tests/run-pass/main_fn.rs create mode 100644 src/tools/miri/tests/run-pass/many_shr_bor.rs create mode 100644 src/tools/miri/tests/run-pass/match_slice.rs create mode 100644 src/tools/miri/tests/run-pass/mir_coercions.rs create mode 100644 src/tools/miri/tests/run-pass/mir_fat_ptr.rs create mode 100644 src/tools/miri/tests/run-pass/miri-issue-133.rs create mode 100644 src/tools/miri/tests/run-pass/move-arg-3-unique.rs create mode 100644 src/tools/miri/tests/run-pass/move-undef-primval.rs create mode 100644 src/tools/miri/tests/run-pass/multi_arg_closure.rs create mode 100644 src/tools/miri/tests/run-pass/negative_discriminant.rs create mode 100644 src/tools/miri/tests/run-pass/non_capture_closure_to_fn_ptr.rs create mode 100644 src/tools/miri/tests/run-pass/observed_local_mut.rs create mode 100644 src/tools/miri/tests/run-pass/option_box_transmute_ptr.rs create mode 100644 src/tools/miri/tests/run-pass/option_eq.rs create mode 100644 src/tools/miri/tests/run-pass/overloaded-calls-simple.rs create mode 100644 src/tools/miri/tests/run-pass/packed_static.rs create mode 100644 src/tools/miri/tests/run-pass/packed_struct.rs create mode 100644 src/tools/miri/tests/run-pass/pointers.rs create mode 100644 src/tools/miri/tests/run-pass/products.rs create mode 100644 src/tools/miri/tests/run-pass/ptr_arith_offset.rs create mode 100644 
src/tools/miri/tests/run-pass/ptr_arith_offset_overflow.rs create mode 100644 src/tools/miri/tests/run-pass/ptr_int_casts.rs create mode 100644 src/tools/miri/tests/run-pass/ptr_offset.rs create mode 100644 src/tools/miri/tests/run-pass/rc.rs create mode 100644 src/tools/miri/tests/run-pass/recursive_static.rs create mode 100644 src/tools/miri/tests/run-pass/ref-invalid-ptr.rs create mode 100644 src/tools/miri/tests/run-pass/regions-lifetime-nonfree-late-bound.rs create mode 100644 src/tools/miri/tests/run-pass/rfc1623.rs create mode 100644 src/tools/miri/tests/run-pass/rust-lang-org.rs create mode 100644 src/tools/miri/tests/run-pass/send-is-not-static-par-for.rs create mode 100644 src/tools/miri/tests/run-pass/sendable-class.rs create mode 100644 src/tools/miri/tests/run-pass/simd-intrinsic-generic-elements.rs create mode 100644 src/tools/miri/tests/run-pass/slice-of-zero-size-elements.rs create mode 100644 src/tools/miri/tests/run-pass/small_enum_size_bug.rs create mode 100644 src/tools/miri/tests/run-pass/specialization.rs create mode 100644 src/tools/miri/tests/run-pass/static_memory_modification.rs create mode 100644 src/tools/miri/tests/run-pass/static_mut.rs create mode 100644 src/tools/miri/tests/run-pass/std.rs create mode 100644 src/tools/miri/tests/run-pass/strings.rs create mode 100644 src/tools/miri/tests/run-pass/subslice_array.rs create mode 100644 src/tools/miri/tests/run-pass/sums.rs create mode 100644 src/tools/miri/tests/run-pass/tag-align-dyn-u64.rs create mode 100644 src/tools/miri/tests/run-pass/thread-local.rs create mode 100644 src/tools/miri/tests/run-pass/too-large-primval-write-problem.rs create mode 100644 src/tools/miri/tests/run-pass/traits.rs create mode 100644 src/tools/miri/tests/run-pass/trivial.rs create mode 100644 src/tools/miri/tests/run-pass/try-operator-custom.rs create mode 100644 src/tools/miri/tests/run-pass/tuple_like_enum_variant_constructor.rs create mode 100644 src/tools/miri/tests/run-pass/tuple_like_enum_variant_constructor_pointer_opt.rs create mode 100644 src/tools/miri/tests/run-pass/tuple_like_enum_variant_constructor_struct_pointer_opt.rs create mode 100644 src/tools/miri/tests/run-pass/tuple_like_struct_constructor.rs create mode 100644 src/tools/miri/tests/run-pass/union-overwrite.rs create mode 100644 src/tools/miri/tests/run-pass/union.rs create mode 100644 src/tools/miri/tests/run-pass/unique-send.rs create mode 100644 src/tools/miri/tests/run-pass/validation_lifetime_resolution.rs create mode 100644 src/tools/miri/tests/run-pass/vec-matching-fold.rs create mode 100644 src/tools/miri/tests/run-pass/write-bytes.rs create mode 100644 src/tools/miri/tests/run-pass/zero-sized-binary-heap-push.rs create mode 100644 src/tools/miri/tests/run-pass/zst.rs create mode 100644 src/tools/miri/tests/run-pass/zst2.rs create mode 100644 src/tools/miri/tests/run-pass/zst_box.rs create mode 100644 src/tools/miri/tests/run-pass/zst_variant_drop.rs create mode 100644 src/tools/miri/tex/final-presentation/latexmkrc create mode 100644 src/tools/miri/tex/final-presentation/rust-logo-512x512.png create mode 100644 src/tools/miri/tex/final-presentation/slides.tex create mode 100644 src/tools/miri/tex/report/latexmkrc create mode 100644 src/tools/miri/tex/report/miri-report.tex create mode 100644 src/tools/miri/xargo/Cargo.lock create mode 100644 src/tools/miri/xargo/Cargo.toml create mode 100644 src/tools/miri/xargo/Xargo.toml create mode 100755 src/tools/miri/xargo/build.sh rename src/{vendor/unicode-segmentation/.cargo-ok => tools/miri/xargo/src/lib.rs} 
(100%) create mode 100644 src/tools/toolstate.toml create mode 100644 src/vendor/backtrace/Cargo.toml.orig create mode 100644 src/vendor/backtrace/src/symbolize/gimli.rs create mode 100644 src/vendor/backtrace/tests/long_fn_name.rs create mode 100644 src/vendor/bitflags-0.7.0/.cargo-checksum.json create mode 100644 src/vendor/bitflags-0.7.0/.cargo-ok create mode 100644 src/vendor/bitflags-0.7.0/.gitignore create mode 100644 src/vendor/bitflags-0.7.0/.travis.yml create mode 100644 src/vendor/bitflags-0.7.0/Cargo.toml rename src/vendor/{gcc => bitflags-0.7.0}/LICENSE-APACHE (100%) create mode 100644 src/vendor/bitflags-0.7.0/LICENSE-MIT create mode 100644 src/vendor/bitflags-0.7.0/README.md create mode 100644 src/vendor/bitflags-0.7.0/src/lib.rs create mode 100644 src/vendor/bitflags-0.7.0/tests/external.rs create mode 100644 src/vendor/bitflags-0.7.0/tests/external_no_std.rs create mode 100644 src/vendor/bitflags-0.9.1/.cargo-checksum.json create mode 100644 src/vendor/bitflags-0.9.1/.cargo-ok create mode 100644 src/vendor/bitflags-0.9.1/.gitignore create mode 100644 src/vendor/bitflags-0.9.1/.travis.yml create mode 100644 src/vendor/bitflags-0.9.1/Cargo.toml create mode 100644 src/vendor/bitflags-0.9.1/Cargo.toml.orig rename src/vendor/{unicode-segmentation => bitflags-0.9.1}/LICENSE-APACHE (100%) create mode 100644 src/vendor/bitflags-0.9.1/LICENSE-MIT create mode 100644 src/vendor/bitflags-0.9.1/README.md create mode 100644 src/vendor/bitflags-0.9.1/src/example_generated.rs create mode 100644 src/vendor/bitflags-0.9.1/src/lib.rs create mode 100644 src/vendor/bitflags-0.9.1/tests/conflicting_trait_impls.rs create mode 100644 src/vendor/bitflags-0.9.1/tests/external.rs create mode 100644 src/vendor/bitflags-0.9.1/tests/external_no_std.rs create mode 100644 src/vendor/bitflags-0.9.1/tests/i128_bitflags.rs create mode 100644 src/vendor/cc/.cargo-checksum.json create mode 100644 src/vendor/cc/.cargo-ok rename src/vendor/{gcc => cc}/.gitignore (100%) rename src/vendor/{gcc => cc}/.travis.yml (98%) create mode 100644 src/vendor/cc/Cargo.toml rename src/vendor/{gcc/Cargo.toml => cc/Cargo.toml.orig} (69%) create mode 100644 src/vendor/cc/LICENSE-APACHE rename src/vendor/{gcc => cc}/LICENSE-MIT (100%) rename src/vendor/{gcc => cc}/README.md (72%) rename src/vendor/{gcc => cc}/appveyor.yml (56%) rename src/vendor/{gcc => cc}/src/bin/gcc-shim.rs (100%) rename src/vendor/{gcc => cc}/src/com.rs (100%) rename src/vendor/{gcc => cc}/src/lib.rs (61%) rename src/vendor/{gcc => cc}/src/registry.rs (100%) rename src/vendor/{gcc => cc}/src/setup_config.rs (100%) rename src/vendor/{gcc => cc}/src/winapi.rs (99%) rename src/vendor/{gcc => cc}/src/windows_registry.rs (99%) rename src/vendor/{gcc => cc}/tests/cc_env.rs (98%) rename src/vendor/{gcc => cc}/tests/support/mod.rs (95%) rename src/vendor/{gcc => cc}/tests/test.rs (73%) create mode 100644 src/vendor/clap/SPONSORS.md create mode 100644 src/vendor/cmake/Cargo.toml.orig create mode 100644 src/vendor/conv/.cargo-checksum.json create mode 100644 src/vendor/conv/.cargo-ok create mode 100644 src/vendor/conv/.gitignore create mode 100644 src/vendor/conv/.travis.yml create mode 100644 src/vendor/conv/Cargo.toml create mode 100644 src/vendor/conv/LICENSE create mode 100644 src/vendor/conv/README.md create mode 100644 src/vendor/conv/src/errors.rs create mode 100644 src/vendor/conv/src/impls.rs create mode 100644 src/vendor/conv/src/lib.rs create mode 100644 src/vendor/conv/src/macros.rs create mode 100644 src/vendor/conv/src/misc.rs create mode 100644 
src/vendor/conv/tests/conv_utils.rs create mode 100644 src/vendor/conv/tests/derive_try_from.rs create mode 100644 src/vendor/conv/tests/lang_char.rs create mode 100644 src/vendor/conv/tests/lang_floats.rs create mode 100644 src/vendor/conv/tests/lang_ints.rs create mode 100644 src/vendor/conv/tests/unwraps.rs create mode 100644 src/vendor/conv/tests/use_in_generics.rs create mode 100644 src/vendor/conv/tests/util/mod.rs create mode 100644 src/vendor/cssparser-macros/.cargo-checksum.json create mode 100644 src/vendor/cssparser-macros/.cargo-ok create mode 100644 src/vendor/cssparser-macros/Cargo.toml create mode 100644 src/vendor/cssparser-macros/lib.rs create mode 100644 src/vendor/cssparser/.cargo-checksum.json create mode 100644 src/vendor/cssparser/.cargo-ok create mode 100644 src/vendor/cssparser/.gitignore create mode 100644 src/vendor/cssparser/.travis.yml create mode 100644 src/vendor/cssparser/Cargo.toml create mode 100644 src/vendor/cssparser/Cargo.toml.orig create mode 100644 src/vendor/cssparser/LICENSE create mode 100644 src/vendor/cssparser/README.md create mode 100644 src/vendor/cssparser/build.rs create mode 100644 src/vendor/cssparser/build/match_byte.rs create mode 100644 src/vendor/cssparser/docs/.nojekyll create mode 100644 src/vendor/cssparser/docs/404.html create mode 100644 src/vendor/cssparser/docs/index.html create mode 100644 src/vendor/cssparser/src/big-data-url.css create mode 100644 src/vendor/cssparser/src/color.rs create mode 100644 src/vendor/cssparser/src/css-parsing-tests/An+B.json create mode 100644 src/vendor/cssparser/src/css-parsing-tests/LICENSE create mode 100644 src/vendor/cssparser/src/css-parsing-tests/README.rst create mode 100644 src/vendor/cssparser/src/css-parsing-tests/color3.json create mode 100644 src/vendor/cssparser/src/css-parsing-tests/color3_hsl.json create mode 100644 src/vendor/cssparser/src/css-parsing-tests/color3_keywords.json create mode 100644 src/vendor/cssparser/src/css-parsing-tests/component_value_list.json create mode 100644 src/vendor/cssparser/src/css-parsing-tests/declaration_list.json create mode 100644 src/vendor/cssparser/src/css-parsing-tests/make_color3_hsl.py create mode 100644 src/vendor/cssparser/src/css-parsing-tests/make_color3_keywords.py create mode 100644 src/vendor/cssparser/src/css-parsing-tests/one_component_value.json create mode 100644 src/vendor/cssparser/src/css-parsing-tests/one_declaration.json create mode 100644 src/vendor/cssparser/src/css-parsing-tests/one_rule.json create mode 100644 src/vendor/cssparser/src/css-parsing-tests/rule_list.json create mode 100644 src/vendor/cssparser/src/css-parsing-tests/stylesheet.json create mode 100644 src/vendor/cssparser/src/css-parsing-tests/stylesheet_bytes.json create mode 100644 src/vendor/cssparser/src/css-parsing-tests/urange.json create mode 100644 src/vendor/cssparser/src/from_bytes.rs create mode 100644 src/vendor/cssparser/src/lib.rs create mode 100644 src/vendor/cssparser/src/macros.rs create mode 100644 src/vendor/cssparser/src/nth.rs create mode 100644 src/vendor/cssparser/src/parser.rs create mode 100644 src/vendor/cssparser/src/rules_and_declarations.rs create mode 100644 src/vendor/cssparser/src/serializer.rs create mode 100644 src/vendor/cssparser/src/tests.rs create mode 100644 src/vendor/cssparser/src/tokenizer.rs create mode 100644 src/vendor/cssparser/src/unicode_range.rs create mode 100644 src/vendor/custom_derive/.cargo-checksum.json create mode 100644 src/vendor/custom_derive/.cargo-ok create mode 100644 
src/vendor/custom_derive/.gitignore create mode 100644 src/vendor/custom_derive/Cargo.toml create mode 100644 src/vendor/custom_derive/LICENSE create mode 100644 src/vendor/custom_derive/README.md create mode 100644 src/vendor/custom_derive/src/lib.rs create mode 100644 src/vendor/custom_derive/tests/empty_bi_derives.rs create mode 100644 src/vendor/custom_derive/tests/enum_iterator.rs create mode 100644 src/vendor/custom_derive/tests/enum_try_from.rs create mode 100644 src/vendor/custom_derive/tests/passthru_derive.rs create mode 100644 src/vendor/custom_derive/tests/stable_encodable.rs create mode 100644 src/vendor/custom_derive/tests/trailing_comma.rs create mode 100644 src/vendor/debug_unreachable/.cargo-checksum.json create mode 100644 src/vendor/debug_unreachable/.cargo-ok create mode 100644 src/vendor/debug_unreachable/.gitignore create mode 100644 src/vendor/debug_unreachable/.travis.yml create mode 100644 src/vendor/debug_unreachable/Cargo.toml create mode 100644 src/vendor/debug_unreachable/README.md create mode 100644 src/vendor/debug_unreachable/examples/simple.rs create mode 100644 src/vendor/debug_unreachable/src/lib.rs create mode 100644 src/vendor/debug_unreachable/tests/check.rs create mode 100644 src/vendor/dtoa/Cargo.toml.orig delete mode 100644 src/vendor/dtoa/performance.png delete mode 100644 src/vendor/error-chain-0.10.0/.cargo-checksum.json delete mode 100644 src/vendor/error-chain-0.10.0/CHANGELOG.md create mode 100644 src/vendor/error-chain/.cargo-checksum.json create mode 100644 src/vendor/error-chain/.cargo-ok rename src/vendor/{error-chain-0.10.0 => error-chain}/.gitignore (100%) rename src/vendor/{error-chain-0.10.0 => error-chain}/.travis.yml (97%) create mode 100644 src/vendor/error-chain/CHANGELOG.md create mode 100644 src/vendor/error-chain/Cargo.toml rename src/vendor/{error-chain-0.10.0/Cargo.toml => error-chain/Cargo.toml.orig} (71%) create mode 100644 src/vendor/error-chain/LICENSE-APACHE create mode 100644 src/vendor/error-chain/LICENSE-MIT rename src/vendor/{error-chain-0.10.0 => error-chain}/README.md (68%) rename src/vendor/{error-chain-0.10.0 => error-chain}/examples/all.rs (95%) create mode 100644 src/vendor/error-chain/examples/chain_err.rs rename src/vendor/{error-chain-0.10.0 => error-chain}/examples/doc.rs (94%) rename src/vendor/{error-chain-0.10.0 => error-chain}/examples/quickstart.rs (84%) rename src/vendor/{error-chain-0.10.0 => error-chain}/examples/size.rs (93%) create mode 100644 src/vendor/error-chain/src/bin/has_backtrace.rs rename src/vendor/{error-chain-0.10.0 => error-chain}/src/error_chain.rs (85%) rename src/vendor/{error-chain-0.10.0 => error-chain}/src/example_generated.rs (98%) rename src/vendor/{error-chain-0.10.0/src/quick_error.rs => error-chain/src/impl_error_chain_kind.rs} (83%) rename src/vendor/{error-chain-0.10.0 => error-chain}/src/lib.rs (57%) rename src/vendor/{error-chain-0.10.0 => error-chain}/src/quick_main.rs (88%) rename src/vendor/{error-chain-0.10.0 => error-chain}/tests/quick_main.rs (84%) rename src/vendor/{error-chain-0.10.0 => error-chain}/tests/tests.rs (76%) create mode 100644 src/vendor/filetime/Cargo.toml.orig create mode 100644 src/vendor/filetime/src/redox.rs create mode 100644 src/vendor/filetime/src/unix.rs create mode 100644 src/vendor/filetime/src/windows.rs create mode 100644 src/vendor/flate2/Cargo.toml.orig create mode 100644 src/vendor/flate2/examples/deflatedecoder-bufread.rs create mode 100644 src/vendor/flate2/examples/deflatedecoder-read.rs create mode 100644 
src/vendor/flate2/examples/deflatedecoder-write.rs create mode 100644 src/vendor/flate2/examples/deflateencoder-bufread.rs create mode 100644 src/vendor/flate2/examples/deflateencoder-read.rs create mode 100644 src/vendor/flate2/examples/deflateencoder-write.rs create mode 100644 src/vendor/flate2/examples/flatereadext.rs create mode 100644 src/vendor/flate2/examples/gzbuilder.rs create mode 100644 src/vendor/flate2/examples/gzdecoder-bufread.rs create mode 100644 src/vendor/flate2/examples/gzdecoder-read.rs create mode 100644 src/vendor/flate2/examples/gzencoder-bufread.rs create mode 100644 src/vendor/flate2/examples/gzencoder-read.rs create mode 100644 src/vendor/flate2/examples/gzencoder-write.rs create mode 100644 src/vendor/flate2/examples/gzmultidecoder-bufread.rs create mode 100644 src/vendor/flate2/examples/gzmultidecoder-read.rs create mode 100644 src/vendor/flate2/examples/hello_world.txt create mode 100644 src/vendor/flate2/examples/zlibdecoder-bufread.rs create mode 100644 src/vendor/flate2/examples/zlibdecoder-read.rs create mode 100644 src/vendor/flate2/examples/zlibdecoder-write.rs create mode 100644 src/vendor/flate2/examples/zlibencoder-bufread.rs create mode 100644 src/vendor/flate2/examples/zlibencoder-read.rs create mode 100644 src/vendor/flate2/examples/zlibencoder-write.rs delete mode 100644 src/vendor/flate2/src/deflate.rs create mode 100644 src/vendor/flate2/src/deflate/bufread.rs create mode 100644 src/vendor/flate2/src/deflate/mod.rs create mode 100644 src/vendor/flate2/src/deflate/read.rs create mode 100644 src/vendor/flate2/src/deflate/write.rs delete mode 100644 src/vendor/flate2/src/gz.rs create mode 100644 src/vendor/flate2/src/gz/bufread.rs create mode 100644 src/vendor/flate2/src/gz/mod.rs create mode 100644 src/vendor/flate2/src/gz/read.rs create mode 100644 src/vendor/flate2/src/gz/write.rs delete mode 100644 src/vendor/flate2/src/zlib.rs create mode 100644 src/vendor/flate2/src/zlib/bufread.rs create mode 100644 src/vendor/flate2/src/zlib/mod.rs create mode 100644 src/vendor/flate2/src/zlib/read.rs create mode 100644 src/vendor/flate2/src/zlib/write.rs create mode 100644 src/vendor/flate2/tests/early-flush.rs create mode 100644 src/vendor/flate2/tests/zero-write.rs create mode 100644 src/vendor/fnv/.cargo-checksum.json create mode 100644 src/vendor/fnv/.cargo-ok create mode 100644 src/vendor/fnv/.gitignore create mode 100644 src/vendor/fnv/.travis.yml create mode 100644 src/vendor/fnv/Cargo.toml create mode 100644 src/vendor/fnv/README.md create mode 100644 src/vendor/fnv/lib.rs create mode 100644 src/vendor/futf/.cargo-checksum.json create mode 100644 src/vendor/futf/.cargo-ok create mode 100644 src/vendor/futf/.gitignore create mode 100644 src/vendor/futf/.travis.yml create mode 100644 src/vendor/futf/Cargo.toml create mode 100644 src/vendor/futf/LICENSE-APACHE create mode 100644 src/vendor/futf/LICENSE-MIT create mode 100644 src/vendor/futf/README.md create mode 100644 src/vendor/futf/src/lib.rs create mode 100644 src/vendor/futf/src/test.rs delete mode 100644 src/vendor/gcc/.cargo-checksum.json create mode 100644 src/vendor/getopts/Cargo.toml.orig create mode 100644 src/vendor/handlebars/Cargo.toml.orig delete mode 100644 src/vendor/handlebars/examples/partials_legacy/base0.hbs delete mode 100644 src/vendor/handlebars/examples/partials_legacy/base1.hbs delete mode 100644 src/vendor/handlebars/examples/partials_legacy/template2.hbs create mode 100644 src/vendor/handlebars/examples/quick.rs delete mode 100644 
src/vendor/handlebars/src/helpers/helper_partial.rs create mode 100644 src/vendor/html-diff/.cargo-checksum.json create mode 100644 src/vendor/html-diff/.cargo-ok create mode 100644 src/vendor/html-diff/.travis.yml create mode 100644 src/vendor/html-diff/Cargo.toml create mode 100644 src/vendor/html-diff/Cargo.toml.orig create mode 100644 src/vendor/html-diff/LICENSE create mode 100644 src/vendor/html-diff/README.md create mode 100644 src/vendor/html-diff/src/lib.rs create mode 100644 src/vendor/html-diff/src/main.rs create mode 100644 src/vendor/html-diff/test_files/basic.html create mode 100644 src/vendor/html-diff/test_files/basic.stdout create mode 100644 src/vendor/html-diff/test_files/basic_compare.html create mode 100644 src/vendor/html-diff/tests/test_files.rs create mode 100644 src/vendor/html5ever/.cargo-checksum.json create mode 100644 src/vendor/html5ever/.cargo-ok create mode 100644 src/vendor/html5ever/Cargo.toml create mode 100644 src/vendor/html5ever/benches/tokenizer.rs create mode 100644 src/vendor/html5ever/build.rs create mode 100644 src/vendor/html5ever/data/bench/lipsum-zh.html create mode 100644 src/vendor/html5ever/data/bench/lipsum.html create mode 100644 src/vendor/html5ever/data/bench/medium-fragment.html create mode 100644 src/vendor/html5ever/data/bench/small-fragment.html create mode 100644 src/vendor/html5ever/data/bench/strong.html create mode 100644 src/vendor/html5ever/data/bench/tiny-fragment.html create mode 100644 src/vendor/html5ever/data/test/ignore create mode 100644 src/vendor/html5ever/examples/capi/tokenize.c create mode 100644 src/vendor/html5ever/examples/html2html.rs create mode 100644 src/vendor/html5ever/examples/noop-tokenize.rs create mode 100644 src/vendor/html5ever/examples/noop-tree-builder.rs create mode 100644 src/vendor/html5ever/examples/print-rcdom.rs create mode 100644 src/vendor/html5ever/examples/print-tree-actions.rs create mode 100644 src/vendor/html5ever/examples/tokenize.rs create mode 100644 src/vendor/html5ever/macros/match_token.rs create mode 100644 src/vendor/html5ever/src/driver.rs create mode 100644 src/vendor/html5ever/src/lib.rs create mode 100644 src/vendor/html5ever/src/macros.rs create mode 100644 src/vendor/html5ever/src/serialize/mod.rs create mode 100644 src/vendor/html5ever/src/tokenizer/char_ref/mod.rs create mode 100644 src/vendor/html5ever/src/tokenizer/interface.rs create mode 100644 src/vendor/html5ever/src/tokenizer/mod.rs create mode 100644 src/vendor/html5ever/src/tokenizer/states.rs create mode 100644 src/vendor/html5ever/src/tree_builder/actions.rs create mode 100644 src/vendor/html5ever/src/tree_builder/data.rs create mode 100644 src/vendor/html5ever/src/tree_builder/mod.rs create mode 100644 src/vendor/html5ever/src/tree_builder/rules.rs create mode 100644 src/vendor/html5ever/src/tree_builder/tag_sets.rs create mode 100644 src/vendor/html5ever/src/tree_builder/types.rs create mode 100644 src/vendor/html5ever/src/util/str.rs create mode 100644 src/vendor/html5ever/tests/foreach_html5lib_test/mod.rs create mode 100644 src/vendor/html5ever/tests/serializer.rs create mode 100644 src/vendor/html5ever/tests/tokenizer.rs create mode 100644 src/vendor/html5ever/tests/tree_builder.rs create mode 100644 src/vendor/itoa/Cargo.toml.orig delete mode 100644 src/vendor/itoa/performance.png create mode 100644 src/vendor/itoa/src/udiv128.rs create mode 100644 src/vendor/kuchiki/.cargo-checksum.json create mode 100644 src/vendor/kuchiki/.cargo-ok create mode 100644 src/vendor/kuchiki/.gitignore create mode 100644 
src/vendor/kuchiki/.travis.yml create mode 100644 src/vendor/kuchiki/Cargo.toml create mode 100644 src/vendor/kuchiki/Cargo.toml.orig create mode 100644 src/vendor/kuchiki/README.md create mode 100644 src/vendor/kuchiki/docs/.nojekyll create mode 100644 src/vendor/kuchiki/docs/404.html create mode 100644 src/vendor/kuchiki/docs/index.html create mode 100644 src/vendor/kuchiki/examples/find_matches.rs create mode 100644 src/vendor/kuchiki/examples/stack-overflow.rs create mode 100644 src/vendor/kuchiki/src/attributes.rs create mode 100644 src/vendor/kuchiki/src/iter.rs create mode 100644 src/vendor/kuchiki/src/lib.rs create mode 100644 src/vendor/kuchiki/src/move_cell.rs create mode 100644 src/vendor/kuchiki/src/node_data_ref.rs create mode 100644 src/vendor/kuchiki/src/parser.rs create mode 100644 src/vendor/kuchiki/src/select.rs create mode 100644 src/vendor/kuchiki/src/serializer.rs create mode 100644 src/vendor/kuchiki/src/tests.rs create mode 100644 src/vendor/kuchiki/src/tree.rs create mode 100644 src/vendor/kuchiki/test_data/foo.html create mode 100644 src/vendor/libc/ci/docker/asmjs-unknown-emscripten/Dockerfile create mode 100644 src/vendor/libc/ci/docker/wasm32-unknown-emscripten/Dockerfile create mode 100755 src/vendor/libc/ci/docker/wasm32-unknown-emscripten/node-wrapper.sh create mode 100644 src/vendor/libc/ci/docker/x86_64-rumprun-netbsd/runtest.rs delete mode 100644 src/vendor/libc/ci/docker/x86_64-unknown-openbsd/Dockerfile create mode 100755 src/vendor/libc/ci/emscripten-entry.sh create mode 100644 src/vendor/libc/ci/emscripten.sh create mode 100644 src/vendor/libc/ci/runtest-android.rs create mode 100644 src/vendor/libc/src/unix/notbsd/emscripten.rs delete mode 100644 src/vendor/libc/src/unix/notbsd/linux/musl/b32/asmjs.rs create mode 100644 src/vendor/libc/src/unix/uclibc/x86_64/l4re.rs create mode 100644 src/vendor/mac/.cargo-checksum.json create mode 100644 src/vendor/mac/.cargo-ok create mode 100644 src/vendor/mac/.gitignore create mode 100644 src/vendor/mac/.travis.yml create mode 100644 src/vendor/mac/Cargo.toml create mode 100644 src/vendor/mac/README.md create mode 100644 src/vendor/mac/src/cfg.rs create mode 100644 src/vendor/mac/src/format.rs create mode 100644 src/vendor/mac/src/inspect.rs create mode 100644 src/vendor/mac/src/lib.rs create mode 100644 src/vendor/mac/src/matches.rs create mode 100644 src/vendor/mac/src/mem.rs create mode 100644 src/vendor/mac/src/syntax_ext.rs create mode 100644 src/vendor/mac/src/test.rs create mode 100644 src/vendor/magenta-sys/.cargo-checksum.json create mode 100644 src/vendor/magenta-sys/.cargo-ok create mode 100644 src/vendor/magenta-sys/BUILD.gn create mode 100644 src/vendor/magenta-sys/Cargo.toml create mode 100644 src/vendor/magenta-sys/examples/hello.rs create mode 100644 src/vendor/magenta-sys/src/definitions.rs create mode 100644 src/vendor/magenta-sys/src/lib.rs create mode 100644 src/vendor/magenta/.cargo-checksum.json create mode 100644 src/vendor/magenta/.cargo-ok create mode 100644 src/vendor/magenta/.gitignore create mode 100644 src/vendor/magenta/AUTHORS create mode 100644 src/vendor/magenta/BUILD.gn create mode 100644 src/vendor/magenta/CONTRIBUTING.md create mode 100644 src/vendor/magenta/Cargo.toml create mode 100644 src/vendor/magenta/GETTING_STARTED.md create mode 100644 src/vendor/magenta/LICENSE create mode 100644 src/vendor/magenta/PATENTS create mode 100644 src/vendor/magenta/README.md create mode 100644 src/vendor/magenta/examples/BUILD.gn create mode 100644 src/vendor/magenta/src/channel.rs create 
mode 100644 src/vendor/magenta/src/event.rs create mode 100644 src/vendor/magenta/src/eventpair.rs create mode 100644 src/vendor/magenta/src/fifo.rs create mode 100644 src/vendor/magenta/src/job.rs create mode 100644 src/vendor/magenta/src/lib.rs create mode 100644 src/vendor/magenta/src/port.rs create mode 100644 src/vendor/magenta/src/process.rs create mode 100644 src/vendor/magenta/src/socket.rs create mode 100644 src/vendor/magenta/src/thread.rs create mode 100644 src/vendor/magenta/src/timer.rs create mode 100644 src/vendor/magenta/src/vmo.rs create mode 100644 src/vendor/magenta/tools/BUILD.gn create mode 100644 src/vendor/magenta/tools/README.md create mode 100644 src/vendor/magenta/tools/clang_wrapper.cc create mode 100755 src/vendor/magenta/tools/gen_status.py create mode 100644 src/vendor/markup5ever/.cargo-checksum.json create mode 100644 src/vendor/markup5ever/.cargo-ok create mode 100644 src/vendor/markup5ever/Cargo.toml create mode 100644 src/vendor/markup5ever/Cargo.toml.orig create mode 100644 src/vendor/markup5ever/build.rs create mode 100644 src/vendor/markup5ever/data/entities.json create mode 100644 src/vendor/markup5ever/data/mod.rs create mode 100644 src/vendor/markup5ever/interface/mod.rs create mode 100644 src/vendor/markup5ever/interface/tree_builder.rs create mode 100644 src/vendor/markup5ever/lib.rs create mode 100644 src/vendor/markup5ever/local_names.txt create mode 100644 src/vendor/markup5ever/rcdom.rs create mode 100644 src/vendor/markup5ever/serialize.rs create mode 100644 src/vendor/markup5ever/util/buffer_queue.rs create mode 100644 src/vendor/markup5ever/util/smallcharset.rs create mode 100644 src/vendor/matches/.cargo-checksum.json create mode 100644 src/vendor/matches/.cargo-ok create mode 100644 src/vendor/matches/Cargo.toml create mode 100644 src/vendor/matches/LICENSE create mode 100644 src/vendor/matches/lib.rs create mode 100644 src/vendor/mdbook/Cargo.toml.orig rename src/vendor/mdbook/{ => ci}/deploy.sh (95%) delete mode 100644 src/vendor/mdbook/ci/install.sh delete mode 100644 src/vendor/mdbook/ci/script.sh create mode 100644 src/vendor/mdbook/src/bin/build.rs create mode 100644 src/vendor/mdbook/src/bin/init.rs create mode 100644 src/vendor/mdbook/src/bin/serve.rs create mode 100644 src/vendor/mdbook/src/bin/test.rs create mode 100644 src/vendor/mdbook/src/bin/watch.rs delete mode 100644 src/vendor/mdbook/src/book/bookconfig.rs delete mode 100644 src/vendor/mdbook/src/book/bookconfig_test.rs create mode 100644 src/vendor/mdbook/src/config/bookconfig.rs create mode 100644 src/vendor/mdbook/src/config/htmlconfig.rs create mode 100644 src/vendor/mdbook/src/config/jsonconfig.rs create mode 100644 src/vendor/mdbook/src/config/mod.rs create mode 100644 src/vendor/mdbook/src/config/playpenconfig.rs create mode 100644 src/vendor/mdbook/src/config/tomlconfig.rs create mode 100644 src/vendor/mdbook/src/preprocess/links.rs create mode 100644 src/vendor/mdbook/src/preprocess/mod.rs delete mode 100644 src/vendor/mdbook/src/renderer/html_handlebars/helpers/playpen.rs create mode 100644 src/vendor/mdbook/src/theme/ayu-highlight.css delete mode 100644 src/vendor/mdbook/src/theme/stylus/book.styl delete mode 100644 src/vendor/mdbook/src/theme/stylus/general.styl delete mode 100644 src/vendor/mdbook/src/theme/stylus/menu.styl delete mode 100644 src/vendor/mdbook/src/theme/stylus/nav-icons.styl delete mode 100644 src/vendor/mdbook/src/theme/stylus/page.styl delete mode 100644 src/vendor/mdbook/src/theme/stylus/print.styl delete mode 100644 
src/vendor/mdbook/src/theme/stylus/sidebar.styl delete mode 100644 src/vendor/mdbook/src/theme/stylus/theme-popup.styl delete mode 100644 src/vendor/mdbook/src/theme/stylus/themes/base.styl delete mode 100644 src/vendor/mdbook/src/theme/stylus/themes/coal.styl delete mode 100644 src/vendor/mdbook/src/theme/stylus/themes/index.styl delete mode 100644 src/vendor/mdbook/src/theme/stylus/themes/light.styl delete mode 100644 src/vendor/mdbook/src/theme/stylus/themes/navy.styl delete mode 100644 src/vendor/mdbook/src/theme/stylus/themes/rust.styl delete mode 100644 src/vendor/mdbook/src/theme/stylus/variables.styl create mode 100644 src/vendor/mdbook/tests/config.rs create mode 100644 src/vendor/mdbook/tests/dummy/book/SUMMARY.md create mode 100644 src/vendor/mdbook/tests/dummy/book/conclusion.md create mode 100644 src/vendor/mdbook/tests/dummy/book/first/index.md create mode 100644 src/vendor/mdbook/tests/dummy/book/first/nested.md create mode 100644 src/vendor/mdbook/tests/dummy/book/intro.md create mode 100644 src/vendor/mdbook/tests/dummy/book/second.md create mode 100644 src/vendor/mdbook/tests/dummy/mod.rs create mode 100644 src/vendor/mdbook/tests/helpers/mod.rs create mode 100644 src/vendor/mdbook/tests/init.rs create mode 100644 src/vendor/mdbook/tests/jsonconfig.rs create mode 100644 src/vendor/mdbook/tests/rendered_output.rs create mode 100644 src/vendor/mdbook/tests/testing.rs create mode 100644 src/vendor/mdbook/tests/tomlconfig.rs create mode 100644 src/vendor/miniz-sys/Cargo.toml.orig create mode 100644 src/vendor/open/Cargo.toml.orig create mode 100644 src/vendor/phf/.cargo-checksum.json create mode 100644 src/vendor/phf/.cargo-ok create mode 100644 src/vendor/phf/Cargo.toml create mode 100644 src/vendor/phf/src/lib.rs create mode 100644 src/vendor/phf/src/map.rs create mode 100644 src/vendor/phf/src/ordered_map.rs create mode 100644 src/vendor/phf/src/ordered_set.rs create mode 100644 src/vendor/phf/src/set.rs create mode 100644 src/vendor/phf_codegen/.cargo-checksum.json create mode 100644 src/vendor/phf_codegen/.cargo-ok create mode 100644 src/vendor/phf_codegen/Cargo.toml create mode 100644 src/vendor/phf_codegen/src/lib.rs create mode 100644 src/vendor/phf_generator/.cargo-checksum.json create mode 100644 src/vendor/phf_generator/.cargo-ok create mode 100644 src/vendor/phf_generator/Cargo.toml create mode 100644 src/vendor/phf_generator/src/lib.rs create mode 100644 src/vendor/phf_shared/.cargo-checksum.json create mode 100644 src/vendor/phf_shared/.cargo-ok create mode 100644 src/vendor/phf_shared/Cargo.toml create mode 100644 src/vendor/phf_shared/src/lib.rs create mode 100644 src/vendor/pkg-config/.cargo-checksum.json create mode 100644 src/vendor/pkg-config/.cargo-ok create mode 100644 src/vendor/pkg-config/.gitignore create mode 100644 src/vendor/pkg-config/.travis.yml create mode 100644 src/vendor/pkg-config/Cargo.toml create mode 100644 src/vendor/pkg-config/LICENSE-APACHE create mode 100644 src/vendor/pkg-config/LICENSE-MIT create mode 100644 src/vendor/pkg-config/README.md create mode 100644 src/vendor/pkg-config/src/lib.rs create mode 100644 src/vendor/pkg-config/tests/foo.pc create mode 100644 src/vendor/pkg-config/tests/framework.pc create mode 100644 src/vendor/pkg-config/tests/test.rs create mode 100644 src/vendor/precomputed-hash/.cargo-checksum.json create mode 100644 src/vendor/precomputed-hash/.cargo-ok create mode 100644 src/vendor/precomputed-hash/.gitignore create mode 100644 src/vendor/precomputed-hash/Cargo.toml create mode 100644 
src/vendor/precomputed-hash/LICENSE create mode 100644 src/vendor/precomputed-hash/src/lib.rs create mode 100644 src/vendor/procedural-masquerade/.cargo-checksum.json create mode 100644 src/vendor/procedural-masquerade/.cargo-ok create mode 100644 src/vendor/procedural-masquerade/Cargo.toml create mode 100644 src/vendor/procedural-masquerade/lib.rs create mode 100644 src/vendor/pulldown-cmark-0.0.14/.cargo-checksum.json create mode 100644 src/vendor/pulldown-cmark-0.0.14/.cargo-ok create mode 100644 src/vendor/pulldown-cmark-0.0.14/.gitignore create mode 100644 src/vendor/pulldown-cmark-0.0.14/CONTRIBUTING.md create mode 100644 src/vendor/pulldown-cmark-0.0.14/Cargo.toml create mode 100644 src/vendor/pulldown-cmark-0.0.14/LICENSE create mode 100644 src/vendor/pulldown-cmark-0.0.14/README.md create mode 100644 src/vendor/pulldown-cmark-0.0.14/build.rs create mode 100644 src/vendor/pulldown-cmark-0.0.14/specs/footnotes.txt create mode 100644 src/vendor/pulldown-cmark-0.0.14/specs/table.txt create mode 100644 src/vendor/pulldown-cmark-0.0.14/src/entities.rs create mode 100644 src/vendor/pulldown-cmark-0.0.14/src/escape.rs create mode 100644 src/vendor/pulldown-cmark-0.0.14/src/html.rs create mode 100644 src/vendor/pulldown-cmark-0.0.14/src/lib.rs create mode 100644 src/vendor/pulldown-cmark-0.0.14/src/main.rs create mode 100644 src/vendor/pulldown-cmark-0.0.14/src/parse.rs create mode 100644 src/vendor/pulldown-cmark-0.0.14/src/passes.rs create mode 100644 src/vendor/pulldown-cmark-0.0.14/src/puncttable.rs create mode 100644 src/vendor/pulldown-cmark-0.0.14/src/scanners.rs create mode 100644 src/vendor/pulldown-cmark-0.0.14/src/utils.rs create mode 100644 src/vendor/pulldown-cmark-0.0.14/tests/footnotes.rs create mode 100644 src/vendor/pulldown-cmark-0.0.14/tests/html.rs create mode 100644 src/vendor/pulldown-cmark-0.0.14/tests/spec.rs create mode 100644 src/vendor/pulldown-cmark-0.0.14/tests/table.rs create mode 100644 src/vendor/pulldown-cmark-0.0.14/third_party/CommonMark/LICENSE create mode 100644 src/vendor/pulldown-cmark-0.0.14/third_party/CommonMark/README.google create mode 100644 src/vendor/pulldown-cmark-0.0.14/third_party/CommonMark/spec.txt create mode 100644 src/vendor/pulldown-cmark-0.0.14/tools/mk_entities.py create mode 100644 src/vendor/pulldown-cmark-0.0.14/tools/mk_puncttable.py create mode 100644 src/vendor/pulldown-cmark/Cargo.toml.orig create mode 100644 src/vendor/pulldown-cmark/tests/errors.rs create mode 100644 src/vendor/quick-error/Cargo.toml.orig create mode 100644 src/vendor/quick-error/bulk.yaml create mode 100644 src/vendor/rand/Cargo.toml.orig create mode 100644 src/vendor/redox_syscall/.cargo-checksum.json create mode 100644 src/vendor/redox_syscall/.cargo-ok create mode 100644 src/vendor/redox_syscall/.gitignore create mode 100644 src/vendor/redox_syscall/Cargo.toml create mode 100644 src/vendor/redox_syscall/Cargo.toml.orig create mode 100644 src/vendor/redox_syscall/LICENSE create mode 100644 src/vendor/redox_syscall/README.md create mode 100644 src/vendor/redox_syscall/src/arch/arm.rs create mode 100644 src/vendor/redox_syscall/src/arch/x86.rs create mode 100644 src/vendor/redox_syscall/src/arch/x86_64.rs create mode 100644 src/vendor/redox_syscall/src/call.rs create mode 100644 src/vendor/redox_syscall/src/data.rs create mode 100644 src/vendor/redox_syscall/src/error.rs create mode 100644 src/vendor/redox_syscall/src/flag.rs create mode 100644 src/vendor/redox_syscall/src/io/dma.rs create mode 100644 src/vendor/redox_syscall/src/io/io.rs create mode 
100644 src/vendor/redox_syscall/src/io/mmio.rs create mode 100644 src/vendor/redox_syscall/src/io/mod.rs create mode 100644 src/vendor/redox_syscall/src/io/pio.rs create mode 100644 src/vendor/redox_syscall/src/lib.rs create mode 100644 src/vendor/redox_syscall/src/number.rs create mode 100644 src/vendor/redox_syscall/src/scheme.rs create mode 100644 src/vendor/selectors/.cargo-checksum.json create mode 100644 src/vendor/selectors/.cargo-ok create mode 100644 src/vendor/selectors/Cargo.toml create mode 100644 src/vendor/selectors/README.md create mode 100644 src/vendor/selectors/arcslice.rs create mode 100644 src/vendor/selectors/attr.rs create mode 100644 src/vendor/selectors/bloom.rs create mode 100644 src/vendor/selectors/build.rs create mode 100644 src/vendor/selectors/gecko_like_types.rs create mode 100644 src/vendor/selectors/lib.rs create mode 100644 src/vendor/selectors/matching.rs create mode 100644 src/vendor/selectors/parser.rs create mode 100644 src/vendor/selectors/size_of_tests.rs create mode 100644 src/vendor/selectors/tree.rs create mode 100644 src/vendor/selectors/visitor.rs create mode 100644 src/vendor/serde_derive_internals/Cargo.toml.orig create mode 100644 src/vendor/serde_json/Cargo.toml.orig create mode 100644 src/vendor/siphasher/.cargo-checksum.json create mode 100644 src/vendor/siphasher/.cargo-ok create mode 100644 src/vendor/siphasher/.gitignore create mode 100644 src/vendor/siphasher/.travis.yml create mode 100644 src/vendor/siphasher/COPYING create mode 100644 src/vendor/siphasher/Cargo.toml create mode 100644 src/vendor/siphasher/README.md create mode 100644 src/vendor/siphasher/src/lib.rs create mode 100644 src/vendor/siphasher/src/sip.rs create mode 100644 src/vendor/siphasher/src/sip128.rs create mode 100644 src/vendor/siphasher/src/tests.rs create mode 100644 src/vendor/siphasher/src/tests128.rs create mode 100644 src/vendor/smallvec/.cargo-checksum.json create mode 100644 src/vendor/smallvec/.cargo-ok create mode 100644 src/vendor/smallvec/.gitignore create mode 100644 src/vendor/smallvec/.travis.yml create mode 100644 src/vendor/smallvec/Cargo.toml create mode 100644 src/vendor/smallvec/README.md create mode 100644 src/vendor/smallvec/benches/bench.rs create mode 100644 src/vendor/smallvec/lib.rs create mode 100644 src/vendor/string_cache/.cargo-checksum.json create mode 100644 src/vendor/string_cache/.cargo-ok create mode 100644 src/vendor/string_cache/.gitignore create mode 100644 src/vendor/string_cache/.travis.yml create mode 100644 src/vendor/string_cache/Cargo.toml create mode 100644 src/vendor/string_cache/Cargo.toml.orig create mode 100644 src/vendor/string_cache/LICENSE-APACHE create mode 100644 src/vendor/string_cache/LICENSE-MIT create mode 100644 src/vendor/string_cache/README.md create mode 100644 src/vendor/string_cache/build.rs create mode 100644 src/vendor/string_cache/src/atom.rs create mode 100644 src/vendor/string_cache/src/bench.rs create mode 100644 src/vendor/string_cache/src/event.rs create mode 100644 src/vendor/string_cache/src/lib.rs create mode 100644 src/vendor/string_cache_codegen/.cargo-checksum.json create mode 100644 src/vendor/string_cache_codegen/.cargo-ok create mode 100644 src/vendor/string_cache_codegen/Cargo.toml create mode 100644 src/vendor/string_cache_codegen/lib.rs create mode 100644 src/vendor/string_cache_shared/.cargo-checksum.json create mode 100644 src/vendor/string_cache_shared/.cargo-ok create mode 100644 src/vendor/string_cache_shared/Cargo.toml create mode 100644 src/vendor/string_cache_shared/lib.rs 
create mode 100644 src/vendor/tempdir/.cargo-checksum.json create mode 100644 src/vendor/tempdir/.cargo-ok create mode 100644 src/vendor/tempdir/.gitignore create mode 100644 src/vendor/tempdir/.travis.yml create mode 100644 src/vendor/tempdir/Cargo.toml create mode 100644 src/vendor/tempdir/LICENSE-APACHE create mode 100644 src/vendor/tempdir/LICENSE-MIT create mode 100644 src/vendor/tempdir/README.md create mode 100644 src/vendor/tempdir/src/lib.rs create mode 100644 src/vendor/tempdir/tests/smoke.rs create mode 100644 src/vendor/tendril/.cargo-checksum.json create mode 100644 src/vendor/tendril/.cargo-ok create mode 100644 src/vendor/tendril/.gitignore create mode 100644 src/vendor/tendril/.travis.yml create mode 100644 src/vendor/tendril/Cargo.toml create mode 100644 src/vendor/tendril/Cargo.toml.orig create mode 100644 src/vendor/tendril/LICENSE-APACHE create mode 100644 src/vendor/tendril/LICENSE-MIT create mode 100644 src/vendor/tendril/README.md create mode 100644 src/vendor/tendril/examples/fuzz.rs create mode 100644 src/vendor/tendril/src/bench.rs create mode 100644 src/vendor/tendril/src/buf32.rs create mode 100644 src/vendor/tendril/src/fmt.rs create mode 100644 src/vendor/tendril/src/lib.rs create mode 100644 src/vendor/tendril/src/stream.rs create mode 100644 src/vendor/tendril/src/tendril.rs create mode 100644 src/vendor/tendril/src/utf8_decode.rs create mode 100644 src/vendor/tendril/src/util.rs delete mode 100644 src/vendor/unicode-segmentation/.cargo-checksum.json delete mode 100644 src/vendor/unicode-segmentation/.gitignore delete mode 100644 src/vendor/unicode-segmentation/.travis.yml delete mode 100644 src/vendor/unicode-segmentation/COPYRIGHT delete mode 100644 src/vendor/unicode-segmentation/Cargo.toml delete mode 100644 src/vendor/unicode-segmentation/README.md delete mode 100755 src/vendor/unicode-segmentation/scripts/unicode.py delete mode 100755 src/vendor/unicode-segmentation/scripts/unicode_gen_breaktests.py delete mode 100644 src/vendor/unicode-segmentation/src/grapheme.rs delete mode 100644 src/vendor/unicode-segmentation/src/lib.rs delete mode 100644 src/vendor/unicode-segmentation/src/tables.rs delete mode 100644 src/vendor/unicode-segmentation/src/test.rs delete mode 100644 src/vendor/unicode-segmentation/src/testdata.rs delete mode 100644 src/vendor/unicode-segmentation/src/word.rs create mode 100644 src/vendor/unreachable-0.1.1/.cargo-checksum.json create mode 100644 src/vendor/unreachable-0.1.1/.cargo-ok create mode 100644 src/vendor/unreachable-0.1.1/.gitignore create mode 100644 src/vendor/unreachable-0.1.1/.travis.yml create mode 100644 src/vendor/unreachable-0.1.1/Cargo.toml create mode 100644 src/vendor/unreachable-0.1.1/README.md create mode 100644 src/vendor/unreachable-0.1.1/src/lib.rs create mode 100644 src/vendor/utf-8/.cargo-checksum.json create mode 100644 src/vendor/utf-8/.cargo-ok create mode 100644 src/vendor/utf-8/.gitignore create mode 100644 src/vendor/utf-8/Cargo.toml create mode 100644 src/vendor/utf-8/README.md create mode 100644 src/vendor/utf-8/benches/from_utf8_lossy.rs create mode 100644 src/vendor/utf-8/src/lib.rs create mode 100644 src/vendor/utf-8/src/lossy.rs create mode 100644 src/vendor/utf-8/src/polyfill.rs create mode 100644 src/vendor/utf-8/tests/decoder.rs create mode 100644 src/vendor/utf-8/tests/shared/data.rs create mode 100644 src/vendor/utf-8/tests/shared/string_from_utf8_lossy.rs create mode 100644 src/vendor/utf-8/tests/str_from_utf8.rs create mode 100644 src/vendor/utf-8/tests/string_from_utf8_lossy.rs diff 
--git a/CONTRIBUTING.md b/CONTRIBUTING.md index c424ca7ab0..a86742d7bd 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,4 +1,5 @@ # Contributing to Rust +[contributing-to-rust]: #contributing-to-rust Thank you for your interest in contributing to Rust! There are many ways to contribute, and we appreciate all of them. This document is a bit long, so here's @@ -18,11 +19,12 @@ hop on [#rust-internals][pound-rust-internals]. As a reminder, all contributors are expected to follow our [Code of Conduct][coc]. -[pound-rust-internals]: http://chat.mibbit.com/?server=irc.mozilla.org&channel=%23rust-internals +[pound-rust-internals]: https://chat.mibbit.com/?server=irc.mozilla.org&channel=%23rust-internals [internals]: https://internals.rust-lang.org [coc]: https://www.rust-lang.org/conduct.html ## Feature Requests +[feature-requests]: #feature-requests To request a change to the way that the Rust language works, please open an issue in the [RFCs repository](https://github.com/rust-lang/rfcs/issues/new) @@ -30,6 +32,7 @@ rather than this one. New features and other significant language changes must go through the RFC process. ## Bug Reports +[bug-reports]: #bug-reports While bugs are unfortunate, they're a reality in software. We can't fix what we don't know about, so please report liberally. If you're not sure if something @@ -80,6 +83,7 @@ $ RUST_BACKTRACE=1 rustc ... ``` ## The Build System +[the-build-system]: #the-build-system Rust's build system allows you to bootstrap the compiler, run tests & benchmarks, generate documentation, install a fresh build of Rust, and more. @@ -94,6 +98,7 @@ system internals, try asking in [`#rust-internals`][pound-rust-internals]. [bootstrap]: https://github.com/rust-lang/rust/tree/master/src/bootstrap/ ### Configuration +[configuration]: #configuration Before you can start building the compiler you need to configure the build for your system. In most cases, that will just mean using the defaults provided @@ -125,6 +130,11 @@ file. If you still have a `config.mk` file in your directory - from `./configure` - you may need to delete it for `config.toml` to work. ### Building +[building]: #building + +Dependencies +- [build dependencies](README.md#building-from-source) +- `gdb` 6.2.0 minimum, 7.1 or later recommended for test builds The build system uses the `x.py` script to control the build process. This script is used to build, test, and document various parts of the compiler. You can @@ -194,6 +204,7 @@ Note: Previously `./configure` and `make` were used to build this project. They are still available, but `x.py` is the recommended build system. ### Useful commands +[useful-commands]: #useful-commands Some common invocations of `x.py` are: @@ -232,9 +243,38 @@ Some common invocations of `x.py` are: guidelines as of yet, but basic rules like 4 spaces for indentation and no more than 99 characters in a single line should be kept in mind when writing code. -- `rustup toolchain link build//` - Use the custom compiler build via [rustup](https://github.com/rust-lang-nursery/rustup.rs#working-with-custom-toolchains-and-local-builds). + +### Using your local build +[using-local-build]: #using-local-build + +If you use Rustup to manage your rust install, it has a feature called ["custom +toolchains"][toolchain-link] that you can use to access your newly-built compiler +without having to install it to your system or user PATH. 
If you've run `python +x.py build`, then you can add your custom rustc to a new toolchain like this: + +[toolchain-link]: https://github.com/rust-lang-nursery/rustup.rs#working-with-custom-toolchains-and-local-builds + +``` +rustup toolchain link <name> build/<host-triple>/stage2 +``` + +Where `<host-triple>` is the build triple for the host (the triple of your +computer, by default), and `<name>` is the name for your custom toolchain. (If you +added `--stage 1` to your build command, the compiler will be in the `stage1` +folder instead.) You'll only need to do this once - it will automatically point +to the latest build you've done. + +Once this is set up, you can use your custom toolchain just like any other. For +example, if you've named your toolchain `local`, running `cargo +local build` will +compile a project with your custom rustc, setting `rustup override set local` will +override the toolchain for your current directory, and `cargo +local doc` will use +your custom rustc and rustdoc to generate docs. (If you do this with a `--stage 1` +build, you'll need to build rustdoc specially, since it's not normally built in +stage 1. `python x.py build --stage 1 src/libstd src/tools/rustdoc` will build +rustdoc and libstd, which will allow rustdoc to be run with that toolchain.) ## Pull Requests +[pull-requests]: #pull-requests Pull requests are the primary mechanism we use to change Rust. GitHub itself has some [great documentation][pull-requests] on using the Pull Request feature. @@ -298,7 +338,33 @@ Speaking of tests, Rust has a comprehensive test suite. More information about it can be found [here](https://github.com/rust-lang/rust-wiki-backup/blob/master/Note-testsuite.md). +### External Dependencies +[external-dependencies]: #external-dependencies + +Currently building Rust will also build the following external projects: + +* [clippy](https://github.com/rust-lang-nursery/rust-clippy) +* [miri](https://github.com/solson/miri) + +If your changes break one of these projects, you need to fix them by opening +a pull request against the broken project asking to put the fix on a branch. +Then you can disable the tool building via `src/tools/toolstate.toml`. +Once the branch containing your fix is likely to be merged, you can point +the affected submodule at this branch. + +Don't forget to also add your changes with + +``` +git add path/to/submodule +``` + +outside the submodule. + +It can also be more convenient during development to set `submodules = false` +in the `config.toml` to prevent `x.py` from resetting to the original branch. + ## Writing Documentation +[writing-documentation]: #writing-documentation Documentation improvements are very welcome. The source of `doc.rust-lang.org` is located in `src/doc` in the tree, and standard API documentation is generated @@ -329,6 +395,7 @@ reference to `doc/reference.html`. The CSS might be messed up, but you can verify that the HTML is right. ## Issue Triage +[issue-triage]: #issue-triage Sometimes, an issue will stay open, even though the bug has been fixed. And sometimes, the original bug may go stale because something has changed in the @@ -347,32 +414,56 @@ labels to triage issues: * Magenta, **B**-prefixed labels identify bugs which are **blockers**. +* Dark blue, **beta-** labels track changes which need to be backported into + the beta branches. + +* Light purple, **C**-prefixed labels represent the **category** of an issue. + * Green, **E**-prefixed labels explain the level of **experience** necessary to fix the issue.
+* The dark blue **final-comment-period** label marks bugs that are using the + RFC signoff functionality of [rfcbot][rfcbot] and are currently in the final + comment period. + * Red, **I**-prefixed labels indicate the **importance** of the issue. The [I-nominated][inom] label indicates that an issue has been nominated for prioritizing at the next triage meeting. +* The purple **metabug** label marks lists of bugs collected by other + categories. + +* Purple gray, **O**-prefixed labels are the **operating system** or platform + that this issue is specific to. + * Orange, **P**-prefixed labels indicate a bug's **priority**. These labels are only assigned during triage meetings, and replace the [I-nominated][inom] label. -* Blue, **T**-prefixed bugs denote which **team** the issue belongs to. +* The gray **proposed-final-comment-period** label marks bugs that are using + the RFC signoff functionality of [rfcbot][rfcbot] and are currently awaiting + signoff of all team members in order to enter the final comment period. -* Dark blue, **beta-** labels track changes which need to be backported into - the beta branches. +* Pink, **regression**-prefixed labels track regressions from stable to the + release channels. -* The purple **metabug** label marks lists of bugs collected by other - categories. +* The light orange **relnotes** label marks issues that should be documented in + the release notes of the next release. + +* Gray, **S**-prefixed labels are used for tracking the **status** of pull + requests. + +* Blue, **T**-prefixed bugs denote which **team** the issue belongs to. If you're looking for somewhere to start, check out the [E-easy][eeasy] tag. [inom]: https://github.com/rust-lang/rust/issues?q=is%3Aopen+is%3Aissue+label%3AI-nominated [eeasy]: https://github.com/rust-lang/rust/issues?q=is%3Aopen+is%3Aissue+label%3AE-easy [lru]: https://github.com/rust-lang/rust/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-asc +[rfcbot]: https://github.com/dikaiosune/rust-dashboard/blob/master/RFCBOT.md ## Out-of-tree Contributions +[out-of-tree-contributions]: #out-of-tree-contributions There are a number of other ways to contribute to Rust that don't deal with this repository. @@ -392,11 +483,13 @@ valuable! [community-library]: https://github.com/rust-lang/rfcs/labels/A-community-library ## Helpful Links and Information +[helpful-info]: #helpful-info For people new to Rust, and just starting to contribute, or even for more seasoned developers, some useful places to look for information are: +* [Rust Forge][rustforge] contains additional documentation, including write-ups of how to achieve common tasks * The [Rust Internals forum][rif], a place to ask questions and discuss Rust's internals * The [generated documentation for rust's compiler][gdfrustc] @@ -412,6 +505,7 @@ are: [gsearchdocs]: https://www.google.com/search?q=site:doc.rust-lang.org+your+query+here [rif]: http://internals.rust-lang.org [rr]: https://doc.rust-lang.org/book/README.html +[rustforge]: https://forge.rust-lang.org/ [tlgba]: http://tomlee.co/2014/04/a-more-detailed-tour-of-the-rust-compiler/ [ro]: http://www.rustaceans.org/ [rctd]: ./src/test/COMPILER_TESTS.md diff --git a/README.md b/README.md index 78a9f509bb..95d543b8bb 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,7 @@ standard library, and documentation. [Rust]: https://www.rust-lang.org ## Quick Start +[quick-start]: #quick-start Read ["Installation"] from [The Book]. @@ -13,6 +14,7 @@ Read ["Installation"] from [The Book].
[The Book]: https://doc.rust-lang.org/book/index.html ## Building from Source +[building-from-source]: #building-from-source 1. Make sure you have installed the dependencies: @@ -52,6 +54,7 @@ Read ["Installation"] from [The Book]. [Cargo]: https://github.com/rust-lang/cargo ### Building on Windows +[building-on-windows]: #building-on-windows There are two prominent ABIs in use on Windows: the native (MSVC) ABI used by Visual Studio, and the GNU ABI used by the GCC toolchain. Which version of Rust @@ -61,6 +64,7 @@ for interop with GNU software built using the MinGW/MSYS2 toolchain use the GNU build. #### MinGW +[windows-mingw]: #windows-mingw [MSYS2][msys2] can be used to easily build Rust on Windows: @@ -101,6 +105,7 @@ build. ``` #### MSVC +[windows-msvc]: #windows-msvc MSVC builds of Rust additionally require an installation of Visual Studio 2013 (or later) so `rustc` can use its linker. Make sure to check the “C++ tools” @@ -124,6 +129,7 @@ python x.py build ``` #### Specifying an ABI +[specifying-an-abi]: #specifying-an-abi Each specific ABI can also be used from either environment (for example, using the GNU ABI in powershell) by using an explicit build triple. The available @@ -141,6 +147,7 @@ in Building From Source), and modifying the `build` option under the `[build]` section. ### Configure and Make +[configure-and-make]: #configure-and-make While it's not the recommended build system, this project also provides a configure script and makefile (the latter of which just invokes `x.py`). @@ -155,6 +162,7 @@ When using the configure script, the generated `config.mk` file may override the `config.mk` file. ## Building Documentation +[building-documentation]: #building-documentation If you’d like to build the documentation, it’s almost the same: @@ -167,6 +175,7 @@ the ABI used. I.e., if the ABI was `x86_64-pc-windows-msvc`, the directory will `build\x86_64-pc-windows-msvc\doc`. ## Notes +[notes]: #notes Since the Rust compiler is written in Rust, it must be built by a precompiled "snapshot" version of itself (made in an earlier state of @@ -192,6 +201,7 @@ There is more advice about hacking on Rust in [CONTRIBUTING.md]. [CONTRIBUTING.md]: https://github.com/rust-lang/rust/blob/master/CONTRIBUTING.md ## Getting Help +[getting-help]: #getting-help The Rust community congregates in a few places: @@ -204,6 +214,7 @@ The Rust community congregates in a few places: [users.rust-lang.org]: https://users.rust-lang.org/ ## Contributing +[contributing]: #contributing To contribute to Rust, please see [CONTRIBUTING](CONTRIBUTING.md). @@ -217,6 +228,7 @@ Rust. And a good place to ask for help would be [#rust-beginners]. [#rust-beginners]: irc://irc.mozilla.org/rust-beginners ## License +[license]: #license Rust is primarily distributed under the terms of both the MIT license and the Apache License (Version 2.0), with portions covered by various diff --git a/RELEASES.md b/RELEASES.md index c3a7367a2e..e65934a89e 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,9 +1,107 @@ +Version 1.21.0 (2017-10-12) +========================== + +Language +-------- +- [Relaxed path syntax. 
You can now add type parameters to values][43540] + Example: + ```rust + my_macro!(Vec::new); // Always worked + my_macro!(Vec::<i32>::new); // Now works + ``` +- [You can now use static references for literals.][43838] + Example: + ```rust + fn main() { + let x: &'static u32 = &0; + } + ``` + +Compiler +-------- +- [Upgraded jemalloc to 4.5.0][43911] +- [Enabled unwinding panics on Redox][43917] +- [Now runs LLVM in parallel during translation phase.][43506] + This should reduce peak memory usage. + +Libraries +--------- +- [Generate builtin impls for `Clone` for all arrays and tuples that + are `T: Clone`][43690] +- [`Stdin`, `Stdout`, and `Stderr` now implement `AsRawFd`.][43459] +- [`Rc<T>` and `Arc<T>` now implement `From<&[T]> where T: Clone`, `From<str>`, + `From<String>`, `From<Box<T>> where T: ?Sized`, and `From<Vec<T>>`.][42565] + +Stabilized APIs +--------------- + +[`std::mem::discriminant`] + +Cargo +----- +- [You can now call `cargo install` with multiple package names][cargo/4216] +- [Cargo commands inside a virtual workspace will now implicitly + pass `--all`][cargo/4335] +- [Added a `[patch]` section to `Cargo.toml` to handle + prepublication dependencies][cargo/4123] [RFC 1969] +- [`include` & `exclude` fields in `Cargo.toml` now accept gitignore + like patterns][cargo/4270] +- [Added the `--all-targets` option][cargo/4400] +- [Using required dependencies as a feature is now deprecated and emits + a warning][cargo/4364] + + +Misc +---- +- [Cargo docs are moving][43916] + to [doc.rust-lang.org/cargo](https://doc.rust-lang.org/cargo) +- [The rustdoc book is now available][43863] + at [doc.rust-lang.org/rustdoc](https://doc.rust-lang.org/rustdoc) +- [A preview of RLS has been made available through rustup][44204] + Install with `rustup component add rls-preview` +- [`std::os` documentation for Unix, Linux, and Windows now appears on doc.rust-lang.org][43348] + Previously only showed `std::os::unix`. + +Compatibility Notes +------------------- +- [Changes in method matching against higher-ranked types][43880] This may cause + breakage in subtyping corner cases. [A more in-depth explanation is available.][info/43880] +- [rustc's JSON error output's byte positions now start at the top of the file.][42973] + They were previously relative to rustc's internal `CodeMap` struct, which + required the unstable library `libsyntax` to use correctly.
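For illustration, a minimal sketch of the newly stabilized [`std::mem::discriminant`] from the list above (the `Op` enum here is made up purely for the example):

```rust
use std::mem::discriminant;

// A made-up enum, used only to show how discriminants compare.
enum Op {
    Add(i32),
    Sub(i32),
}

fn main() {
    // discriminant() identifies which variant a value is, ignoring its payload.
    assert_eq!(discriminant(&Op::Add(1)), discriminant(&Op::Add(2)));
    assert_ne!(discriminant(&Op::Add(1)), discriminant(&Op::Sub(1)));
}
```

Two values of the same variant compare equal even when their payloads differ, which is what makes `discriminant` handy for cheap variant checks without a `match`.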
+- [`unused_results` lint no longer ignores booleans][43728] + +[42565]: https://github.com/rust-lang/rust/pull/42565 +[42973]: https://github.com/rust-lang/rust/pull/42973 +[43348]: https://github.com/rust-lang/rust/pull/43348 +[43459]: https://github.com/rust-lang/rust/pull/43459 +[43506]: https://github.com/rust-lang/rust/pull/43506 +[43540]: https://github.com/rust-lang/rust/pull/43540 +[43690]: https://github.com/rust-lang/rust/pull/43690 +[43728]: https://github.com/rust-lang/rust/pull/43728 +[43838]: https://github.com/rust-lang/rust/pull/43838 +[43863]: https://github.com/rust-lang/rust/pull/43863 +[43880]: https://github.com/rust-lang/rust/pull/43880 +[43911]: https://github.com/rust-lang/rust/pull/43911 +[43916]: https://github.com/rust-lang/rust/pull/43916 +[43917]: https://github.com/rust-lang/rust/pull/43917 +[44204]: https://github.com/rust-lang/rust/pull/44204 +[cargo/4123]: https://github.com/rust-lang/cargo/pull/4123 +[cargo/4216]: https://github.com/rust-lang/cargo/pull/4216 +[cargo/4270]: https://github.com/rust-lang/cargo/pull/4270 +[cargo/4335]: https://github.com/rust-lang/cargo/pull/4335 +[cargo/4364]: https://github.com/rust-lang/cargo/pull/4364 +[cargo/4400]: https://github.com/rust-lang/cargo/pull/4400 +[RFC 1969]: https://github.com/rust-lang/rfcs/pull/1969 +[info/43880]: https://github.com/rust-lang/rust/issues/44224#issuecomment-330058902 +[`std::mem::discriminant`]: https://doc.rust-lang.org/std/mem/fn.discriminant.html + Version 1.20.0 (2017-08-31) =========================== Language -------- -- [Associated constants in traits is now stabilised.][42809] +- [Associated constants are now stabilised.][42809] - [A lot of macro bugs are now fixed.][42913] Compiler @@ -77,7 +175,7 @@ Stabilized APIs - [`slice::sort_unstable_by_key`] - [`slice::sort_unstable_by`] - [`slice::sort_unstable`] -- [`ste::from_boxed_utf8_unchecked`] +- [`str::from_boxed_utf8_unchecked`] - [`str::as_bytes_mut`] - [`str::as_bytes_mut`] - [`str::from_utf8_mut`] @@ -110,7 +208,7 @@ Compatibility Notes - [Functions with `'static` in their return types will now not be as usable as if they were using lifetime parameters instead.][42417] - [The reimplementation of `{f32, f64}::is_sign_{negative, positive}` now - takes the sign of NaN into account where previously didn't.][42430] + takes the sign of NaN into account where previously didn't.][42430] [42033]: https://github.com/rust-lang/rust/pull/42033 [42155]: https://github.com/rust-lang/rust/pull/42155 @@ -170,7 +268,7 @@ Compatibility Notes [`slice::sort_unstable_by_key`]: https://doc.rust-lang.org/std/primitive.slice.html#method.sort_unstable_by_key [`slice::sort_unstable_by`]: https://doc.rust-lang.org/std/primitive.slice.html#method.sort_unstable_by [`slice::sort_unstable`]: https://doc.rust-lang.org/std/primitive.slice.html#method.sort_unstable -[`ste::from_boxed_utf8_unchecked`]: https://doc.rust-lang.org/std/str/fn.from_boxed_utf8_unchecked.html +[`str::from_boxed_utf8_unchecked`]: https://doc.rust-lang.org/std/str/fn.from_boxed_utf8_unchecked.html [`str::as_bytes_mut`]: https://doc.rust-lang.org/std/primitive.str.html#method.as_bytes_mut [`str::from_utf8_mut`]: https://doc.rust-lang.org/std/str/fn.from_utf8_mut.html [`str::from_utf8_unchecked_mut`]: https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked_mut.html diff --git a/config.toml.example b/config.toml.example new file mode 100644 index 0000000000..a3790c8d20 --- /dev/null +++ b/config.toml.example @@ -0,0 +1,379 @@ +# Sample TOML configuration file for building Rust. 
+# +# To configure rustbuild, copy this file to the directory from which you will be +# running the build, and name it config.toml. +# +# All options are commented out by default in this file, and they're commented +# out with their default values. The build system by default looks for +# `config.toml` in the current directory of a build for build configuration, but +# a custom configuration file can also be specified with `--config` to the build +# system. + +# ============================================================================= +# Tweaking how LLVM is compiled +# ============================================================================= +[llvm] + +# Indicates whether rustc will support compilation with LLVM +# note: rustc does not compile without LLVM at the moment +#enabled = true + +# Indicates whether the LLVM build is a Release or Debug build +#optimize = true + +# Indicates whether an LLVM Release build should include debug info +#release-debuginfo = false + +# Indicates whether the LLVM assertions are enabled or not +#assertions = false + +# Indicates whether ccache is used when building LLVM +#ccache = false +# or alternatively ... +#ccache = "/path/to/ccache" + +# If an external LLVM root is specified, we automatically check the version by +# default to make sure it's within the range that we're expecting, but setting +# this flag will indicate that this version check should not be done. +#version-check = false + +# Link libstdc++ statically into the librustc_llvm instead of relying on a +# dynamic version to be available. +#static-libstdcpp = false + +# Tell the LLVM build system to use Ninja instead of the platform default for +# the generated build system. This can sometimes be faster than make, for +# example. +#ninja = false + +# LLVM targets to build support for. +# Note: this is NOT related to Rust compilation targets. However, as Rust is +# dependent on LLVM for code generation, turning targets off here WILL lead to +# the resulting rustc being unable to compile for the disabled architectures. +# Also worth pointing out is that, in case support for new targets are added to +# LLVM, enabling them here doesn't mean Rust is automatically gaining said +# support. You'll need to write a target specification at least, and most +# likely, teach rustc about the C ABI of the target. Get in touch with the +# Rust team and file an issue if you need assistance in porting! +#targets = "X86;ARM;AArch64;Mips;PowerPC;SystemZ;JSBackend;MSP430;Sparc;NVPTX;Hexagon" + +# LLVM experimental targets to build support for. These targets are specified in +# the same format as above, but since these targets are experimental, they are +# not built by default and the experimental Rust compilation targets that depend +# on them will not work unless the user opts in to building them. Possible +# experimental LLVM targets include WebAssembly for the +# wasm32-experimental-emscripten Rust target. +#experimental-targets = "" + +# Cap the number of parallel linker invocations when compiling LLVM. +# This can be useful when building LLVM with debug info, which significantly +# increases the size of binaries and consequently the memory required by +# each linker process. +# If absent or 0, linker invocations are treated like any other job and +# controlled by rustbuild's -j parameter. +#link-jobs = 0 + +# When invoking `llvm-config` this configures whether the `--shared` argument is +# passed to prefer linking to shared libraries. 
+#link-shared = false + +# ============================================================================= +# General build configuration options +# ============================================================================= +[build] + +# Build triple for the original snapshot compiler. This must be a compiler that +# nightlies are already produced for. The current platform must be able to run +# binaries of this build triple and the nightly will be used to bootstrap the +# first compiler. +#build = "x86_64-unknown-linux-gnu" # defaults to your host platform + +# In addition to the build triple, other triples to produce full compiler +# toolchains for. Each of these triples will be bootstrapped from the build +# triple and then will continue to bootstrap themselves. This platform must +# currently be able to run all of the triples provided here. +#host = ["x86_64-unknown-linux-gnu"] # defaults to just the build triple + +# In addition to all host triples, other triples to produce the standard library +# for. Each host triple will be used to produce a copy of the standard library +# for each target triple. +#target = ["x86_64-unknown-linux-gnu"] # defaults to just the build triple + +# Instead of downloading the src/stage0.txt version of Cargo specified, use +# this Cargo binary instead to build all Rust code +#cargo = "/path/to/bin/cargo" + +# Instead of downloading the src/stage0.txt version of the compiler +# specified, use this rustc binary instead as the stage0 snapshot compiler. +#rustc = "/path/to/bin/rustc" + +# Flag to specify whether any documentation is built. If false, rustdoc and +# friends will still be compiled but they will not be used to generate any +# documentation. +#docs = true + +# Indicate whether the compiler should be documented in addition to the standard +# library and facade crates. +#compiler-docs = false + +# Indicate whether submodules are managed and updated automatically. +#submodules = true + +# The path to (or name of) the GDB executable to use. This is only used for +# executing the debuginfo test suite. +#gdb = "gdb" + +# The node.js executable to use. Note that this is only used for the emscripten +# target when running tests, otherwise this can be omitted. +#nodejs = "node" + +# Python interpreter to use for various tasks throughout the build, notably +# rustdoc tests, the lldb python interpreter, and some dist bits and pieces. +# Note that Python 2 is currently required. +#python = "python2.7" + +# Force Cargo to check that Cargo.lock describes the precise dependency +# set that all the Cargo.toml files create, instead of updating it. +#locked-deps = false + +# Indicate whether the vendored sources are used for Rust dependencies or not +#vendor = false + +# Typically the build system will build the rust compiler twice. The second +# compiler, however, will simply use its own libraries to link against. If you +# would rather to perform a full bootstrap, compiling the compiler three times, +# then you can set this option to true. You shouldn't ever need to set this +# option to true. +#full-bootstrap = false + +# Enable a build of the extended rust tool set which is not only the compiler +# but also tools such as Cargo. This will also produce "combined installers" +# which are used to install Rust and Cargo together. This is disabled by +# default. 
+#extended = false + +# Verbosity level: 0 == not verbose, 1 == verbose, 2 == very verbose +#verbose = 0 + +# Build the sanitizer runtimes +#sanitizers = false + +# Build the profiler runtime +#profiler = false + +# Indicates whether the OpenSSL linked into Cargo will be statically linked or +# not. If static linkage is specified then the build system will download a +# known-good version of OpenSSL, compile it, and link it to Cargo. +#openssl-static = false + +# Run the build with low priority, by setting the process group's "nice" value +# to +10 on Unix platforms, and by using a "low priority" job object on Windows. +#low-priority = false + +# Arguments passed to the `./configure` script, used during distcheck. You +# probably won't fill this in but rather it's filled in by the `./configure` +# script. +#configure-args = [] + +# Indicates that a local rebuild is occurring instead of a full bootstrap, +# essentially skipping stage0 as the local compiler is recompiling itself again. +#local-rebuild = false + +# ============================================================================= +# General install configuration options +# ============================================================================= +[install] + +# Instead of installing to /usr/local, install to this path instead. +#prefix = "/usr/local" + +# Where to install system configuration files +# If this is a relative path, it will get installed in `prefix` above +#sysconfdir = "/etc" + +# Where to install documentation in `prefix` above +#docdir = "share/doc/rust" + +# Where to install binaries in `prefix` above +#bindir = "bin" + +# Where to install libraries in `prefix` above +#libdir = "lib" + +# Where to install man pages in `prefix` above +#mandir = "share/man" + +# ============================================================================= +# Options for compiling Rust code itself +# ============================================================================= +[rust] + +# Indicates that the build should be optimized for debugging Rust. Note that +# this is typically not what you want as it takes an incredibly large amount of +# time to have a debug-mode rustc compile any code (notably libstd). If this +# value is set to `true` it will affect a number of configuration options below +# as well, if unconfigured. +#debug = false + +# Whether or not to optimize the compiler and standard library +# Note: the slowness of the non optimized compiler compiling itself usually +# outweighs the time gains in not doing optimizations, therefore a +# full bootstrap takes much more time with optimize set to false. +#optimize = true + +# Number of codegen units to use for each compiler invocation. A value of 0 +# means "the number of cores on this machine", and 1+ is passed through to the +# compiler. +#codegen-units = 1 + +# Whether or not debug assertions are enabled for the compiler and standard +# library. Also enables compilation of debug! and trace! logging macros. +#debug-assertions = false + +# Whether or not debuginfo is emitted +#debuginfo = false + +# Whether or not line number debug information is emitted +#debuginfo-lines = false + +# Whether or not to only build debuginfo for the standard library if enabled. +# If enabled, this will not compile the compiler with debuginfo, just the +# standard library. 
+#debuginfo-only-std = false + +# Whether or not jemalloc is built and enabled +#use-jemalloc = true + +# Whether or not jemalloc is built with its debug option set +#debug-jemalloc = false + +# Whether or not `panic!`s generate backtraces (RUST_BACKTRACE) +#backtrace = true + +# The default linker that will be used by the generated compiler. Note that this +# is not the linker used to link said compiler. +#default-linker = "cc" + +# The default ar utility that will be used by the generated compiler if LLVM +# cannot be used. Note that this is not used to assemble said compiler. +#default-ar = "ar" + +# The "channel" for the Rust build to produce. The stable/beta channels only +# allow using stable features, whereas the nightly and dev channels allow using +# nightly features +#channel = "dev" + +# By default the `rustc` executable is built with `-Wl,-rpath` flags on Unix +# platforms to ensure that the compiler is usable by default from the build +# directory (as it links to a number of dynamic libraries). This may not be +# desired in distributions, for example. +#rpath = true + +# Suppresses extraneous output from tests to ensure the output of the test +# harness is relatively clean. +#quiet-tests = false + +# Flag indicating whether tests are compiled with optimizations (the -O flag) or +# with debuginfo (the -g flag) +#optimize-tests = true +#debuginfo-tests = true + +# Flag indicating whether codegen tests will be run or not. If you get an error +# saying that the FileCheck executable is missing, you may want to disable this. +#codegen-tests = true + +# Flag indicating whether git info will be retrieved from .git automatically. +# Having the git information can cause a lot of rebuilds during development. +# Note: If this attribute is not explicitly set (e.g. if left commented out) it +# will default to true if channel = "dev", but will default to false otherwise. +#ignore-git = true + +# When creating source tarballs whether or not to create a source tarball. +#dist-src = false + +# Whether to also run the Miri test suite when running tests. +# As a side-effect also generates MIR for all libraries. +#test-miri = false + +# ============================================================================= +# Options for specific targets +# +# Each of the following options is scoped to the specific target triple in +# question and is used for determining how to compile each target. +# ============================================================================= +[target.x86_64-unknown-linux-gnu] + +# C compiler to be used to compile C code and link Rust code. Note that the +# default value is platform specific, and if not specified it may also depend on +# what platform is crossing to what platform. +#cc = "cc" + +# C++ compiler to be used to compile C++ code (e.g. LLVM and our LLVM shims). +# This is only used for host targets. +#cxx = "c++" + +# Path to the `llvm-config` binary of the installation of a custom LLVM to link +# against. Note that if this is specified we don't compile LLVM at all for this +# target. +#llvm-config = "../path/to/llvm/root/bin/llvm-config" + +# Path to the custom jemalloc static library to link into the standard library +# by default. This is only used if jemalloc is still enabled above +#jemalloc = "/path/to/jemalloc/libjemalloc_pic.a" + +# If this target is for Android, this option will be required to specify where +# the NDK for the target lives. This is used to find the C compiler to link and +# build native code.
+#android-ndk = "/path/to/ndk" + +# Force static or dynamic linkage of the standard library for this target. If +# this target is a host for rustc, this will also affect the linkage of the +# compiler itself. This is useful for building rustc on targets that normally +# only use static libraries. If unset, the target's default linkage is used. +#crt-static = false + +# The root location of the MUSL installation directory. The library directory +# will also need to contain libunwind.a for an unwinding implementation. Note +# that this option only makes sense for MUSL targets that produce statically +# linked binaries +#musl-root = "..." + +# Used in testing for configuring where the QEMU images are located, you +# probably don't want to use this. +#qemu-rootfs = "..." + +# ============================================================================= +# Distribution options +# +# These options are related to distribution, mostly for the Rust project itself. +# You probably won't need to concern yourself with any of these options +# ============================================================================= +[dist] + +# This is the folder of artifacts that the build system will sign. All files in +# this directory will be signed with the default gpg key using the system `gpg` +# binary. The `asc` and `sha256` files will all be output into the standard dist +# output folder (currently `build/dist`) +# +# This folder should be populated ahead of time before the build system is +# invoked. +#sign-folder = "path/to/folder/to/sign" + +# This is a file which contains the password of the default gpg key. This will +# be passed to `gpg` down the road when signing all files in `sign-folder` +# above. This should be stored in plaintext. +#gpg-password-file = "path/to/gpg/password" + +# The remote address that all artifacts will eventually be uploaded to. The +# build system generates manifests which will point to these urls, and for the +# manifests to be correct they'll have to have the right URLs encoded. +# +# Note that this address should not contain a trailing slash as file names will +# be appended to it. +#upload-addr = "https://example.com/folder" + +# Whether to build a plain source tarball to upload +# We disable that on Windows not to override the one already uploaded on S3 +# as the one built on Windows will contain backslashes in paths causing problems +# on linux +#src-tarball = true diff --git a/configure b/configure index 664b473b2c..eeb8d081d3 100755 --- a/configure +++ b/configure @@ -1,779 +1,17 @@ #!/bin/sh -# /bin/sh on Solaris is not a POSIX compatible shell, but /usr/bin/bash is. -if [ `uname -s` = 'SunOS' -a "${POSIX_SHELL}" != "true" ]; then - POSIX_SHELL="true" - export POSIX_SHELL - exec /usr/bin/env bash $0 "$@" -fi -unset POSIX_SHELL # clear it so if we invoke other scripts, they run as bash as well +script="$(dirname $0)"/src/bootstrap/configure.py -msg() { - echo "configure: $*" -} - -step_msg() { - msg - msg "$1" - msg -} - -warn() { - echo "configure: WARNING: $1" -} - -err() { - echo "configure: error: $1" - exit 1 -} - -run() { - msg "$@" - "$@" -} - -need_ok() { - if [ $? -ne 0 ] - then - err "$1" - fi -} - -need_cmd() { - if command -v $1 >/dev/null 2>&1 - then msg "found program '$1'" - else err "program '$1' is missing, please install it" - fi -} - -make_dir() { - if [ ! 
-d $1 ] - then - run mkdir -p $1 - fi -} - -copy_if_changed() { - if cmp -s $1 $2 - then - msg "leaving $2 unchanged" - else - run cp -f $1 $2 - chmod u-w $2 # make copied artifact read-only - fi -} - -move_if_changed() { - if cmp -s $1 $2 - then - msg "leaving $2 unchanged" - else - run mv -f $1 $2 - chmod u-w $2 # make moved artifact read-only - fi -} - -putvar() { - local T - eval T=\$$1 - eval TLEN=\${#$1} - if [ $TLEN -gt 35 ] - then - printf "configure: %-20s := %.35s ...\n" $1 "$T" - else - printf "configure: %-20s := %s %s\n" $1 "$T" "$2" - fi - printf "%-20s := %s\n" $1 "$T" >>config.tmp -} - -putpathvar() { - local T - eval T=\$$1 - eval TLEN=\${#$1} - if [ $TLEN -gt 35 ] - then - printf "configure: %-20s := %.35s ...\n" $1 "$T" - else - printf "configure: %-20s := %s %s\n" $1 "$T" "$2" - fi - if [ -z "$T" ] - then - printf "%-20s := \n" $1 >>config.tmp - else - printf "%-20s := \"%s\"\n" $1 "$T" >>config.tmp - fi -} - -probe() { - local V=$1 +try() { + cmd=$1 shift - local P - local T - for P - do - T=$(command -v $P 2>&1) - if [ $? -eq 0 ] - then - VER0=$($P --version 2>/dev/null \ - | grep -o '[vV]\?[0-9][0-9.][a-z0-9.-]*' | head -1 ) - if [ $? -eq 0 -a "x${VER0}" != "x" ] - then - VER="($VER0)" - else - VER="" - fi - break - else - VER="" - T="" - fi - done - eval $V=\$T - putpathvar $V "$VER" -} - -probe_need() { - probe $* - local V=$1 - shift - eval VV=\$$V - if [ -z "$VV" ] - then - err "$V needed, but unable to find any of: $*" - fi -} - -validate_opt () { - for arg in $CFG_CONFIGURE_ARGS - do - isArgValid=0 - for option in $BOOL_OPTIONS - do - if test --disable-$option = $arg - then - isArgValid=1 - fi - if test --enable-$option = $arg - then - isArgValid=1 - fi - done - for option in $VAL_OPTIONS - do - if echo "$arg" | grep -q -- "--$option=" - then - isArgValid=1 - fi - done - if [ "$arg" = "--help" ] - then - echo - echo "No more help available for Configure options," - echo "check the Wiki or join our IRC channel" - break - else - if test $isArgValid -eq 0 - then - err "Option '$arg' is not recognized" - fi - fi - done -} - -# `valopt OPTION_NAME DEFAULT DOC` extracts a string-valued option -# from command line, using provided default value for the option if -# not present, and saves it to the generated config.mk. -# -# `valopt_nosave` is much the same, except that it does not save the -# result to config.mk (instead the script should use `putvar` itself -# later on to save it). `valopt_core` is the core upon which the -# other two are built. - -valopt_core() { - VAL_OPTIONS="$VAL_OPTIONS $2" - - local SAVE=$1 - local OP=$2 - local DEFAULT=$3 - shift - shift - shift - local DOC="$*" - if [ $HELP -eq 0 ] - then - local UOP=$(echo $OP | tr '[:lower:]' '[:upper:]' | tr '\-' '\_') - local V="CFG_${UOP}" - local V_PROVIDED="${V}_PROVIDED" - eval $V="$DEFAULT" - for arg in $CFG_CONFIGURE_ARGS - do - if echo "$arg" | grep -q -- "--$OP=" - then - val=$(echo "$arg" | cut -f2 -d=) - eval $V=$val - eval $V_PROVIDED=1 - fi - done - if [ "$SAVE" = "save" ] - then - putvar $V - fi - else - if [ -z "$DEFAULT" ] - then - DEFAULT="" - fi - OP="${OP}=[${DEFAULT}]" - printf " --%-30s %s\n" "$OP" "$DOC" - fi -} - -valopt_nosave() { - valopt_core nosave "$@" -} - -valopt() { - valopt_core save "$@" -} - -# `opt OPTION_NAME DEFAULT DOC` extracts a boolean-valued option from -# command line, using the provided default value (0/1) for the option -# if not present, and saves it to the generated config.mk. 
-# -# `opt_nosave` is much the same, except that it does not save the -# result to config.mk (instead the script should use `putvar` itself -# later on to save it). `opt_core` is the core upon which the other -# two are built. - -opt_core() { - BOOL_OPTIONS="$BOOL_OPTIONS $2" - - local SAVE=$1 - local OP=$2 - local DEFAULT=$3 - shift - shift - shift - local DOC="$*" - local FLAG="" - - if [ $DEFAULT -eq 0 ] - then - FLAG="enable" - DEFAULT_FLAG="disable" - else - FLAG="disable" - DEFAULT_FLAG="enable" - DOC="don't $DOC" - fi - - if [ $HELP -eq 0 ] - then - for arg in $CFG_CONFIGURE_ARGS - do - if [ "$arg" = "--${FLAG}-${OP}" ] - then - OP=$(echo $OP | tr 'a-z-' 'A-Z_') - FLAG=$(echo $FLAG | tr 'a-z' 'A-Z') - local V="CFG_${FLAG}_${OP}" - local V_PROVIDED="CFG_${FLAG}_${OP}_PROVIDED" - eval $V=1 - eval $V_PROVIDED=1 - if [ "$SAVE" = "save" ] - then - putvar $V - fi - elif [ "$arg" = "--${DEFAULT_FLAG}-${OP}" ] - then - OP=$(echo $OP | tr 'a-z-' 'A-Z_') - DEFAULT_FLAG=$(echo $DEFAULT_FLAG | tr 'a-z' 'A-Z') - local V_PROVIDED="CFG_${DEFAULT_FLAG}_${OP}_PROVIDED" - eval $V_PROVIDED=1 - fi - done - else - if [ -n "$META" ] - then - OP="$OP=<$META>" - fi - printf " --%-30s %s\n" "$FLAG-$OP" "$DOC" - fi -} - -opt_nosave() { - opt_core nosave "$@" -} - -opt() { - opt_core save "$@" -} - -envopt() { - local NAME=$1 - local V="CFG_${NAME}" - eval VV=\$$V - - # If configure didn't set a value already, then check environment. - # - # (It is recommended that the configure script always check the - # environment before setting any values to envopt variables; see - # e.g. how CFG_CC is handled, where it first checks `-z "$CC"`, - # and issues msg if it ends up employing that provided value.) - if [ -z "$VV" ] - then - eval $V=\$$NAME - eval VV=\$$V - fi - - # If script or environment provided a value, save it. - if [ -n "$VV" ] - then - putvar $V - fi -} - -enable_if_not_disabled() { - local OP=$1 - local UOP=$(echo $OP | tr '[:lower:]' '[:upper:]' | tr '\-' '\_') - local ENAB_V="CFG_ENABLE_$UOP" - local EXPLICITLY_DISABLED="CFG_DISABLE_${UOP}_PROVIDED" - eval VV=\$$EXPLICITLY_DISABLED - if [ -z "$VV" ]; then - eval $ENAB_V=1 + T=$($cmd --version 2>/dev/null) + if [ $? -eq 0 ]; then + exec $cmd "$script" "$@" fi } -to_gnu_triple() { - case $1 in - i686-pc-windows-gnu) echo i686-w64-mingw32 ;; - x86_64-pc-windows-gnu) echo x86_64-w64-mingw32 ;; - *) echo $1 ;; - esac -} - -# Prints the absolute path of a directory to stdout -abs_path() { - local _path="$1" - # Unset CDPATH because it causes havok: it makes the destination unpredictable - # and triggers 'cd' to print the path to stdout. Route `cd`'s output to /dev/null - # for good measure. 
- (unset CDPATH && cd "$_path" > /dev/null && pwd) -} - -HELP=0 -for arg; do - case "$arg" in - --help) HELP=1;; - esac -done - -msg "looking for configure programs" -need_cmd cmp -need_cmd mkdir -need_cmd printf -need_cmd cut -need_cmd head -need_cmd grep -need_cmd xargs -need_cmd cp -need_cmd find -need_cmd uname -need_cmd date -need_cmd tr -need_cmd sed -need_cmd file -need_cmd make - -CFG_SRC_DIR="$(abs_path $(dirname $0))/" -CFG_SRC_DIR_RELATIVE="$(dirname $0)/" -CFG_BUILD_DIR="$(pwd)/" -CFG_SELF="$0" -CFG_CONFIGURE_ARGS="$@" - - -case "${CFG_SRC_DIR}" in - *\ * ) - err "The path to the rust source directory contains spaces, which is not supported" - ;; - *) - ;; -esac - - -OPTIONS="" -if [ "$HELP" -eq 1 ] -then - echo - echo "Usage: $CFG_SELF [options]" - echo - echo "Options:" - echo -else - msg "recreating config.tmp" - echo '' >config.tmp - - step_msg "processing $CFG_SELF args" -fi - -BOOL_OPTIONS="" -VAL_OPTIONS="" - -opt debug 0 "debug mode; disables optimization unless \`--enable-optimize\` given" -opt valgrind 0 "run tests with valgrind (memcheck by default)" -opt helgrind 0 "run tests with helgrind instead of memcheck" -opt valgrind-rpass 1 "run rpass-valgrind tests with valgrind" -opt docs 1 "build standard library documentation" -opt compiler-docs 0 "build compiler documentation" -opt optimize-tests 1 "build tests with optimizations" -opt debuginfo-tests 0 "build tests with debugger metadata" -opt quiet-tests 0 "enable quieter output when running tests" -opt libcpp 1 "build llvm with libc++ instead of libstdc++ when using clang" -opt llvm-assertions 0 "build LLVM with assertions" -opt debug-assertions 0 "build with debugging assertions" -opt fast-make 0 "use .gitmodules as timestamp for submodule deps" -opt ccache 0 "invoke gcc/clang via ccache to reuse object files between builds" -opt sccache 0 "invoke gcc/clang via sccache to reuse object files between builds" -opt local-rust 0 "use an installed rustc rather than downloading a snapshot" -opt local-rebuild 0 "assume local-rust matches the current version, for rebuilds; implies local-rust, and is implied if local-rust already matches the current version" -opt llvm-static-stdcpp 0 "statically link to libstdc++ for LLVM" -opt llvm-link-shared 0 "prefer shared linking to LLVM (llvm-config --link-shared)" -opt rpath 1 "build rpaths into rustc itself" -opt stage0-landing-pads 1 "enable landing pads during bootstrap with stage0" -# This is used by the automation to produce single-target nightlies -opt dist-host-only 0 "only install bins for the host architecture" -opt inject-std-version 1 "inject the current compiler version of libstd into programs" -opt llvm-version-check 1 "check if the LLVM version is supported, build anyway" -opt codegen-tests 1 "run the src/test/codegen tests" -opt option-checking 1 "complain about unrecognized options in this configure script" -opt ninja 0 "build LLVM using the Ninja generator (for MSVC, requires building in the correct environment)" -opt locked-deps 0 "force Cargo.lock to be up to date" -opt vendor 0 "enable usage of vendored Rust crates" -opt sanitizers 0 "build the sanitizer runtimes (asan, lsan, msan, tsan)" -opt dist-src 1 "when building tarballs enables building a source tarball" -opt cargo-openssl-static 0 "static openssl in cargo" -opt profiler 0 "build the profiler runtime" - -# Optimization and debugging options. These may be overridden by the release channel, etc. 
-opt_nosave optimize 1 "build optimized rust code" -opt_nosave optimize-cxx 1 "build optimized C++ code" -opt_nosave optimize-llvm 1 "build optimized LLVM" -opt_nosave llvm-assertions 0 "build LLVM with assertions" -opt_nosave debug-assertions 0 "build with debugging assertions" -opt_nosave llvm-release-debuginfo 0 "build LLVM with debugger metadata" -opt_nosave debuginfo 0 "build with debugger metadata" -opt_nosave debuginfo-lines 0 "build with line number debugger metadata" -opt_nosave debuginfo-only-std 0 "build only libstd with debugging information" -opt_nosave debug-jemalloc 0 "build jemalloc with --enable-debug --enable-fill" - -valopt localstatedir "/var/lib" "local state directory" -valopt sysconfdir "/etc" "install system configuration files" - -valopt datadir "${CFG_PREFIX}/share" "install data" -valopt infodir "${CFG_PREFIX}/share/info" "install additional info" -valopt llvm-root "" "set LLVM root" -valopt python "" "set path to python" -valopt jemalloc-root "" "set directory where libjemalloc_pic.a is located" -valopt build "" "GNUs ./configure syntax LLVM build triple" -valopt android-cross-path "" "Android NDK standalone path (deprecated)" -valopt i686-linux-android-ndk "" "i686-linux-android NDK standalone path" -valopt arm-linux-androideabi-ndk "" "arm-linux-androideabi NDK standalone path" -valopt armv7-linux-androideabi-ndk "" "armv7-linux-androideabi NDK standalone path" -valopt aarch64-linux-android-ndk "" "aarch64-linux-android NDK standalone path" -valopt x86_64-linux-android-ndk "" "x86_64-linux-android NDK standalone path" -valopt nacl-cross-path "" "NaCl SDK path (Pepper Canary is recommended). Must be absolute!" -valopt musl-root "/usr/local" "MUSL root installation directory (deprecated)" -valopt musl-root-x86_64 "" "x86_64-unknown-linux-musl install directory" -valopt musl-root-i686 "" "i686-unknown-linux-musl install directory" -valopt musl-root-arm "" "arm-unknown-linux-musleabi install directory" -valopt musl-root-armhf "" "arm-unknown-linux-musleabihf install directory" -valopt musl-root-armv7 "" "armv7-unknown-linux-musleabihf install directory" -valopt extra-filename "" "Additional data that is hashed and passed to the -C extra-filename flag" -valopt qemu-armhf-rootfs "" "rootfs in qemu testing, you probably don't want to use this" -valopt qemu-aarch64-rootfs "" "rootfs in qemu testing, you probably don't want to use this" -valopt experimental-targets "" "experimental LLVM targets to build" - -if [ -e ${CFG_SRC_DIR}.git ] -then - valopt release-channel "dev" "the name of the release channel to build" -else - # If we have no git directory then we are probably a tarball distribution - # and should default to stable channel - Issue 28322 - probe CFG_GIT git - msg "git: no git directory. Changing default release channel to stable" - valopt release-channel "stable" "the name of the release channel to build" -fi - -# Used on systems where "cc" and "ar" are unavailable -valopt default-linker "cc" "the default linker" -valopt default-ar "ar" "the default ar" - -# Many of these are saved below during the "writing configuration" step -# (others are conditionally saved). 
-opt_nosave manage-submodules 1 "let the build manage the git submodules" -opt_nosave clang 0 "prefer clang to gcc for building the runtime" -opt_nosave jemalloc 1 "build liballoc with jemalloc" -opt full-bootstrap 0 "build three compilers instead of two" -opt extended 0 "build an extended rust tool set" - -valopt_nosave prefix "/usr/local" "set installation prefix" -valopt_nosave local-rust-root "/usr/local" "set prefix for local rust binary" -valopt_nosave host "${CFG_BUILD}" "GNUs ./configure syntax LLVM host triples" -valopt_nosave target "${CFG_HOST}" "GNUs ./configure syntax LLVM target triples" -valopt_nosave mandir "${CFG_PREFIX}/share/man" "install man pages in PATH" -valopt_nosave docdir "${CFG_PREFIX}/share/doc/rust" "install documentation in PATH" -valopt_nosave bindir "${CFG_PREFIX}/bin" "install binaries" - -# On Windows this determines root of the subtree for target libraries. -# Host runtime libs always go to 'bin'. -valopt libdir "${CFG_PREFIX}/lib" "install libraries" - -case "$CFG_LIBDIR" in - "$CFG_PREFIX"/*) CAT_INC=2;; - "$CFG_PREFIX"*) CAT_INC=1;; - *) - err "libdir must begin with the prefix. Use --prefix to set it accordingly.";; -esac - -CFG_LIBDIR_RELATIVE=`echo ${CFG_LIBDIR} | cut -c$((${#CFG_PREFIX}+${CAT_INC}))-` - -if [ $HELP -eq 1 ] -then - echo - exit 0 -fi - -# Validate Options -if [ -z "$CFG_DISABLE_OPTION_CHECKING" ] -then - step_msg "validating $CFG_SELF args" - validate_opt -fi - -# Validate the release channel, and configure options -case "$CFG_RELEASE_CHANNEL" in - nightly ) - msg "overriding settings for $CFG_RELEASE_CHANNEL" - enable_if_not_disabled llvm-assertions - # FIXME(stage0) re-enable this on the next stage0 now that #35566 is - # fixed - case "$CFG_BUILD" in - *-pc-windows-gnu) - ;; - *) - enable_if_not_disabled debuginfo-lines - enable_if_not_disabled debuginfo-only-std - ;; - esac - - ;; - beta | stable) - msg "overriding settings for $CFG_RELEASE_CHANNEL" - case "$CFG_BUILD" in - *-pc-windows-gnu) - ;; - *) - enable_if_not_disabled debuginfo-lines - enable_if_not_disabled debuginfo-only-std - ;; - esac - ;; - dev) - ;; - *) - err "release channel must be 'dev', 'nightly', 'beta' or 'stable'" - ;; -esac - -# Adjust perf and debug options for debug mode -if [ -n "$CFG_ENABLE_DEBUG" ]; then - msg "debug mode enabled, setting performance options" - if [ -z "$CFG_ENABLE_OPTIMIZE_PROVIDED" ]; then - msg "optimization not explicitly enabled, disabling optimization" - CFG_DISABLE_OPTIMIZE=1 - CFG_DISABLE_OPTIMIZE_CXX=1 - fi - - # Set following variables to 1 unless setting already provided - enable_if_not_disabled debug-assertions - enable_if_not_disabled debug-jemalloc - enable_if_not_disabled debuginfo - enable_if_not_disabled llvm-assertions -fi - -# OK, now write the debugging options -if [ -n "$CFG_DISABLE_OPTIMIZE" ]; then putvar CFG_DISABLE_OPTIMIZE; fi -if [ -n "$CFG_DISABLE_OPTIMIZE_CXX" ]; then putvar CFG_DISABLE_OPTIMIZE_CXX; fi -if [ -n "$CFG_DISABLE_OPTIMIZE_LLVM" ]; then putvar CFG_DISABLE_OPTIMIZE_LLVM; fi -if [ -n "$CFG_ENABLE_LLVM_ASSERTIONS" ]; then putvar CFG_ENABLE_LLVM_ASSERTIONS; fi -if [ -n "$CFG_ENABLE_DEBUG_ASSERTIONS" ]; then putvar CFG_ENABLE_DEBUG_ASSERTIONS; fi -if [ -n "$CFG_ENABLE_LLVM_RELEASE_DEBUGINFO" ]; then putvar CFG_ENABLE_LLVM_RELEASE_DEBUGINFO; fi -if [ -n "$CFG_ENABLE_DEBUGINFO" ]; then putvar CFG_ENABLE_DEBUGINFO; fi -if [ -n "$CFG_ENABLE_DEBUGINFO_LINES" ]; then putvar CFG_ENABLE_DEBUGINFO_LINES; fi -if [ -n "$CFG_ENABLE_DEBUGINFO_ONLY_STD" ]; then putvar CFG_ENABLE_DEBUGINFO_ONLY_STD; fi -if [ -n 
"$CFG_ENABLE_DEBUG_JEMALLOC" ]; then putvar CFG_ENABLE_DEBUG_JEMALLOC; fi - -step_msg "looking for build programs" - -probe_need CFG_CURL curl -if [ -z "$CFG_PYTHON_PROVIDED" ]; then - probe_need CFG_PYTHON python2.7 python2 python -fi - -python_version=$($CFG_PYTHON -V 2>&1) -if [ $(echo $python_version | grep -c '^Python 2\.7') -ne 1 ]; then - err "Found $python_version, but Python 2.7 is required" -fi - -# the valgrind rpass tests will fail if you don't have a valgrind, but they're -# only disabled if you opt out. -if [ -z "$CFG_VALGRIND" ] -then - # If the user has explicitly asked for valgrind tests, then fail - if [ -n "$CFG_ENABLE_VALGRIND" ] && [ -n "$CFG_ENABLE_VALGRIND_PROVIDED" ] - then - err "No valgrind present, but valgrind tests explicitly requested" - else - CFG_DISABLE_VALGRIND_RPASS=1 - putvar CFG_DISABLE_VALGRIND_RPASS - fi -fi - -# Do some sanity checks if running on buildbot -# (these env vars are set by rust-buildbot) -if [ -n "$RUST_DIST_SERVER" -a -n "$ALLOW_NONZERO_RLIMIT_CORE" ]; then - # Frequently the llvm submodule directory is broken by the build - # being killed - llvm_lock="${CFG_SRC_DIR}/.git/modules/src/llvm/index.lock" - if [ -e "$llvm_lock" ]; then - step_msg "removing $llvm_lock" - rm -f "$llvm_lock" - fi -fi - -BIN_SUF= -if [ "$CFG_OSTYPE" = "pc-windows-gnu" ] || [ "$CFG_OSTYPE" = "pc-windows-msvc" ] -then - BIN_SUF=.exe -fi - -# --enable-local-rebuild implies --enable-local-rust too -if [ -n "$CFG_ENABLE_LOCAL_REBUILD" ] -then - if [ -z "$CFG_ENABLE_LOCAL_RUST" ] - then - CFG_ENABLE_LOCAL_RUST=1 - putvar CFG_ENABLE_LOCAL_RUST - fi -fi - -if [ -n "$CFG_ENABLE_LOCAL_RUST" ] -then - system_rustc=$(which rustc) - if [ -f ${CFG_LOCAL_RUST_ROOT}/bin/rustc${BIN_SUF} ] - then - : # everything already configured - elif [ -n "$system_rustc" ] - then - # we assume that rustc is in a /bin directory - CFG_LOCAL_RUST_ROOT=${system_rustc%/bin/rustc} - else - err "no local rust to use" - fi - - CMD="${CFG_LOCAL_RUST_ROOT}/bin/rustc${BIN_SUF}" - LRV=`LD_LIBRARY_PATH=${CFG_LOCAL_RUST_ROOT}/lib $CMD --version` - if [ $? -ne 0 ] - then - step_msg "failure while running $CMD --version" - exit 1 - fi - step_msg "using rustc at: ${CFG_LOCAL_RUST_ROOT} with version: $LRV" - putvar CFG_LOCAL_RUST_ROOT -fi - -# Same with jemalloc. save the setting here. -if [ -n "$CFG_DISABLE_JEMALLOC" ] -then - putvar CFG_DISABLE_JEMALLOC -fi - -# All safeguards based on $CFG_ENABLE_CLANG should occur before this -# point in the script; after this point, script logic should inspect -# $CFG_USING_CLANG rather than $CFG_ENABLE_CLANG. 
- -# Set CFG_{CC,CXX,CPP,CFLAGS,CXXFLAGS,LDFLAGS} -envopt CC -envopt CXX -envopt CPP -envopt CFLAGS -envopt CXXFLAGS -envopt LDFLAGS - -# a little post-processing of various config values -CFG_PREFIX=${CFG_PREFIX%/} -CFG_MANDIR=${CFG_MANDIR%/} -CFG_DOCDIR=${CFG_DOCDIR%/} -CFG_BINDIR=${CFG_BINDIR%/} -CFG_HOST="$(echo $CFG_HOST | tr ',' ' ')" -CFG_TARGET="$(echo $CFG_TARGET | tr ',' ' ')" - -# copy build-triples to host-triples so that builds are a subset of hosts -V_TEMP="" -for i in $CFG_BUILD $CFG_HOST; -do - echo "$V_TEMP" | grep -qF $i || V_TEMP="$V_TEMP${V_TEMP:+ }$i" -done -CFG_HOST=$V_TEMP - -# copy host-triples to target-triples so that hosts are a subset of targets -V_TEMP="" -for i in $CFG_HOST $CFG_TARGET; -do - echo "$V_TEMP" | grep -qF $i || V_TEMP="$V_TEMP${V_TEMP:+ }$i" -done -CFG_TARGET=$V_TEMP - -step_msg "writing configuration" - -putvar CFG_SRC_DIR -putvar CFG_SRC_DIR_RELATIVE -putvar CFG_BUILD_DIR -putvar CFG_OSTYPE -putvar CFG_CPUTYPE -putvar CFG_CONFIGURE_ARGS -putvar CFG_PREFIX -putvar CFG_HOST -putvar CFG_TARGET -putvar CFG_LIBDIR_RELATIVE -putvar CFG_DISABLE_MANAGE_SUBMODULES -putvar CFG_AARCH64_LINUX_ANDROID_NDK -putvar CFG_ARM_LINUX_ANDROIDEABI_NDK -putvar CFG_ARMV7_LINUX_ANDROIDEABI_NDK -putvar CFG_I686_LINUX_ANDROID_NDK -putvar CFG_X86_64_LINUX_ANDROID_NDK -putvar CFG_NACL_CROSS_PATH -putvar CFG_MANDIR -putvar CFG_DOCDIR -putvar CFG_BINDIR -putvar CFG_USING_LIBCPP - -msg -copy_if_changed ${CFG_SRC_DIR}src/bootstrap/mk/Makefile.in ./Makefile -move_if_changed config.tmp config.mk -rm -f config.tmp -touch config.stamp - -if [ -z "$CFG_ENABLE_DEBUG" ]; then - step_msg "configured in release mode. for development consider --enable-debug" -else - step_msg "complete" -fi - -if [ "$CFG_SRC_DIR" = `pwd` ]; then - X_PY=x.py -else - X_PY=${CFG_SRC_DIR_RELATIVE}x.py -fi - -msg "run \`python ${X_PY} --help\`" -msg +try python2.7 "$@" +try python27 "$@" +try python2 "$@" +exec python $script "$@" diff --git a/git-commit-hash b/git-commit-hash new file mode 100644 index 0000000000..060dacf078 --- /dev/null +++ b/git-commit-hash @@ -0,0 +1 @@ +05e2e1c41414e8fc73d0f267ea8dab1a3eeeaa99 \ No newline at end of file diff --git a/src/Cargo.lock b/src/Cargo.lock index 19f3042c70..bc1fdf40b0 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -1,11 +1,3 @@ -[root] -name = "unwind" -version = "0.0.0" -dependencies = [ - "core 0.0.0", - "libc 0.0.0", -] - [[package]] name = "advapi32-sys" version = "0.2.0" @@ -46,8 +38,8 @@ dependencies = [ "alloc 0.0.0", "alloc_system 0.0.0", "build_helper 0.1.0", + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "core 0.0.0", - "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.0.0", ] @@ -80,31 +72,31 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "backtrace" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "backtrace-sys 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace-sys 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "dbghelp-sys 0.2.0 
(registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-demangle 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "backtrace-sys" -version = "0.1.12" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -130,21 +122,26 @@ name = "bitflags" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "bitflags" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "bootstrap" version = "0.0.0" dependencies = [ "build_helper 0.1.0", - "cmake 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)", - "filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", - "getopts 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", + "filetime 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "getopts 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -161,8 +158,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" name = "build-manifest" version = "0.1.0" dependencies = [ - "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", "toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -170,28 +167,28 @@ dependencies = [ name = "build_helper" version = "0.1.0" dependencies = [ - "filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "filetime 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "cargo" -version = "0.22.0" +version = "0.23.0" dependencies = [ - "advapi32-sys 0.2.0 
(registry+https://github.com/rust-lang/crates.io-index)", "atty 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "bufstream 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "cargotest 0.1.0", "core-foundation 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "crates-io 0.11.0", - "crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "crates-io 0.12.0", + "crossbeam 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crypto-hash 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "curl 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "error-chain 0.11.0-rc.2 (registry+https://github.com/rust-lang/crates.io-index)", - "filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "flate2 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "filetime 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "flate2 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)", "fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "git2 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "git2 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)", "git2-curl 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "hamcrest 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -200,24 +197,23 @@ dependencies = [ "ignore 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "jobserver 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libgit2-sys 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "libgit2-sys 0.6.15 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl 0.9.17 (registry+https://github.com/rust-lang/crates.io-index)", "psapi-sys 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "same-file 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "semver 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_ignored 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "semver 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_ignored 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.3 
(registry+https://github.com/rust-lang/crates.io-index)", "shell-escape 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "tar 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "termcolor 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "termcolor 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -227,14 +223,14 @@ dependencies = [ name = "cargotest" version = "0.1.0" dependencies = [ - "cargo 0.22.0", - "filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "flate2 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", - "git2 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "cargo 0.23.0", + "filetime 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "flate2 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)", + "git2 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)", "hamcrest 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "tar 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -243,6 +239,11 @@ dependencies = [ name = "cargotest2" version = "0.1.0" +[[package]] +name = "cc" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "cfg-if" version = "0.1.2" @@ -250,7 +251,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "clap" -version = "2.26.0" +version = "2.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -258,8 +259,7 @@ dependencies = [ "bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", "strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "term_size 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "textwrap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-segmentation 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "textwrap 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "yaml-rust 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -267,10 +267,10 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -281,12 +281,28 @@ dependencies = [ "core 0.0.0", ] +[[package]] +name = "commoncrypto" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + 
"commoncrypto-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "commoncrypto-sys" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "compiler_builtins" version = "0.0.0" dependencies = [ + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "core 0.0.0", - "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -295,9 +311,9 @@ version = "0.0.0" dependencies = [ "diff 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "getopts 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "filetime 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "getopts 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -306,6 +322,14 @@ dependencies = [ name = "completion" version = "0.1.0" +[[package]] +name = "conv" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "custom_derive 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "core" version = "0.0.0" @@ -319,7 +343,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "core-foundation-sys 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -327,18 +351,18 @@ name = "core-foundation-sys" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "crates-io" -version = "0.11.0" +version = "0.12.0" dependencies = [ "curl 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "error-chain 0.11.0-rc.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -347,33 +371,79 @@ name = "crossbeam" version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "crossbeam" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "crypto-hash" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "advapi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "commoncrypto 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl 0.9.21 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "cssparser" +version = "0.13.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cssparser-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", + "procedural-masquerade 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "cssparser-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "phf_codegen 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", + "procedural-masquerade 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "curl" version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "curl-sys 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "curl-sys 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "openssl-probe 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.17 (registry+https://github.com/rust-lang/crates.io-index)", - "socket2 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.21 (registry+https://github.com/rust-lang/crates.io-index)", + "socket2 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "curl-sys" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libz-sys 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.17 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "libz-sys 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.21 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "vcpkg 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "custom_derive" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "dbghelp-sys" 
version = "0.2.0" @@ -383,6 +453,14 @@ dependencies = [ "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "debug_unreachable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "derive-new" version = "0.3.0" @@ -404,14 +482,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", "strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "dtoa" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -442,18 +520,10 @@ dependencies = [ [[package]] name = "error-chain" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "backtrace 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "error-chain" -version = "0.11.0-rc.2" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "backtrace 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -465,10 +535,11 @@ dependencies = [ [[package]] name = "filetime" -version = "0.1.10" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -485,11 +556,11 @@ version = "0.1.0" [[package]] name = "flate2" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", - "miniz-sys 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "miniz-sys 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -512,35 +583,39 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "futures" -version = "0.1.14" +name = "futf" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "debug_unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "mac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] -name = "gcc" -version = "0.3.51" +name = "futures" +version = "0.1.16" source 
= "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "getopts" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "git2" -version = "0.6.6" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libgit2-sys 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "libgit2-sys 0.6.15 (registry+https://github.com/rust-lang/crates.io-index)", "openssl-probe 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.17 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.21 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -550,7 +625,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "curl 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "git2 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "git2 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -591,16 +666,16 @@ dependencies = [ [[package]] name = "handlebars" -version = "0.26.2" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "pest 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "quick-error 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "quick-error 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -628,6 +703,26 @@ dependencies = [ name = "hover" version = "0.1.0" +[[package]] +name = "html-diff" +version = "0.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kuchiki 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "html5ever" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "mac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "markup5ever 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "idna" version = "0.1.4" @@ -669,9 +764,9 @@ version = "0.1.0" name = "installer" version = "0.0.0" dependencies = [ - "clap 2.26.0 (registry+https://github.com/rust-lang/crates.io-index)", - "error-chain 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", - "flate2 
0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", + "clap 2.26.2 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "flate2 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "tar 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", @@ -682,7 +777,7 @@ dependencies = [ [[package]] name = "itoa" -version = "0.3.1" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -690,20 +785,20 @@ name = "jobserver" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-core" -version = "7.1.0" +version = "7.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -715,15 +810,26 @@ dependencies = [ "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "kuchiki" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cssparser 0.13.7 (registry+https://github.com/rust-lang/crates.io-index)", + "html5ever 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "selectors 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "languageserver-types" version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "enum_primitive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "url_serde 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -742,21 +848,21 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.29" +version = "0.2.31" source = 
"registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "libgit2-sys" -version = "0.6.12" +version = "0.6.15" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cmake 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)", - "curl-sys 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", + "curl-sys 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "libssh2-sys 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libz-sys 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.17 (registry+https://github.com/rust-lang/crates.io-index)", + "libz-sys 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.21 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -765,20 +871,20 @@ name = "libssh2-sys" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cmake 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libz-sys 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.17 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "libz-sys 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.21 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libz-sys" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "vcpkg 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -794,12 +900,48 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "lzma-sys" -version = "0.1.8" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "filetime 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "mac" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "magenta" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "conv 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + 
"magenta-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "magenta-sys" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "markup5ever" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", + "phf_codegen 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", + "string_cache 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "string_cache_codegen 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tendril 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -809,18 +951,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "mdbook" -version = "0.0.22" +version = "0.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "clap 2.26.0 (registry+https://github.com/rust-lang/crates.io-index)", + "clap 2.26.2 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "handlebars 0.26.2 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "handlebars 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "open 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "pulldown-cmark 0.0.14 (registry+https://github.com/rust-lang/crates.io-index)", + "open 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "pulldown-cmark 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -829,7 +975,7 @@ name = "memchr" version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -837,16 +983,16 @@ name = "memchr" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "miniz-sys" -version = "0.1.9" +version = 
"0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -871,7 +1017,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -896,7 +1042,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "num-integer 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -947,7 +1093,7 @@ name = "num_cpus" version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -956,19 +1102,19 @@ version = "0.1.0" [[package]] name = "open" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "openssl" -version = "0.9.17" +version = "0.9.21" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", "foreign-types 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.17 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.21 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -978,11 +1124,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "openssl-sys" -version = "0.9.17" +version = "0.9.21" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "vcpkg 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1023,25 +1169,70 @@ name = "pest" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "phf" +version = "0.7.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "phf_shared 0.7.21 
(registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "phf_codegen" +version = "0.7.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "phf_generator 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", + "phf_shared 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "phf_generator" +version = "0.7.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "phf_shared 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "phf_shared" +version = "0.7.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "siphasher 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "pkg-config" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "precomputed-hash" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "proc_macro" version = "0.0.0" dependencies = [ + "rustc_errors 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", ] +[[package]] +name = "procedural-masquerade" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "profiler_builtins" version = "0.0.0" dependencies = [ + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "core 0.0.0", - "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1059,12 +1250,20 @@ version = "0.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", - "getopts 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "pulldown-cmark" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", + "getopts 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "quick-error" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -1082,7 +1281,7 @@ name = "racer" version = "2.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "clap 2.26.0 (registry+https://github.com/rust-lang/crates.io-index)", + "clap 2.26.2 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "syntex_errors 0.52.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1100,12 +1299,18 @@ dependencies = [ [[package]] name = "rand" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "magenta 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "redox_syscall" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "reformat" version = "0.1.0" @@ -1162,30 +1367,30 @@ version = "0.1.0" [[package]] name = "rls" -version = "0.121.0" +version = 
"0.122.0" dependencies = [ - "cargo 0.22.0", + "cargo 0.23.0", "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 7.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "languageserver-types 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "racer 2.0.10 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-analysis 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-analysis 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)", "rls-data 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-rustc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-rustc 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rls-vfs 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "rustfmt-nightly 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rustfmt-nightly 0.2.7", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rls-analysis" -version = "0.6.5" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "derive-new 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1202,13 +1407,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rls-rustc" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -1217,8 +1422,8 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1234,8 +1439,8 @@ dependencies = [ name = "rustbook" version = "0.1.0" dependencies = [ - "clap 2.26.0 (registry+https://github.com/rust-lang/crates.io-index)", - "mdbook 0.0.22 (registry+https://github.com/rust-lang/crates.io-index)", + "clap 2.26.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "mdbook 0.0.26 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1243,14 +1448,14 @@ name = "rustc" version = "0.0.0" dependencies = [ "arena 0.0.0", - "flate2 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "flate2 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)", "fmt_macros 0.0.0", "graphviz 0.0.0", "jobserver 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_back 0.0.0", - "rustc_bitflags 0.0.0", "rustc_const_math 0.0.0", "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", @@ -1291,7 +1496,8 @@ dependencies = [ name = "rustc_apfloat" version = "0.0.0" dependencies = [ - "rustc_bitflags 0.0.0", + "bitflags 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_cratesio_shim 0.0.0", ] [[package]] @@ -1301,7 +1507,7 @@ dependencies = [ "alloc 0.0.0", "alloc_system 0.0.0", "build_helper 0.1.0", - "cmake 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", "core 0.0.0", ] @@ -1314,10 +1520,6 @@ dependencies = [ "syntax 0.0.0", ] -[[package]] -name = "rustc_bitflags" -version = "0.0.0" - [[package]] name = "rustc_borrowck" version = "0.0.0" @@ -1355,6 +1557,13 @@ dependencies = [ "syntax 0.0.0", ] +[[package]] +name = "rustc_cratesio_shim" +version = "0.0.0" +dependencies = [ + "bitflags 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "rustc_data_structures" version = "0.0.0" @@ -1435,9 +1644,10 @@ dependencies = [ name = "rustc_llvm" version = "0.0.0" dependencies = [ + "bitflags 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "build_helper 0.1.0", - "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_bitflags 0.0.0", + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_cratesio_shim 0.0.0", ] [[package]] @@ -1447,7 +1657,7 @@ dependencies = [ "alloc 0.0.0", "alloc_system 0.0.0", "build_helper 0.1.0", - "cmake 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", "core 0.0.0", ] @@ -1455,7 +1665,7 @@ dependencies = [ name = "rustc_metadata" version = "0.0.0" dependencies = [ - "flate2 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", + "flate2 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "proc_macro 0.0.0", @@ -1473,14 +1683,15 @@ dependencies = [ name = "rustc_mir" version = "0.0.0" dependencies = [ + "bitflags 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "graphviz 0.0.0", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", - "rustc_bitflags 0.0.0", "rustc_const_eval 0.0.0", "rustc_const_math 0.0.0", "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", + "serialize 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", ] @@ -1492,7 +1703,7 @@ dependencies = [ "alloc 0.0.0", "alloc_system 0.0.0", "build_helper 0.1.0", - "cmake 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.26 
(registry+https://github.com/rust-lang/crates.io-index)", "core 0.0.0", ] @@ -1530,6 +1741,7 @@ name = "rustc_privacy" version = "0.0.0" dependencies = [ "rustc 0.0.0", + "rustc_typeck 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", ] @@ -1565,8 +1777,9 @@ dependencies = [ name = "rustc_trans" version = "0.0.0" dependencies = [ - "flate2 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", - "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "flate2 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)", "jobserver 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1575,7 +1788,6 @@ dependencies = [ "rustc-demangle 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_allocator 0.0.0", "rustc_back 0.0.0", - "rustc_bitflags 0.0.0", "rustc_const_math 0.0.0", "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", @@ -1592,7 +1804,12 @@ dependencies = [ name = "rustc_trans_utils" version = "0.0.0" dependencies = [ + "ar 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "flate2 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", + "rustc_back 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", ] @@ -1604,7 +1821,7 @@ dependencies = [ "alloc 0.0.0", "alloc_system 0.0.0", "build_helper 0.1.0", - "cmake 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", "core 0.0.0", ] @@ -1630,8 +1847,9 @@ name = "rustdoc" version = "0.0.0" dependencies = [ "build_helper 0.1.0", + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", + "html-diff 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "pulldown-cmark 0.0.14 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1645,19 +1863,18 @@ dependencies = [ [[package]] name = "rustfmt-nightly" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" +version = "0.2.7" dependencies = [ "diff 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "getopts 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", + "getopts 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + 
"serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "strings 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1684,13 +1901,28 @@ name = "scopeguard" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "selectors" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cssparser 0.13.7 (registry+https://github.com/rust-lang/crates.io-index)", + "fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", + "phf_codegen 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", + "precomputed-hash 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "semver" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1700,22 +1932,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "serde" -version = "1.0.11" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "serde_derive" -version = "1.0.11" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive_internals 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive_internals 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "serde_derive_internals" -version = "0.15.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1724,21 +1956,21 @@ dependencies = [ [[package]] name = "serde_ignored" -version = "0.0.3" +version = "0.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "serde_json" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "dtoa 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "dtoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.11 
(registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1751,13 +1983,23 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "socket2" +name = "siphasher" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "smallvec" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "socket2" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1775,10 +2017,10 @@ dependencies = [ "alloc_jemalloc 0.0.0", "alloc_system 0.0.0", "build_helper 0.1.0", + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "collections 0.0.0", "compiler_builtins 0.0.0", "core 0.0.0", - "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.0.0", "panic_abort 0.0.0", "panic_unwind 0.0.0", @@ -1799,6 +2041,36 @@ dependencies = [ "core 0.0.0", ] +[[package]] +name = "string_cache" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "debug_unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "phf_shared 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", + "precomputed-hash 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "string_cache_codegen 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "string_cache_shared 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "string_cache_codegen" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "phf_generator 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", + "phf_shared 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "string_cache_shared 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "string_cache_shared" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "strings" version = "0.1.0" @@ -1843,8 +2115,9 @@ dependencies = [ name = "syntax" version = "0.0.0" dependencies = [ - "bitflags 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_cratesio_shim 0.0.0", "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", "serialize 0.0.0", @@ -1875,7 +2148,7 @@ name = "syntex_errors" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 
(registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", "syntex_pos 0.52.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1897,7 +2170,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", "syntex_errors 0.52.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1911,8 +2184,8 @@ name = "tar" version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "filetime 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "xattr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1921,7 +2194,17 @@ name = "tempdir" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tendril" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futf 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "mac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "utf-8 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1943,13 +2226,13 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "termcolor" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "wincolor 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1959,13 +2242,13 @@ dependencies = [ name = "test" version = "0.0.0" dependencies = [ - "getopts 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", + "getopts 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)", "term 0.0.0", ] [[package]] name = "textwrap" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "term_size 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1978,7 +2261,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2015,7 +2298,7 @@ name = "toml" 
version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2056,6 +2339,18 @@ name = "unicode-xid" version = "0.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "unicødë" +version = "0.1.0" + +[[package]] +name = "unreachable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "unreachable" version = "1.0.0" @@ -2071,6 +2366,14 @@ dependencies = [ "tidy 0.1.0", ] +[[package]] +name = "unwind" +version = "0.0.0" +dependencies = [ + "core 0.0.0", + "libc 0.0.0", +] + [[package]] name = "url" version = "1.5.1" @@ -2086,7 +2389,7 @@ name = "url_serde" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2099,6 +2402,14 @@ dependencies = [ "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "utf-8" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "utf8-ranges" version = "0.1.3" @@ -2153,6 +2464,10 @@ dependencies = [ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "workspace_symbol" +version = "0.1.0" + [[package]] name = "ws2_32-sys" version = "0.2.1" @@ -2167,7 +2482,7 @@ name = "xattr" version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2175,7 +2490,7 @@ name = "xz2" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "lzma-sys 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "lzma-sys 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2190,65 +2505,82 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "23ac7c30002a5accbf7e8987d0632fa6de155b7c3d39d0067317a391e00a2ef6" "checksum ar 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b24e4eef8e3fa7e2ca75b157e6039cdf8d9d3a68213ddc19d0fd9d576b9717c9" "checksum atty 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d912da0db7fa85514874458ca3651fe2cddace8d0b0505571dbdcd41ab490159" -"checksum backtrace 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "72f9b4182546f4b04ebc4ab7f84948953a118bd6021a1b6a6c909e3e94f6be76" -"checksum backtrace-sys 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "afccc5772ba333abccdf60d55200fa3406f8c59dcf54d5f7998c9107d3799c7c" +"checksum backtrace 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "99f2ce94e22b8e664d95c57fff45b98a966c2252b60691d0b7aeeccd88d70983" +"checksum backtrace-sys 0.1.14 
(registry+https://github.com/rust-lang/crates.io-index)" = "c63ea141ef8fdb10409d0f5daf30ac51f84ef43bff66f16627773d2a292cd189" "checksum bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d" "checksum bitflags 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1370e9fc2a6ae53aea8b7a5110edbd08836ed87c88736dfabccade1c2b44bff4" "checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5" +"checksum bitflags 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f5cde24d1b2e2216a726368b2363a273739c91f4e3eb4e0dd12d672d396ad989" "checksum bufstream 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f2f382711e76b9de6c744cc00d0497baba02fb00a787f088c879f01d09468e32" +"checksum cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7db2f146208d7e0fbee761b09cd65a7f51ccc38705d4e7262dad4d73b12a76b1" "checksum cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c819a1287eb618df47cc647173c5c4c66ba19d888a6e50d605672aed3140de" -"checksum clap 2.26.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2267a8fdd4dce6956ba6649e130f62fb279026e5e84b92aa939ac8f85ce3f9f0" -"checksum cmake 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)" = "b8ebbb35d3dc9cd09497168f33de1acb79b265d350ab0ac34133b98f8509af1f" +"checksum clap 2.26.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3451e409013178663435d6f15fdb212f14ee4424a3d74f979d081d0a66b6f1f2" +"checksum cmake 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)" = "357c07e7a1fc95732793c1edb5901e1a1f305cfcf63a90eb12dbd22bdb6b789d" +"checksum commoncrypto 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d056a8586ba25a1e4d61cb090900e495952c7886786fc55f909ab2f819b69007" +"checksum commoncrypto-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1fed34f46747aa73dfaa578069fd8279d2818ade2b55f38f22a9401c7f4083e2" +"checksum conv 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "78ff10625fd0ac447827aa30ea8b861fead473bb60aeb73af6c1c58caf0d1299" "checksum core-foundation 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5909502e547762013619f4c4e01cc7393c20fe2d52d7fa471c1210adb2320dc7" "checksum core-foundation-sys 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "bc9fb3d6cb663e6fd7cf1c63f9b144ee2b1e4a78595a0451dd34bff85b9a3387" "checksum crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "0c5ea215664ca264da8a9d9c3be80d2eaf30923c259d03e870388eb927508f97" +"checksum crossbeam 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8837ab96533202c5b610ed44bc7f4183e7957c1c8f56e8cc78bb098593c8ba0a" +"checksum crypto-hash 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "34903878eec1694faf53cae8473a088df333181de421d4d3d48061d6559fe602" +"checksum cssparser 0.13.7 (registry+https://github.com/rust-lang/crates.io-index)" = "ef6124306e5ebc5ab11891d063aeafdd0cdc308079b708c8b566125f3680292b" +"checksum cssparser-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "079adec4af52bb5275eadd004292028c79eb3c5f5b4ee8086a36d4197032f6df" "checksum curl 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7034c534a1d7d22f7971d6088aa9d281d219ef724026c3428092500f41ae9c2c" -"checksum curl-sys 0.3.14 
(registry+https://github.com/rust-lang/crates.io-index)" = "d5481162dc4f424d088581db2f979fa7d4c238fe9794595de61d8d7522e277de" +"checksum curl-sys 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "4bee31aa3a079d5f3ff9579ea4dcfb1b1a17a40886f5f467436d383e78134b55" +"checksum custom_derive 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "ef8ae57c4978a2acd8b869ce6b9ca1dfe817bff704c220209fdef2c0b75a01b9" "checksum dbghelp-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "97590ba53bcb8ac28279161ca943a924d1fd4a8fb3fa63302591647c4fc5b850" +"checksum debug_unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9a032eac705ca39214d169f83e3d3da290af06d8d1d344d1baad2fd002dca4b3" "checksum derive-new 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "41be6ca3b99e0c0483fb2389685448f650459c3ecbe4e18d7705d8010ec4ab8e" "checksum diff 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "0a515461b6c8c08419850ced27bc29e86166dcdcde8fbe76f8b1f0589bb49472" "checksum docopt 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3b5b93718f8b3e5544fcc914c43de828ca6c6ace23e0332c6080a2977b49787a" -"checksum dtoa 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "80c8b71fd71146990a9742fc06dcbbde19161a267e0ad4e572c35162f4578c90" +"checksum dtoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "09c3753c3db574d215cba4ea76018483895d7bff25a31b49ba45db21c48e50ab" "checksum enum_primitive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "be4551092f4d519593039259a9ed8daedf0da12e5109c5280338073eaeb81180" "checksum env_logger 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "15abd780e45b3ea4f76b4e9a26ff4843258dd8a3eed2775a0e7368c2e7936c2f" "checksum env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3ddf21e73e016298f5cb37d6ef8e8da8e39f91f9ec8b0df44b7deb16a9f8cd5b" -"checksum error-chain 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d9435d864e017c3c6afeac1654189b06cdb491cf2ff73dbf0d73b0f292f42ff8" -"checksum error-chain 0.11.0-rc.2 (registry+https://github.com/rust-lang/crates.io-index)" = "38d3a55d9a7a456748f2a3912c0941a5d9a68006eb15b3c3c9836b8420dc102d" -"checksum filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "5363ab8e4139b8568a6237db5248646e5a8a2f89bd5ccb02092182b11fd3e922" -"checksum flate2 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)" = "36df0166e856739905cd3d7e0b210fe818592211a008862599845e012d8d304c" +"checksum error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff511d5dc435d703f4971bc399647c9bc38e20cb41452e3b9feb4765419ed3f3" +"checksum filetime 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "6ab199bf38537c6f38792669e081e0bb278b9b7405bba2642e4e5d15bf732c0e" +"checksum flate2 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)" = "e6234dd4468ae5d1e2dbb06fe2b058696fdc50a339c68a393aefbf00bc81e423" "checksum fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6cc484842f1e2884faf56f529f960cc12ad8c71ce96cc7abba0a067c98fee344" "checksum foreign-types 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3e4056b9bd47f8ac5ba12be771f77a0dae796d1bbaaf5fd0b9c2d38b69b8a29d" "checksum fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9ab76cfd2aaa59b7bf6688ad9ba15bbae64bff97f04ea02144cfd3443e5c2866" -"checksum futures 0.1.14 
(registry+https://github.com/rust-lang/crates.io-index)" = "4b63a4792d4f8f686defe3b39b92127fea6344de5d38202b2ee5a11bbbf29d6a" -"checksum gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)" = "120d07f202dcc3f72859422563522b66fe6463a4c513df062874daad05f85f0a" -"checksum getopts 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9047cfbd08a437050b363d35ef160452c5fe8ea5187ae0a624708c91581d685" -"checksum git2 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "aa01936ac96555c083c0e8553f672616274408d9d3fc5b8696603fbf63ff43ee" +"checksum futf 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "51f93f3de6ba1794dcd5810b3546d004600a59a98266487c8407bc4b24e398f3" +"checksum futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "05a23db7bd162d4e8265968602930c476f688f0c180b44bdaf55e0cb2c687558" +"checksum getopts 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)" = "65922871abd2f101a2eb0eaebadc66668e54a87ad9c3dd82520b5f86ede5eff9" +"checksum git2 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)" = "0c1c0203d653f4140241da0c1375a404f0a397249ec818cd2076c6280c50f6fa" "checksum git2-curl 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "68676bc784bf0bef83278898929bf64a251e87c0340723d0b93fa096c9c5bf8e" "checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" "checksum globset 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "feeb1b6840809ef5efcf7a4a990bc4e1b7ee3df8cf9e2379a75aeb2ba42ac9c3" "checksum hamcrest 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bf088f042a467089e9baa4972f57f9247e42a0cc549ba264c7a04fbb8ecb89d4" -"checksum handlebars 0.26.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fbba80e74e9591a5f6a4ffff6b7f9d645759a896e431cfbdc853e9184370294a" +"checksum handlebars 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fb04af2006ea09d985fef82b81e0eb25337e51b691c76403332378a53d521edc" "checksum hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d6a22814455d41612f41161581c2883c0c6a1c41852729b17d5ed88f01e153aa" "checksum home 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9f25ae61099d8f3fee8b483df0bd4ecccf4b2731897aad40d50eca1b641fe6db" +"checksum html-diff 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5298d63081a642508fce965740ddb03a386c5d81bf1fef0579a815cf49cb8c68" +"checksum html5ever 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a49d5001dd1bddf042ea41ed4e0a671d50b1bf187e66b349d7ec613bdce4ad90" "checksum idna 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "014b298351066f1512874135335d62a789ffe78a9974f94b43ed5621951eaf7d" "checksum ignore 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b3fcaf2365eb14b28ec7603c98c06cc531f19de9eb283d89a3dff8417c8c99f5" -"checksum itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "eb2f404fbc66fd9aac13e998248505e7ecb2ad8e44ab6388684c5fb11c6c251c" +"checksum itoa 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8324a32baf01e2ae060e9de58ed0bc2320c9a2833491ee36cd3b4c414de4db8c" "checksum jobserver 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "443ae8bc0af6c106e6e8b77e04684faecc1a5ce94e058f4c2b0a037b0ea1b133" -"checksum jsonrpc-core 7.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"903e5eee845f3d83c1436d12848d97b1247cf850ff06a8e1db2f1ce3543af2cf" +"checksum jsonrpc-core 7.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b1acd0f9934da94466d2370f36832b9b19271b4abdfdb5e69f0bcd991ebcd515" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" +"checksum kuchiki 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ef2ea4f2f7883cd7c6772b06c14abca01a2cc1f75c426cebffcf6b3b925ef9fc" "checksum languageserver-types 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d52e477b23bf52cd3ca0f9fc6c5d14be954eec97e3b9cdfbd962d911bd533caf" "checksum lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3b37545ab726dd833ec6420aaba8231c5b320814b9029ad585555d2a03e94fbf" -"checksum libc 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)" = "8a014d9226c2cc402676fbe9ea2e15dd5222cd1dd57f576b5b283178c944a264" -"checksum libgit2-sys 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "df18a822100352d9863b302faf6f8f25c0e77f0e60feb40e5dbe1238b7f13b1d" +"checksum libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)" = "d1419b2939a0bc44b77feb34661583c7546b532b192feab36249ab584b86856c" +"checksum libgit2-sys 0.6.15 (registry+https://github.com/rust-lang/crates.io-index)" = "205fc37e829c5b36de63d14c8dc8b62c5a6a2519b16318ed0977079ca97256a9" "checksum libssh2-sys 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0db4ec23611747ef772db1c4d650f8bd762f07b461727ec998f953c614024b75" -"checksum libz-sys 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)" = "3fdd64ef8ee652185674455c1d450b83cbc8ad895625d543b5324d923f82e4d8" +"checksum libz-sys 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)" = "44ebbc760fd2d2f4d93de09a0e13d97e057612052e871da9985cedcb451e6bd5" "checksum log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b" -"checksum lzma-sys 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "66b2e318eb97ab84f05725471f90c52a09c964053a5899a13fd0165acc26d00b" +"checksum lzma-sys 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "c1b93b78f89e8737dac81837fc8f5521ac162abcba902e1a3db949d55346d1da" +"checksum mac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c41e0c4fef86961ac6d6f8a82609f55f31b05e4fce149ac5710e439df7619ba4" +"checksum magenta 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4bf0336886480e671965f794bc9b6fce88503563013d1bfb7a502c81fe3ac527" +"checksum magenta-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "40d014c7011ac470ae28e2f76a02bfea4a8480f73e701353b49ad7a8d75f4699" +"checksum markup5ever 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ff834ac7123c6a37826747e5ca09db41fd7a83126792021c2e636ad174bb77d3" "checksum matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "100aabe6b8ff4e4a7e32c1c13523379802df0772b82466207ac25b013f193376" -"checksum mdbook 0.0.22 (registry+https://github.com/rust-lang/crates.io-index)" = "22911d86cde6f80fa9f0fb2a68bbbde85d97af4fe0ce267141c83a4187d28700" +"checksum mdbook 0.0.26 (registry+https://github.com/rust-lang/crates.io-index)" = "8a1ac668292d1e5c7b1c6fd64f70d3a85105b8069a89558a0d67bdb2ff298ca1" "checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = 
"d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20" "checksum memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1dbccc0e46f1ea47b9f17e6d67c5a96bd27030519c519c9c91327e31275a47b4" -"checksum miniz-sys 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "28eaee17666671fa872e567547e8428e83308ebe5808cdf6a0e28397dbe2c726" +"checksum miniz-sys 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "609ce024854aeb19a0ef7567d348aaa5a746b32fb72e336df7fcc16869d7e2b4" "checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" "checksum net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)" = "3a80f842784ef6c9a958b68b7516bc7e35883c614004dd94959a4dca1b716c09" "checksum num 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "a311b77ebdc5dd4cf6449d81e4135d9f0e3b153839ac90e648a8ef538f923525" @@ -2259,46 +2591,59 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum num-rational 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "288629c76fac4b33556f4b7ab57ba21ae202da65ba8b77466e6d598e31990790" "checksum num-traits 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "99843c856d68d8b4313b03a17e33c4bb42ae8f6610ea81b28abe076ac721b9b0" "checksum num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "aec53c34f2d0247c5ca5d32cca1478762f301740468ee9ee6dcb7a0dd7a0c584" -"checksum open 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3478ed1686bd1300c8a981a940abc92b06fac9cbef747f4c668d4e032ff7b842" -"checksum openssl 0.9.17 (registry+https://github.com/rust-lang/crates.io-index)" = "085aaedcc89a2fac1eb2bc19cd66f29d4ea99fec60f82a5f3a88a6be7dbd90b5" +"checksum open 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c281318d992e4432cfa799969467003d05921582a7489a8325e37f8a450d5113" +"checksum openssl 0.9.21 (registry+https://github.com/rust-lang/crates.io-index)" = "2225c305d8f57001a0d34263e046794aa251695f20773102fbbfeb1e7b189955" "checksum openssl-probe 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d98df0270d404ccd3c050a41d579c52d1db15375168bb3471e04ec0f5f378daf" -"checksum openssl-sys 0.9.17 (registry+https://github.com/rust-lang/crates.io-index)" = "7e3a9845a4c9fdb321931868aae5549e96bb7b979bf9af7de03603d74691b5f3" +"checksum openssl-sys 0.9.21 (registry+https://github.com/rust-lang/crates.io-index)" = "92867746af30eea7a89feade385f7f5366776f1c52ec6f0de81360373fa88363" "checksum owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37" "checksum percent-encoding 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de154f638187706bde41d9b4738748933d64e6b37bdbffc0b47a97d16a6ae356" "checksum pest 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0a6dda33d67c26f0aac90d324ab2eb7239c819fc7b2552fe9faa4fe88441edc8" +"checksum phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "cb325642290f28ee14d8c6201159949a872f220c62af6e110a56ea914fbe42fc" +"checksum phf_codegen 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "d62594c0bb54c464f633175d502038177e90309daf2e0158be42ed5f023ce88f" +"checksum phf_generator 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "6b07ffcc532ccc85e3afc45865469bf5d9e4ef5bfcf9622e3cfe80c2d275ec03" +"checksum phf_shared 0.7.21 
(registry+https://github.com/rust-lang/crates.io-index)" = "07e24b0ca9643bdecd0632f2b3da6b1b89bbb0030e0b992afc1113b23a7bc2f2" "checksum pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "3a8b4c6b8165cd1a1cd4b9b120978131389f64bdaf456435caa41e630edba903" +"checksum precomputed-hash 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf1fc3616b3ef726a847f2cd2388c646ef6a1f1ba4835c2629004da48184150" +"checksum procedural-masquerade 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c93cdc1fb30af9ddf3debc4afbdb0f35126cbd99daa229dd76cdd5349b41d989" "checksum psapi-sys 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "abcd5d1a07d360e29727f757a9decb3ce8bc6e0efa8969cfaad669a8317a2478" "checksum pulldown-cmark 0.0.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9ab1e588ef8efd702c7ed9d2bd774db5e6f4d878bb5a1a9f371828fbdff6973" -"checksum quick-error 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3c36987d4978eb1be2e422b1e0423a557923a5c3e7e6f31d5699e9aafaefa469" +"checksum pulldown-cmark 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a656fdb8b6848f896df5e478a0eb9083681663e37dcb77dd16981ff65329fe8b" +"checksum quick-error 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "eda5fe9b71976e62bc81b781206aaa076401769b2143379d3eb2118388babac4" "checksum quote 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4c5cf478fe1006dbcc72567121d23dbdae5f1632386068c5c86ff4f645628504" "checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" "checksum racer 2.0.10 (registry+https://github.com/rust-lang/crates.io-index)" = "f120c7510ef7aff254aeb06067fb6fac573ec96a1660e194787cf9dced412bf0" -"checksum rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "022e0636ec2519ddae48154b028864bdce4eaf7d35226ab8e65c611be97b189d" +"checksum rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "eb250fd207a4729c976794d03db689c9be1d634ab5a1c9da9492a13d8fecbcdf" +"checksum redox_syscall 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)" = "8dde11f18c108289bef24469638a04dce49da56084f2d50618b226e47eb04509" "checksum regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)" = "4fd4ace6a8cf7860714a2c2280d6c1f7e6a413486c13298bbc86fd3da019402f" "checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b" "checksum regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957" "checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db" -"checksum rls-analysis 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d2cb40c0371765897ae428b5706bb17135705ad4f6d1b8b6afbaabcf8c9b5cff" +"checksum rls-analysis 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)" = "fa390bdc70b0a90d07d9cd5c6989ba5fca2d59728903919ebda1a1b2037b18d7" "checksum rls-data 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "11d339f1888e33e74d8032de0f83c40b2bdaaaf04a8cfc03b32186c3481fb534" -"checksum rls-rustc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5fa757c9d547d460427ceff01875f9cac5f5acd8fc6543946e9b0335ba29d537" +"checksum rls-rustc 0.1.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "b21ea952e9bf1569929abf1bb920262cde04b7b1b26d8e0260286302807299d2" "checksum rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d7c7046dc6a92f2ae02ed302746db4382e75131b9ce20ce967259f6b5867a6a" "checksum rls-vfs 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ffd34691a510938bb67fe0444fb363103c73ffb31c121d1e16bc92d8945ea8ff" "checksum rustc-demangle 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "aee45432acc62f7b9a108cc054142dac51f979e69e71ddce7d6fc7adf29e817e" "checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" -"checksum rustfmt-nightly 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6eea0d0590ae793fc4d281df56e01dc7531575c8ed9a72fadf5fdc7305a0d32f" "checksum same-file 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d931a44fdaa43b8637009e7632a02adc4f2b2e0733c08caa4cf00e8da4a117a7" "checksum scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f417c22df063e9450888a7561788e9bd46d3bb3c1466435b4eccb903807f147d" "checksum scopeguard 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "59a076157c1e2dc561d8de585151ee6965d910dd4dcb5dabb7ae3e83981a6c57" -"checksum semver 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3fdd61b85a0fa777f7fb7c454b9189b2941b110d1385ce84d7f76efdf1606a85" +"checksum selectors 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e3c89b1c6a3c029c82263f7dd2d44d0005ee7374eb09e254ab59dede4353a8c0" +"checksum semver 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bee2bc909ab2d8d60dab26e8cad85b25d795b14603a0dcb627b78b9d30b6454b" "checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -"checksum serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)" = "f7726f29ddf9731b17ff113c461e362c381d9d69433f79de4f3dd572488823e9" -"checksum serde_derive 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)" = "cf823e706be268e73e7747b147aa31c8f633ab4ba31f115efb57e5047c3a76dd" -"checksum serde_derive_internals 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "37aee4e0da52d801acfbc0cc219eb1eda7142112339726e427926a6f6ee65d3a" -"checksum serde_ignored 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c10e798e4405d7dcec3658989e35ee6706f730a9ed7c1184d5ebd84317e82f46" -"checksum serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "48b04779552e92037212c3615370f6bd57a40ebba7f20e554ff9f55e41a69a7b" +"checksum serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)" = "6a7046c9d4c6c522d10b2d098f9bebe2bef227e0e74044d8c1bfcf6b476af799" +"checksum serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)" = "1afcaae083fd1c46952a315062326bc9957f182358eb7da03b57ef1c688f7aa9" +"checksum serde_derive_internals 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bd381f6d01a6616cdba8530492d453b7761b456ba974e98768a18cad2cd76f58" +"checksum serde_ignored 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "190e9765dcedb56be63b6e0993a006c7e3b071a016a304736e4a315dc01fb142" +"checksum serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d243424e06f9f9c39e3cd36147470fd340db785825e367625f79298a6ac6b7ac" "checksum 
shell-escape 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "dd5cc96481d54583947bfe88bf30c23d53f883c6cd0145368b69989d97b84ef8" -"checksum socket2 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4daf80fcf54186fac4fe049e0b39d36a5cfde69a11a06413e61e77f553cccf9a" +"checksum siphasher 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0df90a788073e8d0235a67e50441d47db7c8ad9debd91cbf43736a2a92d36537" +"checksum smallvec 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4f8266519bc1d17d0b5b16f6c21295625d562841c708f6376f49028a43e9c11e" +"checksum socket2 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9e76b159741052c7deaa9fd0b5ca6b5f79cecf525ed665abfe5002086c6b2791" "checksum stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "15132e0e364248108c5e2c02e3ab539be8d6f5d52a01ca9bbf27ed657316f02b" +"checksum string_cache 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "413fc7852aeeb5472f1986ef755f561ddf0c789d3d796e65f0b6fe293ecd4ef8" +"checksum string_cache_codegen 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "479cde50c3539481f33906a387f2bd17c8e87cb848c35b6021d41fb81ff9b4d7" +"checksum string_cache_shared 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b1884d1bc09741d466d9b14e6d37ac89d6909cbcac41dd9ae982d4d063bbedfc" "checksum strings 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "da75d8bf2c4d210d63dd09581a041b036001f9f6e03d9b151dbff810fb7ba26a" "checksum strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d15c810519a91cf877e7e36e63fe068815c678181439f2f29e2562147c3694" "checksum syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d3b891b9015c88c576343b9b3e41c2c11a51c219ef067b264bd9c8aa9b441dad" @@ -2309,10 +2654,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum syntex_syntax 0.52.0 (registry+https://github.com/rust-lang/crates.io-index)" = "76a302e717e348aa372ff577791c3832395650073b8d8432f8b3cb170b34afde" "checksum tar 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)" = "281285b717926caa919ad905ef89c63d75805c7d89437fb873100925a53f2b1b" "checksum tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "87974a6f5c1dfb344d733055601650059a3363de2a6104819293baff662132d6" +"checksum tendril 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1b72f8e2f5b73b65c315b1a70c730f24b9d7a25f39e98de8acbe2bb795caea" "checksum term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "fa63644f74ce96fbeb9b794f66aff2a52d601cbd5e80f4b97123e3899f4570f1" "checksum term_size 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2b6b55df3198cc93372e85dd2ed817f0e38ce8cc0f22eb32391bfad9c4bf209" -"checksum termcolor 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9a5193a56b8d82014662c4b933dea6bec851daf018a2b01722e007daaf5f9dca" -"checksum textwrap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f728584ea33b0ad19318e20557cb0a39097751dbb07171419673502f848c7af6" +"checksum termcolor 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9065bced9c3e43453aa3d56f1e98590b8455b341d2fa191a1090c0dd0b242c75" +"checksum textwrap 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "df8e08afc40ae3459e4838f303e465aa50d823df8d7f83ca88108f6d3afe7edd" "checksum thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"a9539db560102d1cef46b8b78ce737ff0bb64e7e18d35b2a5688f7d097d0ff03" "checksum thread_local 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "8576dbbfcaef9641452d5cf0df9b0e7eeab7694956dd33bb61515fb8f18cfdd5" "checksum thread_local 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1697c4b57aeeb7a536b647165a2825faddffb1d3bad386d507709bd51a90bb14" @@ -2325,10 +2671,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "bf3a113775714a22dcb774d8ea3655c53a32debae63a063acc00a91cc586245f" "checksum unicode-xid 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "36dff09cafb4ec7c8cf0023eb0b686cb6ce65499116a12201c9e11840ca01beb" "checksum unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f860d7d29cf02cb2f3f359fd35991af3d30bac52c57d265a3c461074cb4dc" +"checksum unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1f2ae5ddb18e1c92664717616dd9549dde73f539f01bd7b77c2edb2446bdff91" "checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56" "checksum url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "eeb819346883532a271eb626deb43c4a1bb4c4dd47c519bd78137c3e72a4fe27" "checksum url_serde 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "74e7d099f1ee52f823d4bdd60c93c3602043c728f5db3b97bdb548467f7bddea" "checksum userenv-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "71d28ea36bbd9192d75bd9fa9b39f96ddb986eaee824adae5d53b6e51919b2f3" +"checksum utf-8 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b6f923c601c7ac48ef1d66f7d5b5b2d9a7ba9c51333ab75a3ddf8d0309185a56" "checksum utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1ca13c08c41c9c3e04224ed9ff80461d97e121589ff27c753a16cb10830ae0f" "checksum utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "662fab6525a98beff2921d7f61a39e7d59e0b425ebc7d0d9e66d316e55124122" "checksum vcpkg 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9e0a7d8bed3178a8fb112199d466eeca9ed09a14ba8ad67718179b4fd5487d0b" diff --git a/src/Cargo.toml b/src/Cargo.toml index 8754d5b2b6..f4b4189e01 100644 --- a/src/Cargo.toml +++ b/src/Cargo.toml @@ -18,6 +18,7 @@ members = [ "tools/cargo", "tools/rustdoc", "tools/rls", + "tools/rustfmt", # FIXME(https://github.com/rust-lang/cargo/issues/4089): move these to exclude "tools/rls/test_data/borrow_error", "tools/rls/test_data/completion", @@ -37,6 +38,8 @@ members = [ "tools/rls/test_data/infer_custom_bin", "tools/rls/test_data/infer_lib", "tools/rls/test_data/omit_init_build", + "tools/rls/test_data/unicødë", + "tools/rls/test_data/workspace_symbol", ] # Curiously, compiletest will segfault if compiled with opt-level=3 on 64-bit @@ -55,3 +58,9 @@ debug-assertions = false [profile.test] debug = false debug-assertions = false + +[patch."https://github.com/rust-lang/cargo"] +cargo = { path = "tools/cargo" } + +[patch.crates-io] +rustfmt-nightly = { path = "tools/rustfmt" } diff --git a/src/bootstrap/Cargo.toml b/src/bootstrap/Cargo.toml index daa2a3d0a0..3f1d03b187 100644 --- a/src/bootstrap/Cargo.toml +++ b/src/bootstrap/Cargo.toml @@ -34,7 +34,7 @@ cmake = "0.1.23" filetime = "0.1" num_cpus = "1.0" getopts = "0.2" -gcc = "0.3.50" +cc = "1.0" libc = "0.2" serde = "1.0.8" serde_derive = "1.0.8" diff --git 
a/src/bootstrap/README.md b/src/bootstrap/README.md index 2e844ceb17..e543b8c070 100644 --- a/src/bootstrap/README.md +++ b/src/bootstrap/README.md @@ -76,10 +76,9 @@ The script accepts commands, flags, and arguments to determine what to do: There are currently two methods for configuring the rustbuild build system. First, rustbuild offers a TOML-based configuration system with a `config.toml` -file in the same location as `config.mk`. An example of this configuration can -be found at `config.toml.example`, and the configuration file can also be passed -as `--config path/to/config.toml` if the build system is being invoked manually -(via the python script). +file. An example of this configuration can be found at `config.toml.example`, +and the configuration file can also be passed as `--config path/to/config.toml` +if the build system is being invoked manually (via the python script). Next, the `./configure` options serialized in `config.mk` will be parsed and read. That is, if any `./configure` options are passed, they'll be diff --git a/src/bootstrap/bin/rustc.rs b/src/bootstrap/bin/rustc.rs index 0baca9e58f..848b10d312 100644 --- a/src/bootstrap/bin/rustc.rs +++ b/src/bootstrap/bin/rustc.rs @@ -246,6 +246,12 @@ fn main() { } } + // When running miri tests, we need to generate MIR for all libraries + if env::var("TEST_MIRI").ok().map_or(false, |val| val == "true") { + cmd.arg("-Zalways-encode-mir"); + cmd.arg("-Zmir-emit-validate=1"); + } + // Force all crates compiled by this compiler to (a) be unstable and (b) // allow the `rustc_private` feature to link to other unstable crates // also in the sysroot. diff --git a/src/bootstrap/bin/sccache-plus-cl.rs b/src/bootstrap/bin/sccache-plus-cl.rs index cf0c127492..8584014d48 100644 --- a/src/bootstrap/bin/sccache-plus-cl.rs +++ b/src/bootstrap/bin/sccache-plus-cl.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -extern crate gcc; +extern crate cc; use std::env; use std::process::{self, Command}; @@ -18,12 +18,13 @@ fn main() { // Locate the actual compiler that we're invoking env::remove_var("CC"); env::remove_var("CXX"); - let mut cfg = gcc::Config::new(); + let mut cfg = cc::Build::new(); cfg.cargo_metadata(false) .out_dir("/") .target(&target) .host(&target) .opt_level(0) + .warnings(false) .debug(false); let compiler = cfg.get_compiler(); diff --git a/src/bootstrap/bootstrap.py b/src/bootstrap/bootstrap.py index 9369a55ccb..4a8c3dcebc 100644 --- a/src/bootstrap/bootstrap.py +++ b/src/bootstrap/bootstrap.py @@ -167,6 +167,141 @@ def format_build_time(duration): return str(datetime.timedelta(seconds=int(duration))) +def default_build_triple(): + """Build triple as in LLVM""" + default_encoding = sys.getdefaultencoding() + try: + ostype = subprocess.check_output( + ['uname', '-s']).strip().decode(default_encoding) + cputype = subprocess.check_output( + ['uname', '-m']).strip().decode(default_encoding) + except (subprocess.CalledProcessError, OSError): + if sys.platform == 'win32': + return 'x86_64-pc-windows-msvc' + err = "uname not found" + sys.exit(err) + + # The goal here is to come up with the same triple as LLVM would, + # at least for the subset of platforms we're willing to target. 
+ ostype_mapper = { + 'Bitrig': 'unknown-bitrig', + 'Darwin': 'apple-darwin', + 'DragonFly': 'unknown-dragonfly', + 'FreeBSD': 'unknown-freebsd', + 'Haiku': 'unknown-haiku', + 'NetBSD': 'unknown-netbsd', + 'OpenBSD': 'unknown-openbsd' + } + + # Consider the direct transformation first and then the special cases + if ostype in ostype_mapper: + ostype = ostype_mapper[ostype] + elif ostype == 'Linux': + os_from_sp = subprocess.check_output( + ['uname', '-o']).strip().decode(default_encoding) + if os_from_sp == 'Android': + ostype = 'linux-android' + else: + ostype = 'unknown-linux-gnu' + elif ostype == 'SunOS': + ostype = 'sun-solaris' + # On Solaris, uname -m will return a machine classification instead + # of a cpu type, so uname -p is recommended instead. However, the + # output from that option is too generic for our purposes (it will + # always emit 'i386' on x86/amd64 systems). As such, isainfo -k + # must be used instead. + try: + cputype = subprocess.check_output( + ['isainfo', '-k']).strip().decode(default_encoding) + except (subprocess.CalledProcessError, OSError): + err = "isainfo not found" + sys.exit(err) + elif ostype.startswith('MINGW'): + # msys' `uname` does not print gcc configuration, but prints msys + # configuration. so we cannot believe `uname -m`: + # msys1 is always i686 and msys2 is always x86_64. + # instead, msys defines $MSYSTEM which is MINGW32 on i686 and + # MINGW64 on x86_64. + ostype = 'pc-windows-gnu' + cputype = 'i686' + if os.environ.get('MSYSTEM') == 'MINGW64': + cputype = 'x86_64' + elif ostype.startswith('MSYS'): + ostype = 'pc-windows-gnu' + elif ostype.startswith('CYGWIN_NT'): + cputype = 'i686' + if ostype.endswith('WOW64'): + cputype = 'x86_64' + ostype = 'pc-windows-gnu' + else: + err = "unknown OS type: {}".format(ostype) + sys.exit(err) + + cputype_mapper = { + 'BePC': 'i686', + 'aarch64': 'aarch64', + 'amd64': 'x86_64', + 'arm64': 'aarch64', + 'i386': 'i686', + 'i486': 'i686', + 'i686': 'i686', + 'i786': 'i686', + 'powerpc': 'powerpc', + 'powerpc64': 'powerpc64', + 'powerpc64le': 'powerpc64le', + 'ppc': 'powerpc', + 'ppc64': 'powerpc64', + 'ppc64le': 'powerpc64le', + 's390x': 's390x', + 'x64': 'x86_64', + 'x86': 'i686', + 'x86-64': 'x86_64', + 'x86_64': 'x86_64' + } + + # Consider the direct transformation first and then the special cases + if cputype in cputype_mapper: + cputype = cputype_mapper[cputype] + elif cputype in {'xscale', 'arm'}: + cputype = 'arm' + if ostype == 'linux-android': + ostype = 'linux-androideabi' + elif cputype == 'armv6l': + cputype = 'arm' + if ostype == 'linux-android': + ostype = 'linux-androideabi' + else: + ostype += 'eabihf' + elif cputype in {'armv7l', 'armv8l'}: + cputype = 'armv7' + if ostype == 'linux-android': + ostype = 'linux-androideabi' + else: + ostype += 'eabihf' + elif cputype == 'mips': + if sys.byteorder == 'big': + cputype = 'mips' + elif sys.byteorder == 'little': + cputype = 'mipsel' + else: + raise ValueError("unknown byteorder: {}".format(sys.byteorder)) + elif cputype == 'mips64': + if sys.byteorder == 'big': + cputype = 'mips64' + elif sys.byteorder == 'little': + cputype = 'mips64el' + else: + raise ValueError('unknown byteorder: {}'.format(sys.byteorder)) + # only the n64 ABI is supported, indicate it + ostype += 'abi64' + elif cputype == 'sparcv9': + pass + else: + err = "unknown cpu type: {}".format(cputype) + sys.exit(err) + + return "{}-{}".format(cputype, ostype) + class RustBuild(object): """Provide all the methods required to build Rust""" def __init__(self): @@ -177,7 +312,6 @@ class 
RustBuild(object): self.build = '' self.build_dir = os.path.join(os.getcwd(), "build") self.clean = False - self.config_mk = '' self.config_toml = '' self.printed = False self.rust_root = os.path.abspath(os.path.join(__file__, '../../..')) @@ -374,26 +508,6 @@ class RustBuild(object): return self.get_string(value) or value.strip() return None - def get_mk(self, key): - """Returns the value of the given key in config.mk, otherwise returns None - - >>> rb = RustBuild() - >>> rb.config_mk = 'key := value\\n' - >>> rb.get_mk('key') - 'value' - - If the key does not exists, the result is None: - - >>> rb.get_mk('does_not_exists') == None - True - """ - for line in iter(self.config_mk.splitlines()): - if line.startswith(key + ' '): - var = line[line.find(':=') + 2:].strip() - if var != '': - return var - return None - def cargo(self): """Return config path for cargo""" return self.program_config('cargo') @@ -407,15 +521,9 @@ class RustBuild(object): >>> rb = RustBuild() >>> rb.config_toml = 'rustc = "rustc"\\n' - >>> rb.config_mk = 'CFG_LOCAL_RUST_ROOT := /tmp/rust\\n' >>> rb.program_config('rustc') 'rustc' - >>> cargo_path = rb.program_config('cargo') - >>> cargo_path.rstrip(".exe") == os.path.join("/tmp/rust", - ... "bin", "cargo") - True >>> rb.config_toml = '' - >>> rb.config_mk = '' >>> cargo_path = rb.program_config('cargo') >>> cargo_path.rstrip(".exe") == os.path.join(rb.bin_root(), ... "bin", "cargo") @@ -424,10 +532,6 @@ class RustBuild(object): config = self.get_toml(program) if config: return config - config = self.get_mk('CFG_LOCAL_RUST_ROOT') - if config: - return os.path.join(config, "bin", "{}{}".format( - program, self.exe_suffix())) return os.path.join(self.bin_root(), "bin", "{}{}".format( program, self.exe_suffix())) @@ -439,10 +543,14 @@ class RustBuild(object): 'devel' """ start = line.find('"') - if start == -1: - return None - end = start + 1 + line[start + 1:].find('"') - return line[start + 1:end] + if start != -1: + end = start + 1 + line[start + 1:].find('"') + return line[start + 1:end] + start = line.find('\'') + if start != -1: + end = start + 1 + line[start + 1:].find('\'') + return line[start + 1:end] + return None @staticmethod def exe_suffix(): @@ -517,162 +625,19 @@ class RustBuild(object): def build_triple(self): """Build triple as in LLVM""" - default_encoding = sys.getdefaultencoding() config = self.get_toml('build') if config: return config - config = self.get_mk('CFG_BUILD') - if config: - return config - try: - ostype = subprocess.check_output( - ['uname', '-s']).strip().decode(default_encoding) - cputype = subprocess.check_output( - ['uname', '-m']).strip().decode(default_encoding) - except (subprocess.CalledProcessError, OSError): - if sys.platform == 'win32': - return 'x86_64-pc-windows-msvc' - err = "uname not found" - if self.verbose: - raise Exception(err) - sys.exit(err) - - # The goal here is to come up with the same triple as LLVM would, - # at least for the subset of platforms we're willing to target. 
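The method body removed above is the uname-based detection that now lives in the module-level default_build_triple() added at the top of this hunk, so the new configure.py shim can reuse it without instantiating RustBuild. A trimmed-down sketch of the same idea (only a few platforms; the real function also handles Android, MinGW/MSYS, Solaris and MIPS endianness, and this simplified mapping is illustrative, not the upstream table):

    import subprocess
    import sys

    def guess_build_triple():
        """Minimal sketch of LLVM-style triple detection via `uname`."""
        enc = sys.getdefaultencoding()
        try:
            ostype = subprocess.check_output(['uname', '-s']).strip().decode(enc)
            cputype = subprocess.check_output(['uname', '-m']).strip().decode(enc)
        except (subprocess.CalledProcessError, OSError):
            if sys.platform == 'win32':
                return 'x86_64-pc-windows-msvc'
            sys.exit("uname not found")
        os_map = {'Darwin': 'apple-darwin', 'Linux': 'unknown-linux-gnu',
                  'FreeBSD': 'unknown-freebsd'}
        cpu_map = {'x86_64': 'x86_64', 'amd64': 'x86_64',
                   'arm64': 'aarch64', 'aarch64': 'aarch64'}
        if ostype not in os_map or cputype not in cpu_map:
            sys.exit("unknown platform: {} {}".format(ostype, cputype))
        return "{}-{}".format(cpu_map[cputype], os_map[ostype])

    if __name__ == '__main__':
        print(guess_build_triple())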
- ostype_mapper = { - 'Bitrig': 'unknown-bitrig', - 'Darwin': 'apple-darwin', - 'DragonFly': 'unknown-dragonfly', - 'FreeBSD': 'unknown-freebsd', - 'Haiku': 'unknown-haiku', - 'NetBSD': 'unknown-netbsd', - 'OpenBSD': 'unknown-openbsd' - } - - # Consider the direct transformation first and then the special cases - if ostype in ostype_mapper: - ostype = ostype_mapper[ostype] - elif ostype == 'Linux': - os_from_sp = subprocess.check_output( - ['uname', '-o']).strip().decode(default_encoding) - if os_from_sp == 'Android': - ostype = 'linux-android' - else: - ostype = 'unknown-linux-gnu' - elif ostype == 'SunOS': - ostype = 'sun-solaris' - # On Solaris, uname -m will return a machine classification instead - # of a cpu type, so uname -p is recommended instead. However, the - # output from that option is too generic for our purposes (it will - # always emit 'i386' on x86/amd64 systems). As such, isainfo -k - # must be used instead. - try: - cputype = subprocess.check_output( - ['isainfo', '-k']).strip().decode(default_encoding) - except (subprocess.CalledProcessError, OSError): - err = "isainfo not found" - if self.verbose: - raise Exception(err) - sys.exit(err) - elif ostype.startswith('MINGW'): - # msys' `uname` does not print gcc configuration, but prints msys - # configuration. so we cannot believe `uname -m`: - # msys1 is always i686 and msys2 is always x86_64. - # instead, msys defines $MSYSTEM which is MINGW32 on i686 and - # MINGW64 on x86_64. - ostype = 'pc-windows-gnu' - cputype = 'i686' - if os.environ.get('MSYSTEM') == 'MINGW64': - cputype = 'x86_64' - elif ostype.startswith('MSYS'): - ostype = 'pc-windows-gnu' - elif ostype.startswith('CYGWIN_NT'): - cputype = 'i686' - if ostype.endswith('WOW64'): - cputype = 'x86_64' - ostype = 'pc-windows-gnu' - else: - err = "unknown OS type: {}".format(ostype) - if self.verbose: - raise ValueError(err) - sys.exit(err) - - cputype_mapper = { - 'BePC': 'i686', - 'aarch64': 'aarch64', - 'amd64': 'x86_64', - 'arm64': 'aarch64', - 'i386': 'i686', - 'i486': 'i686', - 'i686': 'i686', - 'i786': 'i686', - 'powerpc': 'powerpc', - 'powerpc64': 'powerpc64', - 'powerpc64le': 'powerpc64le', - 'ppc': 'powerpc', - 'ppc64': 'powerpc64', - 'ppc64le': 'powerpc64le', - 's390x': 's390x', - 'x64': 'x86_64', - 'x86': 'i686', - 'x86-64': 'x86_64', - 'x86_64': 'x86_64' - } - - # Consider the direct transformation first and then the special cases - if cputype in cputype_mapper: - cputype = cputype_mapper[cputype] - elif cputype in {'xscale', 'arm'}: - cputype = 'arm' - if ostype == 'linux-android': - ostype = 'linux-androideabi' - elif cputype == 'armv6l': - cputype = 'arm' - if ostype == 'linux-android': - ostype = 'linux-androideabi' - else: - ostype += 'eabihf' - elif cputype in {'armv7l', 'armv8l'}: - cputype = 'armv7' - if ostype == 'linux-android': - ostype = 'linux-androideabi' - else: - ostype += 'eabihf' - elif cputype == 'mips': - if sys.byteorder == 'big': - cputype = 'mips' - elif sys.byteorder == 'little': - cputype = 'mipsel' - else: - raise ValueError("unknown byteorder: {}".format(sys.byteorder)) - elif cputype == 'mips64': - if sys.byteorder == 'big': - cputype = 'mips64' - elif sys.byteorder == 'little': - cputype = 'mips64el' - else: - raise ValueError('unknown byteorder: {}'.format(sys.byteorder)) - # only the n64 ABI is supported, indicate it - ostype += 'abi64' - elif cputype == 'sparcv9': - pass - else: - err = "unknown cpu type: {}".format(cputype) - if self.verbose: - raise ValueError(err) - sys.exit(err) - - return "{}-{}".format(cputype, 
ostype) + return default_build_triple() def update_submodules(self): """Update submodules""" if (not os.path.exists(os.path.join(self.rust_root, ".git"))) or \ - self.get_toml('submodules') == "false" or \ - self.get_mk('CFG_DISABLE_MANAGE_SUBMODULES') == "1": + self.get_toml('submodules') == "false": return print('Updating submodules') default_encoding = sys.getdefaultencoding() - run(["git", "submodule", "-q", "sync"], cwd=self.rust_root) + run(["git", "submodule", "-q", "sync"], cwd=self.rust_root, verbose=self.verbose) submodules = [s.split(' ', 1)[1] for s in subprocess.check_output( ["git", "config", "--file", os.path.join(self.rust_root, ".gitmodules"), @@ -680,11 +645,9 @@ class RustBuild(object): ).decode(default_encoding).splitlines()] submodules = [module for module in submodules if not ((module.endswith("llvm") and - (self.get_toml('llvm-config') or - self.get_mk('CFG_LLVM_ROOT'))) or + self.get_toml('llvm-config')) or (module.endswith("jemalloc") and - (self.get_toml('jemalloc') or - self.get_mk('CFG_JEMALLOC_ROOT'))))] + self.get_toml('jemalloc')))] run(["git", "submodule", "update", "--init", "--recursive"] + submodules, cwd=self.rust_root, verbose=self.verbose) @@ -719,11 +682,7 @@ def bootstrap(): try: with open(args.config or 'config.toml') as config: build.config_toml = config.read() - except: - pass - try: - build.config_mk = open('config.mk').read() - except: + except (OSError, IOError): pass if '\nverbose = 2' in build.config_toml: @@ -731,11 +690,9 @@ def bootstrap(): elif '\nverbose = 1' in build.config_toml: build.verbose = 1 - build.use_vendored_sources = '\nvendor = true' in build.config_toml or \ - 'CFG_ENABLE_VENDOR' in build.config_mk + build.use_vendored_sources = '\nvendor = true' in build.config_toml - build.use_locked_deps = '\nlocked-deps = true' in build.config_toml or \ - 'CFG_ENABLE_LOCKED_DEPS' in build.config_mk + build.use_locked_deps = '\nlocked-deps = true' in build.config_toml if 'SUDO_USER' in os.environ and not build.use_vendored_sources: if os.environ.get('USER') != os.environ['SUDO_USER']: diff --git a/src/bootstrap/bootstrap_test.py b/src/bootstrap/bootstrap_test.py index a65a3a4042..32ea4b4abe 100644 --- a/src/bootstrap/bootstrap_test.py +++ b/src/bootstrap/bootstrap_test.py @@ -15,6 +15,7 @@ import doctest import unittest import tempfile import hashlib +import sys from shutil import rmtree @@ -110,5 +111,6 @@ if __name__ == '__main__': TEST_LOADER.loadTestsFromTestCase(VerifyTestCase), TEST_LOADER.loadTestsFromTestCase(ProgramOutOfDate)]) - RUNNER = unittest.TextTestRunner(verbosity=2) - RUNNER.run(SUITE) + RUNNER = unittest.TextTestRunner(stream=sys.stdout, verbosity=2) + result = RUNNER.run(SUITE) + sys.exit(0 if result.wasSuccessful() else 1) diff --git a/src/bootstrap/builder.rs b/src/bootstrap/builder.rs index 298f6a004a..e7a5196178 100644 --- a/src/bootstrap/builder.rs +++ b/src/bootstrap/builder.rs @@ -8,15 +8,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
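The bootstrap_test.py hunk above makes the doctest suite report its result through the process exit status instead of always exiting 0, so CI actually notices failures. A minimal, self-contained sketch of that pattern (SmokeTest is a placeholder; the real suite loads VerifyTestCase and friends):

    import sys
    import unittest

    class SmokeTest(unittest.TestCase):
        def test_truth(self):
            self.assertTrue(True)

    if __name__ == '__main__':
        suite = unittest.TestLoader().loadTestsFromTestCase(SmokeTest)
        runner = unittest.TextTestRunner(stream=sys.stdout, verbosity=2)
        result = runner.run(suite)
        # Propagate failure to the caller (e.g. CI) instead of always exiting 0.
        sys.exit(0 if result.wasSuccessful() else 1)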
+use std::any::Any; +use std::cell::RefCell; +use std::collections::BTreeSet; +use std::env; use std::fmt::Debug; +use std::fs; use std::hash::Hash; -use std::cell::RefCell; +use std::ops::Deref; use std::path::{Path, PathBuf}; use std::process::Command; -use std::fs; -use std::ops::Deref; -use std::any::Any; -use std::collections::BTreeSet; use compile; use install; @@ -248,19 +249,19 @@ impl<'a> Builder<'a> { compile::StartupObjects, tool::BuildManifest, tool::Rustbook, tool::ErrorIndex, tool::UnstableBookGen, tool::Tidy, tool::Linkchecker, tool::CargoTest, tool::Compiletest, tool::RemoteTestServer, tool::RemoteTestClient, - tool::RustInstaller, tool::Cargo, tool::Rls, tool::Rustdoc, - native::Llvm), + tool::RustInstaller, tool::Cargo, tool::Rls, tool::Rustdoc, tool::Clippy, + native::Llvm, tool::Rustfmt, tool::Miri), Kind::Test => describe!(check::Tidy, check::Bootstrap, check::DefaultCompiletest, - check::HostCompiletest, check::Crate, check::CrateLibrustc, check::Linkcheck, - check::Cargotest, check::Cargo, check::Rls, check::Docs, check::ErrorIndex, - check::Distcheck), + check::HostCompiletest, check::Crate, check::CrateLibrustc, check::Rustdoc, + check::Linkcheck, check::Cargotest, check::Cargo, check::Rls, check::Docs, + check::ErrorIndex, check::Distcheck, check::Rustfmt, check::Miri, check::Clippy), Kind::Bench => describe!(check::Crate, check::CrateLibrustc), Kind::Doc => describe!(doc::UnstableBook, doc::UnstableBookGen, doc::TheBook, doc::Standalone, doc::Std, doc::Test, doc::Rustc, doc::ErrorIndex, doc::Nomicon, doc::Reference, doc::Rustdoc, doc::CargoBook), Kind::Dist => describe!(dist::Docs, dist::Mingw, dist::Rustc, dist::DebuggerScripts, dist::Std, dist::Analysis, dist::Src, dist::PlainSourceTarball, dist::Cargo, - dist::Rls, dist::Extended, dist::HashSign), + dist::Rls, dist::Extended, dist::HashSign, dist::DontDistWithMiriEnabled), Kind::Install => describe!(install::Docs, install::Std, install::Cargo, install::Rls, install::Analysis, install::Src, install::Rustc), } @@ -305,7 +306,7 @@ impl<'a> Builder<'a> { Subcommand::Bench { ref paths, .. } => (Kind::Bench, &paths[..]), Subcommand::Dist { ref paths } => (Kind::Dist, &paths[..]), Subcommand::Install { ref paths } => (Kind::Install, &paths[..]), - Subcommand::Clean => panic!(), + Subcommand::Clean { .. } => panic!(), }; let builder = Builder { @@ -437,9 +438,14 @@ impl<'a> Builder<'a> { let out_dir = self.stage_out(compiler, mode); cargo.env("CARGO_TARGET_DIR", out_dir) .arg(cmd) - .arg("-j").arg(self.jobs().to_string()) .arg("--target").arg(target); + // If we were invoked from `make` then that's already got a jobserver + // set up for us so no need to tell Cargo about jobs all over again. + if env::var_os("MAKEFLAGS").is_none() && env::var_os("MFLAGS").is_none() { + cargo.arg("-j").arg(self.jobs().to_string()); + } + // FIXME: Temporary fix for https://github.com/rust-lang/cargo/issues/3005 // Force cargo to output binaries with disambiguating hashes in the name cargo.env("__CARGO_DEFAULT_LIB_METADATA", &self.config.channel); @@ -475,6 +481,7 @@ impl<'a> Builder<'a> { } else { PathBuf::from("/path/to/nowhere/rustdoc/not/required") }) + .env("TEST_MIRI", self.config.test_miri.to_string()) .env("RUSTC_FLAGS", self.rustc_flags(target).join(" ")); if mode != Mode::Tool { @@ -524,7 +531,10 @@ impl<'a> Builder<'a> { // For other crates, however, we know that we've already got a standard // library up and running, so we can use the normal compiler to compile // build scripts in that situation. 
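The MAKEFLAGS/MFLAGS check added above keeps rustbuild from passing an explicit `-j` to Cargo when the build was started from `make`, because make already advertises a jobserver through those variables and a second job limit would fight with it. The same guard, sketched in Python for a hypothetical wrapper script (not part of the patch):

    import os

    def cargo_jobs_args(jobs):
        """Return the -j arguments to pass to Cargo, if any.

        If MAKEFLAGS or MFLAGS is set, a GNU make jobserver is already in
        charge of parallelism, so pass nothing and let Cargo inherit it.
        """
        if 'MAKEFLAGS' in os.environ or 'MFLAGS' in os.environ:
            return []
        return ['-j', str(jobs)]

    if __name__ == '__main__':
        # Hypothetical invocation, just to show the guard in use.
        print(' '.join(['cargo', 'build'] + cargo_jobs_args(8)))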
- if mode == Mode::Libstd { + // + // If LLVM support is disabled we need to use the snapshot compiler to compile + // build scripts, as the new compiler doesnt support executables. + if mode == Mode::Libstd || !self.build.config.llvm_enabled { cargo.env("RUSTC_SNAPSHOT", &self.initial_rustc) .env("RUSTC_SNAPSHOT_LIBDIR", self.rustc_snapshot_libdir()); } else { diff --git a/src/bootstrap/cc.rs b/src/bootstrap/cc_detect.rs similarity index 79% rename from src/bootstrap/cc.rs rename to src/bootstrap/cc_detect.rs index 0f25da8a23..76bb7ccbc1 100644 --- a/src/bootstrap/cc.rs +++ b/src/bootstrap/cc_detect.rs @@ -23,7 +23,7 @@ //! 6. "cc" //! //! Some of this logic is implemented here, but much of it is farmed out to the -//! `gcc` crate itself, so we end up having the same fallbacks as there. +//! `cc` crate itself, so we end up having the same fallbacks as there. //! Similar logic is then used to find a C++ compiler, just some s/cc/c++/ is //! used. //! @@ -35,7 +35,7 @@ use std::process::Command; use std::iter; use build_helper::{cc2ar, output}; -use gcc; +use cc; use Build; use config::Target; @@ -45,15 +45,15 @@ pub fn find(build: &mut Build) { // For all targets we're going to need a C compiler for building some shims // and such as well as for being a linker for Rust code. for target in build.targets.iter().chain(&build.hosts).cloned().chain(iter::once(build.build)) { - let mut cfg = gcc::Config::new(); - cfg.cargo_metadata(false).opt_level(0).debug(false) + let mut cfg = cc::Build::new(); + cfg.cargo_metadata(false).opt_level(0).warnings(false).debug(false) .target(&target).host(&build.build); let config = build.config.target_config.get(&target); if let Some(cc) = config.and_then(|c| c.cc.as_ref()) { cfg.compiler(cc); } else { - set_compiler(&mut cfg, "gcc", target, config, build); + set_compiler(&mut cfg, Language::C, target, config, build); } let compiler = cfg.get_compiler(); @@ -67,14 +67,14 @@ pub fn find(build: &mut Build) { // For all host triples we need to find a C++ compiler as well for host in build.hosts.iter().cloned().chain(iter::once(build.build)) { - let mut cfg = gcc::Config::new(); - cfg.cargo_metadata(false).opt_level(0).debug(false).cpp(true) + let mut cfg = cc::Build::new(); + cfg.cargo_metadata(false).opt_level(0).warnings(false).debug(false).cpp(true) .target(&host).host(&build.build); let config = build.config.target_config.get(&host); if let Some(cxx) = config.and_then(|c| c.cxx.as_ref()) { cfg.compiler(cxx); } else { - set_compiler(&mut cfg, "g++", host, config, build); + set_compiler(&mut cfg, Language::CPlusPlus, host, config, build); } let compiler = cfg.get_compiler(); build.verbose(&format!("CXX_{} = {:?}", host, compiler.path())); @@ -82,8 +82,8 @@ pub fn find(build: &mut Build) { } } -fn set_compiler(cfg: &mut gcc::Config, - gnu_compiler: &str, +fn set_compiler(cfg: &mut cc::Build, + compiler: Language, target: Interned, config: Option<&Target>, build: &Build) { @@ -94,7 +94,7 @@ fn set_compiler(cfg: &mut gcc::Config, t if t.contains("android") => { if let Some(ndk) = config.and_then(|c| c.ndk.as_ref()) { let target = target.replace("armv7", "arm"); - let compiler = format!("{}-{}", target, gnu_compiler); + let compiler = format!("{}-{}", target, compiler.clang()); cfg.compiler(ndk.join("bin").join(compiler)); } } @@ -103,6 +103,7 @@ fn set_compiler(cfg: &mut gcc::Config, // which is a gcc version from ports, if this is the case. 
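set_compiler now takes the Language enum (defined later in this hunk) rather than a bare "gcc"/"g++" string, so it can pick GCC-style names on most hosts and clang-style names, prefixed with the target, for Android NDK standalone toolchains. A Python rendering of that selection logic, simplified and with hypothetical paths, just to make the naming rule concrete:

    GCC_NAMES = {'c': 'gcc', 'c++': 'g++'}
    CLANG_NAMES = {'c': 'clang', 'c++': 'clang++'}

    def compiler_name(language, target, ndk_root=None):
        """Pick a C/C++ compiler name the way cc_detect.rs does."""
        if ndk_root is not None and 'android' in target:
            # NDK toolchains ship e.g. arm-linux-androideabi-clang++ in <ndk>/bin
            target = target.replace('armv7', 'arm')
            return '{}/bin/{}-{}'.format(ndk_root, target, CLANG_NAMES[language])
        return GCC_NAMES[language]

    if __name__ == '__main__':
        print(compiler_name('c++', 'x86_64-unknown-linux-gnu'))
        print(compiler_name('c', 'armv7-linux-androideabi', ndk_root='/opt/ndk'))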
t if t.contains("openbsd") => { let c = cfg.get_compiler(); + let gnu_compiler = compiler.gcc(); if !c.path().ends_with(gnu_compiler) { return } @@ -145,3 +146,29 @@ fn set_compiler(cfg: &mut gcc::Config, _ => {} } } + +/// The target programming language for a native compiler. +enum Language { + /// The compiler is targeting C. + C, + /// The compiler is targeting C++. + CPlusPlus, +} + +impl Language { + /// Obtains the name of a compiler in the GCC collection. + fn gcc(self) -> &'static str { + match self { + Language::C => "gcc", + Language::CPlusPlus => "g++", + } + } + + /// Obtains the name of a compiler in the clang suite. + fn clang(self) -> &'static str { + match self { + Language::C => "clang", + Language::CPlusPlus => "clang++", + } + } +} diff --git a/src/bootstrap/channel.rs b/src/bootstrap/channel.rs index b79c7de343..d9861867cf 100644 --- a/src/bootstrap/channel.rs +++ b/src/bootstrap/channel.rs @@ -24,12 +24,12 @@ use Build; use config::Config; // The version number -pub const CFG_RELEASE_NUM: &str = "1.21.0"; +pub const CFG_RELEASE_NUM: &str = "1.22.1"; // An optional number to put after the label, e.g. '.2' -> '-beta.2' // Be sure to make this starts with a dot to conform to semver pre-release // versions (section 9) -pub const CFG_PRERELEASE_VERSION: &str = ".4"; +pub const CFG_PRERELEASE_VERSION: &str = ".3"; pub struct GitInfo { inner: Option, diff --git a/src/bootstrap/check.rs b/src/bootstrap/check.rs index 92fb2105b7..bd9d29be96 100644 --- a/src/bootstrap/check.rs +++ b/src/bootstrap/check.rs @@ -23,7 +23,7 @@ use std::path::{PathBuf, Path}; use std::process::Command; use std::io::Read; -use build_helper::{self, output}; +use build_helper::{self, output, BuildExpectation}; use builder::{Kind, RunConfig, ShouldRun, Builder, Compiler, Step}; use cache::{INTERNER, Interned}; @@ -33,6 +33,7 @@ use native; use tool::{self, Tool}; use util::{self, dylib_path, dylib_path_var}; use {Build, Mode}; +use toolstate::ToolState; const ADB_TEST_DIR: &str = "/data/tmp/work"; @@ -64,22 +65,26 @@ impl fmt::Display for TestKind { } } -fn try_run(build: &Build, cmd: &mut Command) { +fn try_run_expecting(build: &Build, cmd: &mut Command, expect: BuildExpectation) { if !build.fail_fast { - if !build.try_run(cmd) { - let failures = build.delayed_failures.get(); - build.delayed_failures.set(failures + 1); + if !build.try_run(cmd, expect) { + let mut failures = build.delayed_failures.borrow_mut(); + failures.push(format!("{:?}", cmd)); } } else { - build.run(cmd); + build.run_expecting(cmd, expect); } } +fn try_run(build: &Build, cmd: &mut Command) { + try_run_expecting(build, cmd, BuildExpectation::None) +} + fn try_run_quiet(build: &Build, cmd: &mut Command) { if !build.fail_fast { if !build.try_run_quiet(cmd) { - let failures = build.delayed_failures.get(); - build.delayed_failures.set(failures + 1); + let mut failures = build.delayed_failures.borrow_mut(); + failures.push(format!("{:?}", cmd)); } } else { build.run_quiet(cmd); @@ -241,15 +246,162 @@ impl Step for Rls { let compiler = builder.compiler(stage, host); builder.ensure(tool::Rls { compiler, target: self.host }); + let mut cargo = tool::prepare_tool_cargo(builder, + compiler, + host, + "test", + "src/tools/rls"); + + // Don't build tests dynamically, just a pain to work with + cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1"); + + builder.add_rustc_lib_path(compiler, &mut cargo); + cargo.arg("--").args(&build.config.cmd.test_args()); + + try_run_expecting( + build, + &mut cargo, + 
builder.build.config.toolstate.rls.passes(ToolState::Testing), + ); + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct Rustfmt { + stage: u32, + host: Interned, +} + +impl Step for Rustfmt { + type Output = (); + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("src/tools/rustfmt") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Rustfmt { + stage: run.builder.top_stage, + host: run.target, + }); + } + + /// Runs `cargo test` for rustfmt. + fn run(self, builder: &Builder) { + let build = builder.build; + let stage = self.stage; + let host = self.host; + let compiler = builder.compiler(stage, host); + + builder.ensure(tool::Rustfmt { compiler, target: self.host }); let mut cargo = builder.cargo(compiler, Mode::Tool, host, "test"); - cargo.arg("--manifest-path").arg(build.src.join("src/tools/rls/Cargo.toml")); + cargo.arg("--manifest-path").arg(build.src.join("src/tools/rustfmt/Cargo.toml")); // Don't build tests dynamically, just a pain to work with cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1"); builder.add_rustc_lib_path(compiler, &mut cargo); - try_run(build, &mut cargo); + try_run_expecting( + build, + &mut cargo, + builder.build.config.toolstate.rustfmt.passes(ToolState::Testing), + ); + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct Miri { + host: Interned, +} + +impl Step for Miri { + type Output = (); + const ONLY_HOSTS: bool = true; + const DEFAULT: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + let test_miri = run.builder.build.config.test_miri; + run.path("src/tools/miri").default_condition(test_miri) + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Miri { + host: run.target, + }); + } + + /// Runs `cargo test` for miri. + fn run(self, builder: &Builder) { + let build = builder.build; + let host = self.host; + let compiler = builder.compiler(1, host); + + let miri = builder.ensure(tool::Miri { compiler, target: self.host }); + let mut cargo = builder.cargo(compiler, Mode::Tool, host, "test"); + cargo.arg("--manifest-path").arg(build.src.join("src/tools/miri/Cargo.toml")); + + // Don't build tests dynamically, just a pain to work with + cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1"); + // miri tests need to know about the stage sysroot + cargo.env("MIRI_SYSROOT", builder.sysroot(compiler)); + cargo.env("RUSTC_TEST_SUITE", builder.rustc(compiler)); + cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler)); + cargo.env("MIRI_PATH", miri); + + builder.add_rustc_lib_path(compiler, &mut cargo); + + try_run_expecting( + build, + &mut cargo, + builder.build.config.toolstate.miri.passes(ToolState::Testing), + ); + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct Clippy { + host: Interned, +} + +impl Step for Clippy { + type Output = (); + const ONLY_HOSTS: bool = true; + const DEFAULT: bool = false; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("src/tools/clippy") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Clippy { + host: run.target, + }); + } + + /// Runs `cargo test` for clippy. 
+ fn run(self, builder: &Builder) { + let build = builder.build; + let host = self.host; + let compiler = builder.compiler(1, host); + + let _clippy = builder.ensure(tool::Clippy { compiler, target: self.host }); + let mut cargo = builder.cargo(compiler, Mode::Tool, host, "test"); + cargo.arg("--manifest-path").arg(build.src.join("src/tools/clippy/Cargo.toml")); + + // Don't build tests dynamically, just a pain to work with + cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1"); + // clippy tests need to know about the stage sysroot + cargo.env("SYSROOT", builder.sysroot(compiler)); + + builder.add_rustc_lib_path(compiler, &mut cargo); + + try_run_expecting( + build, + &mut cargo, + builder.build.config.toolstate.clippy.passes(ToolState::Testing), + ); } } @@ -900,7 +1052,6 @@ impl Step for CrateLibrustc { } } - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct Crate { compiler: Compiler, @@ -1080,6 +1231,75 @@ impl Step for Crate { } } +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct Rustdoc { + host: Interned, + test_kind: TestKind, +} + +impl Step for Rustdoc { + type Output = (); + const DEFAULT: bool = true; + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("src/librustdoc").path("src/tools/rustdoc") + } + + fn make_run(run: RunConfig) { + let builder = run.builder; + + let test_kind = if builder.kind == Kind::Test { + TestKind::Test + } else if builder.kind == Kind::Bench { + TestKind::Bench + } else { + panic!("unexpected builder.kind in crate: {:?}", builder.kind); + }; + + builder.ensure(Rustdoc { + host: run.host, + test_kind, + }); + } + + fn run(self, builder: &Builder) { + let build = builder.build; + let test_kind = self.test_kind; + + let compiler = builder.compiler(builder.top_stage, self.host); + let target = compiler.host; + + let mut cargo = tool::prepare_tool_cargo(builder, + compiler, + target, + test_kind.subcommand(), + "src/tools/rustdoc"); + let _folder = build.fold_output(|| { + format!("{}_stage{}-rustdoc", test_kind.subcommand(), compiler.stage) + }); + println!("{} rustdoc stage{} ({} -> {})", test_kind, compiler.stage, + &compiler.host, target); + + if test_kind.subcommand() == "test" && !build.fail_fast { + cargo.arg("--no-fail-fast"); + } + + cargo.arg("-p").arg("rustdoc:0.0.0"); + + cargo.arg("--"); + cargo.args(&build.config.cmd.test_args()); + + if build.config.quiet_tests { + cargo.arg("--quiet"); + } + + let _time = util::timeit(); + + try_run(build, &mut cargo); + } +} + fn envify(s: &str) -> String { s.chars().map(|c| { match c { diff --git a/src/bootstrap/clean.rs b/src/bootstrap/clean.rs index 119340a019..87f194fb7d 100644 --- a/src/bootstrap/clean.rs +++ b/src/bootstrap/clean.rs @@ -13,7 +13,7 @@ //! Responsible for cleaning out a build directory of all old and stale //! artifacts to prepare for a fresh build. Currently doesn't remove the //! `build/cache` directory (download cache) or the `build/$target/llvm` -//! directory as we want that cached between builds. +//! directory unless the --all flag is present. 
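The clean.rs change above gives `x.py clean` an `--all` mode: the default pass still skips each host's `llvm` directory (and the download cache) so the expensive LLVM build is reused, while `--all` wipes the whole output directory. A small Python sketch of that selective removal, assuming directory trees only (the real rm_rf also handles plain files):

    import os
    import shutil

    def clean(out_dir, hosts, wipe_all=False):
        def rm_rf(path):
            if os.path.isdir(path):
                shutil.rmtree(path)

        if wipe_all:
            rm_rf(out_dir)
            return
        rm_rf(os.path.join(out_dir, 'tmp'))
        rm_rf(os.path.join(out_dir, 'dist'))
        for host in hosts:
            host_dir = os.path.join(out_dir, host)
            if not os.path.isdir(host_dir):
                continue
            for entry in os.listdir(host_dir):
                if entry == 'llvm':   # cached between builds
                    continue
                rm_rf(os.path.join(host_dir, entry))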
use std::fs; use std::io::{self, ErrorKind}; @@ -21,24 +21,29 @@ use std::path::Path; use Build; -pub fn clean(build: &Build) { +pub fn clean(build: &Build, all: bool) { rm_rf("tmp".as_ref()); - rm_rf(&build.out.join("tmp")); - rm_rf(&build.out.join("dist")); - for host in &build.hosts { - let entries = match build.out.join(host).read_dir() { - Ok(iter) => iter, - Err(_) => continue, - }; + if all { + rm_rf(&build.out); + } else { + rm_rf(&build.out.join("tmp")); + rm_rf(&build.out.join("dist")); - for entry in entries { - let entry = t!(entry); - if entry.file_name().to_str() == Some("llvm") { - continue + for host in &build.hosts { + let entries = match build.out.join(host).read_dir() { + Ok(iter) => iter, + Err(_) => continue, + }; + + for entry in entries { + let entry = t!(entry); + if entry.file_name().to_str() == Some("llvm") { + continue + } + let path = t!(entry.path().canonicalize()); + rm_rf(&path); } - let path = t!(entry.path().canonicalize()); - rm_rf(&path); } } } diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs index f43035fbfe..c8b2ed042c 100644 --- a/src/bootstrap/config.rs +++ b/src/bootstrap/config.rs @@ -10,12 +10,12 @@ //! Serialized configuration of a build. //! -//! This module implements parsing `config.mk` and `config.toml` configuration -//! files to tweak how the build runs. +//! This module implements parsing `config.toml` configuration files to tweak +//! how the build runs. use std::collections::HashMap; use std::env; -use std::fs::{self, File}; +use std::fs::File; use std::io::prelude::*; use std::path::PathBuf; use std::process; @@ -23,10 +23,11 @@ use std::cmp; use num_cpus; use toml; -use util::{exe, push_exe_path}; +use util::exe; use cache::{INTERNER, Interned}; use flags::Flags; pub use flags::Subcommand; +use toolstate::ToolStates; /// Global configuration for the entire build and/or bootstrap. /// @@ -111,6 +112,7 @@ pub struct Config { pub low_priority: bool, pub channel: String, pub quiet_tests: bool, + pub test_miri: bool, // Fallback musl-root for all targets pub musl_root: Option, pub prefix: Option, @@ -124,14 +126,14 @@ pub struct Config { pub nodejs: Option, pub gdb: Option, pub python: Option, - pub configure_args: Vec, pub openssl_static: bool, - + pub configure_args: Vec, // These are either the stage0 downloaded binaries or the locally installed ones. pub initial_cargo: PathBuf, pub initial_rustc: PathBuf, + pub toolstate: ToolStates, } /// Per-target configuration stored in the global configuration structure. @@ -190,6 +192,8 @@ struct Build { sanitizers: Option, profiler: Option, openssl_static: Option, + configure_args: Option>, + local_rebuild: Option, } /// TOML representation of various global install decisions. @@ -219,6 +223,7 @@ struct Llvm { targets: Option, experimental_targets: Option, link_jobs: Option, + link_shared: Option, } #[derive(Deserialize, Default, Clone)] @@ -265,6 +270,10 @@ struct Rust { debuginfo_tests: Option, codegen_tests: Option, ignore_git: Option, + debug: Option, + dist_src: Option, + quiet_tests: Option, + test_miri: Option, } /// TOML representation of how each build target is configured. 
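The new `toolstate` field added to Config above is populated from src/tools/toolstate.toml, and the hunk that follows aborts configuration with a clear message if that file cannot be read or parsed. A sketch of the same fail-fast load in Python (tomllib needs Python 3.11+; older interpreters would need a third-party TOML parser):

    import sys
    import tomllib  # Python 3.11+; assumption for this sketch only

    def parse_toolstate(path="src/tools/toolstate.toml"):
        """Load the tool-state table, exiting with status 2 on any error."""
        try:
            with open(path, "rb") as f:
                return tomllib.load(f)
        except (OSError, tomllib.TOMLDecodeError) as err:
            print("failed to parse TOML configuration 'toolstate.toml': {}".format(err))
            sys.exit(2)

    if __name__ == '__main__':
        print(parse_toolstate())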
@@ -300,6 +309,7 @@ impl Config { config.codegen_tests = true; config.ignore_git = false; config.rust_dist_src = true; + config.test_miri = false; config.on_fail = flags.on_fail; config.stage = flags.stage; @@ -326,6 +336,18 @@ impl Config { } }).unwrap_or_else(|| TomlConfig::default()); + let toolstate_toml_path = config.src.join("src/tools/toolstate.toml"); + let parse_toolstate = || -> Result<_, Box<::std::error::Error>> { + let mut f = File::open(toolstate_toml_path)?; + let mut contents = String::new(); + f.read_to_string(&mut contents)?; + Ok(toml::from_str(&contents)?) + }; + config.toolstate = parse_toolstate().unwrap_or_else(|err| { + println!("failed to parse TOML configuration 'toolstate.toml': {}", err); + process::exit(2); + }); + let build = toml.build.clone().unwrap_or(Build::default()); set(&mut config.build, build.build.clone().map(|x| INTERNER.intern_string(x))); set(&mut config.build, flags.build); @@ -374,6 +396,8 @@ impl Config { set(&mut config.sanitizers, build.sanitizers); set(&mut config.profiler, build.profiler); set(&mut config.openssl_static, build.openssl_static); + set(&mut config.configure_args, build.configure_args); + set(&mut config.local_rebuild, build.local_rebuild); config.verbose = cmp::max(config.verbose, flags.verbose); if let Some(ref install) = toml.install { @@ -385,6 +409,18 @@ impl Config { config.mandir = install.mandir.clone().map(PathBuf::from); } + // Store off these values as options because if they're not provided + // we'll infer default values for them later + let mut llvm_assertions = None; + let mut debuginfo_lines = None; + let mut debuginfo_only_std = None; + let mut debug = None; + let mut debug_jemalloc = None; + let mut debuginfo = None; + let mut debug_assertions = None; + let mut optimize = None; + let mut ignore_git = None; + if let Some(ref llvm) = toml.llvm { match llvm.ccache { Some(StringOrBool::String(ref s)) => { @@ -397,31 +433,36 @@ impl Config { } set(&mut config.ninja, llvm.ninja); set(&mut config.llvm_enabled, llvm.enabled); - set(&mut config.llvm_assertions, llvm.assertions); + llvm_assertions = llvm.assertions; set(&mut config.llvm_optimize, llvm.optimize); set(&mut config.llvm_release_debuginfo, llvm.release_debuginfo); set(&mut config.llvm_version_check, llvm.version_check); set(&mut config.llvm_static_stdcpp, llvm.static_libstdcpp); + set(&mut config.llvm_link_shared, llvm.link_shared); config.llvm_targets = llvm.targets.clone(); config.llvm_experimental_targets = llvm.experimental_targets.clone(); config.llvm_link_jobs = llvm.link_jobs; } if let Some(ref rust) = toml.rust { - set(&mut config.rust_debug_assertions, rust.debug_assertions); - set(&mut config.rust_debuginfo, rust.debuginfo); - set(&mut config.rust_debuginfo_lines, rust.debuginfo_lines); - set(&mut config.rust_debuginfo_only_std, rust.debuginfo_only_std); - set(&mut config.rust_optimize, rust.optimize); + debug = rust.debug; + debug_assertions = rust.debug_assertions; + debuginfo = rust.debuginfo; + debuginfo_lines = rust.debuginfo_lines; + debuginfo_only_std = rust.debuginfo_only_std; + optimize = rust.optimize; + ignore_git = rust.ignore_git; + debug_jemalloc = rust.debug_jemalloc; set(&mut config.rust_optimize_tests, rust.optimize_tests); set(&mut config.rust_debuginfo_tests, rust.debuginfo_tests); set(&mut config.codegen_tests, rust.codegen_tests); set(&mut config.rust_rpath, rust.rpath); - set(&mut config.debug_jemalloc, rust.debug_jemalloc); set(&mut config.use_jemalloc, rust.use_jemalloc); set(&mut config.backtrace, rust.backtrace); set(&mut 
config.channel, rust.channel.clone()); - set(&mut config.ignore_git, rust.ignore_git); + set(&mut config.rust_dist_src, rust.dist_src); + set(&mut config.quiet_tests, rust.quiet_tests); + set(&mut config.test_miri, rust.test_miri); config.rustc_default_linker = rust.default_linker.clone(); config.rustc_default_ar = rust.default_ar.clone(); config.musl_root = rust.musl_root.clone().map(PathBuf::from); @@ -476,224 +517,29 @@ impl Config { None => stage0_root.join(exe("cargo", &config.build)), }; - // compat with `./configure` while we're still using that - if fs::metadata("config.mk").is_ok() { - config.update_with_config_mk(); - } + // Now that we've reached the end of our configuration, infer the + // default values for all options that we haven't otherwise stored yet. - config - } + let default = config.channel == "nightly"; + config.llvm_assertions = llvm_assertions.unwrap_or(default); - /// "Temporary" routine to parse `config.mk` into this configuration. - /// - /// While we still have `./configure` this implements the ability to decode - /// that configuration into this. This isn't exactly a full-blown makefile - /// parser, but hey it gets the job done! - fn update_with_config_mk(&mut self) { - let mut config = String::new(); - File::open("config.mk").unwrap().read_to_string(&mut config).unwrap(); - for line in config.lines() { - let mut parts = line.splitn(2, ":=").map(|s| s.trim()); - let key = parts.next().unwrap(); - let value = match parts.next() { - Some(n) if n.starts_with('\"') => &n[1..n.len() - 1], - Some(n) => n, - None => continue - }; - - macro_rules! check { - ($(($name:expr, $val:expr),)*) => { - if value == "1" { - $( - if key == concat!("CFG_ENABLE_", $name) { - $val = true; - continue - } - if key == concat!("CFG_DISABLE_", $name) { - $val = false; - continue - } - )* - } - } - } + let default = match &config.channel[..] { + "stable" | "beta" | "nightly" => true, + _ => false, + }; + config.rust_debuginfo_lines = debuginfo_lines.unwrap_or(default); + config.rust_debuginfo_only_std = debuginfo_only_std.unwrap_or(default); - check! 
{ - ("MANAGE_SUBMODULES", self.submodules), - ("COMPILER_DOCS", self.compiler_docs), - ("DOCS", self.docs), - ("LLVM_ASSERTIONS", self.llvm_assertions), - ("LLVM_RELEASE_DEBUGINFO", self.llvm_release_debuginfo), - ("OPTIMIZE_LLVM", self.llvm_optimize), - ("LLVM_VERSION_CHECK", self.llvm_version_check), - ("LLVM_STATIC_STDCPP", self.llvm_static_stdcpp), - ("LLVM_LINK_SHARED", self.llvm_link_shared), - ("OPTIMIZE", self.rust_optimize), - ("DEBUG_ASSERTIONS", self.rust_debug_assertions), - ("DEBUGINFO", self.rust_debuginfo), - ("DEBUGINFO_LINES", self.rust_debuginfo_lines), - ("DEBUGINFO_ONLY_STD", self.rust_debuginfo_only_std), - ("JEMALLOC", self.use_jemalloc), - ("DEBUG_JEMALLOC", self.debug_jemalloc), - ("RPATH", self.rust_rpath), - ("OPTIMIZE_TESTS", self.rust_optimize_tests), - ("DEBUGINFO_TESTS", self.rust_debuginfo_tests), - ("QUIET_TESTS", self.quiet_tests), - ("LOCAL_REBUILD", self.local_rebuild), - ("NINJA", self.ninja), - ("CODEGEN_TESTS", self.codegen_tests), - ("LOCKED_DEPS", self.locked_deps), - ("VENDOR", self.vendor), - ("FULL_BOOTSTRAP", self.full_bootstrap), - ("EXTENDED", self.extended), - ("SANITIZERS", self.sanitizers), - ("PROFILER", self.profiler), - ("DIST_SRC", self.rust_dist_src), - ("CARGO_OPENSSL_STATIC", self.openssl_static), - } + let default = debug == Some(true); + config.debug_jemalloc = debug_jemalloc.unwrap_or(default); + config.rust_debuginfo = debuginfo.unwrap_or(default); + config.rust_debug_assertions = debug_assertions.unwrap_or(default); + config.rust_optimize = optimize.unwrap_or(!default); - match key { - "CFG_BUILD" if value.len() > 0 => self.build = INTERNER.intern_str(value), - "CFG_HOST" if value.len() > 0 => { - self.hosts.extend(value.split(" ").map(|s| INTERNER.intern_str(s))); + let default = config.channel == "dev"; + config.ignore_git = ignore_git.unwrap_or(default); - } - "CFG_TARGET" if value.len() > 0 => { - self.targets.extend(value.split(" ").map(|s| INTERNER.intern_str(s))); - } - "CFG_EXPERIMENTAL_TARGETS" if value.len() > 0 => { - self.llvm_experimental_targets = Some(value.to_string()); - } - "CFG_MUSL_ROOT" if value.len() > 0 => { - self.musl_root = Some(parse_configure_path(value)); - } - "CFG_MUSL_ROOT_X86_64" if value.len() > 0 => { - let target = INTERNER.intern_str("x86_64-unknown-linux-musl"); - let target = self.target_config.entry(target).or_insert(Target::default()); - target.musl_root = Some(parse_configure_path(value)); - } - "CFG_MUSL_ROOT_I686" if value.len() > 0 => { - let target = INTERNER.intern_str("i686-unknown-linux-musl"); - let target = self.target_config.entry(target).or_insert(Target::default()); - target.musl_root = Some(parse_configure_path(value)); - } - "CFG_MUSL_ROOT_ARM" if value.len() > 0 => { - let target = INTERNER.intern_str("arm-unknown-linux-musleabi"); - let target = self.target_config.entry(target).or_insert(Target::default()); - target.musl_root = Some(parse_configure_path(value)); - } - "CFG_MUSL_ROOT_ARMHF" if value.len() > 0 => { - let target = INTERNER.intern_str("arm-unknown-linux-musleabihf"); - let target = self.target_config.entry(target).or_insert(Target::default()); - target.musl_root = Some(parse_configure_path(value)); - } - "CFG_MUSL_ROOT_ARMV7" if value.len() > 0 => { - let target = INTERNER.intern_str("armv7-unknown-linux-musleabihf"); - let target = self.target_config.entry(target).or_insert(Target::default()); - target.musl_root = Some(parse_configure_path(value)); - } - "CFG_DEFAULT_AR" if value.len() > 0 => { - self.rustc_default_ar = Some(value.to_string()); - } - 
"CFG_DEFAULT_LINKER" if value.len() > 0 => { - self.rustc_default_linker = Some(value.to_string()); - } - "CFG_GDB" if value.len() > 0 => { - self.gdb = Some(parse_configure_path(value)); - } - "CFG_RELEASE_CHANNEL" => { - self.channel = value.to_string(); - } - "CFG_PREFIX" => { - self.prefix = Some(PathBuf::from(value)); - } - "CFG_SYSCONFDIR" => { - self.sysconfdir = Some(PathBuf::from(value)); - } - "CFG_DOCDIR" => { - self.docdir = Some(PathBuf::from(value)); - } - "CFG_BINDIR" => { - self.bindir = Some(PathBuf::from(value)); - } - "CFG_LIBDIR" => { - self.libdir = Some(PathBuf::from(value)); - } - "CFG_LIBDIR_RELATIVE" => { - self.libdir_relative = Some(PathBuf::from(value)); - } - "CFG_MANDIR" => { - self.mandir = Some(PathBuf::from(value)); - } - "CFG_LLVM_ROOT" if value.len() > 0 => { - let target = self.target_config.entry(self.build.clone()) - .or_insert(Target::default()); - let root = parse_configure_path(value); - target.llvm_config = Some(push_exe_path(root, &["bin", "llvm-config"])); - } - "CFG_JEMALLOC_ROOT" if value.len() > 0 => { - let target = self.target_config.entry(self.build.clone()) - .or_insert(Target::default()); - target.jemalloc = Some(parse_configure_path(value).join("libjemalloc_pic.a")); - } - "CFG_ARM_LINUX_ANDROIDEABI_NDK" if value.len() > 0 => { - let target = INTERNER.intern_str("arm-linux-androideabi"); - let target = self.target_config.entry(target).or_insert(Target::default()); - target.ndk = Some(parse_configure_path(value)); - } - "CFG_ARMV7_LINUX_ANDROIDEABI_NDK" if value.len() > 0 => { - let target = INTERNER.intern_str("armv7-linux-androideabi"); - let target = self.target_config.entry(target).or_insert(Target::default()); - target.ndk = Some(parse_configure_path(value)); - } - "CFG_I686_LINUX_ANDROID_NDK" if value.len() > 0 => { - let target = INTERNER.intern_str("i686-linux-android"); - let target = self.target_config.entry(target).or_insert(Target::default()); - target.ndk = Some(parse_configure_path(value)); - } - "CFG_AARCH64_LINUX_ANDROID_NDK" if value.len() > 0 => { - let target = INTERNER.intern_str("aarch64-linux-android"); - let target = self.target_config.entry(target).or_insert(Target::default()); - target.ndk = Some(parse_configure_path(value)); - } - "CFG_X86_64_LINUX_ANDROID_NDK" if value.len() > 0 => { - let target = INTERNER.intern_str("x86_64-linux-android"); - let target = self.target_config.entry(target).or_insert(Target::default()); - target.ndk = Some(parse_configure_path(value)); - } - "CFG_LOCAL_RUST_ROOT" if value.len() > 0 => { - let path = parse_configure_path(value); - self.initial_rustc = push_exe_path(path.clone(), &["bin", "rustc"]); - self.initial_cargo = push_exe_path(path, &["bin", "cargo"]); - } - "CFG_PYTHON" if value.len() > 0 => { - let path = parse_configure_path(value); - self.python = Some(path); - } - "CFG_ENABLE_CCACHE" if value == "1" => { - self.ccache = Some(exe("ccache", &self.build)); - } - "CFG_ENABLE_SCCACHE" if value == "1" => { - self.ccache = Some(exe("sccache", &self.build)); - } - "CFG_CONFIGURE_ARGS" if value.len() > 0 => { - self.configure_args = value.split_whitespace() - .map(|s| s.to_string()) - .collect(); - } - "CFG_QEMU_ARMHF_ROOTFS" if value.len() > 0 => { - let target = INTERNER.intern_str("arm-unknown-linux-gnueabihf"); - let target = self.target_config.entry(target).or_insert(Target::default()); - target.qemu_rootfs = Some(parse_configure_path(value)); - } - "CFG_QEMU_AARCH64_ROOTFS" if value.len() > 0 => { - let target = INTERNER.intern_str("aarch64-unknown-linux-gnu"); - let 
target = self.target_config.entry(target).or_insert(Target::default()); - target.qemu_rootfs = Some(parse_configure_path(value)); - } - _ => {} - } - } + config } pub fn verbose(&self) -> bool { @@ -705,30 +551,6 @@ impl Config { } } -#[cfg(not(windows))] -fn parse_configure_path(path: &str) -> PathBuf { - path.into() -} - -#[cfg(windows)] -fn parse_configure_path(path: &str) -> PathBuf { - // on windows, configure produces unix style paths e.g. /c/some/path but we - // only want real windows paths - - use std::process::Command; - use build_helper; - - // '/' is invalid in windows paths, so we can detect unix paths by the presence of it - if !path.contains('/') { - return path.into(); - } - - let win_path = build_helper::output(Command::new("cygpath").arg("-w").arg(path)); - let win_path = win_path.trim(); - - win_path.into() -} - fn set(field: &mut T, val: Option) { if let Some(v) = val { *field = v; diff --git a/src/bootstrap/configure.py b/src/bootstrap/configure.py new file mode 100755 index 0000000000..67337bf442 --- /dev/null +++ b/src/bootstrap/configure.py @@ -0,0 +1,417 @@ +#!/usr/bin/env python +# Copyright 2017 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. + +# ignore-tidy-linelength + +import sys +import os +rust_dir = os.path.dirname(os.path.abspath(__file__)) +rust_dir = os.path.dirname(rust_dir) +rust_dir = os.path.dirname(rust_dir) +sys.path.append(os.path.join(rust_dir, "src", "bootstrap")) +import bootstrap + +class Option: + def __init__(self, name, rustbuild, desc, value): + self.name = name + self.rustbuild = rustbuild + self.desc = desc + self.value = value + +options = [] + +def o(*args): + options.append(Option(*args, value=False)) + +def v(*args): + options.append(Option(*args, value=True)) + +o("debug", "rust.debug", "debug mode; disables optimization unless `--enable-optimize` given") +o("docs", "build.docs", "build standard library documentation") +o("compiler-docs", "build.compiler-docs", "build compiler documentation") +o("optimize-tests", "rust.optimize-tests", "build tests with optimizations") +o("test-miri", "rust.test-miri", "run miri's test suite") +o("debuginfo-tests", "rust.debuginfo-tests", "build tests with debugger metadata") +o("quiet-tests", "rust.quiet-tests", "enable quieter output when running tests") +o("ccache", "llvm.ccache", "invoke gcc/clang via ccache to reuse object files between builds") +o("sccache", None, "invoke gcc/clang via sccache to reuse object files between builds") +o("local-rust", None, "use an installed rustc rather than downloading a snapshot") +v("local-rust-root", None, "set prefix for local rust binary") +o("local-rebuild", "build.local-rebuild", "assume local-rust matches the current version, for rebuilds; implies local-rust, and is implied if local-rust already matches the current version") +o("llvm-static-stdcpp", "llvm.static-libstdcpp", "statically link to libstdc++ for LLVM") +o("llvm-link-shared", "llvm.link-shared", "prefer shared linking to LLVM (llvm-config --link-shared)") +o("rpath", "rust.rpath", "build rpaths into rustc itself") +o("llvm-version-check", "llvm.version-check", "check if the LLVM version is supported, build anyway") +o("codegen-tests", "rust.codegen-tests", "run the src/test/codegen tests") +o("option-checking", 
None, "complain about unrecognized options in this configure script") +o("ninja", "llvm.ninja", "build LLVM using the Ninja generator (for MSVC, requires building in the correct environment)") +o("locked-deps", "build.locked-deps", "force Cargo.lock to be up to date") +o("vendor", "build.vendor", "enable usage of vendored Rust crates") +o("sanitizers", "build.sanitizers", "build the sanitizer runtimes (asan, lsan, msan, tsan)") +o("dist-src", "rust.dist-src", "when building tarballs enables building a source tarball") +o("cargo-openssl-static", "build.openssl-static", "static openssl in cargo") +o("profiler", "build.profiler", "build the profiler runtime") + +# Optimization and debugging options. These may be overridden by the release +# channel, etc. +o("optimize", "rust.optimize", "build optimized rust code") +o("optimize-llvm", "llvm.optimize", "build optimized LLVM") +o("llvm-assertions", "llvm.assertions", "build LLVM with assertions") +o("debug-assertions", "rust.debug-assertions", "build with debugging assertions") +o("llvm-release-debuginfo", "llvm.release-debuginfo", "build LLVM with debugger metadata") +o("debuginfo", "rust.debuginfo", "build with debugger metadata") +o("debuginfo-lines", "rust.debuginfo-lines", "build with line number debugger metadata") +o("debuginfo-only-std", "rust.debuginfo-only-std", "build only libstd with debugging information") +o("debug-jemalloc", "rust.debug-jemalloc", "build jemalloc with --enable-debug --enable-fill") + +v("prefix", "install.prefix", "set installation prefix") +v("localstatedir", "install.localstatedir", "local state directory") +v("datadir", "install.datadir", "install data") +v("sysconfdir", "install.sysconfdir", "install system configuration files") +v("infodir", "install.infodir", "install additional info") +v("libdir", "install.libdir", "install libraries") +v("mandir", "install.mandir", "install man pages in PATH") +v("docdir", "install.docdir", "install documentation in PATH") +v("bindir", "install.bindir", "install binaries") + +v("llvm-root", None, "set LLVM root") +v("python", "build.python", "set path to python") +v("jemalloc-root", None, "set directory where libjemalloc_pic.a is located") +v("android-cross-path", "target.arm-linux-androideabi.android-ndk", + "Android NDK standalone path (deprecated)") +v("i686-linux-android-ndk", "target.i686-linux-android.android-ndk", + "i686-linux-android NDK standalone path") +v("arm-linux-androideabi-ndk", "target.arm-linux-androideabi.android-ndk", + "arm-linux-androideabi NDK standalone path") +v("armv7-linux-androideabi-ndk", "target.armv7-linux-androideabi.android-ndk", + "armv7-linux-androideabi NDK standalone path") +v("aarch64-linux-android-ndk", "target.aarch64-linux-android.android-ndk", + "aarch64-linux-android NDK standalone path") +v("x86_64-linux-android-ndk", "target.x86_64-linux-android.android-ndk", + "x86_64-linux-android NDK standalone path") +v("musl-root", "target.x86_64-unknown-linux-musl.musl-root", + "MUSL root installation directory (deprecated)") +v("musl-root-x86_64", "target.x86_64-unknown-linux-musl.musl-root", + "x86_64-unknown-linux-musl install directory") +v("musl-root-i686", "target.i686-unknown-linux-musl.musl-root", + "i686-unknown-linux-musl install directory") +v("musl-root-arm", "target.arm-unknown-linux-musleabi.musl-root", + "arm-unknown-linux-musleabi install directory") +v("musl-root-armhf", "target.arm-unknown-linux-musleabihf.musl-root", + "arm-unknown-linux-musleabihf install directory") +v("musl-root-armv7", 
"target.armv7-unknown-linux-musleabihf.musl-root", + "armv7-unknown-linux-musleabihf install directory") +v("musl-root-aarch64", "target.aarch64-unknown-linux-musl.musl-root", + "aarch64-unknown-linux-musl install directory") +v("qemu-armhf-rootfs", "target.arm-unknown-linux-gnueabihf.qemu-rootfs", + "rootfs in qemu testing, you probably don't want to use this") +v("qemu-aarch64-rootfs", "target.aarch64-unknown-linux-gnu.qemu-rootfs", + "rootfs in qemu testing, you probably don't want to use this") +v("experimental-targets", "llvm.experimental-targets", + "experimental LLVM targets to build") +v("release-channel", "rust.channel", "the name of the release channel to build") + +# Used on systems where "cc" and "ar" are unavailable +v("default-linker", "rust.default-linker", "the default linker") +v("default-ar", "rust.default-ar", "the default ar") + +# Many of these are saved below during the "writing configuration" step +# (others are conditionally saved). +o("manage-submodules", "build.submodules", "let the build manage the git submodules") +o("jemalloc", "rust.use-jemalloc", "build liballoc with jemalloc") +o("full-bootstrap", "build.full-bootstrap", "build three compilers instead of two") +o("extended", "build.extended", "build an extended rust tool set") + +v("build", "build.build", "GNUs ./configure syntax LLVM build triple") +v("host", None, "GNUs ./configure syntax LLVM host triples") +v("target", None, "GNUs ./configure syntax LLVM target triples") + +v("set", None, "set arbitrary key/value pairs in TOML configuration") + +def p(msg): + print("configure: " + msg) + +def err(msg): + print("configure: error: " + msg) + sys.exit(1) + +if '--help' in sys.argv or '-h' in sys.argv: + print('Usage: ./configure [options]') + print('') + print('Options') + for option in options: + if 'android' in option.name: + # no one needs to know about these obscure options + continue + if option.value: + print('\t{:30} {}'.format('--{}=VAL'.format(option.name), option.desc)) + else: + print('\t{:30} {}'.format('--enable-{}'.format(option.name), option.desc)) + print('') + print('This configure script is a thin configuration shim over the true') + print('configuration system, `config.toml`. You can explore the comments') + print('in `config.toml.example` next to this configure script to see') + print('more information about what each option is. 
Additionally you can') + print('pass `--set` as an argument to set arbitrary key/value pairs') + print('in the TOML configuration if desired') + print('') + print('Also note that all options which take `--enable` can similarly') + print('be passed with `--disable-foo` to forcibly disable the option') + sys.exit(0) + +# Parse all command line arguments into one of these three lists, handling +# boolean and value-based options separately +unknown_args = [] +need_value_args = [] +known_args = {} + +p("processing command line") +i = 1 +while i < len(sys.argv): + arg = sys.argv[i] + i += 1 + if not arg.startswith('--'): + unknown_args.append(arg) + continue + + found = False + for option in options: + value = None + if option.value: + keyval = arg[2:].split('=', 1) + key = keyval[0] + if option.name != key: + continue + + if len(keyval) > 1: + value = keyval[1] + elif i < len(sys.argv): + value = sys.argv[i] + i += 1 + else: + need_value_args.append(arg) + continue + else: + if arg[2:] == 'enable-' + option.name: + value = True + elif arg[2:] == 'disable-' + option.name: + value = False + else: + continue + + found = True + if not option.name in known_args: + known_args[option.name] = [] + known_args[option.name].append((option, value)) + break + + if not found: + unknown_args.append(arg) +p("") + +if 'option-checking' not in known_args or known_args['option-checking'][1]: + if len(unknown_args) > 0: + err("Option '" + unknown_args[0] + "' is not recognized") + if len(need_value_args) > 0: + err("Option '{0}' needs a value ({0}=val)".format(need_value_args[0])) + +# Parse all known arguments into a configuration structure that reflects the +# TOML we're going to write out +config = {} + +def build(): + if 'build' in known_args: + return known_args['build'][0][1] + return bootstrap.default_build_triple() + +def set(key, value): + s = "{:20} := {}".format(key, value) + if len(s) < 70: + p(s) + else: + p(s[:70] + " ...") + + arr = config + parts = key.split('.') + for i, part in enumerate(parts): + if i == len(parts) - 1: + arr[part] = value + else: + if not part in arr: + arr[part] = {} + arr = arr[part] + +for key in known_args: + # The `set` option is special and can be passed a bunch of times + if key == 'set': + for option, value in known_args[key]: + keyval = value.split('=', 1) + if len(keyval) == 1 or keyval[1] == "true": + value = True + elif keyval[1] == "false": + value = False + else: + value = keyval[1] + set(keyval[0], value) + continue + + # Ensure each option is only passed once + arr = known_args[key] + if len(arr) > 1: + err("Option '{}' provided more than once".format(key)) + option, value = arr[0] + + # If we have a clear avenue to set our value in rustbuild, do so + if option.rustbuild is not None: + set(option.rustbuild, value) + continue + + # Otherwise we're a "special" option and need some extra handling, so do + # that here. 
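Before the special cases introduced by that comment are applied (they continue below), every recognized --option is funneled through the script's set() helper, which turns a dotted key such as target.x86_64-unknown-linux-gnu.llvm-config into nested dictionaries mirroring TOML tables. A standalone sketch of that helper (renamed set_key here so it does not shadow the builtin, which the real script does):

    def set_key(config, key, value):
        """Assign `value` under a dotted key, creating nested dicts as needed."""
        parts = key.split('.')
        arr = config
        for part in parts[:-1]:
            arr = arr.setdefault(part, {})
        arr[parts[-1]] = value
        return config

    if __name__ == '__main__':
        cfg = {}
        set_key(cfg, 'rust.channel', 'nightly')
        set_key(cfg, 'target.x86_64-unknown-linux-gnu.llvm-config', '/usr/bin/llvm-config')
        print(cfg)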
+ if option.name == 'sccache': + set('llvm.ccache', 'sccache') + elif option.name == 'local-rust': + for path in os.environ['PATH'].split(os.pathsep): + if os.path.exists(path + '/rustc'): + set('build.rustc', path + '/rustc') + break + for path in os.environ['PATH'].split(os.pathsep): + if os.path.exists(path + '/cargo'): + set('build.cargo', path + '/cargo') + break + elif option.name == 'local-rust-root': + set('build.rustc', value + '/bin/rustc') + set('build.cargo', value + '/bin/cargo') + elif option.name == 'llvm-root': + set('target.{}.llvm-config'.format(build()), value + '/bin/llvm-config') + elif option.name == 'jemalloc-root': + set('target.{}.jemalloc'.format(build()), value + '/libjemalloc_pic.a') + elif option.name == 'host': + set('build.host', value.split(',')) + elif option.name == 'target': + set('build.target', value.split(',')) + elif option.name == 'option-checking': + # this was handled above + pass + else: + raise RuntimeError("unhandled option {}".format(option.name)) + +set('build.configure-args', sys.argv[1:]) + +# "Parse" the `config.toml.example` file into the various sections, and we'll +# use this as a template of a `config.toml` to write out which preserves +# all the various comments and whatnot. +# +# Note that the `target` section is handled separately as we'll duplicate it +# per configure dtarget, so there's a bit of special handling for that here. +sections = {} +cur_section = None +sections[None] = [] +section_order = [None] +targets = {} + +for line in open(rust_dir + '/config.toml.example').read().split("\n"): + if line.startswith('['): + cur_section = line[1:-1] + if cur_section.startswith('target'): + cur_section = 'target' + elif '.' in cur_section: + raise RuntimeError("don't know how to deal with section: {}".format(cur_section)) + sections[cur_section] = [line] + section_order.append(cur_section) + else: + sections[cur_section].append(line) + +# Fill out the `targets` array by giving all configured targets a copy of the +# `target` section we just loaded from the example config +configured_targets = [build()] +if 'build' in config: + if 'host' in config['build']: + configured_targets += config['build']['host'] + if 'target' in config['build']: + configured_targets += config['build']['target'] +if 'target' in config: + for target in config['target']: + configured_targets.append(target) +for target in configured_targets: + targets[target] = sections['target'][:] + targets[target][0] = targets[target][0].replace("x86_64-unknown-linux-gnu", target) + +# Here we walk through the constructed configuration we have from the parsed +# command line arguemnts. We then apply each piece of configuration by +# basically just doing a `sed` to change the various configuration line to what +# we've got configure. 
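The comment block above describes the template approach implemented next: config.toml.example is split into sections, and for each configured key only the matching commented default line is rewritten, so all surrounding comments survive into the generated config.toml. A simplified, self-contained sketch of that "sed"-style rewrite (it mirrors to_toml and configure_section below but is not the upstream code):

    def render_value(value):
        """Tiny TOML-ish rendering: bools, lists, everything else as a string."""
        if isinstance(value, bool):
            return 'true' if value else 'false'
        if isinstance(value, list):
            return '[' + ', '.join(render_value(v) for v in value) + ']'
        return "'" + str(value) + "'"

    def apply_to_template(lines, settings):
        """Uncomment and set the `#key = ...` line for each configured key."""
        out = list(lines)
        for key, value in settings.items():
            for i, line in enumerate(out):
                if line.startswith('#' + key + ' = '):
                    out[i] = '{} = {}'.format(key, render_value(value))
                    break
            else:
                raise RuntimeError('failed to find config line for {}'.format(key))
        return out

    if __name__ == '__main__':
        template = ['[rust]', '#channel = "dev"', '#debug = false']
        print('\n'.join(apply_to_template(template, {'channel': 'nightly', 'debug': True})))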
+def to_toml(value): + if isinstance(value, bool): + if value: + return "true" + else: + return "false" + elif isinstance(value, list): + return '[' + ', '.join(map(to_toml, value)) + ']' + elif isinstance(value, str): + return "'" + value + "'" + else: + raise 'no toml' + +def configure_section(lines, config): + for key in config: + value = config[key] + found = False + for i, line in enumerate(lines): + if not line.startswith('#' + key + ' = '): + continue + found = True + lines[i] = "{} = {}".format(key, to_toml(value)) + break + if not found: + raise RuntimeError("failed to find config line for {}".format(key)) + +for section_key in config: + section_config = config[section_key] + if not section_key in sections: + raise RuntimeError("config key {} not in sections".format(key)) + + if section_key == 'target': + for target in section_config: + configure_section(targets[target], section_config[target]) + else: + configure_section(sections[section_key], section_config) + +# Now that we've built up our `config.toml`, write it all out in the same +# order that we read it in. +p("") +p("writing `config.toml` in current directory") +with open('config.toml', 'w') as f: + for section in section_order: + if section == 'target': + for target in targets: + for line in targets[target]: + f.write(line + "\n") + else: + for line in sections[section]: + f.write(line + "\n") + +with open('Makefile', 'w') as f: + contents = os.path.join(rust_dir, 'src', 'bootstrap', 'mk', 'Makefile.in') + contents = open(contents).read() + contents = contents.replace("$(CFG_SRC_DIR)", rust_dir + '/') + contents = contents.replace("$(CFG_PYTHON)", sys.executable) + f.write(contents) + +# Finally, clean up with a bit of a help message +relpath = os.path.dirname(__file__) +if relpath == '': + relpath = '.' + +p("") +p("run `python {}/x.py --help`".format(relpath)) +p("") diff --git a/src/bootstrap/dist.rs b/src/bootstrap/dist.rs index 65a59d78d7..3d4aa0413d 100644 --- a/src/bootstrap/dist.rs +++ b/src/bootstrap/dist.rs @@ -20,7 +20,7 @@ use std::env; use std::fs::{self, File}; -use std::io::{Read, Write}; +use std::io::{self, Read, Write}; use std::path::{PathBuf, Path}; use std::process::{Command, Stdio}; @@ -365,6 +365,9 @@ impl Step for Rustc { // tiny morsel of metadata is used by rust-packaging let version = build.rust_version(); t!(t!(File::create(overlay.join("version"))).write_all(version.as_bytes())); + if let Some(sha) = build.rust_sha() { + t!(t!(File::create(overlay.join("git-commit-hash"))).write_all(sha.as_bytes())); + } // On MinGW we've got a few runtime DLL dependencies that we need to // include. 
The first argument to this script is where to put these DLLs @@ -429,7 +432,7 @@ impl Step for Rustc { // Man pages t!(fs::create_dir_all(image.join("share/man/man1"))); - cp_r(&build.src.join("man"), &image.join("share/man/man1")); + cp_r(&build.src.join("src/doc/man"), &image.join("share/man/man1")); // Debugger scripts builder.ensure(DebuggerScripts { @@ -724,6 +727,9 @@ impl Step for Src { let dst_src = dst.join("rust"); t!(fs::create_dir_all(&dst_src)); + let src_files = [ + "src/Cargo.lock", + ]; // This is the reduced set of paths which will become the rust-src component // (essentially libstd and all of its path dependencies) let std_src_dirs = [ @@ -754,11 +760,14 @@ impl Step for Src { "src/libprofiler_builtins", ]; let std_src_dirs_exclude = [ - "src/compiler-rt/test", + "src/libcompiler_builtins/compiler-rt/test", "src/jemalloc/test/unit", ]; copy_src_dirs(build, &std_src_dirs[..], &std_src_dirs_exclude[..], &dst_src); + for file in src_files.iter() { + copy(&build.src.join(file), &dst_src.join(file)); + } // Create source tarball in rust-installer format let mut cmd = rust_installer(builder); @@ -822,9 +831,9 @@ impl Step for PlainSourceTarball { "RELEASES.md", "configure", "x.py", + "config.toml.example", ]; let src_dirs = [ - "man", "src", ]; @@ -837,6 +846,9 @@ impl Step for PlainSourceTarball { // Create the version file write_file(&plain_dst_src.join("version"), build.rust_version().as_bytes()); + if let Some(sha) = build.rust_sha() { + write_file(&plain_dst_src.join("git-commit-hash"), sha.as_bytes()); + } // If we're building from git sources, we need to vendor a complete distribution. if build.rust_info.is_git() { @@ -887,7 +899,12 @@ impl Step for PlainSourceTarball { fn install(src: &Path, dstdir: &Path, perms: u32) { let dst = dstdir.join(src.file_name().unwrap()); t!(fs::create_dir_all(dstdir)); - t!(fs::copy(src, &dst)); + drop(fs::remove_file(&dst)); + { + let mut s = t!(fs::File::open(&src)); + let mut d = t!(fs::File::create(&dst)); + io::copy(&mut s, &mut d).expect("failed to copy"); + } chmod(&dst, perms); } @@ -1081,19 +1098,39 @@ impl Step for Rls { .arg("--output-dir").arg(&distdir(build)) .arg("--non-installed-overlay").arg(&overlay) .arg(format!("--package-name={}-{}", name, target)) - .arg("--legacy-manifest-dirs=rustlib,cargo"); - - if build.config.channel == "nightly" { - cmd.arg("--component-name=rls"); - } else { - cmd.arg("--component-name=rls-preview"); - } + .arg("--legacy-manifest-dirs=rustlib,cargo") + .arg("--component-name=rls-preview"); build.run(&mut cmd); distdir(build).join(format!("{}-{}.tar.gz", name, target)) } } + +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct DontDistWithMiriEnabled; + +impl Step for DontDistWithMiriEnabled { + type Output = PathBuf; + const DEFAULT: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + let build_miri = run.builder.build.config.test_miri; + run.default_condition(build_miri) + } + + fn make_run(run: RunConfig) { + run.builder.ensure(DontDistWithMiriEnabled); + } + + fn run(self, _: &Builder) -> PathBuf { + panic!("Do not distribute with miri enabled.\n\ + The distributed libraries would include all MIR (increasing binary size). 
+ The distributed MIR would include validation statements."); + } +} + + #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub struct Extended { stage: u32, @@ -1156,6 +1193,9 @@ impl Step for Extended { install(&build.src.join("LICENSE-MIT"), &overlay, 0o644); let version = build.rust_version(); t!(t!(File::create(overlay.join("version"))).write_all(version.as_bytes())); + if let Some(sha) = build.rust_sha() { + t!(t!(File::create(overlay.join("git-commit-hash"))).write_all(sha.as_bytes())); + } install(&etc.join("README.md"), &overlay, 0o644); // When rust-std package split from rustc, we needed to ensure that during @@ -1163,7 +1203,10 @@ impl Step for Extended { // the std files during uninstall. To do this ensure that rustc comes // before rust-std in the list below. let mut tarballs = vec![rustc_installer, cargo_installer, rls_installer, - analysis_installer, docs_installer, std_installer]; + analysis_installer, std_installer]; + if build.config.docs { + tarballs.push(docs_installer); + } if target.contains("pc-windows-gnu") { tarballs.push(mingw_installer.unwrap()); } @@ -1285,12 +1328,8 @@ impl Step for Extended { cp_r(&work.join(&format!("{}-{}", pkgname(build, "rust-std"), target)) .join(format!("rust-std-{}", target)), &exe.join("rust-std")); - let rls_path = if build.config.channel == "nightly" { - work.join(&format!("{}-{}", pkgname(build, "rls"), target)).join("rls") - } else { - work.join(&format!("{}-{}", pkgname(build, "rls"), target)).join("rls-preview") - }; - cp_r(&rls_path, &exe.join("rls")); + cp_r(&work.join(&format!("{}-{}", pkgname(build, "rls"), target)).join("rls-preview"), + &exe.join("rls")); cp_r(&work.join(&format!("{}-{}", pkgname(build, "rust-analysis"), target)) .join(format!("rust-analysis-{}", target)), &exe.join("rust-analysis")); diff --git a/src/bootstrap/doc.rs b/src/bootstrap/doc.rs index 86f5346bea..b9a52a6679 100644 --- a/src/bootstrap/doc.rs +++ b/src/bootstrap/doc.rs @@ -669,11 +669,6 @@ impl Step for ErrorIndex { let build = builder.build; let target = self.target; - builder.ensure(compile::Rustc { - compiler: builder.compiler(0, build.build), - target, - }); - println!("Documenting error index ({})", target); let out = build.doc_out(target); t!(fs::create_dir_all(&out)); diff --git a/src/bootstrap/flags.rs b/src/bootstrap/flags.rs index a84d43d3de..df378188b4 100644 --- a/src/bootstrap/flags.rs +++ b/src/bootstrap/flags.rs @@ -60,7 +60,9 @@ pub enum Subcommand { paths: Vec, test_args: Vec, }, - Clean, + Clean { + all: bool, + }, Dist { paths: Vec, }, @@ -136,7 +138,7 @@ To learn more about a subcommand, run `./x.py -h`"); None => { // No subcommand -- show the general usage and subcommand help println!("{}\n", subcommand_help); - process::exit(0); + process::exit(1); } }; @@ -147,6 +149,7 @@ To learn more about a subcommand, run `./x.py -h`"); opts.optmulti("", "test-args", "extra arguments", "ARGS"); }, "bench" => { opts.optmulti("", "test-args", "extra arguments", "ARGS"); }, + "clean" => { opts.optflag("", "all", "clean all build artifacts"); }, _ => { }, }; @@ -250,7 +253,7 @@ Arguments: } }); - // All subcommands can have an optional "Available paths" section + // All subcommands except `clean` can have an optional "Available paths" section if matches.opt_present("verbose") { let config = Config::parse(&["build".to_string()]); let mut build = Build::new(config); @@ -258,9 +261,10 @@ Arguments: let maybe_rules_help = Builder::get_help(&build, subcommand.as_str()); extra_help.push_str(maybe_rules_help.unwrap_or_default().as_str()); - } 
else { - extra_help.push_str(format!("Run `./x.py {} -h -v` to see a list of available paths.", - subcommand).as_str()); + } else if subcommand.as_str() != "clean" { + extra_help.push_str(format!( + "Run `./x.py {} -h -v` to see a list of available paths.", + subcommand).as_str()); } // User passed in -h/--help? @@ -290,10 +294,13 @@ Arguments: } "clean" => { if paths.len() > 0 { - println!("\nclean takes no arguments\n"); + println!("\nclean does not take a path argument\n"); usage(1, &opts, &subcommand_help, &extra_help); } - Subcommand::Clean + + Subcommand::Clean { + all: matches.opt_present("all"), + } } "dist" => { Subcommand::Dist { diff --git a/src/bootstrap/lib.rs b/src/bootstrap/lib.rs index 84a9e56b64..2d721f4557 100644 --- a/src/bootstrap/lib.rs +++ b/src/bootstrap/lib.rs @@ -126,7 +126,7 @@ extern crate lazy_static; extern crate serde_json; extern crate cmake; extern crate filetime; -extern crate gcc; +extern crate cc; extern crate getopts; extern crate num_cpus; extern crate toml; @@ -134,20 +134,21 @@ extern crate toml; #[cfg(unix)] extern crate libc; -use std::cell::Cell; +use std::cell::RefCell; use std::collections::{HashSet, HashMap}; use std::env; use std::fs::{self, File}; use std::io::Read; use std::path::{PathBuf, Path}; -use std::process::Command; +use std::process::{self, Command}; use std::slice; -use build_helper::{run_silent, run_suppressed, try_run_silent, try_run_suppressed, output, mtime}; +use build_helper::{run_silent, run_suppressed, try_run_silent, try_run_suppressed, output, mtime, + BuildExpectation}; use util::{exe, libdir, OutputFolder, CiEnv}; -mod cc; +mod cc_detect; mod channel; mod check; mod clean; @@ -164,6 +165,7 @@ pub mod util; mod builder; mod cache; mod tool; +mod toolstate; #[cfg(windows)] mod job; @@ -239,13 +241,13 @@ pub struct Build { // Runtime state filled in later on // target -> (cc, ar) - cc: HashMap, (gcc::Tool, Option)>, + cc: HashMap, (cc::Tool, Option)>, // host -> (cc, ar) - cxx: HashMap, gcc::Tool>, + cxx: HashMap, cc::Tool>, crates: HashMap, Crate>, is_sudo: bool, ci_env: CiEnv, - delayed_failures: Cell, + delayed_failures: RefCell>, } #[derive(Debug)] @@ -327,7 +329,7 @@ impl Build { lldb_python_dir: None, is_sudo, ci_env: CiEnv::current(), - delayed_failures: Cell::new(0), + delayed_failures: RefCell::new(Vec::new()), } } @@ -343,12 +345,12 @@ impl Build { job::setup(self); } - if let Subcommand::Clean = self.config.cmd { - return clean::clean(self); + if let Subcommand::Clean { all } = self.config.cmd { + return clean::clean(self, all); } self.verbose("finding compilers"); - cc::find(self); + cc_detect::find(self); self.verbose("running sanity check"); sanity::check(self); // If local-rust is the same major.minor as the current version, then force a local-rebuild @@ -366,6 +368,16 @@ impl Build { metadata::build(self); builder::Builder::run(&self); + + // Check for postponed failures from `test --no-fail-fast`. + let failures = self.delayed_failures.borrow(); + if failures.len() > 0 { + println!("\n{} command(s) did not execute successfully:\n", failures.len()); + for failure in failures.iter() { + println!(" - {}\n", failure); + } + process::exit(1); + } } /// Clear out `dir` if `input` is newer. 
@@ -542,24 +554,31 @@ impl Build { .join(libdir(&self.config.build)) } + /// Runs a command, printing out nice contextual information if its build + /// status is not the expected one + fn run_expecting(&self, cmd: &mut Command, expect: BuildExpectation) { + self.verbose(&format!("running: {:?}", cmd)); + run_silent(cmd, expect) + } + /// Runs a command, printing out nice contextual information if it fails. fn run(&self, cmd: &mut Command) { - self.verbose(&format!("running: {:?}", cmd)); - run_silent(cmd) + self.run_expecting(cmd, BuildExpectation::None) } /// Runs a command, printing out nice contextual information if it fails. fn run_quiet(&self, cmd: &mut Command) { self.verbose(&format!("running: {:?}", cmd)); - run_suppressed(cmd) + run_suppressed(cmd, BuildExpectation::None) } - /// Runs a command, printing out nice contextual information if it fails. - /// Exits if the command failed to execute at all, otherwise returns its - /// `status.success()`. - fn try_run(&self, cmd: &mut Command) -> bool { + /// Runs a command, printing out nice contextual information if its build + /// status is not the expected one. + /// Exits if the command failed to execute at all, otherwise returns whether + /// the expectation was met + fn try_run(&self, cmd: &mut Command, expect: BuildExpectation) -> bool { self.verbose(&format!("running: {:?}", cmd)); - try_run_silent(cmd) + try_run_silent(cmd, expect) } /// Runs a command, printing out nice contextual information if it fails. @@ -567,7 +586,7 @@ impl Build { /// `status.success()`. fn try_run_quiet(&self, cmd: &mut Command) -> bool { self.verbose(&format!("running: {:?}", cmd)); - try_run_suppressed(cmd) + try_run_suppressed(cmd, BuildExpectation::None) } pub fn is_verbose(&self) -> bool { @@ -600,7 +619,7 @@ impl Build { /// specified. fn cflags(&self, target: Interned) -> Vec { // Filter out -O and /O (the optimization flags) that we picked up from - // gcc-rs because the build scripts will determine that for themselves. + // cc-rs because the build scripts will determine that for themselves. let mut base = self.cc[&target].0.args().iter() .map(|s| s.to_string_lossy().into_owned()) .filter(|s| !s.starts_with("-O") && !s.starts_with("/O")) @@ -717,7 +736,7 @@ impl Build { fn force_use_stage1(&self, compiler: Compiler, target: Interned) -> bool { !self.config.full_bootstrap && compiler.stage >= 2 && - self.hosts.iter().any(|h| *h == target) + (self.hosts.iter().any(|h| *h == target) || target == self.build) } /// Returns the directory that OpenSSL artifacts are compiled into if @@ -797,6 +816,11 @@ impl Build { self.rust_info.version(self, channel::CFG_RELEASE_NUM) } + /// Return the full commit hash + fn rust_sha(&self) -> Option<&str> { + self.rust_info.sha() + } + /// Returns the `a.b.c` version that the given package is at. fn release_num(&self, package: &str) -> String { let mut toml = String::new(); diff --git a/src/bootstrap/mk/Makefile.in b/src/bootstrap/mk/Makefile.in index 9410927824..004f0c3102 100644 --- a/src/bootstrap/mk/Makefile.in +++ b/src/bootstrap/mk/Makefile.in @@ -8,8 +8,6 @@ # option. This file may not be copied, modified, or distributed # except according to those terms. 
-include config.mk - ifdef VERBOSE Q := BOOTSTRAP_ARGS := -v @@ -57,6 +55,8 @@ check-aux: src/tools/cargotest \ src/tools/cargo \ src/tools/rls \ + src/tools/rustfmt \ + src/tools/miri \ src/test/pretty \ src/test/run-pass/pretty \ src/test/run-fail/pretty \ diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs index 8429b64513..c4e8063031 100644 --- a/src/bootstrap/native.rs +++ b/src/bootstrap/native.rs @@ -27,7 +27,7 @@ use std::process::Command; use build_helper::output; use cmake; -use gcc; +use cc; use Build; use util; @@ -289,7 +289,7 @@ impl Step for TestHelpers { let _folder = build.fold_output(|| "build_test_helpers"); println!("Building test helpers"); t!(fs::create_dir_all(&dst)); - let mut cfg = gcc::Config::new(); + let mut cfg = cc::Build::new(); // We may have found various cross-compilers a little differently due to our // extra configuration, so inform gcc of these compilers. Note, though, that @@ -306,6 +306,7 @@ impl Step for TestHelpers { .target(&target) .host(&build.build) .opt_level(0) + .warnings(false) .debug(false) .file(build.src.join("src/rt/rust_test_helpers.c")) .compile("librust_test_helpers.a"); @@ -366,7 +367,7 @@ impl Step for Openssl { if !ok { panic!("failed to download openssl source") } - let mut shasum = if target.contains("apple") { + let mut shasum = if target.contains("apple") || build.build.contains("netbsd") { let mut cmd = Command::new("shasum"); cmd.arg("-a").arg("256"); cmd @@ -386,9 +387,10 @@ impl Step for Openssl { let dst = build.openssl_install_dir(target).unwrap(); drop(fs::remove_dir_all(&obj)); drop(fs::remove_dir_all(&dst)); - build.run(Command::new("tar").arg("xf").arg(&tarball).current_dir(&out)); + build.run(Command::new("tar").arg("zxf").arg(&tarball).current_dir(&out)); - let mut configure = Command::new(obj.join("Configure")); + let mut configure = Command::new("perl"); + configure.arg(obj.join("Configure")); configure.arg(format!("--prefix={}", dst.display())); configure.arg("no-dso"); configure.arg("no-ssl2"); @@ -397,6 +399,7 @@ impl Step for Openssl { let os = match &*target { "aarch64-linux-android" => "linux-aarch64", "aarch64-unknown-linux-gnu" => "linux-aarch64", + "aarch64-unknown-linux-musl" => "linux-aarch64", "arm-linux-androideabi" => "android", "arm-unknown-linux-gnueabi" => "linux-armv4", "arm-unknown-linux-gnueabihf" => "linux-armv4", @@ -407,6 +410,7 @@ impl Step for Openssl { "i686-unknown-freebsd" => "BSD-x86-elf", "i686-unknown-linux-gnu" => "linux-elf", "i686-unknown-linux-musl" => "linux-elf", + "i686-unknown-netbsd" => "BSD-x86-elf", "mips-unknown-linux-gnu" => "linux-mips32", "mips64-unknown-linux-gnuabi64" => "linux64-mips64", "mips64el-unknown-linux-gnuabi64" => "linux64-mips64", @@ -415,6 +419,7 @@ impl Step for Openssl { "powerpc64-unknown-linux-gnu" => "linux-ppc64", "powerpc64le-unknown-linux-gnu" => "linux-ppc64le", "s390x-unknown-linux-gnu" => "linux64-s390x", + "sparc64-unknown-netbsd" => "BSD-sparc64", "x86_64-apple-darwin" => "darwin64-x86_64-cc", "x86_64-linux-android" => "linux-x86_64", "x86_64-unknown-freebsd" => "BSD-x86_64", @@ -434,6 +439,15 @@ impl Step for Openssl { configure.arg("-mandroid"); configure.arg("-fomit-frame-pointer"); } + if target == "sparc64-unknown-netbsd" { + // Need -m64 to get assembly generated correctly for sparc64. + configure.arg("-m64"); + if build.build.contains("netbsd") { + // Disable sparc64 asm on NetBSD builders, it uses + // m4(1)'s -B flag, which NetBSD m4 does not support. 
+ configure.arg("no-asm"); + } + } // Make PIE binaries // Non-PIE linker support was removed in Lollipop // https://source.android.com/security/enhancements/enhancements50 diff --git a/src/bootstrap/sanity.rs b/src/bootstrap/sanity.rs index 54208d8bb5..8b23be69a8 100644 --- a/src/bootstrap/sanity.rs +++ b/src/bootstrap/sanity.rs @@ -221,8 +221,9 @@ $ pacman -R cmake && pacman -S mingw-w64-x86_64-cmake let run = |cmd: &mut Command| { cmd.output().map(|output| { String::from_utf8_lossy(&output.stdout) - .lines().next().unwrap() - .to_string() + .lines().next().unwrap_or_else(|| { + panic!("{:?} failed {:?}", cmd, output) + }).to_string() }) }; build.lldb_version = run(Command::new("lldb").arg("--version")).ok(); diff --git a/src/bootstrap/tool.rs b/src/bootstrap/tool.rs index e759f1a3e6..a05e58e6a2 100644 --- a/src/bootstrap/tool.rs +++ b/src/bootstrap/tool.rs @@ -21,6 +21,8 @@ use compile::{self, libtest_stamp, libstd_stamp, librustc_stamp}; use native; use channel::GitInfo; use cache::Interned; +use toolstate::ToolState; +use build_helper::BuildExpectation; #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub struct CleanTools { @@ -62,7 +64,9 @@ struct ToolBuild { compiler: Compiler, target: Interned, tool: &'static str, + path: &'static str, mode: Mode, + expectation: BuildExpectation, } impl Step for ToolBuild { @@ -81,6 +85,8 @@ impl Step for ToolBuild { let compiler = self.compiler; let target = self.target; let tool = self.tool; + let path = self.path; + let expectation = self.expectation; match self.mode { Mode::Libstd => builder.ensure(compile::Std { compiler, target }), @@ -92,21 +98,22 @@ impl Step for ToolBuild { let _folder = build.fold_output(|| format!("stage{}-{}", compiler.stage, tool)); println!("Building stage{} tool {} ({})", compiler.stage, tool, target); - let mut cargo = prepare_tool_cargo(builder, compiler, target, tool); - build.run(&mut cargo); + let mut cargo = prepare_tool_cargo(builder, compiler, target, "build", path); + build.run_expecting(&mut cargo, expectation); build.cargo_out(compiler, Mode::Tool, target).join(exe(tool, &compiler.host)) } } -fn prepare_tool_cargo( +pub fn prepare_tool_cargo( builder: &Builder, compiler: Compiler, target: Interned, - tool: &'static str, + command: &'static str, + path: &'static str, ) -> Command { let build = builder.build; - let mut cargo = builder.cargo(compiler, Mode::Tool, target, "build"); - let dir = build.src.join("src/tools").join(tool); + let mut cargo = builder.cargo(compiler, Mode::Tool, target, command); + let dir = build.src.join(path); cargo.arg("--manifest-path").arg(dir.join("Cargo.toml")); // We don't want to build tools dynamically as they'll be running across @@ -119,7 +126,12 @@ fn prepare_tool_cargo( cargo.env("LIBZ_SYS_STATIC", "1"); } + // if tools are using lzma we want to force the build script to build its + // own copy + cargo.env("LZMA_API_STATIC", "1"); + cargo.env("CFG_RELEASE_CHANNEL", &build.config.channel); + cargo.env("CFG_VERSION", build.rust_version()); let info = GitInfo::new(&build.config, &dir); if let Some(sha) = info.sha() { @@ -145,15 +157,27 @@ macro_rules! 
tool { impl<'a> Builder<'a> { pub fn tool_exe(&self, tool: Tool) -> PathBuf { + let stage = self.tool_default_stage(tool); match tool { $(Tool::$name => self.ensure($name { - compiler: self.compiler(0, self.build.build), + compiler: self.compiler(stage, self.build.build), target: self.build.build, }), )+ } } + + pub fn tool_default_stage(&self, tool: Tool) -> u32 { + // Compile the error-index in the top stage as it depends on + // rustdoc, so we want to avoid recompiling rustdoc twice if we + // can. Otherwise compile everything else in stage0 as there's + // no need to rebootstrap everything + match tool { + Tool::ErrorIndex => self.top_stage, + _ => 0, + } + } } $( @@ -183,6 +207,8 @@ macro_rules! tool { target: self.target, tool: $tool_name, mode: $mode, + path: $path, + expectation: BuildExpectation::None, }) } } @@ -200,7 +226,7 @@ tool!( Compiletest, "src/tools/compiletest", "compiletest", Mode::Libtest; BuildManifest, "src/tools/build-manifest", "build-manifest", Mode::Libstd; RemoteTestClient, "src/tools/remote-test-client", "remote-test-client", Mode::Libstd; - RustInstaller, "src/tools/rust-installer", "rust-installer", Mode::Libstd; + RustInstaller, "src/tools/rust-installer", "fabricate", Mode::Libstd; ); #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] @@ -229,6 +255,8 @@ impl Step for RemoteTestServer { target: self.target, tool: "remote-test-server", mode: Mode::Libstd, + path: "src/tools/remote-test-server", + expectation: BuildExpectation::None, }) } } @@ -275,7 +303,16 @@ impl Step for Rustdoc { let _folder = build.fold_output(|| format!("stage{}-rustdoc", target_compiler.stage)); println!("Building rustdoc for stage{} ({})", target_compiler.stage, target_compiler.host); - let mut cargo = prepare_tool_cargo(builder, build_compiler, target, "rustdoc"); + let mut cargo = prepare_tool_cargo(builder, + build_compiler, + target, + "build", + "src/tools/rustdoc"); + + // Most tools don't get debuginfo, but rustdoc should. + cargo.env("RUSTC_DEBUGINFO", builder.config.rust_debuginfo.to_string()) + .env("RUSTC_DEBUGINFO_LINES", builder.config.rust_debuginfo_lines.to_string()); + build.run(&mut cargo); // Cargo adds a number of paths to the dylib search path on windows, which results in // the wrong rustdoc being executed. To avoid the conflicting rustdocs, we name the "tool" @@ -336,6 +373,48 @@ impl Step for Cargo { target: self.target, tool: "cargo", mode: Mode::Librustc, + path: "src/tools/cargo", + expectation: BuildExpectation::None, + }) + } +} + +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct Clippy { + pub compiler: Compiler, + pub target: Interned, +} + +impl Step for Clippy { + type Output = PathBuf; + const DEFAULT: bool = false; + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("src/tools/clippy") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Clippy { + compiler: run.builder.compiler(run.builder.top_stage, run.builder.build.build), + target: run.target, + }); + } + + fn run(self, builder: &Builder) -> PathBuf { + // Clippy depends on procedural macros (serde), which requires a full host + // compiler to be available, so we need to depend on that. 
+ builder.ensure(compile::Rustc { + compiler: self.compiler, + target: builder.build.build, + }); + builder.ensure(ToolBuild { + compiler: self.compiler, + target: self.target, + tool: "clippy", + mode: Mode::Librustc, + path: "src/tools/clippy", + expectation: builder.build.config.toolstate.clippy.passes(ToolState::Compiling), }) } } @@ -378,6 +457,79 @@ impl Step for Rls { target: self.target, tool: "rls", mode: Mode::Librustc, + path: "src/tools/rls", + expectation: builder.build.config.toolstate.rls.passes(ToolState::Compiling), + }) + } +} + +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct Rustfmt { + pub compiler: Compiler, + pub target: Interned, +} + +impl Step for Rustfmt { + type Output = PathBuf; + const DEFAULT: bool = true; + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + let builder = run.builder; + run.path("src/tools/rustfmt").default_condition(builder.build.config.extended) + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Rustfmt { + compiler: run.builder.compiler(run.builder.top_stage, run.builder.build.build), + target: run.target, + }); + } + + fn run(self, builder: &Builder) -> PathBuf { + builder.ensure(ToolBuild { + compiler: self.compiler, + target: self.target, + tool: "rustfmt", + mode: Mode::Librustc, + path: "src/tools/rustfmt", + expectation: builder.build.config.toolstate.rustfmt.passes(ToolState::Compiling), + }) + } +} + + +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct Miri { + pub compiler: Compiler, + pub target: Interned, +} + +impl Step for Miri { + type Output = PathBuf; + const DEFAULT: bool = true; + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + let build_miri = run.builder.build.config.test_miri; + run.path("src/tools/miri").default_condition(build_miri) + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Miri { + compiler: run.builder.compiler(run.builder.top_stage, run.builder.build.build), + target: run.target, + }); + } + + fn run(self, builder: &Builder) -> PathBuf { + builder.ensure(ToolBuild { + compiler: self.compiler, + target: self.target, + tool: "miri", + mode: Mode::Librustc, + path: "src/tools/miri", + expectation: builder.build.config.toolstate.miri.passes(ToolState::Compiling), }) } } @@ -387,7 +539,7 @@ impl<'a> Builder<'a> { /// `host`. pub fn tool_cmd(&self, tool: Tool) -> Command { let mut cmd = Command::new(self.tool_exe(tool)); - let compiler = self.compiler(0, self.build.build); + let compiler = self.compiler(self.tool_default_stage(tool), self.build.build); self.prepare_tool_cmd(compiler, &mut cmd); cmd } diff --git a/src/bootstrap/toolstate.rs b/src/bootstrap/toolstate.rs new file mode 100644 index 0000000000..8a113f6b4d --- /dev/null +++ b/src/bootstrap/toolstate.rs @@ -0,0 +1,51 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use build_helper::BuildExpectation; + +#[derive(Copy, Clone, Debug, Deserialize, PartialEq, Eq)] +/// Whether a tool can be compiled, tested or neither +pub enum ToolState { + /// The tool compiles successfully, but the test suite fails + Compiling = 1, + /// The tool compiles successfully and its test suite passes + Testing = 2, + /// The tool can't even be compiled + Broken = 0, +} + +impl ToolState { + /// If a tool with the current toolstate should be working on + /// the given toolstate + pub fn passes(self, other: ToolState) -> BuildExpectation { + if self as usize >= other as usize { + BuildExpectation::Succeeding + } else { + BuildExpectation::Failing + } + } +} + +impl Default for ToolState { + fn default() -> Self { + // err on the safe side + ToolState::Broken + } +} + +#[derive(Copy, Clone, Debug, Deserialize, Default)] +/// Used to express which tools should (not) be compiled or tested. +/// This is created from `toolstate.toml`. +pub struct ToolStates { + pub miri: ToolState, + pub clippy: ToolState, + pub rls: ToolState, + pub rustfmt: ToolState, +} diff --git a/src/bootstrap/util.rs b/src/bootstrap/util.rs index 092fb04637..a521dd0945 100644 --- a/src/bootstrap/util.rs +++ b/src/bootstrap/util.rs @@ -34,8 +34,12 @@ pub fn staticlib(name: &str, target: &str) -> String { /// Copies a file from `src` to `dst` pub fn copy(src: &Path, dst: &Path) { let _ = fs::remove_file(&dst); - let res = fs::copy(src, dst); - if let Err(e) = res { + // Attempt to "easy copy" by creating a hard link (symlinks don't work on + // windows), but if that fails just fall back to a slow `copy` operation. + if let Ok(()) = fs::hard_link(src, dst) { + return + } + if let Err(e) = fs::copy(src, dst) { panic!("failed to copy `{}` to `{}`: {}", src.display(), dst.display(), e) } @@ -44,7 +48,6 @@ pub fn copy(src: &Path, dst: &Path) { let atime = FileTime::from_last_access_time(&metadata); let mtime = FileTime::from_last_modification_time(&metadata); t!(filetime::set_file_times(dst, atime, mtime)); - } /// Copies the `src` directory recursively to `dst`. Both are assumed to exist @@ -279,7 +282,7 @@ pub fn symlink_dir(src: &Path, dest: &Path) -> io::Result<()> { ptr::null_mut()); let mut data = [0u8; MAXIMUM_REPARSE_DATA_BUFFER_SIZE]; - let mut db = data.as_mut_ptr() + let db = data.as_mut_ptr() as *mut REPARSE_MOUNTPOINT_DATA_BUFFER; let buf = &mut (*db).ReparseTarget as *mut _; let mut i = 0; diff --git a/src/build_helper/lib.rs b/src/build_helper/lib.rs index 8b4c7f2ac3..e81dab70b4 100644 --- a/src/build_helper/lib.rs +++ b/src/build_helper/lib.rs @@ -35,55 +35,97 @@ macro_rules! 
t { }) } -pub fn run(cmd: &mut Command) { +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum BuildExpectation { + Succeeding, + Failing, + None, +} + +pub fn run(cmd: &mut Command, expect: BuildExpectation) { println!("running: {:?}", cmd); - run_silent(cmd); + run_silent(cmd, expect); } -pub fn run_silent(cmd: &mut Command) { - if !try_run_silent(cmd) { +pub fn run_silent(cmd: &mut Command, expect: BuildExpectation) { + if !try_run_silent(cmd, expect) { std::process::exit(1); } } -pub fn try_run_silent(cmd: &mut Command) -> bool { +pub fn try_run_silent(cmd: &mut Command, expect: BuildExpectation) -> bool { let status = match cmd.status() { Ok(status) => status, Err(e) => fail(&format!("failed to execute command: {:?}\nerror: {}", cmd, e)), }; - if !status.success() { - println!("\n\ncommand did not execute successfully: {:?}\n\ - expected success, got: {}\n\n", - cmd, - status); + process_status( + cmd, + status.success(), + expect, + || println!("\n\ncommand did not execute successfully: {:?}\n\ + expected success, got: {}\n\n", + cmd, + status)) +} + +fn process_status( + cmd: &Command, + success: bool, + expect: BuildExpectation, + f: F, +) -> bool { + use BuildExpectation::*; + match (expect, success) { + (None, false) => { f(); false }, + // Non-tool build succeeds, everything is good + (None, true) => true, + // Tool expected to work and is working + (Succeeding, true) => true, + // Tool expected to fail and is failing + (Failing, false) => { + println!("This failure is expected (see `src/tools/toolstate.toml`)"); + true + }, + // Tool expected to work, but is failing + (Succeeding, false) => { + f(); + println!("You can disable the tool in `src/tools/toolstate.toml`"); + false + }, + // Tool expected to fail, but is working + (Failing, true) => { + println!("Expected `{:?}` to fail, but it succeeded.\n\ + Please adjust `src/tools/toolstate.toml` accordingly", cmd); + false + } } - status.success() } -pub fn run_suppressed(cmd: &mut Command) { - if !try_run_suppressed(cmd) { +pub fn run_suppressed(cmd: &mut Command, expect: BuildExpectation) { + if !try_run_suppressed(cmd, expect) { std::process::exit(1); } } -pub fn try_run_suppressed(cmd: &mut Command) -> bool { +pub fn try_run_suppressed(cmd: &mut Command, expect: BuildExpectation) -> bool { let output = match cmd.output() { Ok(status) => status, Err(e) => fail(&format!("failed to execute command: {:?}\nerror: {}", cmd, e)), }; - if !output.status.success() { - println!("\n\ncommand did not execute successfully: {:?}\n\ + process_status( + cmd, + output.status.success(), + expect, + || println!("\n\ncommand did not execute successfully: {:?}\n\ expected success, got: {}\n\n\ stdout ----\n{}\n\ stderr ----\n{}\n\n", cmd, output.status, String::from_utf8_lossy(&output.stdout), - String::from_utf8_lossy(&output.stderr)); - } - output.status.success() + String::from_utf8_lossy(&output.stderr))) } pub fn gnu_target(target: &str) -> String { diff --git a/src/ci/docker/README.md b/src/ci/docker/README.md index 627b5062df..adce6a00d4 100644 --- a/src/ci/docker/README.md +++ b/src/ci/docker/README.md @@ -20,7 +20,7 @@ Images will output artifacts in an `obj` dir at the root of a repository. 
- Each directory, excluding `scripts` and `disabled`, corresponds to a docker image - `scripts` contains files shared by docker images -- `disabled` contains images that are not build travis +- `disabled` contains images that are not built on travis ## Cross toolchains diff --git a/src/ci/docker/arm-android/Dockerfile b/src/ci/docker/arm-android/Dockerfile index 49d07d28d3..f2773a720c 100644 --- a/src/ci/docker/arm-android/Dockerfile +++ b/src/ci/docker/arm-android/Dockerfile @@ -5,21 +5,27 @@ RUN sh /scripts/android-base-apt-get.sh COPY scripts/android-ndk.sh /scripts/ RUN . /scripts/android-ndk.sh && \ - download_and_make_toolchain android-ndk-r13b-linux-x86_64.zip arm 9 + download_and_make_toolchain android-ndk-r15c-linux-x86_64.zip arm 14 +# Note: +# Do not upgrade to `openjdk-9-jre-headless`, as it will cause certificate error +# when installing the Android SDK (see PR #45193). This is unfortunate, but +# every search result suggested either disabling HTTPS or replacing JDK 9 by +# JDK 8 as the solution (e.g. https://stackoverflow.com/q/41421340). :| RUN dpkg --add-architecture i386 && \ apt-get update && \ apt-get install -y --no-install-recommends \ libgl1-mesa-glx \ libpulse0 \ libstdc++6:i386 \ - openjdk-9-jre-headless \ + openjdk-8-jre-headless \ tzdata COPY scripts/android-sdk.sh /scripts/ RUN . /scripts/android-sdk.sh && \ - download_and_create_avd tools_r25.2.5-linux.zip armeabi-v7a 18 + download_and_create_avd 4333796 armeabi-v7a 18 +ENV PATH=$PATH:/android/sdk/emulator ENV PATH=$PATH:/android/sdk/tools ENV PATH=$PATH:/android/sdk/platform-tools @@ -27,7 +33,7 @@ ENV TARGETS=arm-linux-androideabi ENV RUST_CONFIGURE_ARGS \ --target=$TARGETS \ - --arm-linux-androideabi-ndk=/android/ndk/arm-9 + --arm-linux-androideabi-ndk=/android/ndk/arm-14 ENV SCRIPT python2.7 ../x.py test --target $TARGETS diff --git a/src/ci/docker/cross/Dockerfile b/src/ci/docker/cross/Dockerfile index a83bbe9c60..05745709a0 100644 --- a/src/ci/docker/cross/Dockerfile +++ b/src/ci/docker/cross/Dockerfile @@ -14,6 +14,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ zlib1g-dev \ g++-arm-linux-gnueabi \ g++-arm-linux-gnueabihf \ + g++-aarch64-linux-gnu \ gcc-sparc64-linux-gnu \ libc6-dev-sparc64-cross \ bzip2 \ @@ -46,6 +47,7 @@ ENV TARGETS=$TARGETS,mipsel-unknown-linux-musl ENV TARGETS=$TARGETS,arm-unknown-linux-musleabi ENV TARGETS=$TARGETS,arm-unknown-linux-musleabihf ENV TARGETS=$TARGETS,armv7-unknown-linux-musleabihf +ENV TARGETS=$TARGETS,aarch64-unknown-linux-musl ENV TARGETS=$TARGETS,sparc64-unknown-linux-gnu ENV TARGETS=$TARGETS,x86_64-unknown-redox @@ -62,7 +64,8 @@ ENV RUST_CONFIGURE_ARGS \ --target=$TARGETS \ --musl-root-arm=/usr/local/arm-linux-musleabi \ --musl-root-armhf=/usr/local/arm-linux-musleabihf \ - --musl-root-armv7=/usr/local/armv7-linux-musleabihf + --musl-root-armv7=/usr/local/armv7-linux-musleabihf \ + --musl-root-aarch64=/usr/local/aarch64-linux-musl ENV SCRIPT python2.7 ../x.py dist --target $TARGETS # sccache diff --git a/src/ci/docker/cross/build-arm-musl.sh b/src/ci/docker/cross/build-arm-musl.sh index 938e69834e..780099e2ec 100755 --- a/src/ci/docker/cross/build-arm-musl.sh +++ b/src/ci/docker/cross/build-arm-musl.sh @@ -65,11 +65,24 @@ CFLAGS="-march=armv7-a" \ hide_output make -j$(nproc) hide_output make install cd .. 
+rm -rf musl-$MUSL + +tar xf musl-$MUSL.tar.gz +cd musl-$MUSL +CC=aarch64-linux-gnu-gcc \ +CFLAGS="" \ + hide_output ./configure \ + --prefix=/usr/local/aarch64-linux-musl \ + --enable-wrapper=gcc +hide_output make -j$(nproc) +hide_output make install +cd .. rm -rf musl-$MUSL* ln -nsf ../arm-linux-musleabi/bin/musl-gcc /usr/local/bin/arm-linux-musleabi-gcc ln -nsf ../arm-linux-musleabihf/bin/musl-gcc /usr/local/bin/arm-linux-musleabihf-gcc ln -nsf ../armv7-linux-musleabihf/bin/musl-gcc /usr/local/bin/armv7-linux-musleabihf-gcc +ln -nsf ../aarch64-linux-musl/bin/musl-gcc /usr/local/bin/aarch64-unknown-linux-musl-gcc curl -L https://github.com/llvm-mirror/llvm/archive/release_39.tar.gz | tar xzf - curl -L https://github.com/llvm-mirror/libunwind/archive/release_39.tar.gz | tar xzf - @@ -116,5 +129,19 @@ cp lib/libunwind.a /usr/local/armv7-linux-musleabihf/lib cd .. rm -rf libunwind-build +mkdir libunwind-build +cd libunwind-build +cmake ../libunwind-release_39 \ + -DLLVM_PATH=/tmp/llvm-release_39 \ + -DLIBUNWIND_ENABLE_SHARED=0 \ + -DCMAKE_C_COMPILER=aarch64-linux-gnu-gcc \ + -DCMAKE_CXX_COMPILER=aarch64-linux-gnu-g++ \ + -DCMAKE_C_FLAGS="" \ + -DCMAKE_CXX_FLAGS="" +make -j$(nproc) +cp lib/libunwind.a /usr/local/aarch64-linux-musl/lib +cd .. +rm -rf libunwind-build + rm -rf libunwind-release_39 rm -rf llvm-release_39 diff --git a/src/ci/docker/cross2/Dockerfile b/src/ci/docker/cross2/Dockerfile new file mode 100644 index 0000000000..029d2c18d4 --- /dev/null +++ b/src/ci/docker/cross2/Dockerfile @@ -0,0 +1,52 @@ +FROM ubuntu:16.04 + +COPY scripts/cross-apt-packages.sh /scripts/ +RUN sh /scripts/cross-apt-packages.sh + +RUN apt-get build-dep -y clang llvm && apt-get install -y --no-install-recommends \ + build-essential \ + libedit-dev \ + libgmp-dev \ + libisl-dev \ + libmpc-dev \ + libmpfr-dev \ + ninja-build \ + nodejs \ + python2.7-dev \ + software-properties-common \ + unzip + +RUN apt-key adv --batch --yes --keyserver keyserver.ubuntu.com --recv-keys 74DA7924C5513486 +RUN add-apt-repository -y 'deb http://apt.dilos.org/dilos dilos2-testing main' + +WORKDIR /tmp +COPY cross2/shared.sh cross2/build-fuchsia-toolchain.sh /tmp/ +COPY cross2/build-solaris-toolchain.sh /tmp/ +RUN /tmp/build-fuchsia-toolchain.sh +RUN /tmp/build-solaris-toolchain.sh x86_64 amd64 solaris-i386 +RUN /tmp/build-solaris-toolchain.sh sparcv9 sparcv9 solaris-sparc + +COPY scripts/sccache.sh /scripts/ +RUN sh /scripts/sccache.sh + +ENV \ + AR_x86_64_unknown_fuchsia=x86_64-unknown-fuchsia-ar \ + CC_x86_64_unknown_fuchsia=x86_64-unknown-fuchsia-clang \ + CXX_x86_64_unknown_fuchsia=x86_64-unknown-fuchsia-clang++ \ + AR_aarch64_unknown_fuchsia=aarch64-unknown-fuchsia-ar \ + CC_aarch64_unknown_fuchsia=aarch64-unknown-fuchsia-clang \ + CXX_aarch64_unknown_fuchsia=aarch64-unknown-fuchsia-clang++ \ + AR_sparcv9_sun_solaris=sparcv9-sun-solaris2.10-ar \ + CC_sparcv9_sun_solaris=sparcv9-sun-solaris2.10-gcc \ + CXX_sparcv9_sun_solaris=sparcv9-sun-solaris2.10-g++ \ + AR_x86_64_sun_solaris=x86_64-sun-solaris2.10-ar \ + CC_x86_64_sun_solaris=x86_64-sun-solaris2.10-gcc \ + CXX_x86_64_sun_solaris=x86_64-sun-solaris2.10-g++ + +ENV TARGETS=x86_64-unknown-fuchsia +ENV TARGETS=$TARGETS,aarch64-unknown-fuchsia +ENV TARGETS=$TARGETS,sparcv9-sun-solaris +ENV TARGETS=$TARGETS,x86_64-sun-solaris + +ENV RUST_CONFIGURE_ARGS --target=$TARGETS --enable-extended +ENV SCRIPT python2.7 ../x.py dist --target $TARGETS diff --git a/src/ci/docker/cross2/build-fuchsia-toolchain.sh b/src/ci/docker/cross2/build-fuchsia-toolchain.sh new file mode 100755 index 
0000000000..756013a235 --- /dev/null +++ b/src/ci/docker/cross2/build-fuchsia-toolchain.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# Copyright 2017 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. + +# ignore-tidy-linelength + +set -ex +source shared.sh + +ZIRCON=e9a26dbc70d631029f8ee9763103910b7e3a2fe1 + +mkdir -p zircon +pushd zircon > /dev/null + +# Download sources +git init +git remote add origin https://fuchsia.googlesource.com/zircon +git fetch --depth=1 origin $ZIRCON +git reset --hard FETCH_HEAD + +# Download toolchain +./scripts/download-toolchain +chmod -R a+rx prebuilt/downloads/clang+llvm-x86_64-linux +cp -a prebuilt/downloads/clang+llvm-x86_64-linux/. /usr/local + +build() { + local arch="$1" + + case "${arch}" in + x86_64) tgt="zircon-pc-x86-64" ;; + aarch64) tgt="zircon-qemu-arm64" ;; + esac + + hide_output make -j$(getconf _NPROCESSORS_ONLN) $tgt + dst=/usr/local/${arch}-unknown-fuchsia + mkdir -p $dst + cp -a build-${tgt}/sysroot/include $dst/ + cp -a build-${tgt}/sysroot/lib $dst/ +} + +# Build sysroot +for arch in x86_64 aarch64; do + build ${arch} +done + +popd > /dev/null +rm -rf zircon + +for arch in x86_64 aarch64; do + for tool in clang clang++; do + cat >/usr/local/bin/${arch}-unknown-fuchsia-${tool} < or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. + +set -ex +source shared.sh + +ARCH=$1 +LIB_ARCH=$2 +APT_ARCH=$3 +BINUTILS=2.28.1 +GCC=6.4.0 + +# First up, build binutils +mkdir binutils +cd binutils + +curl https://ftp.gnu.org/gnu/binutils/binutils-$BINUTILS.tar.xz | tar xJf - +mkdir binutils-build +cd binutils-build +hide_output ../binutils-$BINUTILS/configure --target=$ARCH-sun-solaris2.10 +hide_output make -j10 +hide_output make install + +cd ../.. +rm -rf binutils + +# Next, download and install the relevant solaris packages +mkdir solaris +cd solaris + +dpkg --add-architecture $APT_ARCH +apt-get update +apt-get download $(apt-cache depends --recurse --no-replaces \ + libc-dev:$APT_ARCH \ + libm-dev:$APT_ARCH \ + libpthread-dev:$APT_ARCH \ + libresolv-dev:$APT_ARCH \ + librt-dev:$APT_ARCH \ + libsocket-dev:$APT_ARCH \ + system-crt:$APT_ARCH \ + system-header:$APT_ARCH \ + | grep "^\w") + +for deb in *$APT_ARCH.deb; do + dpkg -x $deb . +done + +# Remove Solaris 11 functions that are optionally used by libbacktrace. +# This is for Solaris 10 compatibility. +rm usr/include/link.h +patch -p0 << 'EOF' +--- usr/include/string.h ++++ usr/include/string10.h +@@ -93 +92,0 @@ +-extern size_t strnlen(const char *, size_t); +EOF + +mkdir /usr/local/$ARCH-sun-solaris2.10/usr +mv usr/include /usr/local/$ARCH-sun-solaris2.10/usr/include +mv usr/lib/$LIB_ARCH/* /usr/local/$ARCH-sun-solaris2.10/lib +mv lib/$LIB_ARCH/* /usr/local/$ARCH-sun-solaris2.10/lib + +ln -s usr/include /usr/local/$ARCH-sun-solaris2.10/sys-include +ln -s usr/include /usr/local/$ARCH-sun-solaris2.10/include + +cd .. 
+rm -rf solaris + +# Finally, download and build gcc to target solaris +mkdir gcc +cd gcc + +curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.xz | tar xJf - +cd gcc-$GCC + +mkdir ../gcc-build +cd ../gcc-build +hide_output ../gcc-$GCC/configure \ + --enable-languages=c,c++ \ + --target=$ARCH-sun-solaris2.10 \ + --with-gnu-as \ + --with-gnu-ld \ + --disable-multilib \ + --disable-nls \ + --disable-libgomp \ + --disable-libquadmath \ + --disable-libssp \ + --disable-libvtv \ + --disable-libcilkrts \ + --disable-libada \ + --disable-libsanitizer \ + --disable-libquadmath-support \ + --disable-lto + +hide_output make -j10 +hide_output make install + +cd ../.. +rm -rf gcc diff --git a/src/ci/docker/dist-fuchsia/shared.sh b/src/ci/docker/cross2/shared.sh similarity index 100% rename from src/ci/docker/dist-fuchsia/shared.sh rename to src/ci/docker/cross2/shared.sh diff --git a/src/ci/docker/disabled/aarch64-gnu/Dockerfile b/src/ci/docker/disabled/aarch64-gnu/Dockerfile index 9a0e453122..fedb4094c8 100644 --- a/src/ci/docker/disabled/aarch64-gnu/Dockerfile +++ b/src/ci/docker/disabled/aarch64-gnu/Dockerfile @@ -31,7 +31,7 @@ WORKDIR /build # The `config` config file was a previously generated config file for # the kernel. This file was generated by running `make defconfig` # followed by `make menuconfig` and then enabling the IPv6 protocol page. -COPY disabled/aarch64-gnu/config /build/.config +COPY aarch64-gnu/config /build/.config RUN curl https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-4.4.42.tar.xz | \ tar xJf - && \ cd /build/linux-4.4.42 && \ diff --git a/src/ci/docker/disabled/dist-aarch64-android/Dockerfile b/src/ci/docker/disabled/dist-aarch64-android/Dockerfile index 20d823a3d7..ce5e8cfaf0 100644 --- a/src/ci/docker/disabled/dist-aarch64-android/Dockerfile +++ b/src/ci/docker/disabled/dist-aarch64-android/Dockerfile @@ -5,7 +5,7 @@ RUN sh /scripts/android-base-apt-get.sh COPY scripts/android-ndk.sh /scripts/ RUN . /scripts/android-ndk.sh && \ - download_and_make_toolchain android-ndk-r13b-linux-x86_64.zip arm64 21 + download_and_make_toolchain android-ndk-r15c-linux-x86_64.zip arm64 21 ENV PATH=$PATH:/android/ndk/arm64-21/bin diff --git a/src/ci/docker/disabled/dist-armv7-android/Dockerfile b/src/ci/docker/disabled/dist-armv7-android/Dockerfile index 3435d641a1..3177fa2147 100644 --- a/src/ci/docker/disabled/dist-armv7-android/Dockerfile +++ b/src/ci/docker/disabled/dist-armv7-android/Dockerfile @@ -5,17 +5,17 @@ RUN sh /scripts/android-base-apt-get.sh COPY scripts/android-ndk.sh /scripts/ RUN . /scripts/android-ndk.sh && \ - download_ndk android-ndk-r13b-linux-x86_64.zip && \ - make_standalone_toolchain arm 9 && \ + download_ndk android-ndk-r15c-linux-x86_64.zip && \ + make_standalone_toolchain arm 14 && \ make_standalone_toolchain arm 21 && \ remove_ndk RUN chmod 777 /android/ndk && \ ln -s /android/ndk/arm-21 /android/ndk/arm -ENV PATH=$PATH:/android/ndk/arm-9/bin +ENV PATH=$PATH:/android/ndk/arm-14/bin -ENV DEP_Z_ROOT=/android/ndk/arm-9/sysroot/usr/ +ENV DEP_Z_ROOT=/android/ndk/arm-14/sysroot/usr/ ENV HOSTS=armv7-linux-androideabi @@ -27,18 +27,18 @@ ENV RUST_CONFIGURE_ARGS \ --enable-extended \ --enable-cargo-openssl-static -# We support api level 9, but api level 21 is required to build llvm. To +# We support api level 14, but api level 21 is required to build llvm. To # overcome this problem we use a ndk with api level 21 to build llvm and then -# switch to a ndk with api level 9 to complete the build. 
When the linker is +# switch to a ndk with api level 14 to complete the build. When the linker is # invoked there are missing symbols (like sigsetempty, not available with api -# level 9), the default linker behavior is to generate an error, to allow the +# level 14), the default linker behavior is to generate an error, to allow the # build to finish we use --warn-unresolved-symbols. Note that the missing # symbols does not affect std, only the compiler (llvm) and cargo (openssl). ENV SCRIPT \ python2.7 ../x.py build src/llvm --host $HOSTS --target $HOSTS && \ (export RUSTFLAGS="\"-C link-arg=-Wl,--warn-unresolved-symbols\""; \ rm /android/ndk/arm && \ - ln -s /android/ndk/arm-9 /android/ndk/arm && \ + ln -s /android/ndk/arm-14 /android/ndk/arm && \ python2.7 ../x.py dist --host $HOSTS --target $HOSTS) COPY scripts/sccache.sh /scripts/ diff --git a/src/ci/docker/disabled/dist-i686-android/Dockerfile b/src/ci/docker/disabled/dist-i686-android/Dockerfile index 4bb7053760..ace9c4feb4 100644 --- a/src/ci/docker/disabled/dist-i686-android/Dockerfile +++ b/src/ci/docker/disabled/dist-i686-android/Dockerfile @@ -5,17 +5,17 @@ RUN sh /scripts/android-base-apt-get.sh COPY scripts/android-ndk.sh /scripts/ RUN . /scripts/android-ndk.sh && \ - download_ndk android-ndk-r13b-linux-x86_64.zip && \ - make_standalone_toolchain x86 9 && \ + download_ndk android-ndk-r15c-linux-x86_64.zip && \ + make_standalone_toolchain x86 14 && \ make_standalone_toolchain x86 21 && \ remove_ndk RUN chmod 777 /android/ndk && \ ln -s /android/ndk/x86-21 /android/ndk/x86 -ENV PATH=$PATH:/android/ndk/x86-9/bin +ENV PATH=$PATH:/android/ndk/x86-14/bin -ENV DEP_Z_ROOT=/android/ndk/x86-9/sysroot/usr/ +ENV DEP_Z_ROOT=/android/ndk/x86-14/sysroot/usr/ ENV HOSTS=i686-linux-android @@ -27,18 +27,18 @@ ENV RUST_CONFIGURE_ARGS \ --enable-extended \ --enable-cargo-openssl-static -# We support api level 9, but api level 21 is required to build llvm. To +# We support api level 14, but api level 21 is required to build llvm. To # overcome this problem we use a ndk with api level 21 to build llvm and then -# switch to a ndk with api level 9 to complete the build. When the linker is +# switch to a ndk with api level 14 to complete the build. When the linker is # invoked there are missing symbols (like sigsetempty, not available with api -# level 9), the default linker behavior is to generate an error, to allow the +# level 14), the default linker behavior is to generate an error, to allow the # build to finish we use --warn-unresolved-symbols. Note that the missing # symbols does not affect std, only the compiler (llvm) and cargo (openssl). ENV SCRIPT \ python2.7 ../x.py build src/llvm --host $HOSTS --target $HOSTS && \ (export RUSTFLAGS="\"-C link-arg=-Wl,--warn-unresolved-symbols\""; \ rm /android/ndk/x86 && \ - ln -s /android/ndk/x86-9 /android/ndk/x86 && \ + ln -s /android/ndk/x86-14 /android/ndk/x86 && \ python2.7 ../x.py dist --host $HOSTS --target $HOSTS) COPY scripts/sccache.sh /scripts/ diff --git a/src/ci/docker/disabled/dist-x86_64-android/Dockerfile b/src/ci/docker/disabled/dist-x86_64-android/Dockerfile index 525b218417..322d26f0ad 100644 --- a/src/ci/docker/disabled/dist-x86_64-android/Dockerfile +++ b/src/ci/docker/disabled/dist-x86_64-android/Dockerfile @@ -5,7 +5,7 @@ RUN sh /scripts/android-base-apt-get.sh COPY scripts/android-ndk.sh /scripts/ RUN . 
/scripts/android-ndk.sh && \ - download_and_make_toolchain android-ndk-r13b-linux-x86_64.zip x86_64 21 + download_and_make_toolchain android-ndk-r15c-linux-x86_64.zip x86_64 21 ENV PATH=$PATH:/android/ndk/x86_64-21/bin diff --git a/src/ci/docker/disabled/dist-x86_64-haiku/Dockerfile b/src/ci/docker/disabled/dist-x86_64-haiku/Dockerfile new file mode 100644 index 0000000000..621976b5cb --- /dev/null +++ b/src/ci/docker/disabled/dist-x86_64-haiku/Dockerfile @@ -0,0 +1,49 @@ +FROM ubuntu:16.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + autoconf \ + automake \ + bison \ + bzip2 \ + ca-certificates \ + cmake \ + curl \ + file \ + flex \ + g++ \ + gawk \ + git \ + libcurl4-openssl-dev \ + libssl-dev \ + make \ + nasm \ + pkg-config \ + python2.7 \ + sudo \ + texinfo \ + wget \ + xz-utils \ + zlib1g-dev + +COPY dist-x86_64-haiku/llvm-config.sh /bin/llvm-config-haiku + +ENV ARCH=x86_64 + +WORKDIR /tmp +COPY dist-x86_64-haiku/build-toolchain.sh /tmp/ +RUN /tmp/build-toolchain.sh $ARCH + +COPY dist-x86_64-haiku/fetch-packages.sh /tmp/ +RUN /tmp/fetch-packages.sh + +COPY scripts/sccache.sh /scripts/ +RUN sh /scripts/sccache.sh + +ENV HOST=x86_64-unknown-haiku +ENV TARGET=target.$HOST + +ENV RUST_CONFIGURE_ARGS --host=$HOST --target=$HOST --disable-jemalloc \ + --set=$TARGET.cc=x86_64-unknown-haiku-gcc \ + --set=$TARGET.cxx=x86_64-unknown-haiku-g++ \ + --set=$TARGET.llvm-config=/bin/llvm-config-haiku +ENV SCRIPT python2.7 ../x.py dist diff --git a/src/ci/docker/disabled/dist-x86_64-haiku/build-toolchain.sh b/src/ci/docker/disabled/dist-x86_64-haiku/build-toolchain.sh new file mode 100755 index 0000000000..0776d44898 --- /dev/null +++ b/src/ci/docker/disabled/dist-x86_64-haiku/build-toolchain.sh @@ -0,0 +1,74 @@ +#!/bin/bash +# Copyright 2017 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. + +set -ex + +ARCH=$1 + +TOP=$(pwd) + +BUILDTOOLS=$TOP/buildtools +HAIKU=$TOP/haiku +OUTPUT=/tools +SYSROOT=$OUTPUT/cross-tools-$ARCH/sysroot +PACKAGE_ROOT=/system + +hide_output() { + set +x + on_err=" +echo ERROR: An error was encountered with the build. +cat /tmp/build.log +exit 1 +" + trap "$on_err" ERR + bash -c "while true; do sleep 30; echo \$(date) - building ...; done" & + PING_LOOP_PID=$! 
+ $@ &> /tmp/build.log + trap - ERR + kill $PING_LOOP_PID + set -x +} + +# First up, build a cross-compiler +git clone --depth=1 https://git.haiku-os.org/haiku +git clone --depth=1 https://git.haiku-os.org/buildtools +cd $BUILDTOOLS/jam +hide_output make +hide_output ./jam0 install +mkdir -p $OUTPUT +cd $OUTPUT +hide_output $HAIKU/configure --build-cross-tools $ARCH $TOP/buildtools + +# Set up sysroot to redirect to /system +mkdir -p $SYSROOT/boot +mkdir -p $PACKAGE_ROOT +ln -s $PACKAGE_ROOT $SYSROOT/boot/system + +# Build needed packages and tools for the cross-compiler +hide_output jam -q haiku.hpkg haiku_devel.hpkg 'package' + +# Set up our sysroot +cp $OUTPUT/objects/linux/lib/*.so /lib/x86_64-linux-gnu +cp $OUTPUT/objects/linux/x86_64/release/tools/package/package /bin/ +find $SYSROOT/../bin/ -type f -exec ln -s {} /bin/ \; + +# Extract packages +package extract -C $PACKAGE_ROOT $OUTPUT/objects/haiku/$ARCH/packaging/packages/haiku.hpkg +package extract -C $PACKAGE_ROOT $OUTPUT/objects/haiku/$ARCH/packaging/packages/haiku_devel.hpkg +find $OUTPUT/download/ -name '*.hpkg' -exec package extract -C $PACKAGE_ROOT {} \; + +# Fix libgcc_s so we can link to it +cd $PACKAGE_ROOT/develop/lib +ln -s ../../lib/libgcc_s.so libgcc_s.so + +# Clean up +rm -rf $BUILDTOOLS $HAIKU $OUTPUT/Jamfile $OUTPUT/attributes $OUTPUT/build \ + $OUTPUT/build_packages $OUTPUT/download $OUTPUT/objects diff --git a/src/ci/docker/disabled/dist-x86_64-haiku/fetch-packages.sh b/src/ci/docker/disabled/dist-x86_64-haiku/fetch-packages.sh new file mode 100755 index 0000000000..0f6034cdb8 --- /dev/null +++ b/src/ci/docker/disabled/dist-x86_64-haiku/fetch-packages.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Copyright 2017 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. + +wget http://packages.haiku-os.org/haikuports/master/hpkg/llvm-4.0.1-2-x86_64.hpkg +wget http://packages.haiku-os.org/haikuports/master/hpkg/llvm_libs-4.0.1-2-x86_64.hpkg + +package extract -C /system llvm-4.0.1-2-x86_64.hpkg +package extract -C /system llvm_libs-4.0.1-2-x86_64.hpkg + +rm -f *.hpkg diff --git a/src/ci/docker/disabled/dist-x86_64-haiku/llvm-config.sh b/src/ci/docker/disabled/dist-x86_64-haiku/llvm-config.sh new file mode 100755 index 0000000000..fb5206bed2 --- /dev/null +++ b/src/ci/docker/disabled/dist-x86_64-haiku/llvm-config.sh @@ -0,0 +1,67 @@ +#!/bin/sh +# Copyright 2017 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. 
+ +case $1 in +--version) echo 4.0.1;; +--prefix) echo $SCRATCH/haiku-cross/sysroot/boot/system;; +--bindir) echo $SCRATCH/haiku-cross/sysroot/boot/system/bin;; +--includedir) echo $SCRATCH/haiku-cross/sysroot/boot/system/develop/headers;; +--libdir) echo $SCRATCH/haiku-/cross/sysroot/boot/system/develop/lib;; +--cmakedir) echo $SCRATCH/haiku-/cross/sysroot/boot/system/develop/lib/cmake/llvm;; +--cppflags) echo -I$SCRATCH/haiku-/cross/sysroot/boot/system/develop/headers \ + -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS;; +--cflags) echo -I$SCRATCH/haiku-cross/sysroot/boot/system/develop/headers \ + -fPIC -Wall -W -Wno-unused-parameter -Wwrite-strings \ + -Wno-missing-field-initializers -pedantic -Wno-long-long -Wno-comment \ + -Werror=date-time -ffunction-sections -fdata-sections -O3 -DNDEBUG \ + -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS;; +--cxxflags) echo -I/$SCRATCH/haiku-cross/sysroot/boot/system/develop/headers \ + -fPIC -fvisibility-inlines-hidden -Wall -W -Wno-unused-parameter \ + -Wwrite-strings -Wcast-qual -Wno-missing-field-initializers -pedantic \ + -Wno-long-long -Wno-maybe-uninitialized -Wdelete-non-virtual-dtor \ + -Wno-comment -Werror=date-time -std=c++11 -ffunction-sections \ + -fdata-sections -O3 -DNDEBUG -fno-exceptions \ + -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS;; +--ldflags) echo -L$SCRATCH/haiku-cross/sysroot/boot/system/develop/lib ;; +--system-libs) echo ;; +--libs) echo -lLLVM-4.0;; +--libfiles) echo $SCRATCH/haiku-cross/sysroot/boot/system/develop/lib/libLLVM-4.0.so;; +--components) echo aarch64 aarch64asmparser aarch64asmprinter aarch64codegen \ + aarch64desc aarch64disassembler aarch64info aarch64utils all \ + all-targets amdgpu amdgpuasmparser amdgpuasmprinter amdgpucodegen \ + amdgpudesc amdgpudisassembler amdgpuinfo amdgpuutils analysis arm \ + armasmparser armasmprinter armcodegen armdesc armdisassembler \ + arminfo asmparser asmprinter bitreader bitwriter bpf bpfasmprinter \ + bpfcodegen bpfdesc bpfdisassembler bpfinfo codegen core coroutines \ + coverage debuginfocodeview debuginfodwarf debuginfomsf debuginfopdb \ + demangle engine executionengine globalisel hexagon hexagonasmparser \ + hexagoncodegen hexagondesc hexagondisassembler hexagoninfo \ + instcombine instrumentation interpreter ipo irreader lanai \ + lanaiasmparser lanaicodegen lanaidesc lanaidisassembler lanaiinfo \ + lanaiinstprinter libdriver lineeditor linker lto mc mcdisassembler \ + mcjit mcparser mips mipsasmparser mipsasmprinter mipscodegen \ + mipsdesc mipsdisassembler mipsinfo mirparser msp430 msp430asmprinter \ + msp430codegen msp430desc msp430info native nativecodegen nvptx \ + nvptxasmprinter nvptxcodegen nvptxdesc nvptxinfo objcarcopts object \ + objectyaml option orcjit passes powerpc powerpcasmparser \ + powerpcasmprinter powerpccodegen powerpcdesc powerpcdisassembler \ + powerpcinfo profiledata riscv riscvcodegen riscvdesc riscvinfo \ + runtimedyld scalaropts selectiondag sparc sparcasmparser \ + sparcasmprinter sparccodegen sparcdesc sparcdisassembler sparcinfo \ + support symbolize systemz systemzasmparser systemzasmprinter \ + systemzcodegen systemzdesc systemzdisassembler systemzinfo tablegen \ + target transformutils vectorize x86 x86asmparser x86asmprinter \ + x86codegen x86desc x86disassembler x86info x86utils xcore \ + xcoreasmprinter xcorecodegen xcoredesc xcoredisassembler xcoreinfo;; +--host-target) echo x86_64-unknown-haiku;; +--has-rtti) echo YES;; +--shared-mode) echo shared;; +esac 
diff --git a/src/ci/docker/disabled/wasm32-exp/Dockerfile b/src/ci/docker/disabled/wasm32-exp/Dockerfile index 6323369421..8653b0e8b4 100644 --- a/src/ci/docker/disabled/wasm32-exp/Dockerfile +++ b/src/ci/docker/disabled/wasm32-exp/Dockerfile @@ -17,7 +17,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ # emscripten COPY scripts/emscripten-wasm.sh /scripts/ -COPY disabled/wasm32-exp/node.sh /usr/local/bin/node +COPY wasm32-exp/node.sh /usr/local/bin/node RUN bash /scripts/emscripten-wasm.sh # cache diff --git a/src/ci/docker/dist-android/Dockerfile b/src/ci/docker/dist-android/Dockerfile index a36f7fc1ac..5d7545a3c2 100644 --- a/src/ci/docker/dist-android/Dockerfile +++ b/src/ci/docker/dist-android/Dockerfile @@ -6,9 +6,9 @@ RUN sh /scripts/android-base-apt-get.sh # ndk COPY scripts/android-ndk.sh /scripts/ RUN . /scripts/android-ndk.sh && \ - download_ndk android-ndk-r13b-linux-x86_64.zip && \ - make_standalone_toolchain arm 9 && \ - make_standalone_toolchain x86 9 && \ + download_ndk android-ndk-r15c-linux-x86_64.zip && \ + make_standalone_toolchain arm 14 && \ + make_standalone_toolchain x86 14 && \ make_standalone_toolchain arm64 21 && \ make_standalone_toolchain x86_64 21 && \ remove_ndk @@ -23,9 +23,9 @@ ENV TARGETS=$TARGETS,x86_64-linux-android ENV RUST_CONFIGURE_ARGS \ --target=$TARGETS \ --enable-extended \ - --arm-linux-androideabi-ndk=/android/ndk/arm-9 \ - --armv7-linux-androideabi-ndk=/android/ndk/arm-9 \ - --i686-linux-android-ndk=/android/ndk/x86-9 \ + --arm-linux-androideabi-ndk=/android/ndk/arm-14 \ + --armv7-linux-androideabi-ndk=/android/ndk/arm-14 \ + --i686-linux-android-ndk=/android/ndk/x86-14 \ --aarch64-linux-android-ndk=/android/ndk/arm64-21 \ --x86_64-linux-android-ndk=/android/ndk/x86_64-21 diff --git a/src/ci/docker/dist-fuchsia/Dockerfile b/src/ci/docker/dist-fuchsia/Dockerfile deleted file mode 100644 index e18cb453ba..0000000000 --- a/src/ci/docker/dist-fuchsia/Dockerfile +++ /dev/null @@ -1,42 +0,0 @@ -FROM ubuntu:16.04 - -RUN apt-get update && apt-get install -y --no-install-recommends \ - g++ \ - make \ - ninja-build \ - file \ - curl \ - ca-certificates \ - python2.7-dev \ - git \ - sudo \ - bzip2 \ - xz-utils \ - swig \ - libedit-dev \ - libncurses5-dev \ - patch - -RUN curl -L https://cmake.org/files/v3.8/cmake-3.8.0-rc1-Linux-x86_64.tar.gz | \ - tar xzf - -C /usr/local --strip-components=1 - -WORKDIR /tmp -COPY dist-fuchsia/shared.sh dist-fuchsia/build-toolchain.sh dist-fuchsia/compiler-rt-dso-handle.patch /tmp/ -RUN /tmp/build-toolchain.sh - -COPY scripts/sccache.sh /scripts/ -RUN sh /scripts/sccache.sh - -ENV \ - AR_x86_64_unknown_fuchsia=x86_64-unknown-fuchsia-ar \ - CC_x86_64_unknown_fuchsia=x86_64-unknown-fuchsia-clang \ - CXX_x86_64_unknown_fuchsia=x86_64-unknown-fuchsia-clang++ \ - AR_aarch64_unknown_fuchsia=aarch64-unknown-fuchsia-ar \ - CC_aarch64_unknown_fuchsia=aarch64-unknown-fuchsia-clang \ - CXX_aarch64_unknown_fuchsia=aarch64-unknown-fuchsia-clang++ - -ENV TARGETS=x86_64-unknown-fuchsia -ENV TARGETS=$TARGETS,aarch64-unknown-fuchsia - -ENV RUST_CONFIGURE_ARGS --target=$TARGETS --enable-extended -ENV SCRIPT python2.7 ../x.py dist --target $TARGETS diff --git a/src/ci/docker/dist-fuchsia/build-toolchain.sh b/src/ci/docker/dist-fuchsia/build-toolchain.sh deleted file mode 100755 index 10b285a546..0000000000 --- a/src/ci/docker/dist-fuchsia/build-toolchain.sh +++ /dev/null @@ -1,126 +0,0 @@ -#!/bin/bash -# Copyright 2017 The Rust Project Developers. 
See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -# ignore-tidy-linelength - -set -ex -source shared.sh - -# Download sources -SRCS=( - "https://fuchsia.googlesource.com/magenta magenta d17073dc8de344ead3b65e8cc6a12280dec38c84" - "https://llvm.googlesource.com/llvm llvm 3f58a16d8eec385e2b3ebdfbb84ff9d3bf27e025" - "https://llvm.googlesource.com/clang llvm/tools/clang 727ea63e6e82677f6e10e05e08bc7d6bdbae3111" - "https://llvm.googlesource.com/lld llvm/tools/lld a31286c1366e5e89b8872803fded13805a1a084b" - "https://llvm.googlesource.com/lldb llvm/tools/lldb 0b2384abec4cb99ad66687712e07dee4dd9d187e" - "https://llvm.googlesource.com/compiler-rt llvm/runtimes/compiler-rt 9093a35c599fe41278606a20b51095ea8bd5a081" - "https://llvm.googlesource.com/libcxx llvm/runtimes/libcxx 607e0c71ec4f7fd377ad3f6c47b08dbe89f66eaa" - "https://llvm.googlesource.com/libcxxabi llvm/runtimes/libcxxabi 0a3a1a8a5ca5ef69e0f6b7d5b9d13e63e6fd2c19" - "https://llvm.googlesource.com/libunwind llvm/runtimes/libunwind e128003563d99d9ee62247c4cee40f07d21c03e3" -) - -fetch() { - mkdir -p $2 - pushd $2 > /dev/null - git init - git remote add origin $1 - git fetch --depth=1 origin $3 - git reset --hard FETCH_HEAD - popd > /dev/null -} - -for i in "${SRCS[@]}"; do - fetch $i -done - -# Remove this once https://reviews.llvm.org/D28791 is resolved -cd llvm/runtimes/compiler-rt -patch -Np1 < /tmp/compiler-rt-dso-handle.patch -cd ../../.. - -# Build toolchain -cd llvm -mkdir build -cd build -hide_output cmake -GNinja \ - -DFUCHSIA_SYSROOT=${PWD}/../../magenta/third_party/ulib/musl \ - -DLLVM_ENABLE_LTO=OFF \ - -DCLANG_BOOTSTRAP_PASSTHROUGH=LLVM_ENABLE_LTO \ - -C ../tools/clang/cmake/caches/Fuchsia.cmake \ - .. -hide_output ninja stage2-distribution -hide_output ninja stage2-install-distribution -cd ../.. - -# Build sysroot -rm -rf llvm/runtimes/compiler-rt -./magenta/scripts/download-toolchain - -build_sysroot() { - local arch="$1" - - case "${arch}" in - x86_64) tgt="magenta-pc-x86-64" ;; - aarch64) tgt="magenta-qemu-arm64" ;; - esac - - hide_output make -C magenta -j$(getconf _NPROCESSORS_ONLN) $tgt - dst=/usr/local/${arch}-unknown-fuchsia - mkdir -p $dst - cp -r magenta/build-${tgt}/sysroot/include $dst/ - cp -r magenta/build-${tgt}/sysroot/lib $dst/ - - cd llvm - mkdir build-runtimes-${arch} - cd build-runtimes-${arch} - hide_output cmake -GNinja \ - -DCMAKE_C_COMPILER=clang \ - -DCMAKE_CXX_COMPILER=clang++ \ - -DCMAKE_AR=/usr/local/bin/llvm-ar \ - -DCMAKE_RANLIB=/usr/local/bin/llvm-ranlib \ - -DCMAKE_INSTALL_PREFIX= \ - -DLLVM_MAIN_SRC_DIR=${PWD}/.. \ - -DLLVM_BINARY_DIR=${PWD}/../build \ - -DLLVM_ENABLE_WERROR=OFF \ - -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_INCLUDE_TESTS=ON \ - -DCMAKE_SYSTEM_NAME=Fuchsia \ - -DCMAKE_C_COMPILER_TARGET=${arch}-fuchsia \ - -DCMAKE_CXX_COMPILER_TARGET=${arch}-fuchsia \ - -DUNIX=1 \ - -DLIBCXX_HAS_MUSL_LIBC=ON \ - -DLIBCXXABI_USE_LLVM_UNWINDER=ON \ - -DCMAKE_SYSROOT=${dst} \ - -DCMAKE_C_COMPILER_FORCED=TRUE \ - -DCMAKE_CXX_COMPILER_FORCED=TRUE \ - -DLLVM_ENABLE_LIBCXX=ON \ - -DCMAKE_EXE_LINKER_FLAGS="-nodefaultlibs -lc" \ - -DCMAKE_SHARED_LINKER_FLAGS="$(clang --target=${arch}-fuchsia -print-libgcc-file-name)" \ - ../runtimes - hide_output env DESTDIR="${dst}" ninja install - cd ../.. 
-} - -build_sysroot "x86_64" -build_sysroot "aarch64" - -rm -rf magenta llvm - -for arch in x86_64 aarch64; do - for tool in clang clang++; do - cat >/usr/local/bin/${arch}-unknown-fuchsia-${tool} < /dev/null + sdkmanager platform-tools emulator \ + "platforms;android-$api" \ + "system-images;android-$api;default;$abi" > /dev/null } create_avd() { - # See https://developer.android.com/studio/tools/help/android.html abi=$1 api=$2 - echo no | \ - /android/sdk/tools/android create avd \ - --name $abi-$api \ - --target android-$api \ - --abi $abi + # See https://developer.android.com/studio/command-line/avdmanager.html for + # usage of `avdmanager`. + echo no | avdmanager create avd \ + -n "$abi-$api" \ + -k "system-images;android-$api;default;$abi" } download_and_create_avd() { @@ -51,3 +51,15 @@ download_and_create_avd() { download_sysimage $2 $3 create_avd $2 $3 } + +# Usage: +# +# setup_android_sdk 4333796 armeabi-v7a 18 +# +# 4333796 => +# SDK tool version. +# Copy from https://developer.android.com/studio/index.html#command-tools +# armeabi-v7a => +# System image ABI +# 18 => +# Android API Level (18 = Android 4.3 = Jelly Bean MR2) diff --git a/src/ci/docker/x86_64-gnu-aux/Dockerfile b/src/ci/docker/x86_64-gnu-aux/Dockerfile index 35a387221c..a453c62cc9 100644 --- a/src/ci/docker/x86_64-gnu-aux/Dockerfile +++ b/src/ci/docker/x86_64-gnu-aux/Dockerfile @@ -17,5 +17,5 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh -ENV RUST_CONFIGURE_ARGS --build=x86_64-unknown-linux-gnu +ENV RUST_CONFIGURE_ARGS --build=x86_64-unknown-linux-gnu --enable-test-miri ENV RUST_CHECK_TARGET check-aux diff --git a/src/ci/docker/x86_64-gnu-distcheck/Dockerfile b/src/ci/docker/x86_64-gnu-distcheck/Dockerfile index 786f59eb9f..f16dd98099 100644 --- a/src/ci/docker/x86_64-gnu-distcheck/Dockerfile +++ b/src/ci/docker/x86_64-gnu-distcheck/Dockerfile @@ -18,6 +18,6 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh -ENV RUST_CONFIGURE_ARGS --build=x86_64-unknown-linux-gnu +ENV RUST_CONFIGURE_ARGS --build=x86_64-unknown-linux-gnu --set rust.ignore-git=false ENV SCRIPT python2.7 ../x.py test distcheck ENV DIST_SRC 1 diff --git a/src/ci/run.sh b/src/ci/run.sh index 388a9c80d7..20b9d0e063 100755 --- a/src/ci/run.sh +++ b/src/ci/run.sh @@ -52,7 +52,11 @@ if [ "$DEPLOY$DEPLOY_ALT" != "" ]; then RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --disable-llvm-assertions" fi else - RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-debug-assertions" + # We almost always want debug assertions enabled, but sometimes this takes too + # long for too little benefit, so we just turn them off. + if [ "$NO_DEBUG_ASSERTIONS" = "" ]; then + RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-debug-assertions" + fi # In general we always want to run tests with LLVM assertions enabled, but not # all platforms currently support that, so we have an option to disable. diff --git a/src/doc/book/.travis.yml b/src/doc/book/.travis.yml index 9d899602d9..96f8f1813a 100644 --- a/src/doc/book/.travis.yml +++ b/src/doc/book/.travis.yml @@ -1,4 +1,5 @@ sudo: false +dist: trusty language: rust cache: cargo rust: diff --git a/src/doc/book/README.md b/src/doc/book/README.md index 3bec4b5086..b7d0aad6ab 100644 --- a/src/doc/book/README.md +++ b/src/doc/book/README.md @@ -19,12 +19,14 @@ starting with the second edition. ## Requirements -Building the book requires [mdBook] >= v0.0.13. 
To get it: +Building the book requires [mdBook], ideally the same version that +[rust-lang/rust uses in this file][rust-mdbook]. To get it: [mdBook]: https://github.com/azerupi/mdBook +[rust-mdbook]: https://github.com/rust-lang/rust/blob/master/src/tools/rustbook/Cargo.toml ```bash -$ cargo install mdbook +$ cargo install mdbook --vers [version-num] ``` ## Building diff --git a/src/doc/book/second-edition/dictionary.txt b/src/doc/book/second-edition/dictionary.txt index 955cf66b6b..86dea7dc7b 100644 --- a/src/doc/book/second-edition/dictionary.txt +++ b/src/doc/book/second-edition/dictionary.txt @@ -9,13 +9,16 @@ aggregator AGraph aliasability alignof +allocator Amir anotherusername APIs app's aren args +associativity async +atomics AveragedCollection backend backtrace @@ -50,6 +53,8 @@ ChangeColorMessage charset chXX chYY +coercions +combinator ConcreteType config Config @@ -57,6 +62,7 @@ const constant's copyeditor couldn +CPUs cratesio CRLF cryptographically @@ -64,11 +70,15 @@ CStr CString ctrl Ctrl +customizable CustomSmartPointer +CustomSmartPointers +deallocate deallocated deallocating deallocation debuginfo +decrementing deps deref Deref @@ -84,12 +94,14 @@ destructured destructures destructuring Destructuring +deterministically didn Dobrý doccargo doccratesio DOCTYPE doesn +disambiguating DraftPost DSTs ebooks @@ -104,6 +116,7 @@ enums enum's Enums eprintln +Erlang ErrorKind Executables extern @@ -124,6 +137,7 @@ FnOnce formatter FromIterator frontend +getter GGraph GitHub gitignore @@ -149,7 +163,10 @@ html Iceburgh IEEE impl +implementor +implementors ImportantExcerpt +incrementing indices init inline @@ -157,6 +174,7 @@ instantiation internet IntoIterator InvalidDigit +invariants ioerror iokind ioresult @@ -180,6 +198,7 @@ librarys libreoffice libstd lifecycle +LimitTracker lobally locators login @@ -197,7 +216,9 @@ Mibbit minigrep mixup mkdir +MockMessenger modifiability +modularity monomorphization Monomorphization monomorphized @@ -206,10 +227,12 @@ Mozilla mpsc multithreaded mutex +mutex's Mutex mutexes Mutexes MutexGuard +MyBox namespace namespaced namespaces @@ -230,6 +253,7 @@ OCaml offsetof online OpenGL +optimizations OptionalFloatingPointNumber OptionalNumber OsStr @@ -245,6 +269,7 @@ PartialOrd PendingReview PendingReviewPost PlaceholderType +polymorphism PoolCreationError portia powershell @@ -277,12 +302,15 @@ RefMut refutability reimplement repr +representable request's resizes resizing retweet +rewordings rint ripgrep +runnable runtime runtimes Rustacean @@ -301,6 +329,7 @@ shouldn Simula situps sizeof +Smalltalk someproject someusername SPDX @@ -322,6 +351,7 @@ Struct structs struct's Structs +subclasses subcommand subcommands subdirectories @@ -364,9 +394,11 @@ unary Unary uncomment Uncomment +unevaluated Uninstalling uninstall unix +unpopulated unoptimized UnsafeCell unsafety diff --git a/src/doc/book/second-edition/nostarch/chapter02.md b/src/doc/book/second-edition/nostarch/chapter02.md index 48cb31ff2e..7b02b9f5ac 100644 --- a/src/doc/book/second-edition/nostarch/chapter02.md +++ b/src/doc/book/second-edition/nostarch/chapter02.md @@ -20,7 +20,7 @@ game will print congratulations and exit. To set up a new project, go to the *projects* directory that you created in Chapter 1, and make a new project using Cargo, like so: -```bash +``` $ cargo new guessing_game --bin $ cd guessing_game ``` @@ -34,7 +34,7 @@ Look at the generated *Cargo.toml* file: Filename: Cargo.toml -```toml +``` [package] name = "guessing_game" version = "0.1.0" @@ -51,7 +51,7 @@ you. 
Check out the *src/main.rs* file: Filename: src/main.rs -```rust +``` fn main() { println!("Hello, world!"); } @@ -60,9 +60,10 @@ fn main() { Now let’s compile this “Hello, world!” program and run it in the same step using the `cargo run` command: -```bash +``` $ cargo run Compiling guessing_game v0.1.0 (file:///projects/guessing_game) + Finished dev [unoptimized + debuginfo] target(s) in 1.50 secs Running `target/debug/guessing_game` Hello, world! ``` @@ -81,7 +82,7 @@ to input a guess. Enter the code in Listing 2-1 into *src/main.rs*. Filename: src/main.rs -```rust,ignore +``` use std::io; fn main() { @@ -98,51 +99,51 @@ fn main() { } ``` - -Listing 2-1: Code to get a guess from the user and print it out - +Listing 2-1: Code to get a guess from the user and print +it out This code contains a lot of information, so let’s go over it bit by bit. To -obtain user input and then print the result as output, we need to import the -`io` (input/output) library from the standard library (which is known as `std`): +obtain user input and then print the result as output, we need to bring the +`io` (input/output) library into scope. The `io` library comes from the +standard library (which is known as `std`): -```rust,ignore +``` use std::io; ``` -By default, Rust imports only a few types into every program in the -*prelude*. If a type you want to use isn’t in the -prelude, you have to import that type into your program explicitly with a `use` +By default, Rust brings only a few types into the scope of every program in +the *prelude*. If a type you want to use isn’t in the +prelude, you have to bring that type into scope explicitly with a `use` statement. Using the `std::io` library provides you with a number of useful `io`-related features, including the functionality to accept user input. As you saw in Chapter 1, the `main` function is the entry point into the program: -```rust,ignore +``` fn main() { ``` The `fn` syntax declares a new function, the `()` indicate there are no -arguments, and `{` starts the body of the function. +parameters, and `{` starts the body of the function. As you also learned in Chapter 1, `println!` is a macro that prints a string to the screen: -```rust,ignore +``` println!("Guess the number!"); println!("Please input your guess."); ``` -This code is just printing a prompt stating what the game is and requesting -input from the user. +This code is printing a prompt stating what the game is and requesting input +from the user. ### Storing Values with Variables Next, we’ll create a place to store the user input, like this: -```rust,ignore +``` let mut guess = String::new(); ``` @@ -150,7 +151,7 @@ Now the program is getting interesting! There’s a lot going on in this little line. Notice that this is a `let` statement, which is used to create *variables*. Here’s another example: -```rust,ignore +``` let foo = bar; ``` @@ -158,7 +159,7 @@ This line will create a new variable named `foo` and bind it to the value `bar`. In Rust, variables are immutable by default. The following example shows how to use `mut` before the variable name to make a variable mutable: -```rust +``` let foo = 5; // immutable let mut bar = 5; // mutable ``` @@ -189,7 +190,7 @@ Recall that we included the input/output functionality from the standard library with `use std::io;` on the first line of the program. 
Now we’ll call an associated function, `stdin`, on `io`: -```rust,ignore +``` io::stdin().read_line(&mut guess) .expect("Failed to read line"); ``` @@ -222,7 +223,7 @@ We’re not quite done with this line of code. Although it’s a single line of text, it’s only the first part of the single logical line of code. The second part is this method: -```rust,ignore +``` .expect("Failed to read line"); ``` @@ -230,7 +231,7 @@ When you call a method with the `.foo()` syntax, it’s often wise to introduce newline and other whitespace to help break up long lines. We could have written this code as: -```rust,ignore +``` io::stdin().read_line(&mut guess).expect("Failed to read line"); ``` @@ -264,32 +265,35 @@ argument to `expect`. If the `read_line` method returns an `Err`, it would likely be the result of an error coming from the underlying operating system. If this instance of `io::Result` is an `Ok` value, `expect` will take the return value that `Ok` is holding and return just that value to you so you -could use it. In this case, that value is the number of characters the user +could use it. In this case, that value is the number of bytes in what the user entered into standard input. If we don’t call `expect`, the program will compile, but we’ll get a warning: -```bash +``` $ cargo build Compiling guessing_game v0.1.0 (file:///projects/guessing_game) -src/main.rs:10:5: 10:39 warning: unused result which must be used, -#[warn(unused_must_use)] on by default -src/main.rs:10 io::stdin().read_line(&mut guess); - ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +warning: unused `std::result::Result` which must be used + --> src/main.rs:10:5 + | +10 | io::stdin().read_line(&mut guess); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: #[warn(unused_must_use)] on by default ``` Rust warns that we haven’t used the `Result` value returned from `read_line`, indicating that the program hasn’t handled a possible error. The right way to -suppress the warning is to actually write error handling, but since we just -want to crash this program when a problem occurs, we can use `expect`. You’ll -learn about recovering from errors in Chapter 9. +suppress the warning is to actually write error handling, but since we want to +crash this program when a problem occurs, we can use `expect`. You’ll learn +about recovering from errors in Chapter 9. ### Printing Values with `println!` Placeholders -Aside from the closing curly brace, there’s only one more line to discuss in +Aside from the closing curly brackets, there’s only one more line to discuss in the code added so far, which is the following: -```rust,ignore +``` println!("You guessed: {}", guess); ``` @@ -299,7 +303,7 @@ using `{}`: the first set of `{}` holds the first value listed after the format string, the second set holds the second value, and so on. Printing out multiple values in one call to `println!` would look like this: -```rust +``` let x = 5; let y = 10; @@ -310,11 +314,13 @@ This code would print out `x = 5 and y = 10`. ### Testing the First Part -Let’s test the first part of the guessing game. You can run it using `cargo run`: +Let’s test the first part of the guessing game. You can run it using +`cargo run`: -```bash +``` $ cargo run Compiling guessing_game v0.1.0 (file:///projects/guessing_game) + Finished dev [unoptimized + debuginfo] target(s) in 2.53 secs Running `target/debug/guessing_game` Guess the number! Please input your guess. 
@@ -348,7 +354,7 @@ you: Filename: Cargo.toml -```toml +``` [dependencies] rand = "0.3.14" @@ -367,7 +373,7 @@ version 0.3.14.” Now, without changing any of the code, let’s build the project, as shown in Listing 2-2: -```bash +``` $ cargo build Updating registry `https://github.com/rust-lang/crates.io-index` Downloading rand v0.3.14 @@ -375,12 +381,11 @@ $ cargo build Compiling libc v0.2.14 Compiling rand v0.3.14 Compiling guessing_game v0.1.0 (file:///projects/guessing_game) + Finished dev [unoptimized + debuginfo] target(s) in 2.53 secs ``` - -Listing 2-2: The output from running `cargo build` after adding the rand crate -as a dependency - +Listing 2-2: The output from running `cargo build` after +adding the rand crate as a dependency You may see different version numbers (but they will all be compatible with the code, thanks to SemVer!), and the lines may be in a different order. @@ -405,7 +410,7 @@ doesn’t recompile that either. With nothing to do, it simply exits. If you ope up the *src/main.rs* file, make a trivial change, then save it and build again, you’ll only see one line of output: -```bash +``` $ cargo build Compiling guessing_game v0.1.0 (file:///projects/guessing_game) ``` @@ -448,7 +453,7 @@ But by default, Cargo will only look for versions larger than `0.3.0` and smaller than `0.4.0`. If the `rand` crate has released two new versions, `0.3.15` and `0.4.0`, you would see the following if you ran `cargo update`: -```bash +``` $ cargo update Updating registry `https://github.com/rust-lang/crates.io-index` Updating rand v0.3.14 -> v0.3.15 @@ -460,7 +465,7 @@ that the version of the `rand` crate you are now using is `0.3.15`. If you wanted to use `rand` version `0.4.0` or any version in the `0.4.x` series, you’d have to update the *Cargo.toml* file to look like this instead: -```toml +``` [dependencies] rand = "0.4.0" @@ -471,7 +476,7 @@ available and reevaluate your `rand` requirements according to the new version you specified. There’s a lot more to say about Cargo and its -ecosystem that Chapter XX will discuss, but for +ecosystem that Chapter 14 will discuss, but for now, that’s all you need to know. Cargo makes it very easy to reuse libraries, so Rustaceans are able to write smaller projects that are assembled from a number of packages. @@ -483,7 +488,7 @@ in Listing 2-3: Filename: src/main.rs -```rust,ignore +``` extern crate rand; use std::io; @@ -507,9 +512,8 @@ fn main() { } ``` - -Listing 2-3: Code changes needed in order to generate a random number - +Listing 2-3: Code changes needed in order to generate a +random number We’re adding a `extern crate rand;` line to the top that lets Rust know we’ll be using that external dependency. This also does the equivalent of calling `use @@ -530,7 +534,7 @@ numbers as arguments and generates a random number between them. It’s inclusiv on the lower bound but exclusive on the upper bound, so we need to specify `1` and `101` to request a number between 1 and 100. -Knowing which traits to import and which functions and methods to use from a +Knowing which traits to use and which functions and methods to call from a crate isn’t something that you’ll just *know*. Instructions for using a crate are in each crate’s documentation. Another neat feature of Cargo is that you can run the `cargo doc --open` command that will build documentation provided @@ -545,9 +549,10 @@ the answer as soon as it starts! 
Try running the program a few times: -```bash +``` $ cargo run Compiling guessing_game v0.1.0 (file:///projects/guessing_game) + Finished dev [unoptimized + debuginfo] target(s) in 2.53 secs Running `target/debug/guessing_game` Guess the number! The secret number is: 7 @@ -573,7 +578,7 @@ step is shown in Listing 2-4: Filename: src/main.rs -```rust,ignore +``` extern crate rand; use std::io; @@ -604,9 +609,8 @@ fn main() { } ``` - -Listing 2-4: Handling the possible return values of comparing two numbers - +Listing 2-4: Handling the possible return values of +comparing two numbers The first new bit here is another `use`, bringing a type called `std::cmp::Ordering` into scope from the standard library. `Ordering` is @@ -616,7 +620,7 @@ compare two values. Then we add five new lines at the bottom that use the `Ordering` type: -```rust,ignore +``` match guess.cmp(&secret_number) { Ordering::Less => println!("Too small!"), Ordering::Greater => println!("Too big!"), @@ -627,7 +631,7 @@ match guess.cmp(&secret_number) { The `cmp` method compares two values and can be called on anything that can be compared. It takes a reference to whatever you want to compare with: here it’s comparing the `guess` to the `secret_number`. `cmp` returns a variant of the -`Ordering` enum we imported with the `use` statement. We use a +`Ordering` enum we brought into scope with the `use` statement. We use a `match` expression to decide what to do next based on which variant of `Ordering` was returned from the call to `cmp` with the values in `guess` and `secret_number`. @@ -638,7 +642,7 @@ expression fits that arm’s pattern. Rust takes the value given to `match` and looks through each arm’s pattern in turn. The `match` construct and patterns are powerful features in Rust that let you express a variety of situations your code might encounter and helps ensure that you handle them all. These features -will be covered in detail in Chapter 6 and Chapter XX, respectively. +will be covered in detail in Chapter 6 and Chapter 18, respectively. Let’s walk through an example of what would happen with the `match` expression used here. Say that the user has guessed 50, and the randomly generated secret @@ -646,7 +650,7 @@ number this time is 38. When the code compares 50 to 38, the `cmp` method will return `Ordering::Greater`, because 50 is greater than 38. `Ordering::Greater` is the value that the `match` expression gets. It looks at the first arm’s pattern, `Ordering::Less`, but the value `Ordering::Greater` does not match -`Ordering::Less`. So it ignores the code in that arm and moves to the next arm. +`Ordering::Less`, so it ignores the code in that arm and moves to the next arm. The next arm’s pattern, `Ordering::Greater`, *does* match `Ordering::Greater`! The associated code in that arm will execute and print `Too big!` to the screen. The `match` expression ends because it has no need to @@ -654,7 +658,7 @@ look at the last arm in this particular scenario. However, the code in Listing 2-4 won’t compile yet. 
Let’s try it: -```bash +``` $ cargo build Compiling guessing_game v0.1.0 (file:///projects/guessing_game) error[E0308]: mismatched types @@ -687,7 +691,7 @@ that by adding the following two lines to the `main` function body: Filename: src/main.rs -```rust,ignore +``` extern crate rand; use std::io; @@ -723,7 +727,7 @@ fn main() { The two new lines are: -```rust,ignore +``` let guess: u32 = guess.trim().parse() .expect("Please type a number!"); ``` @@ -740,11 +744,12 @@ We bind `guess` to the expression `guess.trim().parse()`. The `guess` in the expression refers to the original `guess` that was a `String` with the input in it. The `trim` method on a `String` instance will eliminate any whitespace at the beginning and end. `u32` can only contain numerical characters, but the -user must press the Return key to satisfy `read_line`. When the user presses -Return, a newline character is added to the string. For example, if the user -types 5 and presses return, `guess` looks like this: `5\n`. The `\n` represents -“newline,” the return key. The `trim` method eliminates `\n`, resulting in just -`5`. +user must press the enter key to satisfy +`read_line`. When the user presses enter, a +newline character is added to the string. For example, if the user types 5 and presses enter, +`guess` looks like this: `5\n`. The `\n` represents “newline,” the enter key. +The `trim` method eliminates `\n`, resulting in just `5`. The `parse` method on strings parses a string into some kind of number. Because this method can parse a variety of number types, we @@ -761,7 +766,7 @@ The call to `parse` could easily cause an error. If, for example, the string contained `A👍%`, there would be no way to convert that to a number. Because it might fail, the `parse` method returns a `Result` type, much like the `read_line` method does as discussed earlier in “Handling Potential Failure -with the Result Type” on page XX. We’ll treat this `Result` the same way by +with the Result Type”. We’ll treat this `Result` the same way by using the `expect` method again. If `parse` returns an `Err` `Result` variant because it couldn’t create a number from the string, the `expect` call will crash the game and print the message we give it. If `parse` can successfully @@ -770,9 +775,10 @@ and `expect` will return the number that we want from the `Ok` value. Let’s run the program now! -```bash +``` $ cargo run Compiling guessing_game v0.1.0 (file:///projects/guessing_game) + Finished dev [unoptimized + debuginfo] target(s) in 0.43 secs Running `target/guessing_game` Guess the number! The secret number is: 58 @@ -797,7 +803,7 @@ chances at guessing the number: Filename: src/main.rs -```rust,ignore +``` extern crate rand; use std::io; @@ -839,13 +845,13 @@ program again. Notice that there is a new problem because the program is doing exactly what we told it to do: ask for another guess forever! It doesn’t seem like the user can quit! -The user could always halt the program by using the keyboard shortcut `Ctrl-C`. -But there’s another way to escape this insatiable monster that we mentioned in -the `parse` discussion in “Comparing the Guesses” on page XX: if the user -enters a non-number answer, the program will crash. The user can take advantage -of that in order to quit, as shown here: +The user could always halt the program by using the keyboard shortcut +ctrl-C. 
But there’s another way to escape this +insatiable monster that we mentioned in the `parse` discussion in “Comparing the +Guess to the Secret Number”: if the user enters a non-number answer, the program +will crash. The user can take advantage of that in order to quit, as shown here: -```bash +``` $ cargo run Compiling guessing_game v0.1.0 (file:///projects/guessing_game) Running `target/guessing_game` @@ -880,7 +886,7 @@ Let’s program the game to quit when the user wins by adding a `break`: Filename: src/main.rs -```rust,ignore +``` extern crate rand; use std::io; @@ -930,7 +936,7 @@ the user inputs a non-number, let’s make the game ignore a non-number so the user can continue guessing. We can do that by altering the line where `guess` is converted from a `String` to a `u32`: -```rust,ignore +``` let guess: u32 = match guess.trim().parse() { Ok(num) => num, Err(_) => continue, @@ -962,7 +968,7 @@ might encounter! Now everything in the program should work as expected. Let’s try it by running `cargo run`: -```bash +``` $ cargo run Compiling guessing_game v0.1.0 (file:///projects/guessing_game) Running `target/guessing_game` @@ -991,7 +997,7 @@ secret number. Listing 2-5 shows the final code: Filename: src/main.rs -```rust,ignore +``` extern crate rand; use std::io; @@ -1030,9 +1036,7 @@ fn main() { } ``` - Listing 2-5: Complete code of the guessing game - ## Summary diff --git a/src/doc/book/second-edition/nostarch/chapter03.md b/src/doc/book/second-edition/nostarch/chapter03.md index 04fde81d07..7905cbc20b 100644 --- a/src/doc/book/second-edition/nostarch/chapter03.md +++ b/src/doc/book/second-edition/nostarch/chapter03.md @@ -12,19 +12,15 @@ Specifically, you’ll learn about variables, basic types, functions, comments, and control flow. These foundations will be in every Rust program, and learning them early will give you a strong core to start from. -PROD: START BOX - -### Keywords - -The Rust language has a set of *keywords* that have been reserved for use by -the language only, much like other languages do. Keep in mind that you cannot -use these words as names of variables or functions. Most of the keywords have -special meanings, and you’ll be using them to do various tasks in your Rust -programs; a few have no current functionality associated with them but have -been reserved for functionality that might be added to Rust in the future. You -can find a list of the keywords in Appendix A. - -PROD: END BOX +> ### Keywords +> +> The Rust language has a set of *keywords* that have been reserved for use by +> the language only, much like other languages do. Keep in mind that you cannot +> use these words as names of variables or functions. Most of the keywords have +> special meanings, and you’ll be using them to do various tasks in your Rust +> programs; a few have no current functionality associated with them but have +> been reserved for functionality that might be added to Rust in the future. You +> can find a list of the keywords in Appendix A. 
## Variables and Mutability @@ -43,7 +39,7 @@ code with the following: Filename: src/main.rs -```rust,ignore +``` fn main() { let x = 5; println!("The value of x is: {}", x); @@ -97,7 +93,7 @@ For example, change *src/main.rs* to the following: Filename: src/main.rs -```rust +``` fn main() { let mut x = 5; println!("The value of x is: {}", x); @@ -108,9 +104,10 @@ fn main() { When we run this program, we get the following: -```bash +``` $ cargo run Compiling variables v0.1.0 (file:///projects/variables) + Finished dev [unoptimized + debuginfo] target(s) in 0.30 secs Running `target/debug/variables` The value of x is: 5 The value of x is: 6 @@ -161,7 +158,7 @@ const MAX_POINTS: u32 = 100_000; Constants are valid for the entire time a program runs, within the scope they were declared in, making them a useful choice for values in your application -domain that multiple part of the program might need to know about, such as the +domain that multiple parts of the program might need to know about, such as the maximum number of points any player of a game is allowed to earn or the speed of light. @@ -172,8 +169,8 @@ hardcoded value needed to be updated in the future. ### Shadowing -As we saw in the guessing game tutorial in Chapter 2, we can declare new -variables with the same name as a previous variables, and the new variable +As we saw in the guessing game tutorial in Chapter 2, we can declare a new +variable with the same name as a previous variable, and the new variable *shadows* the previous variable. Rustaceans say that the first variable is *shadowed* by the second, which means that the second variable’s value is what we’ll see when we use the variable. We can shadow a variable by using the same @@ -181,7 +178,7 @@ variable’s name and repeating the use of the `let` keyword as follows: Filename: src/main.rs -```rust +``` fn main() { let x = 5; @@ -199,9 +196,10 @@ repeating `let x =`, taking the original value and adding `1` so the value of previous value and multiplying it by `2` to give `x` a final value of `12`. When you run this program, it will output the following: -```bash +``` $ cargo run Compiling variables v0.1.0 (file:///projects/variables) + Finished dev [unoptimized + debuginfo] target(s) in 0.31 secs Running `target/debug/variables` The value of x is: 12 ``` @@ -217,7 +215,7 @@ change the type of the value, but reuse the same name. For example, say our program asks a user to show how many spaces they want between some text by inputting space characters, but we really want to store that input as a number: -```rust +``` let spaces = " "; let spaces = spaces.len(); ``` @@ -229,7 +227,7 @@ from having to come up with different names, like `spaces_str` and `spaces_num`; instead, we can reuse the simpler `spaces` name. 
However, if we try to use `mut` for this, as shown here: -```rust,ignore +``` let mut spaces = " "; spaces = spaces.len(); ``` @@ -237,7 +235,7 @@ spaces = spaces.len(); we’ll get a compile-time error because we’re not allowed to mutate a variable’s type: -```bash +``` error[E0308]: mismatched types --> src/main.rs:3:14 | @@ -273,14 +271,15 @@ If we don’t add the type annotation here, Rust will display the following error, which means the compiler needs more information from us to know which possible type we want to use: -```bash -error[E0282]: unable to infer enough type information about `_` +``` +error[E0282]: type annotations needed --> src/main.rs:2:9 | 2 | let guess = "42".parse().expect("Not a number!"); - | ^^^^^ cannot infer type for `_` - | - = note: type annotations or generic parameter binding required + | ^^^^^ + | | + | cannot infer type for `_` + | consider giving `guess` a type ``` You’ll see different type annotations as we discuss the various data types. @@ -295,16 +294,14 @@ work in Rust. #### Integer Types An *integer* is a number without a fractional component. We used one integer -type earlier in this chapter, the `i32` type. This type declaration indicates -that the value it’s associated with should be a signed integer (hence the `i`, -as opposed to a `u` for unsigned) that takes up 32 bits of space. Table 3-1 -shows the built-in integer types in Rust. Each variant in the Signed and -Unsigned columns (for example, *i32*) can be used to declare the type of an +type earlier in this chapter, the `u32` type. This type declaration indicates +that the value it’s associated with should be an unsigned integer (signed +integer types start with `i` instead of `u`) that takes up 32 bits of space. +Table 3-1 shows the built-in integer types in Rust. Each variant in the Signed +and Unsigned columns (for example, *i16*) can be used to declare the type of an integer value. - Table 3-1: Integer Types in Rust - | Length | Signed | Unsigned | |--------|--------|----------| @@ -325,11 +322,11 @@ Signed numbers are stored using two’s complement representation (if you’re unsure what this is, you can search for it online; an explanation is outside the scope of this book). -Each signed variant can store numbers from -2n - 1 to 2n - 1 - 1 inclusive, -where `n` is the number of bits that variant uses. So an `i8` can store numbers -from -27 to 27 - 1, which equals -128 to 127. Unsigned variants can store -numbers from 0 to 2n - 1, so a `u8` can store numbers from 0 to 28 - 1, which -equals 0 to 255. +Each signed variant can store numbers from -(2n - 1) to 2n - +1 - 1 inclusive, where `n` is the number of bits that variant uses. So an +`i8` can store numbers from -(27) to 27 - 1, which equals +-128 to 127. Unsigned variants can store numbers from 0 to 2n - 1, +so a `u8` can store numbers from 0 to 28 - 1, which equals 0 to 255. Additionally, the `isize` and `usize` types depend on the kind of computer your program is running on: 64-bits if you’re on a 64-bit architecture and 32-bits @@ -339,9 +336,7 @@ You can write integer literals in any of the forms shown in Table 3-2. Note that all number literals except the byte literal allow a type suffix, such as `57u8`, and `_` as a visual separator, such as `1_000`. - Table 3-2: Integer Literals in Rust - | Number literals | Example | |------------------|---------------| @@ -361,18 +356,14 @@ you’d use `isize` or `usize` is when indexing some sort of collection. 
Rust also has two primitive types for *floating-point numbers*, which are numbers with decimal points. Rust’s floating-point types are `f32` and `f64`, which are 32 bits and 64 bits in size, respectively. The default type is `f64` -because it’s roughly the same speed as `f32` but is capable of more precision. -It’s possible to use an `f64` type on 32-bit systems, but it will be slower -than using an `f32` type on those systems. Most of the time, trading potential -worse performance for better precision is a reasonable initial choice, and you -should benchmark your code if you suspect floating-point size is a problem in -your situation. +because on modern CPUs it’s roughly the same speed as `f32` but is capable of +more precision. Here’s an example that shows floating-point numbers in action: Filename: src/main.rs -```rust +``` fn main() { let x = 2.0; // f64 @@ -385,13 +376,13 @@ Floating-point numbers are represented according to the IEEE-754 standard. The #### Numeric Operations -Rust supports the usual basic mathematic operations you’d expect for all of the +Rust supports the usual basic mathematical operations you’d expect for all of the number types: addition, subtraction, multiplication, division, and remainder. The following code shows how you’d use each one in a `let` statement: Filename: src/main.rs -```rust +``` fn main() { // addition let sum = 5 + 10; @@ -422,7 +413,7 @@ For example: Filename: src/main.rs -```rust +``` fn main() { let t = true; @@ -431,18 +422,19 @@ fn main() { ``` The main way to consume boolean values is through conditionals, such as an `if` -statement. We’ll cover how `if` statements work in Rust in the “Control Flow” +expression. We’ll cover how `if` expressions work in Rust in the “Control Flow” section. #### The Character Type So far we’ve only worked with numbers, but Rust supports letters too. Rust’s `char` type is the language’s most primitive alphabetic type, and the following -code shows one way to use it: +code shows one way to use it. Note that the `char` type is specified with +single quotes, as opposed to strings that use double quotes: Filename: src/main.rs -```rust +``` fn main() { let c = 'z'; let z = 'ℤ'; @@ -476,7 +468,7 @@ type annotations in this example: Filename: src/main.rs -```rust +``` fn main() { let tup: (i32, f64, u8) = (500, 6.4, 1); } @@ -488,7 +480,7 @@ use pattern matching to destructure a tuple value, like this: Filename: src/main.rs -```rust +``` fn main() { let tup = (500, 6.4, 1); @@ -510,7 +502,7 @@ value we want to access. For example: Filename: src/main.rs -```rust +``` fn main() { let x: (i32, f64, u8) = (500, 6.4, 1); @@ -538,7 +530,7 @@ inside square brackets: Filename: src/main.rs -```rust +``` fn main() { let a = [1, 2, 3, 4, 5]; } @@ -557,7 +549,7 @@ program that needs to know the names of the months of the year. It’s very unlikely that such a program will need to add or remove months, so you can use an array because you know it will always contain 12 items: -```rust +``` let months = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]; ``` @@ -569,7 +561,7 @@ elements of an array using indexing, like this: Filename: src/main.rs -```rust +``` fn main() { let a = [1, 2, 3, 4, 5]; @@ -589,7 +581,7 @@ the array? 
Say we change the example to the following: Filename: src/main.rs -```rust,ignore +``` fn main() { let a = [1, 2, 3, 4, 5]; let index = 10; @@ -602,9 +594,10 @@ fn main() { Running this code using `cargo run` produces the following result: -```bash +``` $ cargo run Compiling arrays v0.1.0 (file:///projects/arrays) + Finished dev [unoptimized + debuginfo] target(s) in 0.31 secs Running `target/debug/arrays` thread '
' panicked at 'index out of bounds: the len is 5 but the index is 10', src/main.rs:6 @@ -636,7 +629,7 @@ Here’s a program that contains an example function definition: Filename: src/main.rs -```rust +``` fn main() { println!("Hello, world!"); @@ -649,8 +642,8 @@ fn another_function() { ``` Function definitions in Rust start with `fn` and have a set of parentheses -after the function name. The curly braces tell the compiler where the function -body begins and ends. +after the function name. The curly brackets tell the compiler where the +function body begins and ends. We can call any function we’ve defined by entering its name followed by a set of parentheses. Because `another_function` is defined in the program, it can be @@ -663,9 +656,10 @@ Let’s start a new binary project named *functions* to explore functions further. Place the `another_function` example in *src/main.rs* and run it. You should see the following output: -```bash +``` $ cargo run Compiling functions v0.1.0 (file:///projects/functions) + Finished dev [unoptimized + debuginfo] target(s) in 0.28 secs Running `target/debug/functions` Hello, world! Another function. @@ -690,7 +684,7 @@ look like in Rust: Filename: src/main.rs -```rust +``` fn main() { another_function(5); } @@ -702,16 +696,17 @@ fn another_function(x: i32) { Try running this program; you should get the following output: -```bash +``` $ cargo run Compiling functions v0.1.0 (file:///projects/functions) + Finished dev [unoptimized + debuginfo] target(s) in 1.21 secs Running `target/debug/functions` The value of x is: 5 ``` The declaration of `another_function` has one parameter named `x`. The type of `x` is specified as `i32`. When `5` is passed to `another_function`, the -`println!` macro puts `5` where the pair of curly braces were in the format +`println!` macro puts `5` where the pair of curly brackets were in the format string. In function signatures, you *must* declare the type of each parameter. This is @@ -724,7 +719,7 @@ declarations with commas, like this: Filename: src/main.rs -```rust +``` fn main() { another_function(5, 6); } @@ -744,9 +739,10 @@ Let’s try running this code. Replace the program currently in your *function* project’s *src/main.rs* file with the preceding example, and run it using `cargo run`: -```bash +``` $ cargo run Compiling functions v0.1.0 (file:///projects/functions) + Finished dev [unoptimized + debuginfo] target(s) in 0.31 secs Running `target/debug/functions` The value of x is: 5 The value of y is: 6 @@ -776,15 +772,13 @@ statement. In Listing 3-3, `let y = 6;` is a statement: Filename: src/main.rs -```rust +``` fn main() { let y = 6; } ``` - Listing 3-3: A `main` function declaration containing one statement. - Function definitions are also statements; the entire preceding example is a statement in itself. @@ -794,7 +788,7 @@ to another variable, as the following code tries to do: Filename: src/main.rs -```rust,ignore +``` fn main() { let x = (let y = 6); } @@ -802,7 +796,7 @@ fn main() { When you run this program, you’ll get an error like this: -```bash +``` $ cargo run Compiling functions v0.1.0 (file:///projects/functions) error: expected expression, found statement (`let`) @@ -830,7 +824,7 @@ new scopes, `{}`, is an expression, for example: Filename: src/main.rs -```rust +``` fn main() { let x = 5; @@ -845,7 +839,7 @@ fn main() { This expression: -```rust,ignore +``` { let x = 3; x + 1 @@ -853,23 +847,25 @@ This expression: ``` is a block that, in this case, evaluates to `4`. 
That value gets bound to `y` -as part of the `let` statement. Note the line without a semicolon at the end, -unlike most of the lines you’ve seen so far. Expressions do not include ending -semicolons. If you add a semicolon to the end of an expression, you turn it -into a statement, which will then not return a value. Keep this in mind as you -explore function return values and expressions next. +as part of the `let` statement. Note the `x + 1` line without a semicolon at +the end, unlike most of the lines you’ve seen so far. Expressions do not +include ending semicolons. If you add a semicolon to the end of an expression, +you turn it into a statement, which will then not return a value. Keep this in +mind as you explore function return values and expressions next. ### Functions with Return Values Functions can return values to the code that calls them. We don’t name return values, but we do declare their type after an arrow (`->`). In Rust, the return value of the function is synonymous with the value of the final expression in -the block of the body of a function. Here’s an example of a function that -returns a value: +the block of the body of a function. You can return early from a function by +using the `return` keyword and specifying a value, but most functions return +the last expression implicitly. Here’s an example of a function that returns a +value: Filename: src/main.rs -```rust +``` fn five() -> i32 { 5 } @@ -886,9 +882,10 @@ function—just the number `5` by itself. That’s a perfectly valid function in Rust. Note that the function’s return type is specified, too, as `-> i32`. Try running this code; the output should look like this: -```bash +``` $ cargo run Compiling functions v0.1.0 (file:///projects/functions) + Finished dev [unoptimized + debuginfo] target(s) in 0.30 secs Running `target/debug/functions` The value of x is: 5 ``` @@ -899,7 +896,7 @@ first, the line `let x = five();` shows that we’re using the return value of a function to initialize a variable. Because the function `five` returns a `5`, that line is the same as the following: -```rust +``` let x = 5; ``` @@ -910,7 +907,7 @@ example: Filename: src/main.rs -```rust +``` fn main() { let x = plus_one(5); @@ -928,7 +925,7 @@ expression to a statement? Filename: src/main.rs -```rust,ignore +``` fn main() { let x = plus_one(5); @@ -947,18 +944,14 @@ error[E0308]: mismatched types --> src/main.rs:7:28 | 7 | fn plus_one(x: i32) -> i32 { - | ____________________________^ starting here... + | ____________________________^ 8 | | x + 1; + | | - help: consider removing this semicolon 9 | | } - | |_^ ...ending here: expected i32, found () + | |_^ expected i32, found () | = note: expected type `i32` found type `()` -help: consider removing this semicolon: - --> src/main.rs:8:10 - | -8 | x + 1; - | ^ ``` The main error message, “mismatched types,” reveals the core issue with this @@ -978,7 +971,7 @@ reading the source code may find useful. Here’s a simple comment: -```rust +``` // Hello, world. ``` @@ -986,7 +979,7 @@ In Rust, comments must start with two slashes and continue until the end of the line. For comments that extend beyond a single line, you’ll need to include `//` on each line, like this: -```rust +``` // So we’re doing something complicated here, long enough that we need // multiple lines of comments to do it! Whew! Hopefully, this comment will // explain what’s going on. 
@@ -996,7 +989,7 @@ Comments can also be placed at the end of lines containing code: Filename: src/main.rs -```rust +``` fn main() { let lucky_number = 7; // I’m feeling lucky today. } @@ -1007,14 +1000,15 @@ separate line above the code it’s annotating: Filename: src/main.rs -```rust +``` fn main() { // I’m feeling lucky today. let lucky_number = 7; } ``` -That’s all there is to comments. They’re not particularly complicated. +Rust also has another kind of comment, documentation comments, which we’ll +discuss in Chapter 14. ## Control Flow @@ -1035,7 +1029,7 @@ the `if` expression. In the *src/main.rs* file, input the following: Filename: src/main.rs -```rust +``` fn main() { let number = 3; @@ -1047,24 +1041,27 @@ fn main() { } ``` + + All `if` expressions start with the keyword `if`, which is followed by a condition. In this case, the condition checks whether or not the variable `number` has a value less than 5. The block of code we want to execute if the condition is true is placed immediately after the condition inside curly -braces. Blocks of code associated with the conditions in `if` expressions are +brackets. Blocks of code associated with the conditions in `if` expressions are sometimes called *arms*, just like the arms in `match` expressions that we -discussed in the “Comparing the Guess to the Secret Number” section of Chapter -2. Optionally, we can also include an `else` expression, which we chose to do -here, to give the program an alternative block of code to execute should the -condition evaluate to false. If you don’t provide an `else` expression and the -condition is false, the program will just skip the `if` block and move on to -the next bit of code. +discussed in the “Comparing the Guess to the Secret Number” section of +Chapter 2. Optionally, we can also include an `else` expression, which we chose +to do here, to give the program an alternative block of code to execute should +the condition evaluate to false. If you don’t provide an `else` expression and +the condition is false, the program will just skip the `if` block and move on +to the next bit of code. Try running this code; you should see the following output: -```bash +``` $ cargo run Compiling branches v0.1.0 (file:///projects/branches) + Finished dev [unoptimized + debuginfo] target(s) in 0.31 secs Running `target/debug/branches` condition was true ``` @@ -1072,15 +1069,16 @@ condition was true Let’s try changing the value of `number` to a value that makes the condition `false` to see what happens: -```rust,ignore +``` let number = 7; ``` Run the program again, and look at the output: -```bash +``` $ cargo run Compiling branches v0.1.0 (file:///projects/branches) + Finished dev [unoptimized + debuginfo] target(s) in 0.31 secs Running `target/debug/branches` condition was false ``` @@ -1091,7 +1089,7 @@ code: Filename: src/main.rs -```rust,ignore +``` fn main() { let number = 3; @@ -1124,7 +1122,7 @@ expression to the following: Filename: src/main.rs -```rust +``` fn main() { let number = 3; @@ -1143,7 +1141,7 @@ expression. For example: Filename: src/main.rs -```rust +``` fn main() { let number = 6; @@ -1162,9 +1160,10 @@ fn main() { This program has four possible paths it can take. 
After running it, you should see the following output: -```bash +``` $ cargo run Compiling branches v0.1.0 (file:///projects/branches) + Finished dev [unoptimized + debuginfo] target(s) in 0.31 secs Running `target/debug/branches` number is divisible by 3 ``` @@ -1187,7 +1186,7 @@ statement, for instance in Listing 3-4: Filename: src/main.rs -```rust +``` fn main() { let condition = true; let number = if condition { @@ -1200,16 +1199,16 @@ fn main() { } ``` - -Listing 3-4: Assigning the result of an `if` expression to a variable - +Listing 3-4: Assigning the result of an `if` expression +to a variable The `number` variable will be bound to a value based on the outcome of the `if` expression. Run this code to see what happens: -```bash +``` $ cargo run Compiling branches v0.1.0 (file:///projects/branches) + Finished dev [unoptimized + debuginfo] target(s) in 0.30 secs Running `target/debug/branches` The value of number is: 5 ``` @@ -1224,7 +1223,7 @@ the following example? Filename: src/main.rs -```rust,ignore +``` fn main() { let condition = true; @@ -1247,15 +1246,15 @@ error[E0308]: if and else have incompatible types --> src/main.rs:4:18 | 4 | let number = if condition { - | __________________^ starting here... + | __________________^ 5 | | 5 6 | | } else { 7 | | "six" 8 | | }; - | |_____^ ...ending here: expected integral variable, found reference + | |_____^ expected integral variable, found reference | = note: expected type `{integer}` - found type `&'static str` + found type `&str` ``` The expression in the `if` block evaluates to an integer, and the expression in @@ -1286,7 +1285,7 @@ like this: Filename: src/main.rs -```rust,ignore +``` fn main() { loop { println!("again!"); @@ -1296,11 +1295,13 @@ fn main() { When we run this program, we’ll see `again!` printed over and over continuously until we stop the program manually. Most terminals support a keyboard shortcut, - ctrl-C, to halt a program that is stuck in a continual loop. Give it a try: +ctrl-C, to halt a program that is stuck in a +continual loop. Give it a try: -```bash +``` $ cargo run Compiling loops v0.1.0 (file:///projects/loops) + Finished dev [unoptimized + debuginfo] target(s) in 0.29 secs Running `target/debug/loops` again! again! @@ -1309,9 +1310,9 @@ again! ^Cagain! ``` -The symbol `^C` represents where you pressed ctrl-C. You may or may not see the -word `again!` printed after the `^C`, depending on where the code was in the -loop when it received the halt signal. +The symbol `^C` represents where you pressed ctrl-C +. You may or may not see the word `again!` printed after the `^C`, +depending on where the code was in the loop when it received the halt signal. Fortunately, Rust provides another, more reliable way to break out of a loop. You can place the `break` keyword within the loop to tell the program when to @@ -1334,11 +1335,11 @@ prints another message and exits: Filename: src/main.rs -```rust +``` fn main() { let mut number = 3; - while number != 0 { + while number != 0 { println!("{}!", number); number = number - 1; @@ -1355,11 +1356,11 @@ true, the code runs; otherwise, it exits the loop. #### Looping Through a Collection with `for` You could use the `while` construct to loop over the elements of a collection, -such as an array. For example: +such as an array. 
For example, let’s look at Listing 3-5: Filename: src/main.rs -```rust +``` fn main() { let a = [10, 20, 30, 40, 50]; let mut index = 0; @@ -1372,18 +1373,18 @@ fn main() { } ``` - -Listing 3-5: Looping through each element of a collection using a `while` loop - +Listing 3-5: Looping through each element of a collection +using a `while` loop Here, the code counts up through the elements in the array. It starts at index `0`, and then loops until it reaches the final index in the array (that is, when `index < 5` is no longer true). Running this code will print out every element in the array: -```bash +``` $ cargo run Compiling loops v0.1.0 (file:///projects/loops) + Finished dev [unoptimized + debuginfo] target(s) in 0.32 secs Running `target/debug/loops` the value is: 10 the value is: 20 @@ -1402,11 +1403,11 @@ code to perform the conditional check on every element on every iteration through the loop. As a more efficient alternative, you can use a `for` loop and execute some code -for each item in a collection. A `for` loop looks like this: +for each item in a collection. A `for` loop looks like this code in Listing 3-6: Filename: src/main.rs -```rust +``` fn main() { let a = [10, 20, 30, 40, 50]; @@ -1416,9 +1417,8 @@ fn main() { } ``` - -Listing 3-6: Looping through each element of a collection using a `for` loop - +Listing 3-6: Looping through each element of a collection +using a `for` loop When we run this code, we’ll see the same output as in Listing 3-5. More importantly, we’ve now increased the safety of the code and eliminated the @@ -1443,7 +1443,7 @@ we’ve not yet talked about, `rev`, to reverse the range: Filename: src/main.rs -```rust +``` fn main() { for number in (1..4).rev() { println!("{}!", number); diff --git a/src/doc/book/second-edition/nostarch/chapter05.md b/src/doc/book/second-edition/nostarch/chapter05.md index 0e766dfdf6..1cdde1247c 100644 --- a/src/doc/book/second-edition/nostarch/chapter05.md +++ b/src/doc/book/second-edition/nostarch/chapter05.md @@ -8,10 +8,10 @@ together multiple related values that make up a meaningful group. If you’re familiar with an object-oriented language, a *struct* is like an object’s data attributes. In this chapter, we’ll compare and contrast tuples with structs, demonstrate how to use structs, and discuss how to define methods and -associated functions on structs to specify behavior associated with a struct’s -data. The struct and *enum* (which is discussed in Chapter 6) concepts are the -building blocks for creating new types in your program’s domain to take full -advantage of Rust’s compile time type checking. +associated functions to specify behavior associated with a struct’s data. The +struct and *enum* (which is discussed in Chapter 6) concepts are the building +blocks for creating new types in your program’s domain to take full advantage +of Rust’s compile time type checking. ## Defining and Instantiating Structs @@ -61,9 +61,9 @@ Listing 5-2: Creating an instance of the `User` struct To get a specific value from a struct, we can use dot notation. If we wanted just this user’s email address, we can use `user1.email` wherever we want to -use this value. To change a value in a struct, if the instance is mutable, we -can use the dot notation and assign into a particular field. Listing 5-3 shows -how to change the value in the `email` field of a mutable `User` instance: +use this value. If the instance is mutable, we can change a value by using the +dot notation and assigning into a particular field. 
Listing 5-3 shows how to +change the value in the `email` field of a mutable `User` instance: ``` let mut user1 = User { @@ -78,11 +78,14 @@ user1.email = String::from("anotheremail@example.com"); Listing 5-3: Changing the value in the `email` field of a `User` instance -Like any expression, we can implicitly return a new instance of a struct from a -function by constructing the new instance as the last expression in the -function body. Listing 5-4 shows a `build_user` function that returns a `User` -instance with the given `email` and `username`. The `active` field gets the -value of `true`, and the `sign_in_count` gets a value of `1`. +Note that the entire instance must be mutable; Rust doesn’t allow us to mark +only certain fields as mutable. Also note that with any expression, we can +construct a new instance of the struct as the last expression in the function +body to implicitly return that new instance. + +Listing 5-4 shows a `build_user` function that returns a `User` instance with +the given email and username. The `active` field gets the value of `true`, and +the `sign_in_count` gets a value of `1`. ``` fn build_user(email: String, username: String) -> User { @@ -98,24 +101,17 @@ fn build_user(email: String, username: String) -> User { Listing 5-4: A `build_user` function that takes an email and username and returns a `User` instance -Repeating the `email` field name and `email` variable, and the same for -`username`, is a bit tedious, though. It makes sense to name the function -arguments with the same name as the struct fields, but if the struct had more -fields, repeating each name would get even more annoying. Luckily, there's a -convenient shorthand! - -### Field Init Shorthand when Variables Have the Same Name as Fields +It makes sense to name the function arguments with the same name as the struct +fields, but having to repeat the `email` and `username` field names and +variables is a bit tedious. If the struct had more fields, repeating each name +would get even more annoying. Luckily, there's a convenient shorthand! -If you have variables with the same names as struct fields, you can use *field -init shorthand*. This can make functions that create new instances of structs -more concise. +### Using the Field Init Shorthand when Variables and Fields Have the Same Name -In Listing 5-4, the parameter names `email` and `username` are the same as the -`User` struct’s field names `email` and `username`. Because the names are -exactly the same, we can write `build_user` without the repetition of `email` -and `username` as shown in Listing 5-5. This version of `build_user` behaves -the same way as the one in Listing 5-4. The field init syntax can make cases -like this shorter to write, especially when structs have many fields. +Because the parameter names and the struct field names are exactly the same in +Listing 5-4, we can use the *field init shorthand* syntax to rewrite +`build_user` so that it behaves exactly the same but doesn’t have the +repetition of `email` and `username` in the way shown in Listing 5-5. ``` fn build_user(email: String, username: String) -> User { @@ -128,16 +124,23 @@ fn build_user(email: String, username: String) -> User { } ``` -Listing 5-5: A `build_user` function that uses field init syntax since the +Listing 5-5: A `build_user` function that uses field init shorthand since the `email` and `username` parameters have the same name as struct fields +Here, we’re creating a new instance of the `User` struct, which has a field +named `email`. 
We want to set the `email` field’s value to the value in the +`email` parameter of the `build_user` function. Because the `email` field and +the `email` parameter have the same name, we only need to write `email` rather +than `email: email`. + ### Creating Instances From Other Instances With Struct Update Syntax -It’s often useful to create a new instance from an old instance, using most of -the old instance’s values but changing some. Listing 5-6 shows an example of -creating a new `User` instance in `user2` by setting the values of `email` and -`username` but using the same values for the rest of the fields from the -`user1` instance we created in Listing 5-2: +It’s often useful to create a new instance of a struct that uses most of an old +instance’s values, but changes some. We do this using *struct update syntax*. + +First, Listing 5-6 shows how we create a new `User` instance in `user2` without +the update syntax. We set new values for `email` and `username`, but otherwise +use the same values from `user1` that we created in Listing 5-2: ``` let user2 = User { @@ -148,15 +151,12 @@ let user2 = User { }; ``` -Listing 5-6: Creating a new `User` instance, `user2`, and setting some fields -to the values of the same fields from `user1` +Listing 5-6: Creating a new `User` instance using some of the values from +`user1` -The *struct update syntax* achieves the same effect as the code in Listing 5-6 -using less code. The struct update syntax uses `..` to specify that the -remaining fields not set explicitly should have the same value as the fields in -the given instance. The code in Listing 5-7 also creates an instance in `user2` -that has a different value for `email` and `username` but has the same values -for the `active` and `sign_in_count` fields that `user1` has: +Using struct update syntax, we can achieve the same effect with less code, +shown in Listing 5-7. The syntax `..` specifies that the remaining fields not +explicitly set should have the same value as the fields in the given instance. ``` let user2 = User { @@ -170,14 +170,22 @@ Listing 5-7: Using struct update syntax to set a new `email` and `username` values for a `User` instance but use the rest of the values from the fields of the instance in the `user1` variable +The code in Listing 5-7 also creates an instance in `user2` that has a +different value for `email` and `username` but has the same values for the +`active` and `sign_in_count` fields from `user1`. + ### Tuple Structs without Named Fields to Create Different Types We can also define structs that look similar to tuples, called *tuple structs*, that have the added meaning the struct name provides, but don’t have names -associated with their fields, just the types of the fields. The definition of a -tuple struct still starts with the `struct` keyword and the struct name, which -are followed by the types in the tuple. For example, here are definitions and -usages of tuple structs named `Color` and `Point`: +associated with their fields, just the types of the fields. Tuple structs are +useful when you want to give the whole tuple a name and make the tuple be a +different type than other tuples, but naming each field as in a regular struct +would be verbose or redundant. + +To define a tuple struct you start with the `struct` keyword and the struct +name followed by the types in the tuple. 
For example, here are definitions and +usages of two tuple structs named `Color` and `Point`: ``` struct Color(i32, i32, i32); @@ -189,8 +197,12 @@ let origin = Point(0, 0, 0); Note that the `black` and `origin` values are different types, since they’re instances of different tuple structs. Each struct we define is its own type, -even though the fields within the struct have the same types. Otherwise, tuple -struct instances behave like tuples, which we covered in Chapter 3. +even though the fields within the struct have the same types. For example, a +function that takes a parameter of type `Color` cannot take a `Point` as an +argument, even though both types are made up of three `i32` values. Otherwise, +tuple struct instances behave like tuples, which we covered in Chapter 3: you +can destructure them into their individual pieces, you can use a `.` followed +by the index to access an individual value, and so on. ### Unit-Like Structs without Any Fields @@ -204,10 +216,10 @@ PROD: START BOX ### Ownership of Struct Data -In the `User` struct definition in Listing 5-1, we used the owned `String` -type rather than the `&str` string slice type. This is a deliberate choice -because we want instances of this struct to own all of its data and for that -data to be valid for as long as the entire struct is valid. +In the `User` struct definition in Listing 5-1, we used the owned `String` type +rather than the `&str` string slice type. This is a deliberate choice because +we want instances of this struct to own all of its data and for that data to be +valid for as long as the entire struct is valid. It’s possible for structs to store references to data owned by something else, but to do so requires the use of *lifetimes*, a Rust feature that is discussed @@ -251,8 +263,8 @@ error[E0106]: missing lifetime specifier | ^ expected lifetime parameter ``` -We’ll discuss how to fix these errors so you can store references in structs -in Chapter 10, but for now, we’ll fix errors like these using owned types like +We’ll discuss how to fix these errors so you can store references in structs in +Chapter 10, but for now, we’ll fix errors like these using owned types like `String` instead of references like `&str`. PROD: END BOX @@ -264,7 +276,7 @@ calculates the area of a rectangle. We’ll start with single variables, and the refactor the program until we’re using structs instead. Let’s make a new binary project with Cargo called *rectangles* that will take -the length and width of a rectangle specified in pixels and will calculate the +the width and height of a rectangle specified in pixels and will calculate the area of the rectangle. Listing 5-8 shows a short program with one way of doing just that in our project’s *src/main.rs*: @@ -272,22 +284,22 @@ Filename: src/main.rs ``` fn main() { - let length1 = 50; let width1 = 30; + let height1 = 50; println!( "The area of the rectangle is {} square pixels.", - area(length1, width1) + area(width1, height1) ); } -fn area(length: u32, width: u32) -> u32 { - length * width +fn area(width: u32, height: u32) -> u32 { + width * height } ``` -Listing 5-8: Calculating the area of a rectangle specified by its length and -width in separate variables +Listing 5-8: Calculating the area of a rectangle specified by its width and +height in separate variables Now, run this program using `cargo run`: @@ -298,20 +310,20 @@ The area of the rectangle is 1500 square pixels. 
### Refactoring with Tuples Even though Listing 5-8 works and figures out the area of the rectangle by -calling the `area` function with each dimension, we can do better. The length -and the width are related to each other because together they describe one +calling the `area` function with each dimension, we can do better. The width +and the height are related to each other because together they describe one rectangle. The issue with this method is evident in the signature of `area`: ``` -fn area(length: u32, width: u32) -> u32 { +fn area(width: u32, height: u32) -> u32 { ``` The `area` function is supposed to calculate the area of one rectangle, but the function we wrote has two parameters. The parameters are related, but that’s not expressed anywhere in our program. It would be more readable and more -manageable to group length and width together. We’ve already discussed one way +manageable to group width and height together. We’ve already discussed one way we might do that in the Grouping Values into Tuples section of Chapter 3 on page XX: by using tuples. Listing 5-9 shows another version of our program that uses tuples: @@ -320,7 +332,7 @@ Filename: src/main.rs ``` fn main() { - let rect1 = (50, 30); + let rect1 = (30, 50); println!( "The area of the rectangle is {} square pixels.", @@ -333,16 +345,16 @@ fn area(dimensions: (u32, u32)) -> u32 { } ``` -Listing 5-8: Specifying the length and width of the rectangle with a tuple +Listing 5-8: Specifying the width and height of the rectangle with a tuple In one way, this program is better. Tuples let us add a bit of structure, and we’re now passing just one argument. But in another way this version is less clear: tuples don’t name their elements, so our calculation has become more confusing because we have to index into the parts of the tuple. -It doesn’t matter if we mix up length and width for the area calculation, but +It doesn’t matter if we mix up width and height for the area calculation, but if we want to draw the rectangle on the screen, it would matter! We would have -to keep in mind that `length` is the tuple index `0` and `width` is the tuple +to keep in mind that `width` is the tuple index `0` and `height` is the tuple index `1`. If someone else worked on this code, they would have to figure this out and keep it in mind as well. It would be easy to forget or mix up these values and cause errors, because we haven’t conveyed the meaning of our data in @@ -358,12 +370,12 @@ Filename: src/main.rs ``` struct Rectangle { - length: u32, width: u32, + height: u32, } fn main() { - let rect1 = Rectangle { length: 50, width: 30 }; + let rect1 = Rectangle { width: 30, height: 50 }; println!( "The area of the rectangle is {} square pixels.", @@ -372,16 +384,16 @@ fn main() { } fn area(rectangle: &Rectangle) -> u32 { - rectangle.length * rectangle.width + rectangle.width * rectangle.height } ``` Listing 5-10: Defining a `Rectangle` struct Here we’ve defined a struct and named it `Rectangle`. Inside the `{}` we -defined the fields as `length` and `width`, both of which have type `u32`. Then -in `main` we create a particular instance of a `Rectangle` that has a length of -50 and a width of 30. +defined the fields as `width` and `height`, both of which have type `u32`. Then +in `main` we create a particular instance of a `Rectangle` that has a width of +30 and a height of 50. 
Our `area` function is now defined with one parameter, which we’ve named `rectangle`, whose type is an immutable borrow of a struct `Rectangle` @@ -390,10 +402,10 @@ take ownership of it. This way, `main` retains its ownership and can continue using `rect1`, which is the reason we use the `&` in the function signature and where we call the function. -The `area` function accesses the `length` and `width` fields of the `Rectangle` +The `area` function accesses the `width` and `height` fields of the `Rectangle` instance. Our function signature for `area` now indicates exactly what we mean: -calculate the area of a `Rectangle` using its `length` and `width` fields. This -conveys that the length and width are related to each other, and gives +calculate the area of a `Rectangle` using its `width` and `height` fields. This +conveys that the width and height are related to each other, and gives descriptive names to the values rather than using the tuple index values of `0` and `1`—a win for clarity. @@ -408,12 +420,12 @@ Filename: src/main.rs ``` struct Rectangle { - length: u32, width: u32, + height: u32, } fn main() { - let rect1 = Rectangle { length: 50, width: 30 }; + let rect1 = Rectangle { width: 30, height: 50 }; println!("rect1 is {}", rect1); } @@ -473,12 +485,12 @@ Filename: src/main.rs ``` #[derive(Debug)] struct Rectangle { - length: u32, width: u32, + height: u32, } fn main() { - let rect1 = Rectangle { length: 50, width: 30 }; + let rect1 = Rectangle { width: 30, height: 50 }; println!("rect1 is {:?}", rect1); } @@ -491,7 +503,7 @@ Now when we run the program, we won’t get any errors and we’ll see the following output: ``` -rect1 is Rectangle { length: 50, width: 30 } +rect1 is Rectangle { width: 30, height: 50 } ``` Nice! It’s not the prettiest output, but it shows the values of all the fields @@ -502,8 +514,8 @@ When we use the `{:#?}` style in the example, the output will look like this: ``` rect1 is Rectangle { - length: 50, - width: 30 + width: 30, + height: 50 } ``` @@ -539,18 +551,18 @@ Filename: src/main.rs ``` #[derive(Debug)] struct Rectangle { - length: u32, width: u32, + height: u32, } impl Rectangle { fn area(&self) -> u32 { - self.length * self.width + self.width * self.height } } fn main() { - let rect1 = Rectangle { length: 50, width: 30 }; + let rect1 = Rectangle { width: 30, height: 50 }; println!( "The area of the rectangle is {} square pixels.", @@ -638,9 +650,9 @@ Filename: src/main.rs ``` fn main() { - let rect1 = Rectangle { length: 50, width: 30 }; - let rect2 = Rectangle { length: 40, width: 10 }; - let rect3 = Rectangle { length: 45, width: 60 }; + let rect1 = Rectangle { width: 30, height: 50 }; + let rect2 = Rectangle { width: 10, height: 40 }; + let rect3 = Rectangle { width: 60, height: 45 }; println!("Can rect1 hold rect2? {}", rect1.can_hold(&rect2)); println!("Can rect1 hold rect3? {}", rect1.can_hold(&rect3)); @@ -667,8 +679,8 @@ parameter will be by looking at the code that calls the method: read `rect2` (rather than write, which would mean we’d need a mutable borrow), and we want `main` to retain ownership of `rect2` so we can use it again after calling the `can_hold` method. The return value of `can_hold` will be a -boolean, and the implementation will check whether the length and width of -`self` are both greater than the length and width of the other `Rectangle`, +boolean, and the implementation will check whether the width and height of +`self` are both greater than the width and height of the other `Rectangle`, respectively. 
Let’s add the new `can_hold` method to the `impl` block from Listing 5-13, shown in Listing 5-15: @@ -677,11 +689,11 @@ Filename: src/main.rs ``` impl Rectangle { fn area(&self) -> u32 { - self.length * self.width + self.width * self.height } fn can_hold(&self, other: &Rectangle) -> bool { - self.length > other.length && self.width > other.width + self.width > other.width && self.height > other.height } } ``` @@ -705,7 +717,7 @@ function. Associated functions are often used for constructors that will return a new instance of the struct. For example, we could provide an associated function -that would have one dimension parameter and use that as both length and width, +that would have one dimension parameter and use that as both width and height, thus making it easier to create a square `Rectangle` rather than having to specify the same value twice: @@ -714,7 +726,7 @@ Filename: src/main.rs ``` impl Rectangle { fn square(size: u32) -> Rectangle { - Rectangle { length: size, width: size } + Rectangle { width: size, height: size } } } ``` @@ -733,13 +745,13 @@ in its own `impl` block: ``` impl Rectangle { fn area(&self) -> u32 { - self.length * self.width + self.width * self.height } } impl Rectangle { fn can_hold(&self, other: &Rectangle) -> bool { - self.length > other.length && self.width > other.width + self.width > other.width && self.height > other.height } } ``` diff --git a/src/doc/book/second-edition/nostarch/chapter08.md b/src/doc/book/second-edition/nostarch/chapter08.md index 1d88f47e1d..86a0ddd09f 100644 --- a/src/doc/book/second-edition/nostarch/chapter08.md +++ b/src/doc/book/second-edition/nostarch/chapter08.md @@ -3,71 +3,78 @@ # Common Collections -Rust’s standard library includes a number of really useful data structures -called *collections*. Most other data types represent one specific value, but +Rust’s standard library includes a number of very useful data structures called +*collections*. Most other data types represent one specific value, but collections can contain multiple values. Unlike the built-in array and tuple types, the data these collections point to is stored on the heap, which means the amount of data does not need to be known at compile time and can grow or shrink as the program runs. Each kind of collection has different capabilities -and costs, and choosing an appropriate one for the situation you’re in is a -skill you’ll develop over time. In this chapter, we’ll go over three -collections which are used very often in Rust programs: +and costs, and choosing an appropriate one for your current situation is a +skill you’ll develop over time. In this chapter, we’ll discuss three +collections that are used very often in Rust programs: * A *vector* allows us to store a variable number of values next to each other. -* A *string* is a collection of characters. We’ve seen the `String` type - before, but we’ll talk about it in depth now. +* A *string* is a collection of characters. We’ve discussed the `String` type + previously, but in this chapter we’ll talk about it in depth. * A *hash map* allows us to associate a value with a particular key. It’s a particular implementation of the more general data structure called a *map*. To learn about the other kinds of collections provided by the standard library, -see the documentation at *https://doc.rust-lang.org/stable/std/collections*. +see the documentation at *https://doc.rust-lang.org/stable/std/collections/*. 
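As a quick orienting sketch (not one of the chapter’s numbered listings), here is what creating each of these three collection types can look like; every operation used here is covered in detail in the sections that follow:

```rust
use std::collections::HashMap;

fn main() {
    // A vector stores multiple values of the same type next to each other.
    let numbers = vec![1, 2, 3];

    // A String is a growable, owned, UTF-8 encoded string.
    let greeting = String::from("hello");

    // A hash map associates values with keys.
    let mut scores = HashMap::new();
    scores.insert("Blue", 10);

    println!("{:?} {} {:?}", numbers, greeting, scores);
}
```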
-We’re going to discuss how to create and update vectors, strings, and hash -maps, as well as what makes each special. +We’ll discuss how to create and update vectors, strings, and hash maps, as well +as what makes each special. ## Vectors -The first type we’ll look at is `Vec`, also known as a *vector*. Vectors -allow us to store more than one value in a single data structure that puts all -the values next to each other in memory. Vectors can only store values of the -same type. They are useful in situations where you have a list of items, such -as the lines of text in a file or the prices of items in a shopping cart. +The first collection type we’ll look at is `Vec`, also known as a *vector*. +Vectors allow us to store more than one value in a single data structure that +puts all the values next to each other in memory. Vectors can only store values +of the same type. They are useful in situations in which you have a list of +items, such as the lines of text in a file or the prices of items in a shopping +cart. ### Creating a New Vector -To create a new, empty vector, we can call the `Vec::new` function: +To create a new, empty vector, we can call the `Vec::new` function as shown in +Listing 8-1: ```rust let v: Vec = Vec::new(); ``` -Note that we added a type annotation here. Since we aren’t inserting any values -into this vector, Rust doesn’t know what kind of elements we intend to store. -This is an important point. Vectors are homogeneous: they may store many -values, but those values must all be the same type. Vectors are implemented -using generics, which Chapter 10 will cover how to use in your own types. For -now, all you need to know is that the `Vec` type provided by the standard -library can hold any type, and when a specific `Vec` holds a specific type, the -type goes within angle brackets. We’ve told Rust that the `Vec` in `v` will +Listing 8-1: Creating a new, empty vector to hold values of type `i32` + +Note that we added a type annotation here. Because we aren’t inserting any +values into this vector, Rust doesn’t know what kind of elements we intend to +store. This is an important point. Vectors are implemented using generics; +we’ll cover how to use generics with your own types in Chapter 10. For now, +know that the `Vec` type provided by the standard library can hold any type, +and when a specific vector holds a specific type, the type is specified within +angle brackets. In Listing 8-1, we’ve told Rust that the `Vec` in `v` will hold elements of the `i32` type. -In real code, Rust can infer the type of value we want to store once we insert -values, so you rarely need to do this type annotation. It’s more common to -create a `Vec` that has initial values, and Rust provides the `vec!` macro for -convenience. The macro will create a new `Vec` that holds the values we give -it. This will create a new `Vec` that holds the values `1`, `2`, and `3`: +In more realistic code, Rust can often infer the type of value we want to store +once we insert values, so you rarely need to do this type annotation. It’s more +common to create a `Vec` that has initial values, and Rust provides the +`vec!` macro for convenience. The macro will create a new vector that holds the +values we give it. Listing 8-2 creates a new `Vec` that holds the values +`1`, `2`, and `3`: ```rust let v = vec![1, 2, 3]; ``` +Listing 8-2: Creating a new vector containing values + Because we’ve given initial `i32` values, Rust can infer that the type of `v` -is `Vec`, and the type annotation isn’t necessary. 
Let’s look at how to -modify a vector next. +is `Vec`, and the type annotation isn’t necessary. Next, we’ll look at how +to modify a vector. ### Updating a Vector -To create a vector then add elements to it, we can use the `push` method: +To create a vector and then add elements to it, we can use the `push` method as +shown in Listing 8-3: ```rust let mut v = Vec::new(); @@ -78,14 +85,17 @@ v.push(7); v.push(8); ``` -As with any variable as we discussed in Chapter 3, if we want to be able to -change its value, we need to make it mutable with the `mut` keyword. The +Listing 8-3: Using the `push` method to add values to a vector + +As with any variable, as discussed in Chapter 3, if we want to be able to +change its value, we need to make it mutable using the `mut` keyword. The numbers we place inside are all of type `i32`, and Rust infers this from the data, so we don’t need the `Vec` annotation. -### Dropping a Vector Drops its Elements +### Dropping a Vector Drops Its Elements -Like any other `struct`, a vector will be freed when it goes out of scope: +Like any other `struct`, a vector will be freed when it goes out of scope, as +annotated in Listing 8-4: ```rust { @@ -96,9 +106,11 @@ Like any other `struct`, a vector will be freed when it goes out of scope: } // <- v goes out of scope and is freed here ``` +Listing 8-4: Showing where the vector and its elements are dropped + When the vector gets dropped, all of its contents will also be dropped, meaning those integers it holds will be cleaned up. This may seem like a -straightforward point, but can get a little more complicated once we start to +straightforward point but can get a bit more complicated when we start to introduce references to the elements of the vector. Let’s tackle that next! ### Reading Elements of Vectors @@ -108,7 +120,7 @@ read their contents is a good next step. There are two ways to reference a value stored in a vector. In the examples, we’ve annotated the types of the values that are returned from these functions for extra clarity. -This example shows both methods of accessing a value in a vector either with +Listing 8-5 shows both methods of accessing a value in a vector either with indexing syntax or the `get` method: ```rust @@ -118,17 +130,20 @@ let third: &i32 = &v[2]; let third: Option<&i32> = v.get(2); ``` -There are a few things to note here. First, that we use the index value of `2` -to get the third element: vectors are indexed by number, starting at zero. -Second, the two different ways to get the third element are: using `&` and -`[]`, which gives us a reference, or using the `get` method with the index -passed as an argument, which gives us an `Option<&T>`. +Listing 8-5: Using indexing syntax or the `get` method to access an item in a +vector -The reason Rust has two ways to reference an element is so that you can choose -how the program behaves when you try to use an index value that the vector -doesn’t have an element for. As an example, what should a program do if it has -a vector that holds five elements then tries to access an element at index 100 -like this: +Note two details here. First, we use the index value of `2` to get the third +element: vectors are indexed by number, starting at zero. Second, the two +different ways to get the third element are by using `&` and `[]`, which gives +us a reference, or by using the `get` method with the index passed as an +argument, which gives us an `Option<&T>`. 
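Because `get` returns an `Option<&i32>`, calling code usually handles both possible outcomes explicitly. As a small sketch of what that handling can look like (not one of the chapter’s numbered listings):

```rust
fn main() {
    let v = vec![1, 2, 3, 4, 5];

    // `get` gives us Option<&i32>, so we cover both the Some and None cases.
    match v.get(2) {
        Some(third) => println!("The third element is {}", third),
        None => println!("There is no third element."),
    }
}
```

Indexing with `&v[2]` would give us the reference directly, but with no `None` case to fall back on, as the next paragraphs explain.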
+ +The reason Rust has two ways to reference an element is so you can choose how +the program behaves when you try to use an index value that the vector doesn’t +have an element for. As an example, what should a program do if it has a vector +that holds five elements and then tries to access an element at index 100, as +shown in Listing 8-6: ```rust,should_panic let v = vec![1, 2, 3, 4, 5]; @@ -137,31 +152,32 @@ let does_not_exist = &v[100]; let does_not_exist = v.get(100); ``` -When you run this, you will find that with the first `[]` method, Rust will -cause a `panic!` when a non-existent element is referenced. This method would -be preferable if you want your program to consider an attempt to access an -element past the end of the vector to be a fatal error that should crash the -program. +Listing 8-6: Attempting to access the element at index 100 in a vector +containing 5 elements + +When you run this code, the first `[]` method will cause a `panic!` because it +references a nonexistent element. This method is best used when you want your +program to consider an attempt to access an element past the end of the vector +to be a fatal error that crashes the program. -When the `get` method is passed an index that is outside the array, it will -return `None` without panicking. You would use this if accessing an element -beyond the range of the vector will happen occasionally under normal -circumstances. Your code can then have logic to handle having either -`Some(&element)` or `None`, as we discussed in Chapter 6. For example, the -index could be coming from a person entering a number. If they accidentally -enter a number that’s too large and your program gets a `None` value, you could -tell the user how many items are in the current `Vec` and give them another -chance to enter a valid value. That would be more user-friendly than crashing -the program for a typo! +When the `get` method is passed an index that is outside the vector, it returns +`None` without panicking. You would use this method if accessing an element +beyond the range of the vector happens occasionally under normal circumstances. +Your code will then have logic to handle having either `Some(&element)` or +`None`, as discussed in Chapter 6. For example, the index could be coming from +a person entering a number. If they accidentally enter a number that’s too +large and the program gets a `None` value, you could tell the user how many +items are in the current `Vec` and give them another chance to enter a valid +value. That would be more user-friendly than crashing the program due to a typo! #### Invalid References -Once the program has a valid reference, the borrow checker will enforce the -ownership and borrowing rules covered in Chapter 4 to ensure this reference and -any other references to the contents of the vector stay valid. Recall the rule -that says we can’t have mutable and immutable references in the same scope. -That rule applies in this example, where we hold an immutable reference to the -first element in a vector and try to add an element to the end: +When the program has a valid reference, the borrow checker enforces the +ownership and borrowing rules (covered in Chapter 4) to ensure this reference +and any other references to the contents of the vector remain valid. Recall the +rule that states we can’t have mutable and immutable references in the same +scope. 
That rule applies in Listing 8-7 where we hold an immutable reference to +the first element in a vector and try to add an element to the end: ```rust,ignore let mut v = vec![1, 2, 3, 4, 5]; @@ -171,9 +187,12 @@ let first = &v[0]; v.push(6); ``` -Compiling this will give us this error: +Listing 8-7: Attempting to add an element to a vector while holding a reference +to an item -``` +Compiling this code will result in this error: + +```text error[E0502]: cannot borrow `v` as mutable because it is also borrowed as immutable | @@ -186,33 +205,66 @@ immutable | - immutable borrow ends here ``` -This code might look like it should work: why should a reference to the first -element care about what changes about the end of the vector? The reason why -this code isn’t allowed is due to the way vectors work. Adding a new element +The code in Listing 8-7 might look like it should work: why should a reference +to the first element care about what changes at the end of the vector? The +reason behind this error is due to the way vectors work: adding a new element onto the end of the vector might require allocating new memory and copying the -old elements over to the new space, in the circumstance that there isn’t enough -room to put all the elements next to each other where the vector was. In that -case, the reference to the first element would be pointing to deallocated -memory. The borrowing rules prevent programs from ending up in that situation. +old elements to the new space if there isn’t enough room to put all the +elements next to each other where the vector was. In that case, the reference +to the first element would be pointing to deallocated memory. The borrowing +rules prevent programs from ending up in that situation. + +> Note: For more on the implementation details of the `Vec` type, see “The +> Nomicon” at https://doc.rust-lang.org/stable/nomicon/vec.html. + +### Iterating Over the Values in a Vector + +If we want to access each element in a vector in turn, rather than using +indexing to access one element, we can iterate through all of the elements. +Listing 8-8 shows how to use a `for` loop to get immutable references to each +element in a vector of `i32` values and print them out: + +```rust +let v = vec![100, 32, 57]; +for i in &v { + println!("{}", i); +} +``` + +Listing 8-8: Printing each element in a vector by iterating over the elements +using a `for` loop -> Note: For more on this, see The Nomicon at -*https://doc.rust-lang.org/stable/nomicon/vec.html*. +We can also iterate over mutable references to each element in a mutable vector +if we want to make changes to all the elements. The `for` loop in Listing 8-9 +will add `50` to each element: + +```rust +let mut v = vec![100, 32, 57]; +for i in &mut v { + *i += 50; +} +``` + +Listing 8-9: Iterating over mutable references to elements in a vector + +In order to change the value that the mutable reference refers to, before we +can use the `+=` operator with `i`, we have to use the dereference operator +(`*`) to get to the value. ### Using an Enum to Store Multiple Types At the beginning of this chapter, we said that vectors can only store values -that are all the same type. This can be inconvenient; there are definitely use -cases for needing to store a list of things of different types. Luckily, the -variants of an enum are all defined under the same enum type, so when we need -to store elements of a different type in a vector, we can define and use an -enum! 
- -For example, let’s say we want to get values from a row in a spreadsheet, where -some of the columns in the row contain integers, some floating point numbers, +that are the same type. This can be inconvenient; there are definitely use +cases for needing to store a list of items of different types. Fortunately, the +variants of an enum are defined under the same enum type, so when we need to +store elements of a different type in a vector, we can define and use an enum! + +For example, let’s say we want to get values from a row in a spreadsheet where +some of the columns in the row contain integers, some floating-point numbers, and some strings. We can define an enum whose variants will hold the different -value types, and then all of the enum variants will be considered the same -type, that of the enum. Then we can create a vector that holds that enum and -so, ultimately, holds different types: +value types, and then all the enum variants will be considered the same type, +that of the enum. Then we can create a vector that holds that enum and so, +ultimately, holds different types. We’ve demonstrated this in Listing 8-8: ```rust enum SpreadsheetCell { @@ -228,84 +280,86 @@ let row = vec![ ]; ``` -The reason Rust needs to know exactly what types will be in the vector at -compile time is so that it knows exactly how much memory on the heap will be -needed to store each element. A secondary advantage to this is that we can be -explicit about what types are allowed in this vector. If Rust allowed a vector -to hold any type, there would be a chance that one or more of the types would -cause errors with the operations performed on the elements of the vector. Using -an enum plus a `match` means that Rust will ensure at compile time that we -always handle every possible case, as we discussed in Chapter 6. +Listing 8-8: Defining an `enum` to store values of different types in one vector + +The reason Rust needs to know what types will be in the vector at compile time +is so it knows exactly how much memory on the heap will be needed to store each +element. A secondary advantage is that we can be explicit about what types are +allowed in this vector. If Rust allowed a vector to hold any type, there would +be a chance that one or more of the types would cause errors with the +operations performed on the elements of the vector. Using an enum plus a +`match` expression means that Rust will ensure at compile time that we always +handle every possible case, as discussed in Chapter 6. -If you don’t know at the time that you’re writing a program the exhaustive set -of types the program will get at runtime to store in a vector, the enum -technique won’t work. Instead, you can use a trait object, which we’ll cover in -Chapter 17. +If you don’t know when you’re writing a program the exhaustive set of types the +program will get at runtime to store in a vector, the enum technique won’t +work. Instead, you can use a trait object, which we’ll cover in Chapter 17. -Now that we’ve gone over some of the most common ways to use vectors, be sure -to take a look at the API documentation for all of the many useful methods -defined on `Vec` by the standard library. For example, in addition to `push` -there’s a `pop` method that will remove and return the last element. Let’s move -on to the next collection type: `String`! +Now that we’ve discussed some of the most common ways to use vectors, be sure +to review the API documentation for all the many useful methods defined on +`Vec` by the standard library. 
For example, in addition to `push`, a `pop` +method removes and returns the last element. Let’s move on to the next +collection type: `String`! ## Strings -We’ve already talked about strings a bunch in Chapter 4, but let’s take a more -in-depth look at them now. Strings are an area that new Rustaceans commonly get -stuck on. This is due to a combination of three things: Rust’s propensity for -making sure to expose possible errors, strings being a more complicated data -structure than many programmers give them credit for, and UTF-8. These things -combine in a way that can seem difficult when coming from other languages. +We talked about strings in Chapter 4, but we’ll look at them in more depth now. +New Rustaceans commonly get stuck on strings due to a combination of three +concepts: Rust’s propensity for exposing possible errors, strings being a more +complicated data structure than many programmers give them credit for, and +UTF-8. These concepts combine in a way that can seem difficult when you’re +coming from other programming languages. -The reason strings are in the collections chapter is that strings are +This discussion of strings is in the collections chapter because strings are implemented as a collection of bytes plus some methods to provide useful functionality when those bytes are interpreted as text. In this section, we’ll -talk about the operations on `String` that every collection type has, like +talk about the operations on `String` that every collection type has, such as creating, updating, and reading. We’ll also discuss the ways in which `String` is different than the other collections, namely how indexing into a `String` is -complicated by the differences in which people and computers interpret `String` -data. - -### What is a String? - -Before we can dig into those aspects, we need to talk about what exactly we -mean by the term *string*. Rust actually only has one string type in the core -language itself: `str`, the string slice, which is usually seen in its borrowed -form, `&str`. We talked about *string slices* in Chapter 4: these are a -reference to some UTF-8 encoded string data stored elsewhere. String literals, -for example, are stored in the binary output of the program, and are therefore -string slices. - -The type called `String` is provided in Rust’s standard library rather than -coded into the core language, and is a growable, mutable, owned, UTF-8 encoded -string type. When Rustaceans talk about “strings” in Rust, they usually mean -both the `String` and the string slice `&str` types, not just one of those. -This section is largely about `String`, but both these types are used heavily -in Rust’s standard library. Both `String` and string slices are UTF-8 encoded. +complicated by the differences between how people and computers interpret +`String` data. + +### What Is a String? + +We’ll first define what we mean by the term *string*. Rust has only one string +type in the core language, which is the string slice `str` that is usually seen +in its borrowed form `&str`. In Chapter 4, we talked about *string slices*, +which are references to some UTF-8 encoded string data stored elsewhere. String +literals, for example, are stored in the binary output of the program and are +therefore string slices. + +The `String` type is provided in Rust’s standard library rather than coded into +the core language and is a growable, mutable, owned, UTF-8 encoded string type. 
+When Rustaceans refer to “strings” in Rust, they usually mean the `String` and +the string slice `&str` types, not just one of those types. Although this +section is largely about `String`, both types are used heavily in Rust’s +standard library and both `String` and string slices are UTF-8 encoded. Rust’s standard library also includes a number of other string types, such as -`OsString`, `OsStr`, `CString`, and `CStr`. Library crates may provide even +`OsString`, `OsStr`, `CString`, and `CStr`. Library crates can provide even more options for storing string data. Similar to the `*String`/`*Str` naming, they often provide an owned and borrowed variant, just like `String`/`&str`. -These string types may store different encodings or be represented in memory in -a different way, for example. We won’t be talking about these other string +These string types can store text in different encodings or be represented in +memory in a different way, for example. We won’t discuss these other string types in this chapter; see their API documentation for more about how to use them and when each is appropriate. ### Creating a New String Many of the same operations available with `Vec` are available with `String` as -well, starting with the `new` function to create a string, like so: +well, starting with the `new` function to create a string, shown in Listing 8-9: ```rust -let s = String::new(); +let mut s = String::new(); ``` -This creates a new empty string called `s` that we can then load data into. +Listing 8-9: Creating a new, empty `String` -Often, we’ll have some initial data that we’d like to start the string off +This line creates a new empty string called `s` that we can then load data +into. Often, we’ll have some initial data that we want to start the string with. For that, we use the `to_string` method, which is available on any type -that implements the `Display` trait, which string literals do: +that implements the `Display` trait, which string literals do. Listing 8-10 +shows two examples: ```rust let data = "initial contents"; @@ -316,78 +370,100 @@ let s = data.to_string(); let s = "initial contents".to_string(); ``` -This creates a string containing `initial contents`. +Listing 8-10: Using the `to_string` method to create a `String` from a string +literal + +This code creates a string containing `initial contents`. We can also use the function `String::from` to create a `String` from a string -literal. This is equivalent to using `to_string`: +literal. The code in Listing 8-11 is equivalent to the code from Listing 8-10 +that uses `to_string`: ```rust let s = String::from("initial contents"); ``` -Because strings are used for so many things, there are many different generic -APIs that can be used for strings, so there are a lot of options. Some of them -can feel redundant, but they all have their place! In this case, `String::from` -and `.to_string` end up doing the exact same thing, so which you choose is a -matter of style. +Listing 8-11: Using the `String::from` function to create a `String` from a +string literal + +Because strings are used for so many things, we can use many different generic +APIs for strings, providing us with a lot of options. Some of them can seem +redundant, but they all have their place! In this case, `String::from` and +`to_string` do the same thing, so which you choose is a matter of style. 
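Because `to_string` is available on any type that implements the `Display` trait, it isn’t limited to string literals. As a brief aside (not one of the book’s listings), the same method turns a number into a `String`:

```rust
fn main() {
    // Integers implement Display, so to_string works on them as well.
    let n = 42;
    let s = n.to_string();

    println!("{}", s); // prints 42
}
```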
Remember that strings are UTF-8 encoded, so we can include any properly encoded -data in them: +data in them, as shown in Listing 8-12: ```rust -let hello = "السلام عليكم"; -let hello = "Dobrý den"; -let hello = "Hello"; -let hello = "שָׁלוֹם"; -let hello = "नमस्ते"; -let hello = "こんにちは"; -let hello = "안녕하세요"; -let hello = "你好"; -let hello = "Olá"; -let hello = "Здравствуйте"; -let hello = "Hola"; +let hello = String::from("السلام عليكم"); +let hello = String::from("Dobrý den"); +let hello = String::from("Hello"); +let hello = String::from("שָׁלוֹם"); +let hello = String::from("नमस्ते"); +let hello = String::from("こんにちは"); +let hello = String::from("안녕하세요"); +let hello = String::from("你好"); +let hello = String::from("Olá"); +let hello = String::from("Здравствуйте"); +let hello = String::from("Hola"); ``` +Listing 8-12: Storing greetings in different languages in strings + +All of these are valid `String` values. + ### Updating a String -A `String` can grow in size and its contents can change just like the contents -of a `Vec`, by pushing more data into it. In addition, `String` has -concatenation operations implemented with the `+` operator for convenience. +A `String` can grow in size and its contents can change, just like the contents +of a `Vec`, by pushing more data into it. In addition, we can conveniently use +the `+` operator or the `format!` macro to concatenate `String` values together. -#### Appending to a String with Push +#### Appending to a String with `push_str` and `push` -We can grow a `String` by using the `push_str` method to append a string slice: +We can grow a `String` by using the `push_str` method to append a string slice, +as shown in Listing 8-13: ```rust let mut s = String::from("foo"); s.push_str("bar"); ``` -`s` will contain “foobar” after these two lines. The `push_str` method takes a +Listing 8-13: Appending a string slice to a `String` using the `push_str` method + +After these two lines, `s` will contain `foobar`. The `push_str` method takes a string slice because we don’t necessarily want to take ownership of the -parameter. For example, it would be unfortunate if we weren’t able to use `s2` -after appending its contents to `s1`: +parameter. For example, the code in Listing 8-14 shows that it would be +unfortunate if we weren’t able to use `s2` after appending its contents to `s1`: ```rust let mut s1 = String::from("foo"); -let s2 = String::from("bar"); +let s2 = "bar"; s1.push_str(&s2); +println!("s2 is {}", s2); ``` -The `push` method is defined to have a single character as a parameter and add -it to the `String`: +Listing 8-14: Using a string slice after appending its contents to a `String` + +If the `push_str` method took ownership of `s2`, we wouldn’t be able to print +out its value on the last line. However, this code works as we’d expect! + +The `push` method takes a single character as a parameter and adds it to the +`String`. Listing 8-15 shows code that adds an l to a `String` using the `push` +method: ```rust let mut s = String::from("lo"); s.push('l'); ``` -After this, `s` will contain “lol”. +Listing 8-15: Adding one character to a `String` value using `push` -#### Concatenation with the + Operator or the `format!` Macro +As a result of this code, `s` will contain `lol`. -Often, we’ll want to combine two existing strings together. One way is to use -the `+` operator like this: +#### Concatenation with the `+` Operator or the `format!` Macro + +Often, we’ll want to combine two existing strings. 
One way is to use the `+` +operator, as shown in Listing 8-16: ```rust let s1 = String::from("Hello, "); @@ -395,38 +471,45 @@ let s2 = String::from("world!"); let s3 = s1 + &s2; // Note that s1 has been moved here and can no longer be used ``` -After this code the String `s3` will contain `Hello, world!`. The reason that -`s1` is no longer valid after the addition and the reason that we used a +Listing 8-16: Using the `+` operator to combine two `String` values into a new +`String` value + +As a result of this code, the string `s3` will contain `Hello, world!`. The +reason `s1` is no longer valid after the addition and the reason we used a reference to `s2` has to do with the signature of the method that gets called when we use the `+` operator. The `+` operator uses the `add` method, whose signature looks something like this: -``` +```rust,ignore fn add(self, s: &str) -> String { ``` -This isn’t the exact signature that’s in the standard library; there `add` is -defined using generics. Here, we’re looking at the signature of `add` with -concrete types substituted for the generic ones, which is what happens when we -call this method with `String` values. We’ll be discussing generics in Chapter -10. This signature gives us the clues we need to understand the tricky bits of -the `+` operator. - -First of all, `s2` has an `&`, meaning that we are adding a *reference* of the -second string to the first string. This is because of the `s` parameter in the -`add` function: we can only add a `&str` to a `String`, we can’t add two -`String` values together. Remember back in Chapter 4 when we talked about how -`&String` will coerce to `&str`: we write `&s2` so that the `String` will -coerce to the proper type, `&str`. Because this method does not take ownership -of the parameter, `s2` will still be valid after this operation. +This isn’t the exact signature that’s in the standard library: in the standard +library, `add` is defined using generics. Here, we’re looking at the signature +of `add` with concrete types substituted for the generic ones, which is what +happens when we call this method with `String` values. We’ll discuss generics +in Chapter 10. This signature gives us the clues we need to understand the +tricky bits of the `+` operator. + +First, `s2` has an `&`, meaning that we’re adding a *reference* of the second +string to the first string because of the `s` parameter in the `add` function: +we can only add a `&str` to a `String`; we can’t add two `String` values +together. But wait - the type of `&s2` is `&String`, not `&str`, as specified +in the second parameter to `add`. Why does Listing 8-16 compile? We are able to +use `&s2` in the call to `add` because the compiler can *coerce* the `&String` +argument into a `&str`. When we call the `add` method, Rust uses something +called a *deref coercion*, which you could think of here as turning `&s2` into +`&s2[..]`. We’ll discuss deref coercion in more depth in Chapter 15. Because +`add` does not take ownership of the `s` parameter, `s2` will still be a valid +`String` after this operation. Second, we can see in the signature that `add` takes ownership of `self`, -because `self` does *not* have an `&`. This means `s1` in the above example -will be moved into the `add` call and no longer be valid after that. So while -`let s3 = s1 + &s2;` looks like it will copy both strings and create a new one, -this statement actually takes ownership of `s1`, appends a copy of the contents -of `s2`, then returns ownership of the result. 
In other words, it looks like -it’s making a lot of copies, but isn’t: the implementation is more efficient +because `self` does *not* have an `&`. This means `s1` in Listing 8-16 will be +moved into the `add` call and no longer be valid after that. So although `let +s3 = s1 + &s2;` looks like it will copy both strings and create a new one, this +statement actually takes ownership of `s1`, appends a copy of the contents of +`s2`, and then returns ownership of the result. In other words, it looks like +it’s making a lot of copies but isn’t: the implementation is more efficient than copying. If we need to concatenate multiple strings, the behavior of `+` gets unwieldy: @@ -439,8 +522,8 @@ let s3 = String::from("toe"); let s = s1 + "-" + &s2 + "-" + &s3; ``` -`s` will be “tic-tac-toe” at this point. With all of the `+` and `"` -characters, it gets hard to see what’s going on. For more complicated string +At this point, `s` will be `tic-tac-toe`. With all of the `+` and `"` +characters, it’s difficult to see what’s going on. For more complicated string combining, we can use the `format!` macro: ```rust @@ -451,24 +534,26 @@ let s3 = String::from("toe"); let s = format!("{}-{}-{}", s1, s2, s3); ``` -This code will also set `s` to “tic-tac-toe”. The `format!` macro works in the -same way as `println!`, but instead of printing the output to the screen, it -returns a `String` with the contents. This version is much easier to read, and -also does not take ownership of any of its parameters. +This code also sets `s` to `tic-tac-toe`. The `format!` macro works in the same +way as `println!`, but instead of printing the output to the screen, it returns +a `String` with the contents. The version of the code using `format!` is much +easier to read and also doesn’t take ownership of any of its parameters. ### Indexing into Strings -In many other languages, accessing individual characters in a string by -referencing them by index is a valid and common operation. In Rust, however, if -we try to access parts of a `String` using indexing syntax, we’ll get an error. -That is, this code: +In many other programming languages, accessing individual characters in a +string by referencing them by index is a valid and common operation. However, +if we try to access parts of a `String` using indexing syntax in Rust, we’ll +get an error. Consider the code in Listing 8-17: ```rust,ignore let s1 = String::from("hello"); let h = s1[0]; ``` -will result in this error: +Listing 8-17: Attempting to use indexing syntax with a String + +This code will result in the following error: ```text error: the trait bound `std::string::String: std::ops::Index<_>` is not @@ -479,33 +564,31 @@ satisfied [--explain E0277] note: the type `std::string::String` cannot be indexed by `_` ``` -The error and the note tell the story: Rust strings don’t support indexing. So -the follow-up question is, why not? In order to answer that, we have to talk a -bit about how Rust stores strings in memory. +The error and the note tell the story: Rust strings don’t support indexing. But +why not? To answer that question, we need to discuss how Rust stores strings in +memory. #### Internal Representation -A `String` is a wrapper over a `Vec`. Let’s take a look at some of our -properly-encoded UTF-8 example strings from before. First, this one: +A `String` is a wrapper over a `Vec`. Let’s look at some of our properly +encoded UTF-8 example strings from Listing 8-12. 
First, this one: ```rust let len = String::from("Hola").len(); ``` In this case, `len` will be four, which means the `Vec` storing the string -“Hola” is four bytes long: each of these letters takes one byte when encoded in -UTF-8. What about this example, though? +“Hola” is four bytes long. Each of these letters takes one byte when encoded in +UTF-8. But what about the following line? ```rust let len = String::from("Здравствуйте").len(); ``` -A person asked how long the string is might say 12. However, Rust’s answer is -24. This is the number of bytes that it takes to encode “Здравствуйте” in -UTF-8, since each Unicode scalar value takes two bytes of storage. Therefore, -an index into the string’s bytes will not always correlate to a valid Unicode -scalar value. - +Asked how long the string is, you might say 12. However, Rust’s answer is 24: +that’s the number of bytes it takes to encode “Здравствуйте” in UTF-8, because +each Unicode scalar value takes two bytes of storage. Therefore, an index into +the string’s bytes will not always correlate to a valid Unicode scalar value. To demonstrate, consider this invalid Rust code: ```rust,ignore @@ -516,29 +599,30 @@ let answer = &hello[0]; What should the value of `answer` be? Should it be `З`, the first letter? When encoded in UTF-8, the first byte of `З` is `208`, and the second is `151`, so `answer` should in fact be `208`, but `208` is not a valid character on its -own. Returning `208` is likely not what a person would want if they asked for -the first letter of this string, but that’s the only data that Rust has at byte -index 0. Returning the byte value is probably not what people want, even with -only Latin letters: `&"hello"[0]` would return `104`, not `h`. To avoid -returning an unexpected value and causing bugs that might not be discovered -immediately, Rust chooses to not compile this code at all and prevent -misunderstandings earlier. +own. Returning `208` is likely not what a user would want if they asked for the +first letter of this string; however, that’s the only data that Rust has at +byte index 0. Returning the byte value is probably not what users want, even if +the string contains only Latin letters: if `&"hello"[0]` was valid code that +returned the byte value, it would return `104`, not `h`. To avoid returning an +unexpected value and causing bugs that might not be discovered immediately, +Rust doesn’t compile this code at all and prevents misunderstandings earlier in +the development process. -#### Bytes and Scalar Values and Grapheme Clusters! Oh my! +#### Bytes and Scalar Values and Grapheme Clusters! Oh My! -This leads to another point about UTF-8: there are really three relevant ways -to look at strings, from Rust’s perspective: as bytes, scalar values, and -grapheme clusters (the closest thing to what people would call *letters*). +Another point about UTF-8 is that there are actually three relevant ways to +look at strings from Rust’s perspective: as bytes, scalar values, and grapheme +clusters (the closest thing to what we would call *letters*). If we look at the Hindi word “नमस्ते” written in the Devanagari script, it is ultimately stored as a `Vec` of `u8` values that looks like this: -``` +```text [224, 164, 168, 224, 164, 174, 224, 164, 184, 224, 165, 141, 224, 164, 164, 224, 165, 135] ``` -That’s 18 bytes, and is how computers ultimately store this data. If we look at +That’s 18 bytes and is how computers ultimately store this data. 
If we look at them as Unicode scalar values, which are what Rust’s `char` type is, those bytes look like this: @@ -546,10 +630,10 @@ bytes look like this: ['न', 'म', 'स', '्', 'त', 'े'] ``` -There are six `char` values here, but the fourth and sixth are not letters, +There are six `char` values here, but the fourth and sixth are not letters: they’re diacritics that don’t make sense on their own. Finally, if we look at them as grapheme clusters, we’d get what a person would call the four letters -that make up this word: +that make up the Hindi word: ```text ["न", "म", "स्", "ते"] @@ -559,19 +643,21 @@ Rust provides different ways of interpreting the raw string data that computers store so that each program can choose the interpretation it needs, no matter what human language the data is in. -A final reason Rust does not allow you to index into a `String` to get a +A final reason Rust doesn’t allow us to index into a `String` to get a character is that indexing operations are expected to always take constant time -(O(1)). It isn’t possible to guarantee that performance with a `String`, -though, since Rust would have to walk through the contents from the beginning -to the index to determine how many valid characters there were. +(O(1)). But it isn’t possible to guarantee that performance with a `String`, +because Rust would have to walk through the contents from the beginning to the +index to determine how many valid characters there were. ### Slicing Strings -Because it’s not clear what the return type of string indexing should be, and -it is often a bad idea to index into a string, Rust dissuades you from doing so -by asking you to be more specific if you really need it. The way you can be -more specific than indexing using `[]` with a single number is using `[]` with -a range to create a string slice containing particular bytes: +Indexing into a string is often a bad idea because it’s not clear what the +return type of the string indexing operation should be: a byte value, a +character, a grapheme cluster, or a string slice. Therefore, Rust asks you to +be more specific if you really need to use indices to create string slices. To +be more specific in your indexing and indicate that you want a string slice, +rather than indexing using `[]` with a single number, you can use `[]` with a +range to create a string slice containing particular bytes: ```rust let hello = "Здравствуйте"; @@ -580,27 +666,28 @@ let s = &hello[0..4]; ``` Here, `s` will be a `&str` that contains the first four bytes of the string. -Earlier, we mentioned that each of these characters was two bytes, so that -means that `s` will be “Зд”. +Earlier, we mentioned that each of these characters was two bytes, which means +`s` will be `Зд`. -What would happen if we did `&hello[0..1]`? The answer: it will panic at -runtime, in the same way that accessing an invalid index in a vector does: +What would happen if we used `&hello[0..1]`? The answer: Rust will panic at +runtime in the same way that accessing an invalid index in a vector does: ```text thread 'main' panicked at 'index 0 and/or 1 in `Здравствуйте` do not lie on character boundary', ../src/libcore/str/mod.rs:1694 ``` -You should use this with caution, since it can cause your program to crash. +You should use ranges to create string slices with caution, because it can +crash your program. ### Methods for Iterating Over Strings -Luckily, there are other ways we can access elements in a String. +Fortunately, we can access elements in a string in other ways. 
If we need to perform operations on individual Unicode scalar values, the best -way to do so is to use the `chars` method. Calling `chars` on “नमस्ते” -separates out and returns six values of type `char`, and you can iterate over -the result in order to access each element: +way to do so is to use the `chars` method. Calling `chars` on “नमस्ते” separates +out and returns six values of type `char`, and you can iterate over the result +in order to access each element: ```rust for c in "नमस्ते".chars() { @@ -608,7 +695,7 @@ for c in "नमस्ते".chars() { } ``` -This code will print: +This code will print the following: ```text न @@ -638,25 +725,25 @@ This code will print the 18 bytes that make up this `String`, starting with: // ... etc ``` -But make sure to remember that valid Unicode scalar values may be made up of -more than one byte. +But be sure to remember that valid Unicode scalar values may be made up of more +than one byte. Getting grapheme clusters from strings is complex, so this functionality is not -provided by the standard library. There are crates available on crates.io if -this is the functionality you need. +provided by the standard library. Crates are available on *https://crates.io* +if this is the functionality you need. -### Strings are Not so Simple +### Strings Are Not So Simple To summarize, strings are complicated. Different programming languages make different choices about how to present this complexity to the programmer. Rust has chosen to make the correct handling of `String` data the default behavior -for all Rust programs, which does mean programmers have to put more thought -into handling UTF-8 data upfront. This tradeoff exposes more of the complexity -of strings than other programming languages do, but this will prevent you from -having to handle errors involving non-ASCII characters later in your -development lifecycle. +for all Rust programs, which means programmers have to put more thought into +handling UTF-8 data upfront. This trade-off exposes more of the complexity of +strings than other programming languages do but prevents you from having to +handle errors involving non-ASCII characters later in your development life +cycle. -Let’s switch to something a bit less complex: hash map! +Let’s switch to something a bit less complex: hash maps! ## Hash Maps @@ -664,26 +751,25 @@ The last of our common collections is the *hash map*. The type `HashMap` stores a mapping of keys of type `K` to values of type `V`. It does this via a *hashing function*, which determines how it places these keys and values into memory. Many different programming languages support this kind of data -structure, but often with a different name: hash, map, object, hash table, or -associative array, just to name a few. +structure, but often use a different name, such as hash, map, object, hash +table, or associative array, just to name a few. -Hash maps are useful for when you want to be able to look up data not by an -index, as you can with vectors, but by using a key that can be of any type. For -example, in a game, you could keep track of each team’s score in a hash map -where each key is a team’s name and the values are each team’s score. Given a -team name, you can retrieve their score. +Hash maps are useful for when you want to look up data not by an index, as you +can with vectors, but by using a key that can be of any type. For example, in a +game, you could keep track of each team’s score in a hash map where each key is +a team’s name and the values are each team’s score. 
Given a team name, you can +retrieve its score. -We’ll go over the basic API of hash maps in this chapter, but there are many -more goodies hiding in the functions defined on `HashMap` by the standard -library. As always, check the standard library documentation for more -information. +We’ll go over the basic API of hash maps in this section, but many more goodies +are hiding in the functions defined on `HashMap` by the standard library. +As always, check the standard library documentation for more information. ### Creating a New Hash Map -We can create an empty `HashMap` with `new`, and add elements with `insert`. -Here we’re keeping track of the scores of two teams whose names are Blue and -Yellow. The Blue team will start with 10 points and the Yellow team starts with -50: +We can create an empty hash map with `new` and add elements with `insert`. In +Listing 8-18, we’re keeping track of the scores of two teams whose names are +Blue and Yellow. The Blue team will start with 10 points, and the Yellow team +starts with 50: ```rust use std::collections::HashMap; @@ -694,6 +780,8 @@ scores.insert(String::from("Blue"), 10); scores.insert(String::from("Yellow"), 50); ``` +Listing 8-18: Creating a new hash map and inserting some keys and values + Note that we need to first `use` the `HashMap` from the collections portion of the standard library. Of our three common collections, this one is the least often used, so it’s not included in the features imported automatically in the @@ -707,11 +795,11 @@ must have the same type. Another way of constructing a hash map is by using the `collect` method on a vector of tuples, where each tuple consists of a key and its value. The -`collect` method gathers up data into a number of collection types, including +`collect` method gathers data into a number of collection types, including `HashMap`. For example, if we had the team names and initial scores in two separate vectors, we can use the `zip` method to create a vector of tuples where “Blue” is paired with 10, and so forth. Then we can use the `collect` -method to turn that vector of tuples into a `HashMap`: +method to turn that vector of tuples into a `HashMap` as shown in Listing 8-19: ```rust use std::collections::HashMap; @@ -722,17 +810,19 @@ let initial_scores = vec![10, 50]; let scores: HashMap<_, _> = teams.iter().zip(initial_scores.iter()).collect(); ``` +Listing 8-19: Creating a hash map from a list of teams and a list of scores + The type annotation `HashMap<_, _>` is needed here because it’s possible to `collect` into many different data structures, and Rust doesn’t know which you want unless you specify. For the type parameters for the key and value types, -however, we use underscores and Rust can infer the types that the hash map -contains based on the types of the data in the vector. +however, we use underscores, and Rust can infer the types that the hash map +contains based on the types of the data in the vectors. ### Hash Maps and Ownership For types that implement the `Copy` trait, like `i32`, the values are copied into the hash map. 
For owned values like `String`, the values will be moved and -the hash map will be the owner of those values: +the hash map will be the owner of those values as demonstrated in Listing 8-20: ```rust use std::collections::HashMap; @@ -742,20 +832,25 @@ let field_value = String::from("Blue"); let mut map = HashMap::new(); map.insert(field_name, field_value); -// field_name and field_value are invalid at this point +// field_name and field_value are invalid at this point, try using them and +// see what compiler error you get! ``` -We would not be able to use the bindings `field_name` and `field_value` after -they have been moved into the hash map with the call to `insert`. +Listing 8-20: Showing that keys and values are owned by the hash map once +they’re inserted + +We aren’t able to use the variables `field_name` and `field_value` after +they’ve been moved into the hash map with the call to `insert`. -If we insert references to values into the hash map, the values themselves will -not be moved into the hash map. The values that the references point to must be -valid for at least as long as the hash map is valid, though. We will talk more -about these issues in the Lifetimes section of Chapter 10. +If we insert references to values into the hash map, the values won’t be moved +into the hash map. The values that the references point to must be valid for at +least as long as the hash map is valid. We’ll talk more about these issues in +the “Validating References with Lifetimes” section in Chapter 10. ### Accessing Values in a Hash Map -We can get a value out of the hash map by providing its key to the `get` method: +We can get a value out of the hash map by providing its key to the `get` method +as shown in Listing 8-21: ```rust use std::collections::HashMap; @@ -769,11 +864,13 @@ let team_name = String::from("Blue"); let score = scores.get(&team_name); ``` +Listing 8-21: Accessing the score for the Blue team stored in the hash map + Here, `score` will have the value that’s associated with the Blue team, and the -result will be `Some(10)`. The result is wrapped in `Some` because `get` -returns an `Option`; if there’s no value for that key in the hash map, `get` -will return `None`. The program will need to handle the `Option` in one of the -ways that we covered in Chapter 6. +result will be `Some(&10)`. The result is wrapped in `Some` because `get` +returns an `Option<&V>`; if there’s no value for that key in the hash map, +`get` will return `None`. The program will need to handle the `Option` in one +of the ways that we covered in Chapter 6. We can iterate over each key/value pair in a hash map in a similar manner as we do with vectors, using a `for` loop: @@ -791,7 +888,7 @@ for (key, value) in &scores { } ``` -This will print each pair, in an arbitrary order: +This code will print each pair in an arbitrary order: ```text Yellow: 50 @@ -800,22 +897,22 @@ Blue: 10 ### Updating a Hash Map -While the number of keys and values is growable, each individual key can only -have one value associated with it at a time. When we want to change the data in -a hash map, we have to decide how to handle the case when a key already has a -value assigned. We could choose to replace the old value with the new value, -completely disregarding the old value. We could choose to keep the old value -and ignore the new value, and only add the new value if the key *doesn’t* -already have a value. Or we could combine the old value and the new value. -Let’s look at how to do each of these! 
+Although the number of keys and values is growable, each key can only have one +value associated with it at a time. When we want to change the data in a hash +map, we have to decide how to handle the case when a key already has a value +assigned. We could replace the old value with the new value, completely +disregarding the old value. We could keep the old value and ignore the new +value, and only add the new value if the key *doesn’t* already have a value. Or +we could combine the old value and the new value. Let’s look at how to do each +of these! #### Overwriting a Value -If we insert a key and a value into a hash map, then insert that same key with -a different value, the value associated with that key will be replaced. Even -though this following code calls `insert` twice, the hash map will only contain -one key/value pair because we’re inserting the value for the Blue team’s key -both times: +If we insert a key and a value into a hash map, and then insert that same key +with a different value, the value associated with that key will be replaced. +Even though the code in Listing 8-22 calls `insert` twice, the hash map will +only contain one key/value pair because we’re inserting the value for the Blue +team’s key both times: ```rust use std::collections::HashMap; @@ -828,18 +925,21 @@ scores.insert(String::from("Blue"), 25); println!("{:?}", scores); ``` -This will print `{"Blue": 25}`. The original value of 10 has been overwritten. +Listing 8-22: Replacing a value stored with a particular key + +This code will print `{"Blue": 25}`. The original value of `10` has been +overwritten. #### Only Insert If the Key Has No Value -It’s common to want to check if a particular key has a value and, if it does -not, insert a value for it. Hash maps have a special API for this, called -`entry`, that takes the key we want to check as an argument. The return value -of the `entry` function is an enum, `Entry`, that represents a value that might -or might not exist. Let’s say that we want to check if the key for the Yellow +It’s common to check whether a particular key has a value, and if it doesn’t, +insert a value for it. Hash maps have a special API for this called `entry` +that takes the key we want to check as a parameter. The return value of the +`entry` function is an enum called `Entry` that represents a value that might +or might not exist. Let’s say we want to check whether the key for the Yellow team has a value associated with it. If it doesn’t, we want to insert the value -50, and the same for the Blue team. With the entry API, the code for this looks -like: +50, and the same for the Blue team. Using the `entry` API, the code looks like +Listing 8-23: ```rust use std::collections::HashMap; @@ -853,23 +953,29 @@ scores.entry(String::from("Blue")).or_insert(50); println!("{:?}", scores); ``` -The `or_insert` method on `Entry` returns the value for the corresponding -`Entry` key if it exists, and if not, inserts its argument as the new value for -this key and returns the modified `Entry`. This is much cleaner than writing -the logic ourselves, and in addition, plays more nicely with the borrow checker. +Listing 8-23: Using the `entry` method to only insert if the key does not +already have a value + +The `or_insert` method on `Entry` is defined to return the value for the +corresponding `Entry` key if that key exists, and if not, inserts the parameter +as the new value for this key and returns the modified `Entry`. 
This technique +is much cleaner than writing the logic ourselves, and in addition, plays more +nicely with the borrow checker. -This code will print `{"Yellow": 50, "Blue": 10}`. The first call to `entry` -will insert the key for the Yellow team with the value 50, since the Yellow -team doesn’t have a value already. The second call to `entry` will not change -the hash map since the Blue team already has the value 10. +Running the code in Listing 8-23 will print `{"Yellow": 50, "Blue": 10}`. The +first call to `entry` will insert the key for the Yellow team with the value +`50` because the Yellow team doesn’t have a value already. The second call to +`entry` will not change the hash map because the Blue team already has the +value `10`. -#### Update a Value Based on the Old Value +#### Updating a Value Based on the Old Value -Another common use case for hash maps is to look up a key’s value then update -it, based on the old value. For instance, if we wanted to count how many times -each word appeared in some text, we could use a hash map with the words as keys -and increment the value to keep track of how many times we’ve seen that word. -If this is the first time we’ve seen a word, we’ll first insert the value `0`. +Another common use case for hash maps is to look up a key’s value and then +update it based on the old value. For instance, Listing 8-24 shows code that +counts how many times each word appears in some text. We use a hash map with +the words as keys and increment the value to keep track of how many times we’ve +seen that word. If it’s the first time we’ve seen a word, we’ll first insert +the value `0`: ```rust use std::collections::HashMap; @@ -886,47 +992,50 @@ for word in text.split_whitespace() { println!("{:?}", map); ``` -This will print `{"world": 2, "hello": 1, "wonderful": 1}`. The `or_insert` -method actually returns a mutable reference (`&mut V`) to the value for this -key. Here we store that mutable reference in the `count` variable, so in order -to assign to that value we must first dereference `count` using the asterisk -(`*`). The mutable reference goes out of scope at the end of the `for` loop, so -all of these changes are safe and allowed by the borrowing rules. +Listing 8-24: Counting occurrences of words using a hash map that stores words +and counts + +This code will print `{"world": 2, "hello": 1, "wonderful": 1}`. The +`or_insert` method actually returns a mutable reference (`&mut V`) to the value +for this key. Here we store that mutable reference in the `count` variable, so +in order to assign to that value we must first dereference `count` using the +asterisk (`*`). The mutable reference goes out of scope at the end of the `for` +loop, so all of these changes are safe and allowed by the borrowing rules. ### Hashing Function By default, `HashMap` uses a cryptographically secure hashing function that can provide resistance to Denial of Service (DoS) attacks. This is not the fastest -hashing algorithm out there, but the tradeoff for better security that comes +hashing algorithm available, but the trade-off for better security that comes with the drop in performance is worth it. If you profile your code and find that the default hash function is too slow for your purposes, you can switch to another function by specifying a different *hasher*. A hasher is a type that -implements the `BuildHasher` trait. We’ll be talking about traits and how to +implements the `BuildHasher` trait. We’ll talk about traits and how to implement them in Chapter 10. 
You don’t necessarily have to implement your own -hasher from scratch; crates.io has libraries that others have shared that -provide hashers implementing many common hashing algorithms. +hasher from scratch; *https://crates.io* has libraries shared by other Rust +users that provide hashers implementing many common hashing algorithms. ## Summary -Vectors, strings, and hash maps will take you far in programs where you need to -store, access, and modify data. Here are some exercises you should now be -equipped to solve: +Vectors, strings, and hash maps will provide a large amount of functionality +that you need in programs where you need to store, access, and modify data. +Here are some exercises you should now be equipped to solve: * Given a list of integers, use a vector and return the mean (average), median (when sorted, the value in the middle position), and mode (the value that occurs most often; a hash map will be helpful here) of the list. -* Convert strings to Pig Latin, where the first consonant of each word is moved - to the end of the word with an added “ay”, so “first” becomes “irst-fay”. - Words that start with a vowel get “hay” added to the end instead (“apple” - becomes “apple-hay”). Remember about UTF-8 encoding! +* Convert strings to pig latin. The first consonant of each word is moved to + the end of the word and “ay” is added, so “first” becomes “irst-fay.” Words + that start with a vowel have “hay” added to the end instead (“apple” becomes + “apple-hay”). Keep in mind the details about UTF-8 encoding! * Using a hash map and vectors, create a text interface to allow a user to add - employee names to a department in the company. For example, “Add Sally to - Engineering” or “Add Amir to Sales”. Then let the user retrieve a list of all + employee names to a department in a company. For example, “Add Sally to + Engineering” or “Add Amir to Sales.” Then let the user retrieve a list of all people in a department or all people in the company by department, sorted alphabetically. -The standard library API documentation describes methods these types have that -will be helpful for these exercises! +The standard library API documentation describes methods that vectors, strings, +and hash maps have that will be helpful for these exercises! -We’re getting into more complex programs where operations can fail, which means -it’s a perfect time to go over error handling next! +We’re getting into more complex programs in which operations can fail; so, it’s +a perfect time to discuss error handling next! diff --git a/src/doc/book/second-edition/nostarch/chapter09.md b/src/doc/book/second-edition/nostarch/chapter09.md index f7921e1c65..1cc8c7d926 100644 --- a/src/doc/book/second-edition/nostarch/chapter09.md +++ b/src/doc/book/second-edition/nostarch/chapter09.md @@ -5,95 +5,100 @@ Rust’s commitment to reliability extends to error handling. Errors are a fact of life in software, so Rust has a number of features for handling situations -in which something goes wrong. In many cases, Rust will require you to -acknowledge the possibility of an error occurring and take some action before -your code will compile. This makes your program more robust by ensuring that -you won’t only discover errors after you’ve deployed your code to production. +in which something goes wrong. In many cases, Rust requires you to acknowledge +the possibility of an error occurring and take some action before your code +will compile. 
This requirement makes your program more robust by ensuring that +you’ll discover errors and handle them appropriately before you’ve deployed +your code to production! Rust groups errors into two major categories: *recoverable* and *unrecoverable* -errors. Recoverable errors are situations when it’s usually reasonable to -report the problem to the user and retry the operation, like a file not being -found. Unrecoverable errors are always symptoms of bugs, like trying to access -a location beyond the end of an array. +errors. Recoverable errors are situations in which it’s reasonable to report +the problem to the user and retry the operation, like a file not found error. +Unrecoverable errors are always symptoms of bugs, like trying to access a +location beyond the end of an array. -Most languages don’t distinguish between the two kinds of errors, and handle +Most languages don’t distinguish between these two kinds of errors and handle both in the same way using mechanisms like exceptions. Rust doesn’t have exceptions. Instead, it has the value `Result` for recoverable errors and the `panic!` macro that stops execution when it encounters unrecoverable -errors. This chapter will cover calling `panic!` first, then talk about -returning `Result` values. Finally, we’ll discuss considerations to take -into account when deciding whether to try to recover from an error or to stop -execution. +errors. This chapter covers calling `panic!` first and then talks about +returning `Result` values. Additionally, we’ll explore considerations to +take into account when deciding whether to try to recover from an error or to +stop execution. ## Unrecoverable Errors with `panic!` -Sometimes, bad things happen, and there’s nothing that you can do about it. For -these cases, Rust has the `panic!` macro. When this macro executes, your -program will print a failure message, unwind and clean up the stack, and then -quit. The most common situation this occurs in is when a bug of some kind has -been detected and it’s not clear to the programmer how to handle the error. +Sometimes, bad things happen in your code, and there’s nothing you can do about +it. In these cases, Rust has the `panic!` macro. When the `panic!` macro +executes, your program will print a failure message, unwind and clean up the +stack, and then quit. The most common situation this occurs in is when a bug of +some kind has been detected, and it’s not clear to the programmer how to handle +the error. -> ### Unwinding the Stack Versus Aborting on Panic +> ### Unwinding the Stack or Aborting in Response to a `panic!` > -> By default, when a `panic!` occurs, the program starts -> *unwinding*, which means Rust walks back up the stack and cleans up the data -> from each function it encounters, but this walking and cleanup is a lot of -> work. The alternative is to immediately *abort*, which ends the program -> without cleaning up. Memory that the program was using will then need to be -> cleaned up by the operating system. If in your project you need to make the -> resulting binary as small as possible, you can switch from unwinding to -> aborting on panic by adding `panic = 'abort'` to the appropriate `[profile]` -> sections in your *Cargo.toml*. For example, if you want to abort on panic in -> release mode: +> By default, when a `panic!` occurs, the program starts *unwinding*, which +> means Rust walks back up the stack and cleans up the data from each function +> it encounters. But this walking back and cleanup is a lot of work. 
The +> alternative is to immediately *abort*, which ends the program without +> cleaning up. Memory that the program was using will then need to be cleaned +> up by the operating system. If in your project you need to make the resulting +> binary as small as possible, you can switch from unwinding to aborting on +> panic by adding `panic = 'abort'` to the appropriate `[profile]` sections in +> your *Cargo.toml* file. For example, if you want to abort on panic in release +> mode, add this: > -> ```toml +> ``` > [profile.release] > panic = 'abort' > ``` -Let’s try calling `panic!` with a simple program: +Let’s try calling `panic!` in a simple program: -Filename: src/main.rs +Filename: src/main.rs -```rust,should_panic +``` fn main() { panic!("crash and burn"); } ``` -If you run it, you’ll see something like this: +When you run the program, you’ll see something like this: -```text +``` $ cargo run Compiling panic v0.1.0 (file:///projects/panic) - Finished debug [unoptimized + debuginfo] target(s) in 0.25 secs + Finished dev [unoptimized + debuginfo] target(s) in 0.25 secs Running `target/debug/panic` thread 'main' panicked at 'crash and burn', src/main.rs:2 note: Run with `RUST_BACKTRACE=1` for a backtrace. error: Process didn't exit successfully: `target/debug/panic` (exit code: 101) ``` -The last three lines contain the error message caused by the call to `panic!`. -The first line shows our panic message and the place in our source code where -the panic occurred: *src/main.rs:2* indicates that it’s the second line of our -*src/main.rs* file. +The call to `panic!` causes the error message contained in the last three +lines. The first line shows our panic message and the place in our source code +where the panic occurred: *src/main.rs:2* indicates that it’s the second line +of our *src/main.rs* file. -In this case, the line indicated is part of our code, and if we go to that line -we see the `panic!` macro call. In other cases, the `panic!` call might be in -code that our code calls. The filename and line number reported by the error -message will be someone else’s code where the `panic!` macro is called, not the -line of our code that eventually led to the `panic!`. We can use the backtrace -of the functions the `panic!` call came from to figure this out. +In this case, the line indicated is part of our code, and if we go to that +line, we see the `panic!` macro call. In other cases, the `panic!` call might +be in code that our code calls. The filename and line number reported by the +error message will be someone else’s code where the `panic!` macro is called, +not the line of our code that eventually led to the `panic!` call. We can use +the backtrace of the functions the `panic!` call came from to figure out the +part of our code that is causing the problem. We’ll discuss what a backtrace is +in more detail next. ### Using a `panic!` Backtrace Let’s look at another example to see what it’s like when a `panic!` call comes from a library because of a bug in our code instead of from our code calling -the macro directly: +the macro directly. Listing 9-1 has some code that attempts to access an +element by index in a vector: -Filename: src/main.rs +Filename: src/main.rs -```rust,should_panic +``` fn main() { let v = vec![1, 2, 3]; @@ -101,104 +106,126 @@ fn main() { } ``` -We’re attempting to access the hundredth element of our vector, but it only has -three elements. In this situation, Rust will panic. 
Using `[]` is supposed to -return an element, but if you pass an invalid index, there’s no element that -Rust could return here that would be correct. +Listing 9-1: Attempting to access an element beyond the end of a vector, which +will cause a `panic!` -Other languages like C will attempt to give you exactly what you asked for in +Here, we’re attempting to access the hundredth element of our vector, but it +has only three elements. In this situation, Rust will panic. Using `[]` is +supposed to return an element, but if you pass an invalid index, there’s no +element that Rust could return here that would be correct. + +Other languages, like C, will attempt to give you exactly what you asked for in this situation, even though it isn’t what you want: you’ll get whatever is at the location in memory that would correspond to that element in the vector, even though the memory doesn’t belong to the vector. This is called a *buffer -overread*, and can lead to security vulnerabilities if an attacker can +overread* and can lead to security vulnerabilities if an attacker is able to manipulate the index in such a way as to read data they shouldn’t be allowed to that is stored after the array. -In order to protect your program from this sort of vulnerability, if you try to -read an element at an index that doesn’t exist, Rust will stop execution and -refuse to continue. Let’s try it and see: +To protect your program from this sort of vulnerability, if you try to read an +element at an index that doesn’t exist, Rust will stop execution and refuse to +continue. Let’s try it and see: -```text +``` $ cargo run Compiling panic v0.1.0 (file:///projects/panic) - Finished debug [unoptimized + debuginfo] target(s) in 0.27 secs + Finished dev [unoptimized + debuginfo] target(s) in 0.27 secs Running `target/debug/panic` + thread 'main' panicked at 'index out of bounds: the len is 3 but the index is 100', /stable-dist-rustc/build/src/libcollections/vec.rs:1362 note: Run with `RUST_BACKTRACE=1` for a backtrace. error: Process didn't exit successfully: `target/debug/panic` (exit code: 101) ``` -This points at a file we didn’t write, *libcollections/vec.rs*. That’s the -implementation of `Vec` in the standard library. The code that gets run when -we use `[]` on our vector `v` is in *libcollections/vec.rs*, and that is where -the `panic!` is actually happening. +This error points at a file we didn’t write, *libcollections/vec.rs*. That’s +the implementation of `Vec` in the standard library. The code that gets run +when we use `[]` on our vector `v` is in *libcollections/vec.rs*, and that is +where the `panic!` is actually happening. The next note line tells us that we can set the `RUST_BACKTRACE` environment -variable to get a backtrace of exactly what happened to cause the error. Let’s -try that. Listing 9-1 shows output similar to what you’ll see: - -
+variable to get a backtrace of exactly what happened to cause the error. A +*backtrace* is a list of all the functions that have been called to get to this +point. Backtraces in Rust work like they do in other languages: the key to +reading the backtrace is to start from the top and read until you see files you +wrote. That’s the spot where the problem originated. The lines above the lines +mentioning your files are code that your code called; the lines below are code +that called your code. These lines might include core Rust code, standard +library code, or crates that you’re using. Let’s try getting a backtrace: +Listing 9-2 shows output similar to what you’ll see: -```text +``` $ RUST_BACKTRACE=1 cargo run - Finished debug [unoptimized + debuginfo] target(s) in 0.0 secs + Finished dev [unoptimized + debuginfo] target(s) in 0.0 secs Running `target/debug/panic` thread 'main' panicked at 'index out of bounds: the len is 3 but the index is -100', /stable-dist-rustc/build/src/libcollections/vec.rs:1395 +100', /stable-dist-rustc/build/src/libcollections/vec.rs:1392 stack backtrace: - 1: 0x10922522c - -std::sys::imp::backtrace::tracing::imp::write::h1204ab053b688140 - 2: 0x10922649e - -std::panicking::default_hook::{{closure}}::h1204ab053b688140 - 3: 0x109226140 - std::panicking::default_hook::h1204ab053b688140 - 4: 0x109226897 - -std::panicking::rust_panic_with_hook::h1204ab053b688140 - 5: 0x1092266f4 - std::panicking::begin_panic::h1204ab053b688140 - 6: 0x109226662 - std::panicking::begin_panic_fmt::h1204ab053b688140 - 7: 0x1092265c7 - rust_begin_unwind - 8: 0x1092486f0 - core::panicking::panic_fmt::h1204ab053b688140 - 9: 0x109248668 - -core::panicking::panic_bounds_check::h1204ab053b688140 - 10: 0x1092205b5 - as -core::ops::Index>::index::h1204ab053b688140 - 11: 0x10922066a - panic::main::h1204ab053b688140 - 12: 0x1092282ba - __rust_maybe_catch_panic - 13: 0x109226b16 - std::rt::lang_start::h1204ab053b688140 - 14: 0x1092206e9 - main -``` - -
- -Listing 9-1: The backtrace generated by a call to `panic!` displayed when the + 1: 0x560ed90ec04c - +std::sys::imp::backtrace::tracing::imp::write::hf33ae72d0baa11ed + at +/stable-dist-rustc/build/src/libstd/sys/unix/backtrace/tracing/gcc_s.rs:42 + 2: 0x560ed90ee03e - +std::panicking::default_hook::{{closure}}::h59672b733cc6a455 + at /stable-dist-rustc/build/src/libstd/panicking.rs:351 + 3: 0x560ed90edc44 - std::panicking::default_hook::h1670459d2f3f8843 + at /stable-dist-rustc/build/src/libstd/panicking.rs:367 + 4: 0x560ed90ee41b - +std::panicking::rust_panic_with_hook::hcf0ddb069e7abcd7 + at /stable-dist-rustc/build/src/libstd/panicking.rs:555 + 5: 0x560ed90ee2b4 - std::panicking::begin_panic::hd6eb68e27bdf6140 + at /stable-dist-rustc/build/src/libstd/panicking.rs:517 + 6: 0x560ed90ee1d9 - std::panicking::begin_panic_fmt::abcd5965948b877f8 + at /stable-dist-rustc/build/src/libstd/panicking.rs:501 + 7: 0x560ed90ee167 - rust_begin_unwind + at /stable-dist-rustc/build/src/libstd/panicking.rs:477 + 8: 0x560ed911401d - core::panicking::panic_fmt::hc0f6d7b2c300cdd9 + at /stable-dist-rustc/build/src/libcore/panicking.rs:69 + 9: 0x560ed9113fc8 - +core::panicking::panic_bounds_check::h02a4af86d01b3e96 + at /stable-dist-rustc/build/src/libcore/panicking.rs:56 + 10: 0x560ed90e71c5 - as +core::ops::Index>::index::h98abcd4e2a74c41 + at +/stable-dist-rustc/build/src/libcollections/vec.rs:1392 + 11: 0x560ed90e727a - panic::main::h5d6b77c20526bc35 + at /home/you/projects/panic/src/main.rs:4 + 12: 0x560ed90f5d6a - __rust_maybe_catch_panic + at +/stable-dist-rustc/build/src/libpanic_unwind/lib.rs:98 + 13: 0x560ed90ee926 - std::rt::lang_start::hd7c880a37a646e81 + at /stable-dist-rustc/build/src/libstd/panicking.rs:436 + at /stable-dist-rustc/build/src/libstd/panic.rs:361 + at /stable-dist-rustc/build/src/libstd/rt.rs:57 + 14: 0x560ed90e7302 - main + 15: 0x7f0d53f16400 - __libc_start_main + 16: 0x560ed90e6659 - _start + 17: 0x0 - +``` + +Listing 9-2: The backtrace generated by a call to `panic!` displayed when the environment variable `RUST_BACKTRACE` is set -
-
- -That’s a lot of output! Line 11 of the backtrace points to the line in our -project causing the problem: *src/main.rs*, line four. A backtrace is a list of -all the functions that have been called to get to this point. Backtraces in -Rust work like they do in other languages: the key to reading the backtrace is -to start from the top and read until you see files you wrote. That’s the spot -where the problem originated. The lines above the lines mentioning your files -are code that your code called; the lines below are code that called your code. -These lines might include core Rust code, standard library code, or crates that -you’re using. - -If we don’t want our program to panic, the location pointed to by the first -line mentioning a file we wrote is where we should start investigating in order -to figure out how we got to this location with values that caused the panic. In -our example where we deliberately wrote code that would panic in order to -demonstrate how to use backtraces, the way to fix the panic is to not try to -request an element at index 100 from a vector that only contains three items. -When your code panics in the future, you’ll need to figure out for your -particular case what action the code is taking with what values that causes the -panic and what the code should do instead. - -We’ll come back to `panic!` and when we should and should not use these methods -later in the chapter. Next, we’ll now look at how to recover from an error with -`Result`. +That’s a lot of output! The exact output you see might be different depending +on your operating system and Rust version. In order to get backtraces with this +information, debug symbols must be enabled. Debug symbols are enabled by +default when using cargo build or cargo run without the --release flag, as we +have here. + +In the output in Listing 9-2, line 11 of the backtrace points to the line in +our project that’s causing the problem: *src/main.rs* in line 4. If we don’t +want our program to panic, the location pointed to by the first line mentioning +a file we wrote is where we should start investigating to figure out how we got +to this location with values that caused the panic. In Listing 9-1 where we +deliberately wrote code that would panic in order to demonstrate how to use +backtraces, the way to fix the panic is to not request an element at index 100 +from a vector that only contains three items. When your code panics in the +future, you’ll need to figure out what action the code is taking with what +values that causes the panic and what the code should do instead. + +We’ll come back to `panic!` and when we should and should not use `panic!` to +handle error conditions later in the chapter. Next, we’ll look at how to +recover from an error using `Result`. ## Recoverable Errors with `Result` @@ -208,18 +235,18 @@ interpret and respond to. For example, if we try to open a file and that operation fails because the file doesn’t exist, we might want to create the file instead of terminating the process. 
-Recall from Chapter 2 the section on “Handling Potential Failure with the -`Result` Type” that the `Result` enum is defined as having two variants, `Ok` -and `Err`, as follows: +Recall in Chapter 2 in the “Handling Potential Failure with the `Result` Type” +section that the `Result` enum is defined as having two variants, `Ok` and +`Err`, as follows: -```rust +``` enum Result { Ok(T), Err(E), } ``` -The `T` and `E` are generic type parameters; we’ll go into generics in more +The `T` and `E` are generic type parameters: we’ll discuss generics in more detail in Chapter 10. What you need to know right now is that `T` represents the type of the value that will be returned in a success case within the `Ok` variant, and `E` represents the type of the error that will be returned in a @@ -229,12 +256,11 @@ library has defined on it in many different situations where the successful value and error value we want to return may differ. Let’s call a function that returns a `Result` value because the function could -fail: opening a file, shown in Listing 9-2. +fail: in Listing 9-3 we try to open a file: -
-Filename: src/main.rs +Filename: src/main.rs -```rust +``` use std::fs::File; fn main() { @@ -242,28 +268,23 @@ fn main() { } ``` -
- -Listing 9-2: Opening a file - -
-
+Listing 9-3: Opening a file How do we know `File::open` returns a `Result`? We could look at the standard library API documentation, or we could ask the compiler! If we give `f` a type -annotation of some type that we know the return type of the function is *not*, +annotation of a type that we know the return type of the function is *not* and then we try to compile the code, the compiler will tell us that the types don’t -match. The error message will then tell us what the type of `f` *is*! Let’s try +match. The error message will then tell us what the type of `f` *is*. Let’s try it: we know that the return type of `File::open` isn’t of type `u32`, so let’s -change the `let f` statement to: +change the `let f` statement to this: -```rust,ignore +``` let f: u32 = File::open("hello.txt"); ``` -Attempting to compile now gives us: +Attempting to compile now gives us the following output: -```text +``` error[E0308]: mismatched types --> src/main.rs:4:18 | @@ -282,9 +303,9 @@ error value is `std::io::Error`. This return type means the call to `File::open` might succeed and return to us a file handle that we can read from or write to. The function call also might -fail: for example, the file might not exist, or we might not have permission to +fail: for example, the file might not exist or we might not have permission to access the file. The `File::open` function needs to have a way to tell us -whether it succeeded or failed, and at the same time give us either the file +whether it succeeded or failed and at the same time give us either the file handle or error information. This information is exactly what the `Result` enum conveys. @@ -293,15 +314,14 @@ In the case where `File::open` succeeds, the value we will have in the variable it fails, the value in `f` will be an instance of `Err` that contains more information about the kind of error that happened. -We need to add to the code from Listing 9-2 to take different actions depending -on the value `File::open` returned. Listing 9-3 shows one way to handle the -`Result` with a basic tool: the `match` expression that we learned about in +We need to add to the code in Listing 9-3 to take different actions depending +on the value `File::open` returned. Listing 9-4 shows one way to handle the +`Result` using a basic tool: the `match` expression that we discussed in Chapter 6. -
-Filename: src/main.rs +Filename: src/main.rs -```rust,should_panic +``` use std::fs::File; fn main() { @@ -316,14 +336,9 @@ fn main() { } ``` -
- -Listing 9-3: Using a `match` expression to handle the `Result` variants we +Listing 9-4: Using a `match` expression to handle the `Result` variants we might have -
-
- Note that, like the `Option` enum, the `Result` enum and its variants have been imported in the prelude, so we don’t need to specify `Result::` before the `Ok` and `Err` variants in the `match` arms. @@ -338,26 +353,26 @@ The other arm of the `match` handles the case where we get an `Err` value from there’s no file named *hello.txt* in our current directory and we run this code, we’ll see the following output from the `panic!` macro: -```text +``` thread 'main' panicked at 'There was a problem opening the file: Error { repr: Os { code: 2, message: "No such file or directory" } }', src/main.rs:8 ``` +As usual, this output tells us exactly what has gone wrong. + ### Matching on Different Errors -The code in Listing 9-3 will `panic!` no matter the reason that `File::open` -failed. What we’d really like to do instead is take different actions for -different failure reasons: if `File::open` failed because the file doesn’t -exist, we want to create the file and return the handle to the new file. If -`File::open` failed for any other reason, for example because we didn’t have -permission to open the file, we still want to `panic!` in the same way as we -did in Listing 9-3. Let’s look at Listing 9-4, which adds another arm to the -`match`: +The code in Listing 9-4 will `panic!` no matter the reason that `File::open` +failed. What we want to do instead is take different actions for different +failure reasons: if `File::open` failed because the file doesn’t exist, we want +to create the file and return the handle to the new file. If `File::open` +failed for any other reason, for example because we didn’t have permission to +open the file, we still want the code to `panic!` in the same way as it did in +Listing 9-4. Look at Listing 9-5, which adds another arm to the `match`: -
-Filename: src/main.rs +Filename: src/main.rs -```rust,ignore +``` use std::fs::File; use std::io::ErrorKind; @@ -387,50 +402,47 @@ fn main() { } ``` -
- -Listing 9-4: Handling different kinds of errors in different ways - -
-
+Listing 9-5: Handling different kinds of errors in different ways The type of the value that `File::open` returns inside the `Err` variant is `io::Error`, which is a struct provided by the standard library. This struct has a method `kind` that we can call to get an `io::ErrorKind` value. `io::ErrorKind` is an enum provided by the standard library that has variants representing the different kinds of errors that might result from an `io` -operation. The variant we’re interested in is `ErrorKind::NotFound`, which -indicates the file we’re trying to open doesn’t exist yet. +operation. The variant we want to use is `ErrorKind::NotFound`, which indicates +the file we’re trying to open doesn’t exist yet. The condition `if error.kind() == ErrorKind::NotFound` is called a *match guard*: it’s an extra condition on a `match` arm that further refines the arm’s -pattern. This condition must be true in order for that arm’s code to get run; -otherwise, the pattern matching will move on to consider the next arm in the -`match`. The `ref` in the pattern is needed so that `error` is not moved into -the guard condition but is merely referenced by it. The reason `ref` is used to -take a reference in a pattern instead of `&` will be covered in detail in -Chapter 18. In short, in the context of a pattern, `&` matches a reference and -gives us its value, but `ref` matches a value and gives us a reference to it. +pattern. This condition must be true for that arm’s code to be run; otherwise, +the pattern matching will move on to consider the next arm in the `match`. The +`ref` in the pattern is needed so `error` is not moved into the guard condition +but is merely referenced by it. The reason `ref` is used to take a reference in +a pattern instead of `&` will be covered in detail in Chapter 18. In short, in +the context of a pattern, `&` matches a reference and gives us its value, but +`ref` matches a value and gives us a reference to it. The condition we want to check in the match guard is whether the value returned by `error.kind()` is the `NotFound` variant of the `ErrorKind` enum. If it is, -we try to create the file with `File::create`. However, since `File::create` -could also fail, we need to add an inner `match` statement as well! When the +we try to create the file with `File::create`. However, because `File::create` +could also fail, we need to add an inner `match` statement as well. When the file can’t be opened, a different error message will be printed. The last arm -of the outer `match` stays the same so that the program panics on any error -besides the missing file error. +of the outer `match` stays the same so the program panics on any error besides +the missing file error. ### Shortcuts for Panic on Error: `unwrap` and `expect` Using `match` works well enough, but it can be a bit verbose and doesn’t always communicate intent well. The `Result` type has many helper methods -defined on it to do various things. One of those methods, called `unwrap`, is a +defined on it to do various tasks. One of those methods, called `unwrap`, is a shortcut method that is implemented just like the `match` statement we wrote in -Listing 9-3. If the `Result` value is the `Ok` variant, `unwrap` will return +Listing 9-4. If the `Result` value is the `Ok` variant, `unwrap` will return the value inside the `Ok`. If the `Result` is the `Err` variant, `unwrap` will -call the `panic!` macro for us. +call the `panic!` macro for us. 
Here is an example of `unwrap` in action: + +Filename: src/main.rs -```rust,should_panic +``` use std::fs::File; fn main() { @@ -441,18 +453,20 @@ fn main() { If we run this code without a *hello.txt* file, we’ll see an error message from the `panic!` call that the `unwrap` method makes: -```text +``` thread 'main' panicked at 'called `Result::unwrap()` on an `Err` value: Error { repr: Os { code: 2, message: "No such file or directory" } }', /stable-dist-rustc/build/src/libcore/result.rs:868 ``` -There’s another method similar to `unwrap` that lets us also choose the -`panic!` error message: `expect`. Using `expect` instead of `unwrap` and -providing good error messages can convey your intent and make tracking down the -source of a panic easier. The syntax of `expect` looks like this: +Another method, `expect`, which is similar to `unwrap`, lets us also choose the +`panic!` error message. Using `expect` instead of `unwrap` and providing good +error messages can convey your intent and make tracking down the source of a +panic easier. The syntax of `expect` looks like this: + +Filename: src/main.rs -```rust,should_panic +``` use std::fs::File; fn main() { @@ -461,32 +475,38 @@ fn main() { ``` We use `expect` in the same way as `unwrap`: to return the file handle or call -the `panic!` macro. The error message that `expect` uses in its call to -`panic!` will be the parameter that we pass to `expect` instead of the default +the `panic!` macro. The error message used by `expect` in its call to `panic!` +will be the parameter that we pass to `expect`, rather than the default `panic!` message that `unwrap` uses. Here’s what it looks like: -```text +``` thread 'main' panicked at 'Failed to open hello.txt: Error { repr: Os { code: 2, message: "No such file or directory" } }', /stable-dist-rustc/build/src/libcore/result.rs:868 ``` +Because this error message starts with the text we specified, `Failed to open +hello.txt`, it will be easier to find where in the code this error message is +coming from. If we use `unwrap` in multiple places, it can take more time to +figure out exactly which `unwrap` is causing the panic because all `unwrap` +calls that panic print the same message. + ### Propagating Errors -When writing a function whose implementation calls something that might fail, -instead of handling the error within this function, you can choose to let your -caller know about the error so they can decide what to do. This is known as -*propagating* the error, and gives more control to the calling code where there +When you’re writing a function whose implementation calls something that might +fail, instead of handling the error within this function, you can return the +error to the calling code so that it can decide what to do. This is known as +*propagating* the error and gives more control to the calling code where there might be more information or logic that dictates how the error should be handled than what you have available in the context of your code. -For example, Listing 9-5 shows a function that reads a username from a file. If +For example, Listing 9-6 shows a function that reads a username from a file. If the file doesn’t exist or can’t be read, this function will return those errors to the code that called this function: -
+Filename: src/main.rs -```rust +``` use std::io; use std::io::Read; use std::fs::File; @@ -508,67 +528,63 @@ fn read_username_from_file() -> Result<String, io::Error> { } ``` -
- -Listing 9-5: A function that returns errors to the calling code using `match` - -
-
+Listing 9-6: A function that returns errors to the calling code using `match` Let’s look at the return type of the function first: `Result`. This means that the function is returning a value of the type +io::Error>`. This means the function is returning a value of the type `Result` where the generic parameter `T` has been filled in with the concrete type `String`, and the generic type `E` has been filled in with the concrete type `io::Error`. If this function succeeds without any problems, the -caller of this function will receive an `Ok` value that holds a `String` — the -username that this function read from the file. If this function encounters any -problems, the caller of this function will receive an `Err` value that holds an -instance of `io::Error` that contains more information about what the problems -were. We chose `io::Error` as the return type of this function because that -happens to be the type of the error value returned from both of the operations -we’re calling in this function’s body that might fail: the `File::open` -function and the `read_to_string` method. +code that calls this function will receive an `Ok` value that holds a +`String`—the username that this function read from the file. If this function +encounters any problems, the code that calls this function will receive an +`Err` value that holds an instance of `io::Error` that contains more +information about what the problems were. We chose `io::Error` as the return +type of this function because that happens to be the type of the error value +returned from both of the operations we’re calling in this function’s body that +might fail: the `File::open` function and the `read_to_string` method. The body of the function starts by calling the `File::open` function. Then we handle the `Result` value returned with a `match` similar to the `match` in -Listing 9-3, only instead of calling `panic!` in the `Err` case, we return +Listing 9-4, only instead of calling `panic!` in the `Err` case, we return early from this function and pass the error value from `File::open` back to the -caller as this function’s error value. If `File::open` succeeds, we store the -file handle in the variable `f` and continue. +calling code as this function’s error value. If `File::open` succeeds, we store +the file handle in the variable `f` and continue. Then we create a new `String` in variable `s` and call the `read_to_string` -method on the file handle in `f` in order to read the contents of the file into -`s`. The `read_to_string` method also returns a `Result` because it might fail, -even though `File::open` succeeded. So we need another `match` to handle that +method on the file handle in `f` to read the contents of the file into `s`. The +`read_to_string` method also returns a `Result` because it might fail, even +though `File::open` succeeded. So we need another `match` to handle that `Result`: if `read_to_string` succeeds, then our function has succeeded, and we return the username from the file that’s now in `s` wrapped in an `Ok`. If `read_to_string` fails, we return the error value in the same way that we returned the error value in the `match` that handled the return value of -`File::open`. We don’t need to explicitly say `return`, however, since this is -the last expression in the function. +`File::open`. However, we don’t need to explicitly say `return`, because this +is the last expression in the function. 
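To make the hand-off concrete, here’s a minimal sketch (not one of the book’s numbered listings) of how a hypothetical `main` might consume `read_username_from_file`; the function body is repeated from Listing 9-6 so the example stands on its own:

```
use std::io;
use std::io::Read;
use std::fs::File;

fn read_username_from_file() -> Result<String, io::Error> {
    let f = File::open("hello.txt");

    let mut f = match f {
        Ok(file) => file,
        Err(e) => return Err(e),
    };

    let mut s = String::new();

    match f.read_to_string(&mut s) {
        Ok(_) => Ok(s),
        Err(e) => Err(e),
    }
}

fn main() {
    // One possible reaction to each variant; calling code could just as
    // easily panic, fall back to a default name, or look somewhere else.
    match read_username_from_file() {
        Ok(username) => println!("username: {}", username),
        Err(e) => println!("could not read a username: {}", e),
    }
}
```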
The code that calls this code will then handle getting either an `Ok` value that contains a username or an `Err` value that contains an `io::Error`. We -don’t know what the caller will do with those values. If they get an `Err` -value, they could choose to call `panic!` and crash their program, use a +don’t know what the calling code will do with those values. If the calling code +gets an `Err` value, it could call `panic!` and crash the program, use a default username, or look up the username from somewhere other than a file, for -example. We don’t have enough information on what the caller is actually trying -to do, so we propagate all the success or error information upwards for them to -handle as they see fit. +example. We don’t have enough information on what the calling code is actually +trying to do, so we propagate all the success or error information upwards for +it to handle appropriately. -This pattern of propagating errors is so common in Rust that there is dedicated -syntax to make this easier: `?`. +This pattern of propagating errors is so common in Rust that Rust provides the +question mark operator `?` to make this easier. -### A Shortcut for Propagating Errors: `?` +#### A Shortcut for Propagating Errors: `?` -Listing 9-6 shows an implementation of `read_username_from_file` that has the -same functionality as it had in Listing 9-5, but this implementation uses the +Listing 9-7 shows an implementation of `read_username_from_file` that has the +same functionality as it had in Listing 9-6, but this implementation uses the question mark operator: -
+Filename: src/main.rs -```rust +``` use std::io; +use std::io::Read; use std::fs::File; @@ -579,32 +595,43 @@ fn read_username_from_file() -> Result<String, io::Error> { } ``` -
- -Listing 9-6: A function that returns errors to the calling code using `?` +Listing 9-7: A function that returns errors to the calling code using `?` -
-
- -The `?` placed after a `Result` value is defined to work the exact same way as -the `match` expressions we defined to handle the `Result` values in Listing -9-5. If the value of the `Result` is an `Ok`, the value inside the `Ok` will +The `?` placed after a `Result` value is defined to work in almost the same way +as the `match` expressions we defined to handle the `Result` values in Listing +9-6. If the value of the `Result` is an `Ok`, the value inside the `Ok` will get returned from this expression and the program will continue. If the value is an `Err`, the value inside the `Err` will be returned from the whole -function as if we had used the `return` keyword so that the error value gets -propagated to the caller. - -In the context of Listing 9-6, the `?` at the end of the `File::open` call will +function as if we had used the `return` keyword so the error value gets +propagated to the calling code. + +The one difference between the `match` expression from Listing 9-6 and what the +question mark operator does is that when using the question mark operator, +error values go through the `from` function defined in the `From` trait in the +standard library. Many error types implement the `from` function to convert an +error of one type into an error of another type. When used by the question mark +operator, the call to the `from` function converts the error type that the +question mark operator gets into the error type defined in the return type of +the current function that we’re using `?` in. This is useful when parts of a +function might fail for many different reasons, but the function returns one +error type that represents all the ways the function might fail. As long as +each error type implements the `from` function to define how to convert itself +to the returned error type, the question mark operator takes care of the +conversion automatically. + +In the context of Listing 9-7, the `?` at the end of the `File::open` call will return the value inside an `Ok` to the variable `f`. If an error occurs, `?` -will return early out of the whole function and give any `Err` value to our -caller. The same thing applies to the `?` at the end of the `read_to_string` -call. +will return early out of the whole function and give any `Err` value to the +calling code. The same thing applies to the `?` at the end of the +`read_to_string` call. The `?` eliminates a lot of boilerplate and makes this function’s implementation simpler. We could even shorten this code further by chaining -method calls immediately after the `?`: +method calls immediately after the `?` as shown in Listing 9-8: -```rust +Filename: src/main.rs + +``` use std::io; use std::io::Read; use std::fs::File; @@ -613,32 +640,33 @@ fn read_username_from_file() -> Result { let mut s = String::new(); File::open("hello.txt")?.read_to_string(&mut s)?; - Ok(s) } ``` +Listing 9-8: Chaining method calls after the question mark operator + We’ve moved the creation of the new `String` in `s` to the beginning of the function; that part hasn’t changed. Instead of creating a variable `f`, we’ve chained the call to `read_to_string` directly onto the result of `File::open("hello.txt")?`. We still have a `?` at the end of the `read_to_string` call, and we still return an `Ok` value containing the username in `s` when both `File::open` and `read_to_string` succeed rather than -returning errors. The functionality is again the same as in Listing 9-5 and -Listing 9-6, this is just a different, more ergonomic way to write it. +returning errors. 
The functionality is again the same as in Listing 9-6 and +Listing 9-7; this is just a different, more ergonomic way to write it. -### `?` Can Only Be Used in Functions That Return `Result` +#### `?` Can Only Be Used in Functions That Return Result The `?` can only be used in functions that have a return type of `Result`, -since it is defined to work in exactly the same way as the `match` expression -we defined in Listing 9-5. The part of the `match` that requires a return type -of `Result` is `return Err(e)`, so the return type of the function must be a +because it is defined to work in the same way as the `match` expression we +defined in Listing 9-6. The part of the `match` that requires a return type of +`Result` is `return Err(e)`, so the return type of the function must be a `Result` to be compatible with this `return`. -Let’s look at what happens if use `?` in the `main` function, which you’ll +Let’s look at what happens if we use `?` in the `main` function, which you’ll recall has a return type of `()`: -```rust,ignore +``` use std::fs::File; fn main() { @@ -646,69 +674,61 @@ fn main() { } ``` - - -When we compile this, we get the following error message: +When we compile this code, we get the following error message: -```text -error[E0308]: mismatched types - --> +``` +error[E0277]: the `?` operator can only be used in a function that returns +`Result` (or another type that implements `std::ops::Try`) + --> src/main.rs:4:13 | -3 | let f = File::open("hello.txt")?; - | ^^^^^^^^^^^^^^^^^^^^^^^^^ expected (), found enum -`std::result::Result` +4 | let f = File::open("hello.txt")?; + | ------------------------ + | | + | cannot use the `?` operator in a function that returns `()` + | in this macro invocation | - = note: expected type `()` - = note: found type `std::result::Result<_, _>` + = help: the trait `std::ops::Try` is not implemented for `()` + = note: required by `std::ops::Try::from_error` ``` -This error is pointing out that we have mismatched types: the `main` function -has a return type of `()`, but the `?` might return a `Result`. In functions -that don’t return `Result`, when you call other functions that return `Result`, -you’ll need to use a `match` or one of the `Result` methods to handle it, -instead of using `?` to potentially propagate the error to the caller. +This error points out that we’re only allowed to use the question mark operator +in a function that returns `Result`. In functions that don’t return `Result`, +when you call other functions that return `Result`, you’ll need to use a +`match` or one of the `Result` methods to handle it instead of using `?` to +potentially propagate the error to the calling code. Now that we’ve discussed the details of calling `panic!` or returning `Result`, let’s return to the topic of how to decide which is appropriate to use in which cases. -## To `panic!` or Not To `panic!` +## To `panic!` or Not to `panic!` So how do you decide when you should `panic!` and when you should return -`Result`? When code panics, there’s no way to recover. You could choose to call -`panic!` for any error situation, whether there’s a possible way to recover or -not, but then you’re making the decision for your callers that a situation is -unrecoverable. When you choose to return a `Result` value, you give your caller -options, rather than making the decision for them. 
They could choose to attempt -to recover in a way that’s appropriate for their situation, or they could -decide that actually, an `Err` value in this case is unrecoverable, so they can -call `panic!` and turn your recoverable error into an unrecoverable one. -Therefore, returning `Result` is a good default choice when you’re defining a -function that might fail. - -There are a few situations in which it’s more appropriate to write code that -panics instead of returning a `Result`, but they are less common. Let’s discuss -why it’s appropriate to panic in examples, prototype code, and tests, then -situations where you as a human can know a method won’t fail that the compiler -can’t reason about, and conclude with some general guidelines on how to decide +`Result`? When code panics, there’s no way to recover. You could call `panic!` +for any error situation, whether there’s a possible way to recover or not, but +then you’re making the decision on behalf of the code calling your code that a +situation is unrecoverable. When you choose to return a `Result` value, you +give the calling code options rather than making the decision for it. The +calling code could choose to attempt to recover in a way that’s appropriate for +its situation, or it could decide that an `Err` value in this case is +unrecoverable, so it can call `panic!` and turn your recoverable error into an +unrecoverable one. Therefore, returning `Result` is a good default choice when +you’re defining a function that might fail. + +In a few situations it’s more appropriate to write code that panics instead of +returning a `Result`, but they are less common. Let’s explore why it’s +appropriate to panic in examples, prototype code, and tests; then in situations +where you as a human can know a method won’t fail that the compiler can’t +reason about; and conclude with some general guidelines on how to decide whether to panic in library code. -### Examples, Prototype Code, and Tests: Perfectly Fine to Panic +### Examples, Prototype Code, and Tests Are All Places it’s Perfectly Fine to Panic When you’re writing an example to illustrate some concept, having robust error handling code in the example as well can make the example less clear. In examples, it’s understood that a call to a method like `unwrap` that could -`panic!` is meant as a placeholder for the way that you’d actually like your -application to handle errors, which can differ based on what the rest of your -code is doing. +`panic!` is meant as a placeholder for the way that you’d want your application +to handle errors, which can differ based on what the rest of your code is doing. Similarly, the `unwrap` and `expect` methods are very handy when prototyping, before you’re ready to decide how to handle errors. They leave clear markers in @@ -716,10 +736,10 @@ your code for when you’re ready to make your program more robust. If a method call fails in a test, we’d want the whole test to fail, even if that method isn’t the functionality under test. Because `panic!` is how a test -gets marked as a failure, calling `unwrap` or `expect` is exactly what makes -sense to do. +is marked as a failure, calling `unwrap` or `expect` is exactly what should +happen. -### Cases When You Have More Information Than The Compiler +### Cases When You Have More Information Than the Compiler It would also be appropriate to call `unwrap` when you have some other logic that ensures the `Result` will have an `Ok` value, but the logic isn’t @@ -727,10 +747,10 @@ something the compiler understands. 
You’ll still have a `Result` value that yo need to handle: whatever operation you’re calling still has the possibility of failing in general, even though it’s logically impossible in your particular situation. If you can ensure by manually inspecting the code that you’ll never -have an `Err` variant, it is perfectly acceptable to call `unwrap`. Here’s an +have an `Err` variant, it’s perfectly acceptable to call `unwrap`. Here’s an example: -```rust +``` use std::net::IpAddr; let home = "127.0.0.1".parse::().unwrap(); @@ -741,62 +761,62 @@ that `127.0.0.1` is a valid IP address, so it’s acceptable to use `unwrap` here. However, having a hardcoded, valid string doesn’t change the return type of the `parse` method: we still get a `Result` value, and the compiler will still make us handle the `Result` as if the `Err` variant is still a -possibility since the compiler isn’t smart enough to see that this string is -always a valid IP address. If the IP address string came from a user instead of -being hardcoded into the program, and therefore *did* have a possibility of -failure, we’d definitely want to handle the `Result` in a more robust way +possibility because the compiler isn’t smart enough to see that this string is +always a valid IP address. If the IP address string came from a user rather +than being hardcoded into the program, and therefore *did* have a possibility +of failure, we’d definitely want to handle the `Result` in a more robust way instead. ### Guidelines for Error Handling -It’s advisable to have your code `panic!` when it’s possible that you could end -up in a bad state—in this context, bad state is when some assumption, -guarantee, contract, or invariant has been broken, such as when invalid values, -contradictory values, or missing values are passed to your code—plus one or -more of the following: +It’s advisable to have your code `panic!` when it’s possible that your code +could end up in a bad state. In this context, bad state is when some +assumption, guarantee, contract, or invariant has been broken, such as when +invalid values, contradictory values, or missing values are passed to your +code—plus one or more of the following: -* The bad state is not something that’s *expected* to happen occasionally -* Your code after this point needs to rely on not being in this bad state -* There’s not a good way to encode this information in the types you use +* The bad state is not something that’s *expected* to happen occasionally. +* Your code after this point needs to rely on not being in this bad state. +* There’s not a good way to encode this information in the types you use. If someone calls your code and passes in values that don’t make sense, the best -thing might be to `panic!` and alert the person using your library to the bug -in their code so that they can fix it during development. Similarly, `panic!` -is often appropriate if you’re calling external code that is out of your -control, and it returns an invalid state that you have no way of fixing. +choice might be to `panic!` and alert the person using your library to the bug +in their code so they can fix it during development. Similarly, `panic!` is +often appropriate if you’re calling external code that is out of your control, +and it returns an invalid state that you have no way of fixing. When a bad state is reached, but it’s expected to happen no matter how well you write your code, it’s still more appropriate to return a `Result` rather than -calling `panic!`. 
Examples of this include a parser being given malformed data, -or an HTTP request returning a status that indicates you have hit a rate limit. -In these cases, you should indicate that failure is an expected possibility by -returning a `Result` in order to propagate these bad states upwards so that the -caller can decide how they would like to handle the problem. To `panic!` -wouldn’t be the best way to handle these cases. +making a `panic!` call. Examples of this include a parser being given malformed +data or an HTTP request returning a status that indicates you have hit a rate +limit. In these cases, you should indicate that failure is an expected +possibility by returning a `Result` to propagate these bad states upwards so +the calling code can decide how to handle the problem. To `panic!` wouldn’t be +the best way to handle these cases. When your code performs operations on values, your code should verify the values are valid first, and `panic!` if the values aren’t valid. This is mostly for safety reasons: attempting to operate on invalid data can expose your code -to vulnerabilities. This is the main reason that the standard library will -`panic!` if you attempt an out-of-bounds array access: trying to access memory -that doesn’t belong to the current data structure is a common security problem. +to vulnerabilities. This is the main reason the standard library will `panic!` +if you attempt an out-of-bounds memory access: trying to access memory that +doesn’t belong to the current data structure is a common security problem. Functions often have *contracts*: their behavior is only guaranteed if the inputs meet particular requirements. Panicking when the contract is violated makes sense because a contract violation always indicates a caller-side bug, -and it is not a kind of error you want callers to have to explicitly handle. In -fact, there’s no reasonable way for calling code to recover: the calling -*programmers* need to fix the code. Contracts for a function, especially when a -violation will cause a panic, should be explained in the API documentation for -the function. - -Having lots of error checks in all of your functions would be verbose and -annoying, though. Luckily, you can use Rust’s type system (and thus the type -checking the compiler does) to do a lot of the checks for you. If your function +and it’s not a kind of error you want the calling code to have to explicitly +handle. In fact, there’s no reasonable way for calling code to recover: the +calling *programmers* need to fix the code. Contracts for a function, +especially when a violation will cause a panic, should be explained in the API +documentation for the function. + +However, having lots of error checks in all of your functions would be verbose +and annoying. Fortunately, you can use Rust’s type system (and thus the type +checking the compiler does) to do many of the checks for you. If your function has a particular type as a parameter, you can proceed with your code’s logic knowing that the compiler has already ensured you have a valid value. For example, if you have a type rather than an `Option`, your program expects to have *something* rather than *nothing*. Your code then doesn’t have to handle -two cases for the `Some` and `None` variants, it will only have one case for +two cases for the `Some` and `None` variants: it will only have one case for definitely having a value. Code trying to pass nothing to your function won’t even compile, so your function doesn’t have to check for that case at runtime. 
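As a minimal sketch of that point (the `greeting` function here is hypothetical, not one of the book’s examples), the parameter type alone rules out the “nothing” case:

```
fn greeting(name: String) -> String {
    // `name` is guaranteed to hold a value; there is no `None` case to check.
    format!("Hello, {}!", name)
}

fn main() {
    println!("{}", greeting(String::from("world")));
    // greeting(None); // would not compile: `None` is not a `String`
}
```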
Another example is using an unsigned integer type like `u32`, which ensures the @@ -805,21 +825,21 @@ parameter is never negative. ### Creating Custom Types for Validation Let’s take the idea of using Rust’s type system to ensure we have a valid value -one step further, and look at creating a custom type for validation. Recall the -guessing game in Chapter 2, where our code asked the user to guess a number -between 1 and 100. We actually never validated that the user’s guess was -between those numbers before checking it against our secret number, only that -it was positive. In this case, the consequences were not very dire: our output -of “Too high” or “Too low” would still be correct. It would be a useful -enhancement to guide the user towards valid guesses, though, and have different -behavior when a user guesses a number that’s out of range versus when a user -types, for example, letters instead. +one step further and look at creating a custom type for validation. Recall the +guessing game in Chapter 2 where our code asked the user to guess a number +between 1 and 100. We never validated that the user’s guess was between those +numbers before checking it against our secret number; we only validated that +the guess was positive. In this case, the consequences were not very dire: our +output of “Too high” or “Too low” would still be correct. It would be a useful +enhancement to guide the user toward valid guesses and have different behavior +when a user guesses a number that’s out of range versus when a user types, for +example, letters instead. One way to do this would be to parse the guess as an `i32` instead of only a -`u32`, to allow potentially negative numbers, then add a check for the number -being in range: +`u32` to allow potentially negative numbers, and then add a check for the +number being in range, like so: -```rust,ignore +``` loop { // snip @@ -838,7 +858,7 @@ loop { } ``` -The `if` expression checks to see if our value is out of range, tells the user +The `if` expression checks whether our value is out of range, tells the user about the problem, and calls `continue` to start the next iteration of the loop and ask for another guess. After the `if` expression, we can proceed with the comparisons between `guess` and the secret number knowing that `guess` is @@ -852,14 +872,12 @@ to have a check like this in every function. Instead, we can make a new type and put the validations in a function to create an instance of the type rather than repeating the validations everywhere. That way, it’s safe for functions to use the new type in their signatures and -confidently use the values they receive. Listing 9-8 shows one way to define a +confidently use the values they receive. Listing 9-9 shows one way to define a `Guess` type that will only create an instance of `Guess` if the `new` function receives a value between 1 and 100: -
- -```rust -struct Guess { +``` +pub struct Guess { value: u32, } @@ -870,7 +888,7 @@ impl Guess { } Guess { - value: value, + value } } @@ -880,49 +898,44 @@ impl Guess { } ``` -
- -Listing 9-8: A `Guess` type that will only continue with values between 1 and +Listing 9-9: A `Guess` type that will only continue with values between 1 and 100 -
-
- First, we define a struct named `Guess` that has a field named `value` that holds a `u32`. This is where the number will be stored. Then we implement an associated function named `new` on `Guess` that creates instances of `Guess` values. The `new` function is defined to have one parameter named `value` of type `u32` and to return a `Guess`. The code in the -body of the `new` function tests `value` to make sure it is between 1 and 100. -If `value` doesn’t pass this test, we call `panic!`, which will alert the -programmer who is calling this code that they have a bug they need to fix, -since creating a `Guess` with a `value` outside this range would violate the -contract that `Guess::new` is relying on. The conditions in which `Guess::new` -might panic should be discussed in its public-facing API documentation; we’ll -cover documentation conventions around indicating the possibility of a `panic!` -in the API documentation that you create in Chapter 14. If `value` does pass -the test, we create a new `Guess` with its `value` field set to the `value` -parameter and return the `Guess`. +body of the `new` function tests `value` to make sure it’s between 1 and 100. +If `value` doesn’t pass this test, we make a `panic!` call, which will alert +the programmer who is writing the calling code that they have a bug they need +to fix, because creating a `Guess` with a `value` outside this range would +violate the contract that `Guess::new` is relying on. The conditions in which +`Guess::new` might panic should be discussed in its public-facing API +documentation; we’ll cover documentation conventions indicating the possibility +of a `panic!` in the API documentation that you create in Chapter 14. If +`value` does pass the test, we create a new `Guess` with its `value` field set +to the `value` parameter and return the `Guess`. Next, we implement a method named `value` that borrows `self`, doesn’t have any other parameters, and returns a `u32`. This is a kind of method sometimes -called a *getter*, since its purpose is to get some data from its fields and +called a *getter*, because its purpose is to get some data from its fields and return it. This public method is necessary because the `value` field of the `Guess` struct is private. It’s important that the `value` field is private so -that code using the `Guess` struct is not allowed to set `value` directly: -callers *must* use the `Guess::new` function to create an instance of `Guess`, -which ensures there’s no way for a `Guess` to have a `value` that hasn’t been -checked by the conditions in the `Guess::new` function. +code using the `Guess` struct is not allowed to set `value` directly: code +outside the module *must* use the `Guess::new` function to create an instance +of `Guess`, which ensures there’s no way for a `Guess` to have a `value` that +hasn’t been checked by the conditions in the `Guess::new` function. A function that has a parameter or returns only numbers between 1 and 100 could then declare in its signature that it takes or returns a `Guess` rather than a -`u32`, and wouldn’t need to do any additional checks in its body. +`u32` and wouldn’t need to do any additional checks in its body. ## Summary Rust’s error handling features are designed to help you write more robust code. -The `panic!` macro signals that your program is in a state it can’t handle, and +The `panic!` macro signals that your program is in a state it can’t handle and lets you tell the process to stop instead of trying to proceed with invalid or incorrect values. 
The `Result` enum uses Rust’s type system to indicate that operations might fail in a way that your code could recover from. You can use @@ -930,6 +943,7 @@ operations might fail in a way that your code could recover from. You can use success or failure as well. Using `panic!` and `Result` in the appropriate situations will make your code more reliable in the face of inevitable problems. -Now that we’ve seen useful ways that the standard library uses generics with -the `Option` and `Result` enums, let’s talk about how generics work and how you -can make use of them in your code. +Now that you’ve seen useful ways that the standard library uses generics with +the `Option` and `Result` enums, we’ll talk about how generics work and how you +can use them in your code in the next chapter. + diff --git a/src/doc/book/second-edition/nostarch/chapter13.md b/src/doc/book/second-edition/nostarch/chapter13.md index 283bd6ff90..e497d4cc47 100644 --- a/src/doc/book/second-edition/nostarch/chapter13.md +++ b/src/doc/book/second-edition/nostarch/chapter13.md @@ -3,19 +3,14 @@ # Functional Language features in Rust: Iterators and Closures - - - Rust’s design has taken inspiration from a lot of existing languages and techniques, and one significant influence is *functional programming*. -Programming in a functional style often includes using functions as values in -arguments or return values of other functions, assigning functions to variables -for later execution, and so forth. We won’t debate here the issue of what, -exactly, functional programming is or is not, but will instead show off some -features of Rust that are similar to features in many languages often referred -to as functional. +Programming in a functional style often includes using functions as values, by +passing them in arguments, returning them from other functions, assigning them +to variables for later execution, and so forth. We won’t debate here the issue +of what, exactly, functional programming is or is not, but will instead show +off some features of Rust that are similar to features in many languages often +referred to as functional. More specifically, we’re going to cover: @@ -32,49 +27,32 @@ code, so we’re devoting an entire chapter to them here. ## Closures: Anonymous Functions that can Capture their Environment - - - -Rust’s *closures* are anonymous functions that you can save in a variable or -pass as arguments to other functions. You can create the closure in one place, -and then call the closure to evaluate it in a different context. Unlike -functions, closures are allowed to capture values from the scope in which they -are called. We’re going to demonstrate how these features of closures allow for -code reuse and customization of behavior. - - - +Rust’s *closures* are anonymous functions you can save in a variable or pass as +arguments to other functions. You can create the closure in one place, and then +call the closure to evaluate it in a different context. Unlike functions, +closures are able to capture values from the scope in which they are called. +We’re going to demonstrate how these features of closures allow for code reuse +and customization of behavior. ### Creating an Abstraction of Behavior Using a Closure -Let’s work on an example that will show a situation where storing a closure to -be executed at a later time is useful. We’ll talk about the syntax of closures, -type inference, and traits along the way. 
- -The hypothetical situation is this: we’re working at a startup that’s making an -app to generate custom exercise workout plans. The backend is written in Rust, -and the algorithm that generates the workout plan takes into account many -different factors like the app user’s age, their Body Mass Index, their -preferences, their recent workouts, and an intensity number they specify. The -actual algorithm used isn’t important in this example; what’s important is that -this calculation takes a few seconds. We only want to call this algorithm if we -need to, and we only want to call it once, so that we aren’t making the user -wait more than they need to. We’re going to simulate calling this hypothetical -algorithm by calling the `simulated_expensive_calculation` function shown in -Listing 13-1 instead, which will print `calculating slowly...`, wait for two -seconds, and then return whatever number we passed in: +Let’s work on an example of a situation in which it’s useful to store a closure +to be executed at a later time. We’ll talk about the syntax of closures, type +inference, and traits along the way. + +The hypothetical situation is this: we work at a startup that’s making an app +to generate custom exercise workout plans. The backend is written in Rust, and +the algorithm that generates the workout plan takes into account many different +factors, like the app user’s age, Body Mass Index, preferences, recent +workouts, and an intensity number they specify. The actual algorithm used isn’t +important in this example; what’s important is that this calculation takes a +few seconds. We only want to call this algorithm when we need to, and only call +it once, so we aren’t making the user wait more than necessary. + +We’ll simulate calling this hypothetical algorithm with the +`simulated_expensive_calculation` function shown in Listing 13-1, which will +print `calculating slowly...`, wait for two seconds, and then return whatever +number we passed in: Filename: src/main.rs @@ -89,31 +67,25 @@ fn simulated_expensive_calculation(intensity: i32) -> i32 { } ``` -Listing 13-1: A function we’ll use to stand in for a hypothetical calculation -that takes about two seconds to run +Listing 13-1: A function to stand in for a hypothetical calculation that takes +about two seconds to run -Next, we have a `main` function that contains the parts of the workout app that -are important for this example. This represents the code that the app would -call when a user asks for a workout plan. Because the interaction with the -app’s frontend isn’t relevant to the use of closures, we’re going to hardcode -values representing inputs to our program and print the outputs. +Next, we have a `main` function that contains the parts of the workout app +important for this example. This represents the code that the app would call +when a user asks for a workout plan. Because the interaction with the app’s +frontend isn’t relevant to the use of closures, we’re going to hardcode values +representing inputs to our program and print the outputs. 
-The inputs to the program are: +The required inputs are: -- An `intensity` number from the user, specified when they request a workout, - so they can indicate whether they’d like a low intensity workout or a high +* **An intensity number from the user**, specified when they request a + workout to indicate whether they’d like a low intensity workout or a high intensity workout -- A random number that will generate some variety in the workout plans +* **A random number** that will generate some variety in the workout plans -The output the program prints will be the recommended workout plan. +The output will be the recommended workout plan. -Listing 13-2 shows the `main` function we’re going to use. We’ve hardcoded the -variable `simulated_user_specified_value` to 10 and the variable -`simulated_random_number` to 7 for simplicity’s sake; in an actual program we’d -get the intensity number from the app frontend and we’d use the `rand` crate to -generate a random number like we did in the Guessing Game example in Chapter 2. -The `main` function calls a `generate_workout` function with the simulated -input values: +Listing 13-2 shows the `main` function we’re going to use. Filename: src/main.rs @@ -122,17 +94,27 @@ fn main() { let simulated_user_specified_value = 10; let simulated_random_number = 7; - generate_workout(simulated_user_specified_value, simulated_random_number); + generate_workout( + simulated_user_specified_value, + simulated_random_number + ); } ``` -Listing 13-2: A `main` function containing hardcoded values to simulate user -input and random number generation inputs to the `generate_workout` function +Listing 13-2: A `main` function with hardcoded values to simulate user input +and random number generation + +We’ve hardcoded the variable `simulated_user_specified_value` to 10 and the +variable `simulated_random_number` to 7 for simplicity’s sake; in an actual +program we’d get the intensity number from the app frontend and we’d use the +`rand` crate to generate a random number like we did in the Guessing Game +example in Chapter 2. The `main` function calls a `generate_workout` function +with the simulated input values. -That’s the context of what we’re working on. The `generate_workout` function in -Listing 13-3 contains the business logic of the app that we’re most concerned -with in this example. The rest of the code changes in this example will be made -to this function: +There’s the context, so let’s get to the algorithm. The `generate_workout` +function in Listing 13-3 contains the business logic of the app that we’re most +concerned with in this example. The rest of the code changes in this example +will be made to this function: Filename: src/main.rs @@ -154,47 +136,45 @@ fn generate_workout(intensity: i32, random_number: i32) { println!( "Today, run for {} minutes!", simulated_expensive_calculation(intensity) - ) + ); } } } ``` -Listing 13-3: The business logic of the program that prints the workout plans -based on the inputs and calls to the `simulated_expensive_calculation` function +Listing 13-3: The business logic that prints the workout plans based on the +inputs and calls to the `simulated_expensive_calculation` function The code in Listing 13-3 has multiple calls to the slow calculation function. The first `if` block calls `simulated_expensive_calculation` twice, the `if` -inside the outer `else` doesn’t call it at all, and the code inside the `else` -case inside the outer `else` calls it once. 
- - +inside the outer `else` doesn’t call it at all, and the code inside the +second `else` case calls it once. The desired behavior of the `generate_workout` function is to first check if the user wants a low intensity workout (indicated by a number less than 25) or -a high intensity workout (25 or more). Low intensity workout plans will -recommend a number of pushups and situps based on the complex algorithm we’re -simulating with the `simulated_expensive_calculation` function, which needs the -intensity number as an input. +a high intensity workout (25 or more). + +Low intensity workout plans will recommend a number of pushups and situps based +on the complex algorithm we’re simulating. If the user wants a high intensity workout, there’s some additional logic: if the value of the random number generated by the app happens to be 3, the app -will recommend a break and hydration instead. If not, the user will get a high -intensity workout of a number of minutes of running that comes from the complex -algorithm. - -The data science team has let us know that there are going to be some changes -to the way we have to call the algorithm. To simplify the update when those -changes happen, we would like to refactor this code to have only a single call -to the `simulated_expensive_calculation` function. We also want to get rid of -the spot where we’re currently calling the function twice unnecessarily, and -we don’t want to add any other calls to that function in the process. That is, -we don’t want to call it if we’re in the case where the result isn’t needed at -all, and we still want to call it only once in the last case. - -There are many ways we could restructure this program. The way we’re going to -try first is extracting the duplicated call to the expensive calculation -function into a variable, as shown in Listing 13-4: +will recommend a break and hydration. If not, the user will get a number of +minutes of running based on the complex algorithm. + +The data science team has let us know that we’ll have to make some changes to +the way we call the algorithm in the future. To simplify the update when those +changes happen, we want to refactor this code so it only calls the +`simulated_expensive_calculation` function once. We also want to cut the place +where we’re currently calling the function twice unnecessarily without adding +any other calls to that function in the process. That is, we don’t want to call +it if the result isn’t needed, and we still want to call it only once. + +#### Refactoring Using Functions + +There are many ways we could restructure this program. First we’ll try +extracting the duplicated call to the expensive calculation function into a +variable, as shown in Listing 13-4: Filename: src/main.rs @@ -219,17 +199,14 @@ fn generate_workout(intensity: i32, random_number: i32) { println!( "Today, run for {} minutes!", expensive_result - ) + ); } } } ``` Listing 13-4: Extracting the calls to `simulated_expensive_calculation` to one -place before the `if` blocks and storing the result in the `expensive_result` -variable - - +place and storing the result in the `expensive_result` variable This change unifies all the calls to `simulated_expensive_calculation` and solves the problem of the first `if` block calling the function twice @@ -237,15 +214,14 @@ unnecessarily. Unfortunately, we’re now calling this function and waiting for the result in all cases, which includes the inner `if` block that doesn’t use the result value at all. 
-We want to be able to specify some code in one place in our program, but then -only execute that code if we actually need the result in some other place in -our program. This is a use case for closures! +We want to define code in one place in our program, but only *execute* that +code where we actually need the result. This is a use case for closures! -### Closures Store Code to be Executed Later +#### Refactoring with Closures to Store Code for Later Execution Instead of always calling the `simulated_expensive_calculation` function before -the `if` blocks, we can define a closure and store the closure in a variable -instead of the result as shown in Listing 13-5. We can actually choose to move +the `if` blocks, we can define a closure and store the *closure* in a variable +rather than storing the result, as shown in Listing 13-5. We can actually move the whole body of `simulated_expensive_calculation` within the closure we’re introducing here: @@ -259,42 +235,34 @@ let expensive_closure = |num| { }; ``` -Listing 13-5: Defining a closure with the body that was in the expensive -function and store the closure in the `expensive_closure` variable - - - +Listing 13-5: Defining a closure and storing it in the `expensive_closure` +variable -The closure definition is the part after the `=` that we’re assigning to the -variable `expensive_closure`. To define a closure, we start with a pair of -vertical pipes (`|`). Inside the pipes is where we specify the parameters to -the closure; this syntax was chosen because of its similarity to closure -definitions in Smalltalk and Ruby. This closure has one parameter named `num`; -if we had more than one parameter, we would separate them with commas, like -`|param1, param2|`. +The closure definition comes after the `=` to assign it to the variable +`expensive_closure`. To define a closure, we start with a pair of vertical +pipes (`|`), inside which we specify the parameters to the closure; this syntax +was chosen because of its similarity to closure definitions in Smalltalk and +Ruby. This closure has one parameter named `num`; if we had more than one +parameter, we would separate them with commas, like `|param1, param2|`. -After the parameters, we put curly braces that hold the body of the closure. -The curly braces are optional if the closure body only has one line. After the -curly braces, we need a semicolon to go with the `let` statement. The value -returned from the last line in the closure body (`num`), since that line -doesn’t end in a semicolon, will be the value returned from the closure when -it’s called, just like in function bodies. +After the parameters, we place curly braces that hold the body of the +closure—these are optional if the closure body is a single expression. The end +of the closure, after the curly braces, needs a semicolon to complete the `let` +statement. The value returned from the last line in the closure body (`num`) +will be the value returned from the closure when it’s called, since that line +doesn’t end in a semicolon; just like in function bodies. Note that this `let` statement means `expensive_closure` contains the *definition* of an anonymous function, not the *resulting value* of calling the -anonymous function. Recall the reason we’re using a closure is because we want -to define the code to call at one point, store that code, and actually call it -at a later point; the code we want to call is now stored in `expensive_closure`. +anonymous function. 
Recall that we’re using a closure because we want to define +the code to call at one point, store that code, and actually call it at a later +point; the code we want to call is now stored in `expensive_closure`. Now that we have the closure defined, we can change the code in the `if` blocks -to call the closure in order to execute the code and get the resulting value. -Calling a closure looks very similar to calling a function; we specify the -variable name that holds the closure definition and follow it with parentheses -containing the argument values we want to use for that call as shown in Listing -13-6: +to call the closure, in order to execute the code and get the resulting value. +We call a closure like we do a function: we specify the variable name that +holds the closure definition and follow it with parentheses containing the +argument values we want to use, as shown in Listing 13-6: Filename: src/main.rs @@ -322,7 +290,7 @@ fn generate_workout(intensity: i32, random_number: i32) { println!( "Today, run for {} minutes!", expensive_closure(intensity) - ) + ); } } } @@ -330,56 +298,44 @@ fn generate_workout(intensity: i32, random_number: i32) { Listing 13-6: Calling the `expensive_closure` we’ve defined -Now we’ve achieved the goal of unifying where the expensive calculation is -called to one place, and we’re only executing that code where we need the -results. However, we’ve reintroduced one of the problems from Listing 13-3: -we’re still calling the closure twice in the first `if` block, which will call -the expensive code twice and make the user wait twice as long as they need to. -We could fix this problem by creating a variable local to that `if` block to -hold the result of calling the closure, but there’s another solution we can use -since we have a closure. We’ll get back to that solution in a bit; let’s first -talk about why there aren’t type annotations in the closure definition and the -traits involved with closures. +Now the expensive calculation is called in only one place, and we’re only +executing that code where we need the results. + +We have, however, reintroduced one of the problems from Listing 13-3: we’re +still calling the closure twice in the first `if` block, which will call the +expensive code twice and make the user wait twice as long as they need to. We +could fix this problem by creating a variable local to that `if` block to hold +the result of calling the closure, but closures provide us with another +solution. We’ll get back to that solution in a bit; let’s first talk about why +there aren’t type annotations in the closure definition and the traits involved +with closures. ### Closure Type Inference and Annotation -Closures differ from functions defined with the `fn` keyword in a few -ways. The first is that closures don’t require you to annotate the types of the +Closures differ from functions defined with the `fn` keyword in a few ways. The +first is that closures don’t require you to annotate the types of the parameters or the return value like `fn` functions do. - - -Type annotations are required on functions because they are part of an -explicit interface exposed to your users. Defining this interface rigidly is -important for ensuring that everyone agrees on what types of values a function -uses and returns. Closures aren’t used in an exposed interface like this, -though: they’re stored in variables and used without naming them and exposing -them to be invoked by users of our library. 
+Type annotations are required on functions because they are part of an explicit +interface exposed to your users. Defining this interface rigidly is important +for ensuring that everyone agrees on what types of values a function uses and +returns. Closures aren’t used in an exposed interface like this, though: +they’re stored in variables and used without naming them and exposing them to +users of our library. Additionally, closures are usually short and only relevant within a narrow context rather than in any arbitrary scenario. Within these limited contexts, the compiler is reliably able to infer the types of the parameters and return -type similarly to how it’s able to infer the types of most variables. Being -forced to annotate the types in these small, anonymous functions would be -annoying and largely redundant with the information the compiler already has -available. +type, similar to how it’s able to infer the types of most variables. - - +Making programmers annotate the types in these small, anonymous functions would +be annoying and largely redundant with the information the compiler already has +available. Like variables, we can choose to add type annotations if we want to increase -explicitness and clarity in exchange for being more verbose than is strictly +explicitness and clarity at the cost of being more verbose than is strictly necessary; annotating the types for the closure we defined in Listing 13-4 -would look like the definition shown here in Listing 13-7: +would look like the definition shown in Listing 13-7: Filename: src/main.rs @@ -394,25 +350,12 @@ let expensive_closure = |num: i32| -> i32 { Listing 13-7: Adding optional type annotations of the parameter and return value types in the closure - - - - - - The syntax of closures and functions looks more similar with type annotations. Here’s a vertical comparison of the syntax for the definition of a function that adds one to its parameter, and a closure that has the same behavior. We’ve added some spaces here to line up the relevant parts). This illustrates how -closure syntax is similar to function syntax except for the use of pipes rather -than parentheses and the amount of syntax that is optional: - - - +closure syntax is similar to function syntax, except for the use of pipes and +the amount of syntax that is optional: ``` fn add_one_v1 (x: i32) -> i32 { x + 1 } @@ -421,34 +364,21 @@ let add_one_v3 = |x| { x + 1 }; let add_one_v4 = |x| x + 1 ; ``` - - - - - The first line shows a function definition, and the second line shows a fully annotated closure definition. The third line removes the type annotations from the closure definition, and the fourth line removes the braces that are -optional since the closure body only has one line. These are all valid +optional, since the closure body only has one expression. These are all valid definitions that will produce the same behavior when they’re called. - - - Closure definitions will have one concrete type inferred for each of their parameters and for their return value. For instance, Listing 13-8 shows the -definition of a short closure that just returns the value it gets as a -parameter. This closure isn’t very useful except for the purposes of this -example. Note that we haven’t added any type annotations to the definition: if -we then try to call the closure twice, using a `String` as an argument the -first time and an `i32` the second time, we’ll get an error: +definition of a short closure that just returns the value it receives as a +parameter. 
+ +This closure isn’t very useful except for the purposes of this example. Note +that we haven’t added any type annotations to the definition: if we then try to +call the closure twice, using a `String` as an argument the first time and an +`i32` the second time, we’ll get an error: Filename: src/main.rs @@ -459,8 +389,8 @@ let s = example_closure(String::from("hello")); let n = example_closure(5); ``` -Listing 13-8: Attempting to call a closure whose types -are inferred with two different types +Listing 13-8: Attempting to call a closure whose types are inferred with two +different types The compiler gives us this error: @@ -476,40 +406,34 @@ error[E0308]: mismatched types found type `{integer}` ``` - - The first time we call `example_closure` with the `String` value, the compiler infers the type of `x` and the return type of the closure to be `String`. Those types are then locked in to the closure in `example_closure`, and we get a type error if we try to use a different type with the same closure. -### Using Closures with Generic Parameters and the `Fn` Traits +### Storing Closures Using Generic Parameters and the `Fn` Traits Returning to our workout generation app, in Listing 13-6 we left our code still -calling the expensive calculation closure more times than it needs to. In each -place throughout our code, if we need the results of the expensive closure more -than once, we could save the result in a variable for reuse and use the -variable instead of calling the closure again. This could be a lot of repeated -code saving the results in a variety of places. - -However, because we have a closure for the expensive calculation, we have -another solution available to us. We can create a struct that will hold the -closure and the resulting value of calling the closure. The struct will only -execute the closure if we need the resulting value, and it will cache the -resulting value so that the rest of our code doesn’t have to be responsible for -saving and reusing the result. You may know this pattern as *memoization* or -*lazy evaluation*. +calling the expensive calculation closure more times than it needs to. One +option to solve this issue is to save the result of the expensive closure in a +variable for reuse and use the variable instead in each place we need the +result instead of calling the closure again. This method, though, could result +in a lot of repeated code. + +Fortunately, we have another solution available to us. We can create a struct +that will hold the closure and the resulting value of calling the closure. The +struct will only execute the closure if we need the resulting value, and it +will cache the resulting value so that the rest of our code doesn’t have to be +responsible for saving and reusing the result. You may know this pattern as +*memoization* or *lazy evaluation*. In order to make a struct that holds a closure, we need to be able to specify -the type of the closure. Each closure instance has its own unique anonymous -type: that is, even if two closures have the same signature, their types are -still considered to be different. In order to define structs, enums, or -function parameters that use closures, we use generics and trait bounds like we -discussed in Chapter 10. - - - +the type of the closure, because a struct definition needs to know the types of +each of its fields. Each closure instance has its own unique anonymous type: +that is, even if two closures have the same signature, their types are still +considered different. 
In order to define structs, enums, or function parameters +that use closures, we use generics and trait bounds like we discussed in +Chapter 10. The `Fn` traits are provided by the standard library. All closures implement one of the traits `Fn`, `FnMut`, or `FnOnce`. We’ll discuss the difference @@ -517,9 +441,9 @@ between these traits in the next section on capturing the environment; in this example, we can use the `Fn` trait. We add types to the `Fn` trait bound to represent the types of the parameters -and return values that the closures must have in order to match this trait -bound. In this case, our closure has a parameter of type `i32` and returns an -`i32`, so the trait bound we specify is `Fn(i32) -> i32`. +and return values the closures must have in order to match this trait bound. In +this case, our closure has a parameter of type `i32` and returns an `i32`, so +the trait bound we specify is `Fn(i32) -> i32`. Listing 13-9 shows the definition of the `Cacher` struct that holds a closure and an optional result value: @@ -539,20 +463,25 @@ Listing 13-9: Defining a `Cacher` struct that holds a closure in `calculation` and an optional result in `value` The `Cacher` struct has a `calculation` field of the generic type `T`. The -trait bounds on `T` specify that `T` is a closure by using the `Fn` trait. Any -closure we want to store in the `calculation` field of a `Cacher` instance must -have one `i32` parameter (specified within the parentheses after `Fn`) and must -return an `i32` (specified after the `->`). +trait bounds on `T` specify that it’s a closure by using the `Fn` trait. Any +closure we want to store in the `calculation` field must have one `i32` +parameter (specified within the parentheses after `Fn`) and must return an +`i32` (specified after the `->`). + +> Note: Functions implement all three of the `Fn` traits too. If what we want to +> do doesn’t require capturing a value from the environment, we can use a +> function rather than a closure where we need something that implements an `Fn` +> trait. The `value` field is of type `Option`. Before we execute the closure, -`value` will be `None`. If the code using a `Cacher` asks for the result of the -closure, we’ll execute the closure at that time and store the result within a -`Some` variant in the `value` field. Then if the code asks for the result of -the closure again, instead of executing the closure again, we’ll return the -result that we’re holding in the `Some` variant. +`value` will be `None`. When code using a `Cacher` asks for the *result* of the +closure, the `Cacher` will execute the closure at that time and store the +result within a `Some` variant in the `value` field. Then if the code asks for +the result of the closure again, instead of executing the closure again, the +`Cacher` will return the result held in the `Some` variant. -The logic around the `value` field that we’ve just described is defined in -Listing 13-10: +The logic around the `value` field we’ve just described is defined in Listing +13-10: Filename: src/main.rs @@ -580,23 +509,17 @@ impl Cacher } ``` - +Listing 13-10: The caching logic of `Cacher` -Listing 13-10: Implementations on `Cacher` of an associated function named -`new` and a method named `value` that manage the caching logic +We want `Cacher` to manage the struct fields’ values, rather than letting the +calling code potentially change the values in these fields directly, so these +fields are private. 
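To make that caching logic concrete, here is a minimal sketch of an implementation along the lines of Listings 13-9 and 13-10, assuming the `Fn(i32) -> i32` bound discussed above; the book’s listings are the authoritative version:

```
struct Cacher<T>
    where T: Fn(i32) -> i32
{
    calculation: T,
    value: Option<i32>,
}

impl<T> Cacher<T>
    where T: Fn(i32) -> i32
{
    fn new(calculation: T) -> Cacher<T> {
        // No result has been computed yet, so `value` starts out as `None`.
        Cacher {
            calculation,
            value: None,
        }
    }

    fn value(&mut self, arg: i32) -> i32 {
        match self.value {
            // A cached result exists: return it without running the closure.
            Some(v) => v,
            // No cached result yet: run the closure, cache the result, return it.
            None => {
                let v = (self.calculation)(arg);
                self.value = Some(v);
                v
            },
        }
    }
}
```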
-The fields on the `Cacher` struct are private since we want `Cacher` to manage -their values rather than letting the calling code potentially change the values -in these fields directly. The `Cacher::new` function takes a generic parameter -`T`, which we’ve defined in the context of the `impl` block to have the same -trait bound as the `Cacher` struct. `Cacher::new` returns a `Cacher` instance -that holds the closure specified in the `calculation` field and a `None` value -in the `value` field, since we haven’t executed the closure yet. +The `Cacher::new` function takes a generic parameter `T`, which we’ve defined +as having the same trait bound as the `Cacher` struct. Then `Cacher::new` +returns a `Cacher` instance that holds the closure specified in the +`calculation` field and a `None` value in the `value` field, since we haven’t +executed the closure yet. When the calling code wants the result of evaluating the closure, instead of calling the closure directly, it will call the `value` method. This method @@ -636,7 +559,7 @@ fn generate_workout(intensity: i32, random_number: i32) { println!( "Today, run for {} minutes!", expensive_result.value(intensity) - ) + ); } } } @@ -645,25 +568,26 @@ fn generate_workout(intensity: i32, random_number: i32) { Listing 13-11: Using `Cacher` in the `generate_workout` function to abstract away the caching logic - - Instead of saving the closure in a variable directly, we save a new instance of `Cacher` that holds the closure. Then, in each place we want the result, we call the `value` method on the `Cacher` instance. We can call the `value` method as many times as we want, or not call it at all, and the expensive -calculation will be run a maximum of once. Try running this program with the -`main` function from Listing 13-2, and change the values in the -`simulated_user_specified_value` and `simulated_random_number` variables to -verify that in all of the cases in the various `if` and `else` blocks, -`calculating slowly...` printed by the closure only shows up once and only when -needed. - -The `Cacher` takes care of the logic necessary to ensure we aren’t calling the +calculation will be run a maximum of once. + +Try running this program with the `main` function from Listing 13-2. Change the +values in the `simulated_user_specified_value` and `simulated_random_number` +variables to verify that in all of the cases in the various `if` and `else` +blocks, `calculating slowly...` only shows up once and only when needed. The +`Cacher` takes care of the logic necessary to ensure we aren’t calling the expensive calculation more than we need to, so that `generate_workout` can -focus on the business logic. Caching values is a more generally useful behavior -that we might want to use in other parts of our code with other closures as -well. However, there are a few problems with the current implementation of -`Cacher` that would make reusing it in different contexts difficult. +focus on the business logic. + +### Limitations of the `Cacher` Implementation + +Caching values is a generally useful behavior that we might want to use in +other parts of our code with different closures. However, there are a few +problems with the current implementation of `Cacher` that would make reusing it +in different contexts difficult. The first problem is a `Cacher` instance assumes it will always get the same value for the parameter `arg` to the `value` method. 
That is, this test of @@ -682,9 +606,9 @@ fn call_with_different_values() { ``` This test creates a new `Cacher` instance with a closure that returns the value -passed into it. We call the `value` method on this `Cacher` instance with -an `arg` value of 1 and then an `arg` value of 2, and we expect that the call -to `value` with the `arg` value of 2 returns 2. +passed into it. We call the `value` method on this `Cacher` instance with an +`arg` value of 1 and then an `arg` value of 2, and we expect that the call to +`value` with the `arg` value of 2 should return 2. Run this with the `Cacher` implementation from Listing 13-9 and Listing 13-10 and the test will fail on the `assert_eq!` with this message: @@ -695,50 +619,33 @@ thread 'call_with_different_arg_values' panicked at 'assertion failed: ``` The problem is that the first time we called `c.value` with 1, the `Cacher` -instance saved `Some(1)` in `self.value`. After that, no matter what we pass -in to the `value` method, it will always return 1. +instance saved `Some(1)` in `self.value`. After that, no matter what we pass in +to the `value` method, it will always return 1. Try modifying `Cacher` to hold a hash map rather than a single value. The keys of the hash map will be the `arg` values that are passed in, and the values of the hash map will be the result of calling the closure on that key. Instead of looking at whether `self.value` directly has a `Some` or a `None` value, the -`value` function will look up the `arg` in the hash map and return the value if -it’s present. If it’s not present, the `Cacher` will call the closure and save -the resulting value in the hash map associated with its `arg` value. +`value` function will look up the `arg` in the hash map and return the value, +if it’s present. If it’s not present, the `Cacher` will call the closure and +save the resulting value in the hash map associated with its `arg` value. -Another problem with the current `Cacher` implementation that restricts its use -is that it only accepts closures that take one parameter of type `i32` and -return an `i32`. We might want to be able to cache the results of closures that -take a string slice as an argument and return `usize` values, for example. Try -introducing more generic parameters to increase the flexibility of the `Cacher` -functionality. +Another problem with the current `Cacher` implementation is that it only +accepts closures that take one parameter of type `i32` and return an `i32`. We +might want to cache the results of closures that take a string slice and return +`usize` values, for example. To fix this issue, try introducing more generic +parameters to increase the flexibility of the `Cacher` functionality. ### Closures Can Capture Their Environment In the workout generator example, we only used closures as inline anonymous -functions. Closures have an additional ability we can use that functions don’t -have, however: they can capture their environment and access variables from the -scope in which they’re defined. - - - +functions. Closures have an additional ability that functions don’t have, +however: they can capture their environment and access variables from the scope +in which they’re defined. 
Listing 13-12 has an example of a closure stored in the variable `equal_to_x` that uses the variable `x` from the closure’s surrounding environment: - - - Filename: src/main.rs ``` @@ -760,15 +667,6 @@ Here, even though `x` is not one of the parameters of `equal_to_x`, the `equal_to_x` closure is allowed to use the `x` variable that’s defined in the same scope that `equal_to_x` is defined in. - - - We can’t do the same with functions; let’s see what happens if we try: Filename: src/main.rs @@ -798,43 +696,40 @@ closure form instead The compiler even reminds us that this only works with closures! -When a closure captures a value from its environment, the closure uses memory -to store the values for use in the closure body. This use of memory is overhead -that we don’t want to pay for in the more common case where we want to execute -code that doesn’t capture its environment. Because functions are never allowed -to capture their environment, defining and using functions will never incur -this overhead. - - - +When a closure captures a value from its environment, it uses memory to store +the values for use in the closure body. This use of memory is overhead that we +don’t want to pay in more common cases, where we want to execute code that +doesn’t capture its environment. Because functions are never allowed to capture +their environment, defining and using functions will never incur this overhead. Closures can capture values from their environment in three ways, which directly map to the three ways a function can take a parameter: taking -ownership, borrowing immutably, and borrowing mutably. These ways of capturing -values are encoded in the three `Fn` traits as follows: - -* `FnOnce` consumes the variables it captures from its enclosing scope (the - enclosing scope is called the closure's *environment*). In order to consume - the captured variables, the closure must therefore take ownership of these - variables and moves them into the closure when the closure is defined. The - `Once` part of the name is because the closure can't take ownership of the - same variables more than once, so it can only be called one time. +ownership, borrowing immutably, and borrowing mutably. These are encoded in the +three `Fn` traits as follows: + +* `FnOnce` consumes the variables it captures from its enclosing scope, known + as the closure’s *environment*. In order to consume the captured variables, + the closure must take ownership of these variables and move them into the + closure when it is defined. The `Once` part of the name is because the + closure can’t take ownership of the same variables more than once, so it can + only be called one time. * `Fn` borrows values from the environment immutably. * `FnMut` can change the environment since it mutably borrows values. -When we create a closure, Rust infers how we want to reference the environment -based on how the closure uses the values from the environment. In Listing -13-12, the `equal_to_x` closure borrows `x` immutably (so `equal_to_x` has the -`Fn` trait) since the body of the closure only needs to read the value in `x`. +When we create a closure, Rust infers which to use based on how the closure +uses the values from the environment. In Listing 13-12, the `equal_to_x` +closure borrows `x` immutably (so `equal_to_x` has the `Fn` trait) since the +body of the closure only needs to read the value in `x`. 
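The list above notes that `FnMut` closures mutably borrow from their environment; as a small sketch that isn’t part of the workout example, here is a closure the compiler infers as `FnMut` because its body mutates a captured variable:

```
fn main() {
    let mut count = 0;

    {
        // The closure assigns to `count`, so it captures `count` by mutable
        // borrow and implements `FnMut`; the binding holding the closure must
        // itself be `mut` for us to be able to call it.
        let mut increment = || count += 1;

        increment();
        increment();
    } // the closure goes out of scope here, ending the mutable borrow

    println!("count = {}", count); // prints `count = 2`
}
```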
If we want to force the closure to take ownership of the values it uses in the environment, we can use the `move` keyword before the parameter list. This is mostly useful when passing a closure to a new thread in order to move the data -to be owned by the new thread. We’ll have more examples of `move` closures in -Chapter 16 when we talk about concurrency, but for now here’s the code from -Listing 13-12 with the `move` keyword added to the closure definition and using -vectors instead of integers, since integers can be copied rather than moved: +so that it’s owned by the new thread. + +We’ll have more examples of `move` closures in Chapter 16 when we talk about +concurrency, but for now here’s the code from Listing 13-12 with the `move` +keyword added to the closure definition and using vectors instead of integers, +since integers can be copied rather than moved: Filename: src/main.rs @@ -868,9 +763,10 @@ error[E0382]: use of moved value: `x` implement the `Copy` trait ``` -The `x` value is moved into the closure when the closure is defined because of -the `move` keyword. The closure then has ownership of `x`, and `main` isn’t -allowed to use `x` anymore. Removing the `println!` will fix this example. +The `x` value is moved into the closure when the closure is defined, because we +added the `move` keyword. The closure then has ownership of `x`, and `main` +isn’t allowed to use `x` anymore in the `println!` statement. Removing +`println!` will fix this example. Most of the time when specifying one of the `Fn` trait bounds, you can start with `Fn` and the compiler will tell you if you need `FnMut` or `FnOnce` based @@ -881,26 +777,16 @@ useful as function parameters, let’s move on to our next topic: iterators. ## Processing a Series of Items with Iterators - - - - The iterator pattern allows you to perform some task on a sequence of items in -turn. An *iterator* is responsible for the logic around iterating over each item -in the sequence and determining when the sequence has finished. When we use -iterators, we don’t have to reimplement that logic ourselves. +turn. An *iterator* is responsible for the logic of iterating over each item +and determining when the sequence has finished. When we use iterators, we don’t +have to reimplement that logic ourselves. -In Rust, iterators are *lazy*, which means they have no effect until we call -methods on them that consume the iterator to use it up. For example, the code -in Listing 13-13 creates an iterator over the items in the vector `v1` by -calling the `iter` method defined on `Vec`. This code by itself doesn’t do -anything useful: +In Rust, iterators are *lazy*, meaning they have no effect until we call +methods that consume the iterator to use it up. For example, the code in +Listing 13-13 creates an iterator over the items in the vector `v1` by calling +the `iter` method defined on `Vec`. This code by itself doesn’t do anything +useful: ``` let v1 = vec![1, 2, 3]; @@ -910,11 +796,13 @@ let v1_iter = v1.iter(); Listing 13-13: Creating an iterator -After creating an iterator, we can choose to use it in a variety of ways. In -Listing 3-6, we actually used iterators with `for` loops to execute some code -on each item, though we glossed over what the call to `iter` did until now. The -example in Listing 13-14 separates the creation of the iterator from the use of -the iterator in the `for` loop. The iterator is stored in the `v1_iter` +Once we’ve created an iterator, we can choose to use it in a variety of ways. 
+In Listing 3-6 from Chapter 3, we actually used iterators with `for` loops to +execute some code on each item, though we glossed over what the call to `iter` +did until now. + +The example in Listing 13-14 separates the creation of the iterator from the +use of the iterator in the `for` loop. The iterator is stored in the `v1_iter` variable, and no iteration takes place at that time. Once the `for` loop is called using the iterator in `v1_iter`, then each element in the iterator is used in one iteration of the loop, which prints out each value: @@ -934,12 +822,13 @@ Listing 13-14: Making use of an iterator in a `for` loop In languages that don’t have iterators provided by their standard libraries, we would likely write this same functionality by starting a variable at index 0, using that variable to index into the vector to get a value, and incrementing -the variable value in a loop until its value gets up to the total number of -items in the vector. Iterators take care of all of that logic for us, which -cuts down on the repetitive code we would have to write and potentially mess up. -In addition, the way iterators are implemented gives us more flexibility to -use the same logic with many different kinds of sequences, not just data -structures that we can index into like vectors. Let’s see how iterators do that. +the variable value in a loop until it gets to the total number of items in the +vector. + +Iterators take care of all of that logic for us, cutting down on repetitive +code we could potentially mess up. Iterators give us more flexibility to use +the same logic with many different kinds of sequences, not just data structures +we can index into like vectors. Let’s see how iterators do that. ### The `Iterator` trait and the `next` method @@ -959,17 +848,18 @@ trait Iterator { You’ll notice some new syntax that we haven’t covered yet: `type Item` and `Self::Item`, which are defining an *associated type* with this trait. We’ll talk about associated types in depth in Chapter 19, but for now, all you need -to know is that this code says implementing `Iterator` trait requires that you -also define an `Item` type, and this `Item` type is used in the return type of -the `next` method. In other words, the `Item` type will be the type of element -that’s returned from the iterator. - -The `next` method is the only method that the `Iterator` trait requires -implementers of the trait to define. `next` returns one item of the iterator -at a time wrapped in `Some`, and when iteration is over, it returns `None`. -We can call the `next` method on iterators directly if we’d like; Listing 13-15 -has a test that demonstrates the values we’d get on repeated calls to `next` -on the iterator created from the vector: +to know is that this code says implementing the `Iterator` trait requires that +you also define an `Item` type, and this `Item` type is used in the return type +of the `next` method. In other words, the `Item` type will be the type returned +from the iterator. + +The `Iterator` trait only requires implementors to define one method: the +`next` method, which returns one item of the iterator at a time wrapped in +`Some` and, when iteration is over, it returns `None`. 
+ +We can call the `next` method on iterators directly; Listing 13-15 demonstrates +what values are returned from repeated calls to `next` on the iterator created +from the vector: Filename: src/lib.rs @@ -990,29 +880,21 @@ fn iterator_demonstration() { Listing 13-15: Calling the `next` method on an iterator Note that we needed to make `v1_iter` mutable: calling the `next` method on an -iterator changes the iterator’s state that keeps track of where it is in the -sequence. Put another way, this code *consumes*, or uses up, the iterator. Each -call to `next` eats up an item from the iterator. We didn’t need to make -`v1_iter` mutable when we used a `for` loop because the `for` loop took -ownership of `v1_iter` and made `v1_iter` mutable behind the scenes. +iterator changes state that keeps track of where it is in the sequence. Put +another way, this code *consumes*, or uses up, the iterator. Each call to +`next` eats up an item from the iterator. We didn’t need to make `v1_iter` +mutable when we used a `for` loop because the loop took ownership of `v1_iter` +and made it mutable behind the scenes. Also note that the values we get from the calls to `next` are immutable references to the values in the vector. The `iter` method produces an iterator -over immutable references. If we wanted to create an iterator that takes +over immutable references. If we want to create an iterator that takes ownership of `v1` and returns owned values, we can call `into_iter` instead of `iter`. Similarly, if we want to iterate over mutable references, we can call `iter_mut` instead of `iter`. ### Methods in the `Iterator` Trait that Consume the Iterator - - - - The `Iterator` trait has a number of different methods with default implementations provided for us by the standard library; you can find out all about these methods by looking in the standard library API documentation for @@ -1020,25 +902,12 @@ the `Iterator` trait. Some of these methods call the `next` method in their definition, which is why we’re required to implement the `next` method when implementing the `Iterator` trait. - - - -The methods that call the `next` method are called *consuming adaptors*, since -calling them uses up the iterator. An example of a consuming adaptor is the -`sum` method. This method takes ownership of the iterator and iterates through -the items by repeatedly calling `next`, thus consuming the iterator. As it -iterates through each item, it adds each item to a running total and returns -the total when iteration has completed. Listing 13-16 has a test illustrating a -use of the `sum` method: +Methods that call `next` are called *consuming adaptors*, because calling them +uses up the iterator. One example is the `sum` method, which takes ownership of +the iterator and iterates through the items by repeatedly calling `next`, thus +consuming the iterator. As it iterates through, it adds each item to a running +total and returns the total when iteration is complete. Listing 13-16 has a +test illustrating a use of the `sum` method: Filename: src/lib.rs @@ -1063,14 +932,16 @@ ownership of the iterator we call it on. ### Methods in the `Iterator` Trait that Produce Other Iterators -Another kind of method defined on the `Iterator` trait are methods that produce -other iterators. These methods are called *iterator adaptors* and allow us to -change iterators into different kind of iterators. We can chain multiple calls -to iterator adaptors. 
Because all iterators are lazy, however, we have to -call one of the consuming adaptor methods in order to get results from calls -to iterator adaptors. Listing 13-17 shows an example of calling the iterator -adaptor method `map`, which takes a closure that `map` will call on each -item in order to produce a new iterator in which each item from the vector has +Other methods defined on the `Iterator` trait, known as *iterator adaptors*, +allow us to change iterators into different kind of iterators. We can chain +multiple calls to iterator adaptors to perform complex actions in a readable +way. Because all iterators are lazy, however, we have to call one of the +consuming adaptor methods in order to get results from calls to iterator +adaptors. + +Listing 13-17 shows an example of calling the iterator adaptor method `map` +which takes a closure to call on each item in order to produce a new iterator. +The closure here creates a new iterator in which each item from the vector has been incremented by 1. This code produces a warning, though: Filename: src/main.rs @@ -1098,14 +969,15 @@ nothing unless consumed The code in Listing 13-17 isn’t actually doing anything; the closure we’ve specified never gets called. The warning reminds us why: iterator adaptors are -lazy, and we probably meant to consume the iterator here. +lazy, and we need to consume the iterator here. + +To fix this and consume the iterator, we’re going to use the `collect` method, +which we saw briefly in Chapter 12. This method consumes the iterator and +collects the resulting values into a collection data type. -In order to fix this warning and consume the iterator to get a useful result, -we’re going to use the `collect` method, which we saw briefly in Chapter 12. -This method consumes the iterator and collects the resulting values into a -data structure. In Listing 13-18, we’re going to collect the results of -iterating over the iterator returned from the call to `map` into a vector that -will contain each item from the original vector incremented by 1: +In Listing 13-18, we collect the results of iterating over the iterator that’s +returned from the call to `map` into a vector. This vector will end up +containing each item from the original vector incremented by 1: Filename: src/main.rs @@ -1120,24 +992,10 @@ assert_eq!(v2, vec![2, 3, 4]); Listing 13-18: Calling the `map` method to create a new iterator, then calling the `collect` method to consume the new iterator and create a vector -Because `map` takes a closure, we can specify any operation that we want to -perform on each item that we iterate over. This is a great example of how using -closures lets us customize some behavior while reusing the iteration behavior -that the `Iterator` trait provides. - - - - +Because `map` takes a closure, we can specify any operation we want to perform +on each item. This is a great example of how closures let us customize some +behavior while reusing the iteration behavior that the `Iterator` trait +provides. ### Using Closures that Capture their Environment with Iterators @@ -1146,10 +1004,11 @@ closures that capture their environment by using the `filter` iterator adapter. The `filter` method on an iterator takes a closure that takes each item from the iterator and returns a boolean. If the closure returns `true`, the value will be included in the iterator produced by `filter`. If the closure returns -`false`, the value won’t be included in the resulting iterator. 
Listing 13-19 -demonstrates using `filter` with a closure that captures the `shoe_size` -variable from its environment in order to iterate over a collection of `Shoe` -struct instances in order to return only shoes that are the specified size: +`false`, the value won’t be included in the resulting iterator. + +In Listing 13-19 we use `filter` with a closure that captures the `shoe_size` +variable from its environment, in order to iterate over a collection of `Shoe` +struct instances. It will return only shoes that are the specified size: Filename: src/lib.rs @@ -1189,45 +1048,37 @@ fn filters_by_size() { Listing 13-19: Using the `filter` method with a closure that captures `shoe_size` - - The `shoes_in_my_size` function takes ownership of a vector of shoes and a shoe size as parameters. It returns a vector containing only shoes of the specified -size. In the body of `shoes_in_my_size`, we call `into_iter` to create an -iterator that takes ownership of the vector. Then we call `filter` to adapt -that iterator into a new iterator that only contains elements for which the -closure returns `true`. The closure we’ve specified captures the `shoe_size` -parameter from the environment and uses the value to compare with each shoe’s -size to only keep shoes that are of the size specified. Finally, calling -`collect` gathers the values returned by the adapted iterator into a vector -that the function returns. +size. + +In the body of `shoes_in_my_size`, we call `into_iter` to create an iterator +that takes ownership of the vector. Then we call `filter` to adapt that +iterator into a new iterator that only contains elements for which the closure +returns `true`. + +The closure captures the `shoe_size` parameter from the environment and +compares the value with each shoe’s size, keeping only shoes of the size +specified. Finally, calling `collect` gathers the values returned by the +adapted iterator into a vector that’s returned by the function. The test shows that when we call `shoes_in_my_size`, we only get back shoes that have the same size as the value we specified. ### Implementing the `Iterator` Trait to Create Our Own Iterators - - - We’ve shown that we can create an iterator by calling `iter`, `into_iter`, or -`iter_mut` on a vector. We can also create iterators from the other collection -types in the standard library, such as hash map. Additionally, we can implement -the `Iterator` trait in order to create iterators that do anything we want. -As previously mentioned, the only method we’re required to provide a definition -for is the `next` method. Once we’ve done that, we can use all the other -methods that have default implementations provided by the `Iterator` trait on -our iterator! - -The iterator we’re going to create is one that will only ever count from 1 -to 5. First, we’ll create a struct to hold on to some values, and then we’ll -make this struct into an iterator by implementing the `Iterator` trait and use -the values in that implementation. +`iter_mut` on a vector. We can create iterators from the other collection types +in the standard library, such as hash map. We can also create iterators that do +anything we want by implementing the `Iterator` trait on our own types. As +previously mentioned, the only method we’re required to provide a definition +for is the `next` method. Once we’ve done that, we can use all other methods +that have default implementations provided by the `Iterator` trait! + +To demonstrate, let’s create an iterator that will only ever count from 1 to 5. 
+First, we’ll create a struct to hold some values, and then we’ll make this +struct into an iterator by implementing the `Iterator` trait and use the values +in that implementation. Listing 13-20 has the definition of the `Counter` struct and an associated `new` function to create instances of `Counter`: @@ -1249,26 +1100,14 @@ impl Counter { Listing 13-20: Defining the `Counter` struct and a `new` function that creates instances of `Counter` with an initial value of 0 for `count` - - - -The `Counter` struct has one field named `count`. This field holds a `u32` -value that will keep track of where we are in the process of iterating from 1 -to 5. The `count` field is private since we want the implementation of -`Counter` to manage its value. The `new` function enforces the behavior we want -of always starting new instances with a value of 0 in the `count` field. - - - - +The `Counter` struct has one field named `count`. This holds a `u32` value that +will keep track of where we are in the process of iterating from 1 to 5. The +`count` field is private since we want the implementation of `Counter` to +manage its value. The `new` function enforces the behavior of always starting +new instances with a value of 0 in the `count` field. Next, we’re going to implement the `Iterator` trait for our `Counter` type by -defining the body of the `next` method to specify what we want to happen when +defining the body of the `next` method, to specify what we want to happen when this iterator is used, as shown in Listing 13-21: Filename: src/lib.rs @@ -1291,22 +1130,21 @@ impl Iterator for Counter { Listing 13-21: Implementing the `Iterator` trait on our `Counter` struct - - We set the associated `Item` type for our iterator to `u32`, meaning the iterator will return `u32` values. Again, don’t worry about associated types -yet, we’ll be covering them in Chapter 19. We want our iterator to add one to -the current state, which is why we initialized `count` to 0: we want our -iterator to return one first. If the value of `count` is less than six, `next` -will return the current value wrapped in `Some`, but if `count` is six or -higher, our iterator will return `None`. +yet, we’ll be covering them in Chapter 19. + +We want our iterator to add one to the current state, so we initialized `count` +to 0 so it would return one first. If the value of `count` is less than six, +`next` will return the current value wrapped in `Some`, but if `count` is six +or higher, our iterator will return `None`. #### Using Our `Counter` Iterator’s `next` Method Once we’ve implemented the `Iterator` trait, we have an iterator! Listing 13-22 -shows a test demonstrating that we can use the iterator functionality our -`Counter` struct now has by calling the `next` method on it directly, just like -we did with the iterator created from a vector in Listing 13-15: +shows a test demonstrating that we can use the iterator functionality of our +`Counter` struct by calling the `next` method on it directly, just like we did +with the iterator created from a vector in Listing 13-15: Filename: src/lib.rs @@ -1328,41 +1166,19 @@ Listing 13-22: Testing the functionality of the `next` method implementation This test creates a new `Counter` instance in the `counter` variable and then calls `next` repeatedly, verifying that we have implemented the behavior we -want this iterator to have of returning the values from 1 to 5. - - - +want this iterator to have: returning the values from 1 to 5. 
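Before we use other `Iterator` methods on `Counter`, here is the whole type gathered in one place, a sketch assembled from Listings 13-20 and 13-21:

```
struct Counter {
    count: u32,
}

impl Counter {
    fn new() -> Counter {
        Counter { count: 0 }
    }
}

impl Iterator for Counter {
    type Item = u32;

    fn next(&mut self) -> Option<u32> {
        // Count from 1 up to and including 5, then signal the end with `None`.
        self.count += 1;

        if self.count < 6 {
            Some(self.count)
        } else {
            None
        }
    }
}
```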
#### Using Other `Iterator` Trait Methods on Our Iterator Because we implemented the `Iterator` trait by defining the `next` method, we -can now use any `Iterator` trait method’s default implementations that the -standard library has defined, since they all use the `next` method’s -functionality. - - - - - - - -For example, if for some reason we wanted to take the values that an instance -of `Counter` produces, pair those values with values produced by another -`Counter` instance after skipping the first value that instance produces, -multiply each pair together, keep only those results that are divisible by -three, and add all the resulting values together, we could do so as shown in -the test in Listing 13-23: +can now use any `Iterator` trait method’s default implementations as defined in +the standard library, since they all use the `next` method’s functionality. + +For example, if for some reason we wanted to take the values produced by an +instance of `Counter`, pair them with values produced by another `Counter` +instance after skipping the first value, multiply each pair together, keep only +those results that are divisible by three, and add all the resulting values +together, we could do so as shown in the test in Listing 13-23: Filename: src/lib.rs @@ -1384,13 +1200,13 @@ Note that `zip` produces only four pairs; the theoretical fifth pair `(5, None)` is never produced because `zip` returns `None` when either of its input iterators return `None`. -All of these method calls are possible because we implemented the `Iterator` -trait by specifying how the `next` method works and the standard library -provides default implementations for other methods that call `next`. +All of these method calls are possible because we specified how the `next` +method works, and the standard library provides default implementations for +other methods that call `next`. ## Improving our I/O Project -We can improve our implementation of the I/O project in Chapter 12 by using +With this new knowledge, we can improve the I/O project in Chapter 12 by using iterators to make places in the code clearer and more concise. Let’s take a look at how iterators can improve our implementation of both the `Config::new` function and the `search` function. @@ -1399,7 +1215,7 @@ function and the `search` function. In Listing 12-6, we added code that took a slice of `String` values and created an instance of the `Config` struct by indexing into the slice and cloning the -values so that the `Config` struct could own those values. We’ve reproduced the +values, allowing the `Config` struct to own those values. We’ve reproduced the implementation of the `Config::new` function as it was at the end of Chapter 12 in Listing 13-24: @@ -1422,45 +1238,35 @@ impl Config { } ``` -Listing 13-24: Reproduction of the `Config::new` function -from the end of Chapter 12 - - - +Listing 13-24: Reproduction of the `Config::new` function from the end of +Chapter 12 At the time, we said not to worry about the inefficient `clone` calls here because we would remove them in the future. Well, that time is now! -The reason we needed `clone` here in the first place is that we have a slice -with `String` elements in the parameter `args`, but the `new` function does not -own `args`. In order to be able to return ownership of a `Config` instance, we -need to clone the values that we put in the `query` and `filename` fields of -`Config`, so that the `Config` instance can own its values. 
+We needed `clone` here because we have a slice with `String` elements in the +parameter `args`, but the `new` function doesn’t own `args`. In order to be +able to return ownership of a `Config` instance, we had to clone the values +from the `query` and `filename` fields of `Config`, so that the `Config` +instance can own its values. With our new knowledge about iterators, we can change the `new` function to take ownership of an iterator as its argument instead of borrowing a slice. -We’ll use the iterator functionality instead of the code we had that checks the -length of the slice and indexes into specific locations. This will clear up -what the `Config::new` function is doing since the iterator will take care of -accessing the values. - - - +We’ll use the iterator functionality instead of the code that checks the length +of the slice and indexes into specific locations. This will clear up what the +`Config::new` function is doing since the iterator will take care of accessing +the values. -Once `Config::new` taking ownership of the iterator and not using indexing +Once `Config::new` takes ownership of the iterator and stops using indexing operations that borrow, we can move the `String` values from the iterator into `Config` rather than calling `clone` and making a new allocation. - - - #### Using the Iterator Returned by `env::args` Directly -In your I/O project’s *src/main.rs*, let’s change the start of the `main` -function from this code that we had at the end of Chapter 12: +Open your I/O project’s *src/main.rs*, and we’ll change the start of the `main` +function that we had at the end of Chapter 12: + +Filename: src/main.rs ``` fn main() { @@ -1492,14 +1298,6 @@ fn main() { Listing 13-25: Passing the return value of `env::args` to `Config::new` - - - The `env::args` function returns an iterator! Rather than collecting the iterator values into a vector and then passing a slice to `Config::new`, now we’re passing ownership of the iterator returned from `env::args` to @@ -1509,14 +1307,11 @@ Next, we need to update the definition of `Config::new`. In your I/O project’s *src/lib.rs*, let’s change the signature of `Config::new` to look like Listing 13-26: - - - Filename: src/lib.rs ``` impl Config { - pub fn new(args: std::env::Args) -> Result { + pub fn new(mut args: std::env::Args) -> Result { // ...snip... ``` @@ -1525,21 +1320,24 @@ Listing 13-26: Updating the signature of `Config::new` to expect an iterator The standard library documentation for the `env::args` function shows that the type of the iterator it returns is `std::env::Args`. We’ve updated the signature of the `Config::new` function so that the parameter `args` has the -type `std::env::Args` instead of `&[String]`. +type `std::env::Args` instead of `&[String]`. Because we’re taking ownership of +`args`, and we’re going to be mutating `args` by iterating over it, we can add +the `mut` keyword into the specification of the `args` parameter to make it +mutable. #### Using `Iterator` Trait Methods Instead of Indexing Next, we’ll fix the body of `Config::new`. The standard library documentation also mentions that `std::env::Args` implements the `Iterator` trait, so we know -we can call the `next` method on it! Listing 13-27 has updated the code -from Listing 12-23 to use the `next` method: +we can call the `next` method on it! 
Listing 13-27 has updated the code from +Listing 12-23 to use the `next` method: Filename: src/lib.rs ``` impl Config { pub fn new(mut args: std::env::Args) -> Result { - args.next(); + args.next(); let query = match args.next() { Some(arg) => arg, @@ -1553,21 +1351,13 @@ impl Config { let case_sensitive = env::var("CASE_INSENSITIVE").is_err(); - Ok(Config { - query, filename, case_sensitive - }) + Ok(Config { query, filename, case_sensitive }) } } ``` Listing 13-27: Changing the body of `Config::new` to use iterator methods - - - Remember that the first value in the return value of `env::args` is the name of the program. We want to ignore that and get to the next value, so first we call `next` and do nothing with the return value. Second, we call `next` on the @@ -1576,12 +1366,6 @@ value we want to put in the `query` field of `Config`. If `next` returns a not enough arguments were given and we return early with an `Err` value. We do the same thing for the `filename` value. - - - ### Making Code Clearer with Iterator Adaptors The other place in our I/O project we could take advantage of iterators is in @@ -1606,17 +1390,14 @@ pub fn search<'a>(query: &str, contents: &'a str) -> Vec<&'a str> { Listing 13-28: The implementation of the `search` function from Chapter 12 -We can write this code in a much shorter way by using iterator adaptor methods -instead. This also lets us avoid having a mutable intermediate `results` +We can write this code in a much more concise way using iterator adaptor +methods. This also lets us avoid having a mutable intermediate `results` vector. The functional programming style prefers to minimize the amount of mutable state to make code clearer. Removing the mutable state might make it easier for us to make a future enhancement to make searching happen in parallel, since we wouldn’t have to manage concurrent access to the `results` vector. Listing 13-29 shows this change: - - - Filename: src/lib.rs ``` @@ -1631,23 +1412,18 @@ Listing 13-29: Using iterator adaptor methods in the implementation of the `search` function Recall that the purpose of the `search` function is to return all lines in -`contents` that contain the `query`. Similarly to the `filter` example in -Listing 13-19, we can use the `filter` adaptor to keep only the lines that +`contents` that contain the `query`. Similar to the `filter` example in Listing +13-19, we can use the `filter` adaptor to keep only the lines that `line.contains(query)` returns true for. We then collect the matching lines up into another vector with `collect`. Much simpler! Feel free to make the same change to use iterator methods in the `search_case_insensitive` function as well. - - - -The next logical question is which style you should choose in your own code: -the original implementation in Listing 13-28, or the version using iterators in -Listing 13-29. Most Rust programmers prefer to use the iterator style. It’s a -bit tougher to get the hang of at first, but once you get a feel for the -various iterator adaptors and what they do, iterators can be easier to +The next logical question is which style you should choose in your own code and +why: the original implementation in Listing 13-28, or the version using +iterators in Listing 13-29. Most Rust programmers prefer to use the iterator +style. It’s a bit tougher to get the hang of at first, but once you get a feel +for the various iterator adaptors and what they do, iterators can be easier to understand. 
Instead of fiddling with the various bits of looping and building new vectors, the code focuses on the high-level objective of the loop. This abstracts away some of the commonplace code so that it’s easier to see the @@ -1677,14 +1453,16 @@ test bench_search_iter ... bench: 19,234,900 ns/iter (+/- 657,200) The iterator version ended up slightly faster! We’re not going to go through the benchmark code here, as the point is not to prove that they’re exactly equivalent, but to get a general sense of how these two implementations compare -performance-wise. For a more comprehensive benchmark, you’d want to check -various texts of various sizes, different words, words of different lengths, -and all kinds of other variations. The point is this: iterators, while a -high-level abstraction, get compiled down to roughly the same code as if you’d -written the lower-level code yourself. Iterators are one of Rust’s *zero-cost -abstractions*, by which we mean using the abstraction imposes no additional -runtime overhead in the same way that Bjarne Stroustrup, the original designer -and implementer of C++, defines *zero-overhead*: +performance-wise. + +For a more comprehensive benchmark, you’d want to check various texts of +various sizes, different words, words of different lengths, and all kinds of +other variations. The point is this: iterators, while a high-level abstraction, +get compiled down to roughly the same code as if you’d written the lower-level +code yourself. Iterators are one of Rust’s *zero-cost* *abstractions*, by which +we mean using the abstraction imposes no additional runtime overhead, in the +same way that Bjarne Stroustrup, the original designer and implementor of C++, +defines *zero-overhead*: > In general, C++ implementations obey the zero-overhead principle: What you > don’t use, you don’t pay for. And further: What you do use, you couldn’t hand @@ -1692,21 +1470,10 @@ and implementer of C++, defines *zero-overhead*: > > - Bjarne Stroustrup “Foundations of C++” - - - As another example, here is some code taken from an audio decoder. The decoding algorithm uses the linear prediction mathematical operation to estimate future values based on a linear function of the previous samples. - - - This code uses an iterator chain to do some math on three variables in scope: a `buffer` slice of data, an array of 12 `coefficients`, and an amount by which to shift data in `qlp_shift`. We’ve declared the variables within this example @@ -1741,13 +1508,10 @@ consuming the value. What assembly code would this Rust code compile to? Well, as of this writing, it compiles down to the same assembly you’d write by hand. There’s no loop at all corresponding to the iteration over the values in `coefficients`: Rust knows that there are twelve iterations, so it “unrolls” -the loop. Unrolling is an optimization that removes the overhead of the loop +the loop. *Unrolling* is an optimization that removes the overhead of the loop controlling code and instead generates repetitive code for each iteration of the loop. - - - All of the coefficients get stored in registers, which means it’s very fast to access the values. There are no bounds checks on the array access at runtime. All these optimizations Rust is able to apply make the resulting code extremely @@ -1765,15 +1529,6 @@ ideas, at low level performance. The implementations of closures and iterators are such that runtime performance is not affected. This is part of Rust’s goal to strive to provide zero-cost abstractions. 
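To make the decoder example concrete, here is a self-contained sketch of the same kind of `zip`, `map`, and `sum` chain, with hypothetical sample values standing in for the real audio data:

```
fn main() {
    // Hypothetical stand-ins for the decoder's real inputs.
    let mut buffer: Vec<i32> = (1..21).collect();
    let coefficients: [i64; 12] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
    let qlp_shift: i16 = 8;

    for i in 12..buffer.len() {
        // Pair each coefficient with one of the previous 12 samples,
        // multiply the pairs, sum the products, and shift the total.
        let prediction = coefficients.iter()
                                     .zip(&buffer[i - 12..i])
                                     .map(|(&c, &s)| c * s as i64)
                                     .sum::<i64>() >> qlp_shift;
        let delta = buffer[i];
        buffer[i] = prediction as i32 + delta;
    }

    println!("{:?}", buffer);
}
```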
- - - Now that we’ve improved the expressiveness of our I/O project, let’s look at some more features of `cargo` that would help us get ready to share the project with the world. diff --git a/src/doc/book/second-edition/nostarch/chapter14.md b/src/doc/book/second-edition/nostarch/chapter14.md index dfdc027ab4..a2e5006858 100644 --- a/src/doc/book/second-edition/nostarch/chapter14.md +++ b/src/doc/book/second-edition/nostarch/chapter14.md @@ -14,10 +14,7 @@ advanced features to show you how to: * Extend Cargo with your own custom commands Cargo can do even more than what we can cover in this chapter too, so for a -full explanation, see its documentation at *http://doc.rust-lang.org/cargo/*. - - - +full explanation, see its documentation at *https://doc.rust-lang.org/cargo/*. ## Customizing Builds with Release Profiles @@ -26,36 +23,14 @@ different configurations, to allow the programmer more control over various options for compiling your code. Each profile is configured independently of the others. - - - -Cargo has four profiles defined with good default configurations for each use -case. Cargo uses the different profiles based on which command you’re running. -The commands correspond to the profiles as shown in Table 14-1: - - - - -| Command | Profile | -|-------------------------|-----------| -| `cargo build` | `dev` | -| `cargo build --release` | `release` | -| `cargo test` | `test` | -| `cargo doc` | `doc` | - -Table 14-1: Which profile is used when you run different Cargo commands - -This may be familiar from the output of your builds, which shows the profile -used in the build: +Cargo has two main profiles you should know about: the `dev` profile Cargo uses +when you run `cargo build`, and the `release` profile Cargo uses when you run +`cargo build --release`. The `dev` profile is defined with good defaults for +developing, and likewise the `release` profile has good defaults for release +builds. - - +These names may be familiar from the output of your builds, which shows the +profile used in the build: ``` $ cargo build @@ -64,31 +39,19 @@ $ cargo build --release Finished release [optimized] target(s) in 0.0 secs ``` -The “dev” and “release” notifications here indicate that the compiler is -using different profiles. - - - +The “dev” and “release” notifications here indicate that the compiler is using +different profiles. ### Customizing Release Profiles - - - Cargo has default settings for each of the profiles that apply when there aren’t any `[profile.*]` sections in the project’s *Cargo.toml* file. By adding `[profile.*]` sections for any profile we want to customize, we can choose to override any subset of the default settings. For example, here are the default values for the `opt-level` setting for the `dev` and `release` profiles: +Filename: Cargo.toml + ``` [profile.dev] opt-level = 0 @@ -112,15 +75,6 @@ them in *Cargo.toml*. If we wanted to use optimization level 1 in the development profile, for example, we can add these two lines to our project’s *Cargo.toml*: - - - Filename: Cargo.toml ``` @@ -134,7 +88,7 @@ will use the defaults for the `dev` profile plus our customization to optimizations than the default, but not as many as a release build. For the full list of configuration options and defaults for each profile, see -Cargo’s documentation at *http://doc.rust-lang.org/cargo/*. +Cargo’s documentation at *https://doc.rust-lang.org/cargo/*. 
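To recap with a concrete snippet, the two-line `dev` profile customization described above amounts to the following in *Cargo.toml*:

```
[profile.dev]
opt-level = 1
```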
## Publishing a Crate to Crates.io @@ -158,13 +112,9 @@ contents of documentation comments for public API items, intended for programmers interested in knowing how to *use* your crate, as opposed to how your crate is *implemented*. - - - Documentation comments use `///` instead of `//` and support Markdown notation for formatting the text if you’d like. You place documentation comments just -before the item they are documenting. Listing 14-2 shows documentation comments +before the item they are documenting. Listing 14-1 shows documentation comments for an `add_one` function in a crate named `my_crate`: Filename: src/lib.rs @@ -184,11 +134,7 @@ pub fn add_one(x: i32) -> i32 { } ``` -Listing 14-2: A documentation comment for a function - - - +Listing 14-1: A documentation comment for a function Here, we give a description of what the `add_one` function does, then start a section with the heading “Examples”, and code that demonstrates how to use the @@ -201,36 +147,28 @@ For convenience, running `cargo doc --open` will build the HTML for your current crate’s documentation (as well as the documentation for all of your crate’s dependencies) and open the result in a web browser. Navigate to the `add_one` function and you’ll see how the text in the documentation comments -gets rendered, shown here in Figure 14-3: +gets rendered, shown here in Figure 14-2: Rendered HTML documentation for the `add_one` function of `my_crate` -Figure 14-3: HTML documentation for the `add_one` function - - - +Figure 14-2: HTML documentation for the `add_one` function #### Commonly Used Sections -We used the `# Examples` markdown heading in Listing 14-2 to create a section +We used the `# Examples` markdown heading in Listing 14-1 to create a section in the HTML with the title “Examples”. Some other sections that crate authors commonly use in their documentation include: -- Panics: The scenarios in which this function could `panic!`. Callers of this - function who don’t want their programs to panic should make sure that they - don’t call this function in these situations. -- Errors: If this function returns a `Result`, describing the kinds of errors - that might occur and what conditions might cause those errors to be returned - can be helpful to callers so that they can write code to handle the different - kinds of errors in different ways. -- Safety: If this function uses `unsafe` code (which we will discuss in Chapter - 19), there should be a section covering the invariants that this function - expects callers to uphold in order for the code in `unsafe` blocks to - function correctly. +* **Panics**: The scenarios in which this function could `panic!`. Callers of + this function who don’t want their programs to panic should make sure that + they don’t call this function in these situations. +* **Errors**: If this function returns a `Result`, describing the kinds of + errors that might occur and what conditions might cause those errors to be + returned can be helpful to callers so that they can write code to handle the + different kinds of errors in different ways. +* **Safety**: If this function is `unsafe` to call (we will discuss unsafety in + Chapter 19), there should be a section explaining why the function is unsafe + and covering the invariants that this function expects callers to uphold. 
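As an example of one of these sections, a `Panics` entry on a hypothetical `divide` function (not part of `my_crate`’s real API) might look like this:

```
/// Divides `numerator` by `denominator`.
///
/// # Panics
///
/// Panics if `denominator` is zero.
pub fn divide(numerator: i32, denominator: i32) -> i32 {
    if denominator == 0 {
        panic!("denominator must not be zero");
    }
    numerator / denominator
}
```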
Most documentation comment sections don’t need all of these sections, but this is a good list to check to remind you of the kinds of things that people @@ -244,7 +182,7 @@ running `cargo test` will run the code examples in your documentation as tests! Nothing is better than documentation with examples. Nothing is worse than examples that don’t actually work because the code has changed since the documentation has been written. Try running `cargo test` with the documentation -for the `add_one` function like in Listing 14-2; you should see a section in +for the `add_one` function like in Listing 14-1; you should see a section in the test results like this: ``` @@ -262,21 +200,16 @@ tests catch that the example and the code are out of sync from one another! #### Commenting Contained Items - - - There’s another style of doc comment, `//!`, that adds documentation to the item that contains the comments, rather than adding documentation to the items following the comments. These are typically used inside the crate root file -(*src/lib.rs*) or inside a module’s root (*mod.rs*) to document the crate or -the module as a whole. +(*src/lib.rs* by convention) or inside a module to document the crate or the +module as a whole. For example, if we wanted to add documentation that described the purpose of the `my_crate` crate that contains the `add_one` function, we can add documentation comments that start with `//!` to the beginning of *src/lib.rs* -as shown in Listing 14-4: +as shown in Listing 14-3: Filename: src/lib.rs @@ -290,7 +223,7 @@ Filename: src/lib.rs // ...snip... ``` -Listing 14-4: Documentation for the `my_crate` crate as a whole +Listing 14-3: Documentation for the `my_crate` crate as a whole Notice there isn’t any code after the last line that begins with `//!`. Because we started the comments with `//!` instead of `///`, we’re documenting the item @@ -300,22 +233,18 @@ is the crate root. These comments describe the entire crate. If we run `cargo doc --open`, we’ll see these comments displayed on the front page of the documentation for `my_crate` above the list of public items in the -crate, as shown in Figure 14-5: +crate, as shown in Figure 14-4: Rendered HTML documentation with a comment for the crate as a whole -Figure 14-5: Rendered documentation for `my_crate` including the comment +Figure 14-4: Rendered documentation for `my_crate` including the comment describing the crate as a whole - - - Documentation comments within items are useful for describing crates and modules especially. Use them to talk about the purpose of the container overall to help users of your crate understand your organization. -### Exporting a Convenient Public API with `pub use` +#### Exporting a Convenient Public API with `pub use` In Chapter 7, we covered how to organize our code into modules with the `mod` keyword, how to make items public with the `pub` keyword, and how to bring @@ -328,11 +257,6 @@ also be annoyed at having to type `use my_crate::some_module::another_module::UsefulType;` rather than `use my_crate::UsefulType;`. - - - The structure of your public API is a major consideration when publishing a crate. People who use your crate are less familiar with the structure than you are, and might have trouble finding the pieces they want to use if the module @@ -345,13 +269,10 @@ to your private structure, using `pub use`. Re-exporting takes a public item in one location and makes it public in another location as if it was defined in the other location instead. 
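Before the fuller `art` example that follows, here is a minimal, hypothetical sketch of the mechanism; the `internal` module and `helper` function names are invented purely for illustration:

Filename: src/lib.rs

```
pub mod internal {
    /// A public item buried inside a nested module.
    pub fn helper() {
        println!("helping");
    }
}

// Re-export `helper` at the crate root so that users of the crate can bring
// it into scope with `use` directly from the crate root instead of naming
// the `internal` module.
pub use internal::helper;
```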
- - - For example, say we made a library named `art` for modeling artistic concepts. Within this library is a `kinds` module containing two enums named `PrimaryColor` and `SecondaryColor` and a `utils` module containing a function -named `mix` as shown in Listing 14-6: +named `mix` as shown in Listing 14-5: Filename: src/lib.rs @@ -387,16 +308,16 @@ pub mod utils { } ``` -Listing 14-6: An `art` library with items organized into `kinds` and `utils` +Listing 14-5: An `art` library with items organized into `kinds` and `utils` modules The front page of the documentation for this crate generated by `cargo doc` -would look like Figure 14-7: +would look like Figure 14-6: Rendered documentation for the `art` crate that lists the `kinds` and `utils` modules -Figure 14-7: Front page of the documentation for `art` -that lists the `kinds` and `utils` modules +Figure 14-6: Front page of the documentation for `art` that lists the `kinds` +and `utils` modules Note that the `PrimaryColor` and `SecondaryColor` types aren’t listed on the front page, nor is the `mix` function. We have to click on `kinds` and `utils` @@ -404,7 +325,7 @@ in order to see them. Another crate depending on this library would need `use` statements that import the items from `art` including specifying the module structure that’s currently -defined. Listing 14-8 shows an example of a crate that uses the `PrimaryColor` +defined. Listing 14-7 shows an example of a crate that uses the `PrimaryColor` and `mix` items from the `art` crate: Filename: src/main.rs @@ -422,16 +343,10 @@ fn main() { } ``` -Listing 14-8: A crate using the `art` crate’s items with its internal structure +Listing 14-7: A crate using the `art` crate’s items with its internal structure exported - - - -The author of the code in Listing 14-8 that uses the `art` crate had to figure +The author of the code in Listing 14-7 that uses the `art` crate had to figure out that `PrimaryColor` is in the `kinds` module and `mix` is in the `utils` module. The module structure of the `art` crate is more relevant to developers working on the `art` crate than developers using the `art` crate. The internal @@ -442,8 +357,8 @@ confusion in having to figure out where to look and inconvenience in having to specify the module names in the `use` statements. To remove the internal organization from the public API, we can take the `art` -crate code from Listing 14-6 and add `pub use` statements to re-export the -items at the top level, as shown in Listing 14-9: +crate code from Listing 14-5 and add `pub use` statements to re-export the +items at the top level, as shown in Listing 14-8: Filename: src/lib.rs @@ -465,22 +380,20 @@ pub mod utils { } ``` -Listing 14-9: Adding `pub use` statements to re-export items - - +Listing 14-8: Adding `pub use` statements to re-export items The API documentation generated with `cargo doc` for this crate will now list -and link re-exports on the front page as shown in Figure 14-10, which makes +and link re-exports on the front page as shown in Figure 14-9, which makes these types easier to find. 
Rendered documentation for the `art` crate with the re-exports on the front page -Figure 14-10: Front page of the documentation for `art` that lists the +Figure 14-9: Front page of the documentation for `art` that lists the re-exports Users of the `art` crate can still see and choose to use the internal structure -as in Listing 14-8, or they can use the more convenient structure from Listing -14-9, as shown in Listing 14-11: +as in Listing 14-7, or they can use the more convenient structure from Listing +14-8, as shown in Listing 14-10: Filename: src/main.rs @@ -495,9 +408,7 @@ fn main() { } ``` -Listing 14-11: A program using the re-exported items from the `art` crate - - +Listing 14-10: A program using the re-exported items from the `art` crate In cases where there are many nested modules, re-exporting the types at the top level with `pub use` can make a big difference in the experience of people who @@ -514,7 +425,7 @@ structure differs from their public API. Before you can publish any crates, you need to create an account on crates.io and get an API token. To do so, visit the home page at *https://crates.io* and -log in via a GitHub account---the GitHub account is a requirement for now, but +log in via a GitHub account—the GitHub account is a requirement for now, but the site may support other ways of creating an account in the future. Once you’re logged in, visit your account settings at *https://crates.io/me* and retrieve your API key. Then run the `cargo login` command with your API key, @@ -525,7 +436,7 @@ $ cargo login abcdefghijklmnopqrstuvwxyz012345 ``` This command will inform Cargo of your API token and store it locally in -*~/.cargo/credentials*. Note that this token is a **secret** and should not be +*~/.cargo/credentials*. Note that this token is a *secret* and should not be shared with anyone else. If it is shared with anyone for any reason, you should revoke it and generate a new token on Crates.io. @@ -535,9 +446,6 @@ Now you have an account, and let’s say you already have a crate you want to publish. Before publishing, you’ll need to add some metadata to your crate by adding it to the `[package]` section of the crate’s *Cargo.toml*. - - - Your crate will first need a unique name. While you’re working on a crate locally, you may name a crate whatever you’d like. However, crate names on Crates.io are allocated on a first-come-first-serve basis. Once a crate name is @@ -546,6 +454,8 @@ you’d like to use on the site to find out if it has been taken. If it hasn’t edit the name in *Cargo.toml* under `[package]` to have the name you want to use for publishing like so: +Filename: Cargo.toml + ``` [package] name = "guessing_game" @@ -575,16 +485,14 @@ Package Data Exchange (SPDX) at *http://spdx.org/licenses/* lists the identifiers you can use for this value. For example, to specify that you’ve licensed your crate using the MIT License, add the `MIT` identifier: +Filename: Cargo.toml + ``` [package] name = "guessing_game" license = "MIT" ``` - - - If you want to use a license that doesn’t appear in the SPDX, you need to place the text of that license in a file, include the file in your project, then use `license-file` to specify the name of that file instead of using the `license` @@ -592,7 +500,7 @@ key. Guidance on which license is right for your project is out of scope for this book. 
Many people in the Rust community choose to license their projects in the -same way as Rust itself, with a dual license of `MIT/Apache-2.0`---this +same way as Rust itself, with a dual license of `MIT/Apache-2.0`—this demonstrates that you can also specify multiple license identifiers separated by a slash. @@ -600,6 +508,8 @@ So, with a unique name, the version, and author details that `cargo new` added when you created the crate, your description, and the license you chose added, the *Cargo.toml* for a project that’s ready to publish might look like this: +Filename: Cargo.toml + ``` [package] name = "guessing_game" @@ -611,7 +521,7 @@ license = "MIT/Apache-2.0" [dependencies] ``` -Cargo's documentation at *http://doc.rust-lang.org/cargo/* describes other +Cargo’s documentation at *https://doc.rust-lang.org/cargo/* describes other metadata you can specify to ensure your crate can be discovered and used more easily! @@ -648,10 +558,9 @@ anyone can easily add your crate as a dependency of their project. When you’ve made changes to your crate and are ready to release a new version, you change the `version` value specified in your *Cargo.toml* and republish. -Use the Semantic Versioning rules at *http://semver.org/* to decide what an appropriate next -version number is based on the kinds of changes you’ve made. Then run `cargo -publish` to upload the new version. - +Use the Semantic Versioning rules at *http://semver.org/* to decide what an +appropriate next version number is based on the kinds of changes you’ve made. +Then run `cargo publish` to upload the new version. ### Removing Versions from Crates.io with `cargo yank` @@ -660,11 +569,11 @@ projects from adding them as a new dependency. This is useful when a version of a crate ends up being broken for one reason or another. For situations such as this, Cargo supports *yanking* a version of a crate. -Yanking a version prevents new projects from starting to depend on that -version while allowing all existing projects that depend on it to continue to -download and depend on that version. Essentially, a yank means that all -projects with a *Cargo.lock* will not break, while any future *Cargo.lock* -files generated will not use the yanked version. +Yanking a version prevents new projects from starting to depend on that version +while allowing all existing projects that depend on it to continue to download +and depend on that version. Essentially, a yank means that all projects with a +*Cargo.lock* will not break, while any future *Cargo.lock* files generated will +not use the yanked version. To yank a version of a crate, run `cargo yank` and specify which version you want to yank: @@ -711,6 +620,8 @@ We need to modify the binary package’s *Cargo.toml* and add a `[workspace]` section to tell Cargo the `adder` package is a workspace. Add this at the bottom of the file: +Filename: Cargo.toml + ``` [workspace] ``` @@ -719,26 +630,20 @@ Like many Cargo features, workspaces support convention over configuration: we don’t need to add anything more than this to *Cargo.toml* to define our workspace as long as we follow the convention. - - - ### Specifying Workspace Dependencies -The workspace convention says any crates in any subdirectories that the -top-level crate depends on are part of the workspace. Any crate, whether in a -workspace or not, can specify that it has a dependency on a crate in a local -directory by using the `path` attribute on the dependency specification in -*Cargo.toml*. 
If a crate has the `[workspace]` key and we specify path -dependencies where the paths are subdirectories of the crate’s directory, those -dependent crates will be considered part of the workspace. Let’s specify in the -*Cargo.toml* for the top-level `adder` crate that it will have a dependency on -an `add-one` crate that will be in the `add-one` subdirectory, by changing -*Cargo.toml* to look like this: +By default, Cargo will include all transitive path dependencies. A *path +dependency* is when any crate, whether in a workspace or not, specifies that it +has a dependency on a crate in a local directory by using the `path` attribute +on the dependency specification in *Cargo.toml*. If a crate has the +`[workspace]` key, or if the crate is itself part of a workspace, and we +specify path dependencies where the paths are subdirectories of the crate’s +directory, those dependent crates will be considered part of the workspace. +Let’s specify in the *Cargo.toml* for the top-level `adder` crate that it will +have a dependency on an `add-one` crate that will be in the `add-one` +subdirectory, by changing *Cargo.toml* to look like this: - - +Filename: Cargo.toml ``` [dependencies] @@ -751,10 +656,6 @@ and are assumed to come from Crates.io. ### Creating the Second Crate in the Workspace - - - Next, while in the `adder` directory, generate an `add-one` crate: ``` @@ -784,12 +685,11 @@ pub fn add_one(x: i32) -> i32 { } ``` - - - Open up *src/main.rs* for `adder` and add an `extern crate` line at the top of the file to bring the new `add-one` library crate into scope. Then change the -`main` function to call the `add_one` function, as in Listing 14-12: +`main` function to call the `add_one` function, as in Listing 14-11: + +Filename: src/main.rs ``` extern crate add_one; @@ -800,7 +700,7 @@ fn main() { } ``` -Listing 14-12: Using the `add-one` library crate from the `adder` crate +Listing 14-11: Using the `add-one` library crate from the `adder` crate Let’s build the `adder` crate by running `cargo build` in the *adder* directory! @@ -836,14 +736,6 @@ its own *target* directory. By sharing one *target* directory, the crates in the workspace can avoid rebuilding the other crates in the workspace more than necessary. - - - #### Depending on an External Crate in a Workspace Also notice the workspace only has one *Cargo.lock*, rather than having a @@ -1001,9 +893,6 @@ does not have an `--all` flag or a `-p` flag, so it is necessary to change to each crate’s directory and run `cargo publish` on each crate in the workspace in order to publish them. - - - Now try adding an `add-two` crate to this workspace in a similar way as the `add-one` crate for some more practice! @@ -1024,13 +913,10 @@ target that isn’t runnable on its own but is suitable for including within other programs. Usually, crates have information in the *README* file about whether a crate is a library, has a binary target, or both. - - - All binaries from `cargo install` are put into the installation root’s *bin* folder. If you installed Rust using *rustup.rs* and don’t have any custom -configurations, this will be `$HOME/.cargo/bin`. Add that directory to your -`$PATH` to be able to run programs you’ve gotten through `cargo install`. +configurations, this will be `$HOME/.cargo/bin`. Ensure that directory is in +your `$PATH` to be able to run programs you’ve gotten through `cargo install`. For example, we mentioned in Chapter 12 that there’s a Rust implementation of the `grep` tool for searching files called `ripgrep`. 
If we want to install diff --git a/src/doc/book/second-edition/nostarch/chapter15.md b/src/doc/book/second-edition/nostarch/chapter15.md index 57fbc43c13..cbc8c67f73 100644 --- a/src/doc/book/second-edition/nostarch/chapter15.md +++ b/src/doc/book/second-edition/nostarch/chapter15.md @@ -3,52 +3,141 @@ # Smart Pointers -*Pointer* is a generic programming term for something that refers to a location -that stores some other data. We learned about Rust’s references in Chapter 4; -they’re a plain sort of pointer indicated by the `&` symbol and borrow the -value that they point to. *Smart pointers* are data structures that act like a -pointer, but also have additional metadata and capabilities, such as reference -counting. The smart pointer pattern originated in C++. In Rust, an additional -difference between plain references and smart pointers is that references are a -kind of pointer that only borrow data; by contrast, in many cases, smart -pointers *own* the data that they point to. - -We’ve actually already encountered a few smart pointers in this book, even -though we didn’t call them that by name at the time. For example, in a certain -sense, `String` and `Vec` from Chapter 8 are both smart pointers. They own -some memory and allow you to manipulate it, and have metadata (like their -capacity) and extra capabilities or guarantees (`String` data will always be -valid UTF-8). The characteristics that distinguish a smart pointer from an -ordinary struct are that smart pointers implement the `Deref` and `Drop` -traits, and in this chapter we’ll be discussing both of those traits and why -they’re important to smart pointers. +A *pointer* is a general concept for a variable that contains an address in +memory. This address refers to, or “points at”, some other data. The most +common kind of pointer in Rust is a *reference*, which we learned about in +Chapter 4. References are indicated by the `&` symbol and borrow the value that +they point to. They don’t have any special abilities other than referring to +data. They also don’t have any overhead, so they’re used the most often. + +*Smart pointers*, on the other hand, are data structures that act like a +pointer, but they also have additional metadata and capabilities. The concept +of smart pointers isn’t unique to Rust; it originated in C++ and exists in +other languages as well. The different smart pointers defined in Rust’s +standard library provide extra functionality beyond what references provide. +One example that we’ll explore in this chapter is the *reference counting* +smart pointer type, which enables you to have multiple owners of data. The +reference counting smart pointer keeps track of how many owners there are, and +when there aren’t any remaining, the smart pointer takes care of cleaning up +the data. + + + + + + + +In Rust, where we have the concept of ownership and borrowing, an additional +difference between references and smart pointers is that references are a kind +of pointer that only borrow data; by contrast, in many cases, smart pointers +*own* the data that they point to. + +We’ve actually already encountered a few smart pointers in this book, such as +`String` and `Vec` from Chapter 8, though we didn’t call them smart pointers +at the time. Both these types count as smart pointers because they own some +memory and allow you to manipulate it. They also have metadata (such as their +capacity) and extra capabilities or guarantees (such as `String` ensuring its +data will always be valid UTF-8). 
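As a small sketch of that point (again, not one of the book's listings), the capacity metadata a `String` carries alongside the data it owns is easy to observe:

```
fn main() {
    // Ask for room for 10 bytes up front, then store only 2 of them.
    let mut s = String::with_capacity(10);
    s.push_str("hi");

    // The String owns its heap buffer and tracks both how much of it is in
    // use (`len`) and how much has been allocated (`capacity`).
    println!("len = {}, capacity = {}", s.len(), s.capacity());
}
```

This prints `len = 2, capacity = 10`.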
+ + + + +Smart pointers are usually implemented using structs. The characteristics that +distinguish a smart pointer from an ordinary struct are that smart pointers +implement the `Deref` and `Drop` traits. The `Deref` trait allows an instance +of the smart pointer struct to behave like a reference so that we can write +code that works with either references or smart pointers. The `Drop` trait +allows us to customize the code that gets run when an instance of the smart +pointer goes out of scope. In this chapter, we’ll be discussing both of those +traits and demonstrating why they’re important to smart pointers. Given that the smart pointer pattern is a general design pattern used frequently in Rust, this chapter won’t cover every smart pointer that exists. -Many libraries have their own and you may write some yourself. The ones we -cover here are the most common ones from the standard library: - -* `Box`, for allocating values on the heap -* `Rc`, a reference counted type so data can have multiple owners -* `RefCell`, which isn’t a smart pointer itself, but manages access to the - smart pointers `Ref` and `RefMut` to enforce the borrowing rules at runtime - instead of compile time - -Along the way, we’ll also cover: - -* The *interior mutability* pattern where an immutable type exposes an API for - mutating an interior value, and the borrowing rules apply at runtime instead - of compile time -* Reference cycles, how they can leak memory, and how to prevent them +Many libraries have their own smart pointers and you can even write some +yourself. We’ll just cover the most common smart pointers from the standard +library: + + + + +* `Box` for allocating values on the heap +* `Rc`, a reference counted type that enables multiple ownership +* `Ref` and `RefMut`, accessed through `RefCell`, a type that enforces + the borrowing rules at runtime instead of compile time + + + + +Along the way, we’ll cover the *interior mutability* pattern where an immutable +type exposes an API for mutating an interior value. We’ll also discuss +*reference cycles*, how they can leak memory, and how to prevent them. Let’s dive in! ## `Box` Points to Data on the Heap and Has a Known Size The most straightforward smart pointer is a *box*, whose type is written -`Box`. Boxes allow you to put a single value on the heap (we talked about -the stack vs. the heap in Chapter 4). Listing 15-1 shows how to use a box to -store an `i32` on the heap: +`Box`. Boxes allow you to store data on the heap rather than the stack. What +remains on the stack is the pointer to the heap data. Refer back to Chapter 4 +if you’d like to review the difference between the stack and the heap. + + + + +Boxes don’t have performance overhead other than their data being on the heap +instead of on the stack, but they don’t have a lot of extra abilities either. +They’re most often used in these situations: + +- When you have a type whose size can’t be known at compile time, and you want + to use a value of that type in a context that needs to know an exact size +- When you have a large amount of data and you want to transfer ownership but + ensure the data won’t be copied when you do so +- When you want to own a value and only care that it’s a type that implements a + particular trait rather than knowing the concrete type itself + +We’re going to demonstrate the first case in the rest of this section. 
To +elaborate on the other two situations a bit more: in the second case, +transferring ownership of a large amount of data can take a long time because +the data gets copied around on the stack. To improve performance in this +situation, we can store the large amount of data on the heap in a box. Then, +only the small amount of pointer data is copied around on the stack, and the +data stays in one place on the heap. The third case is known as a *trait +object*, and Chapter 17 has an entire section devoted just to that topic. So +know that what you learn here will be applied again in Chapter 17! + +### Using a `Box` to Store Data on the Heap + +Before we get into a use case for `Box`, let’s get familiar with the syntax +and how to interact with values stored within a `Box`. + +Listing 15-1 shows how to use a box to store an `i32` on the heap: Filename: src/main.rs @@ -61,28 +150,104 @@ fn main() { Listing 15-1: Storing an `i32` value on the heap using a box -This will print `b = 5`. In this case, we can access the data in the box in a -similar way as we would if this data was on the stack. Just like any value that -has ownership of data, when a box goes out of scope like `b` does at the end of -`main`, it will be deallocated. The deallocation happens for both the box -(stored on the stack) and the data it points to (stored on the heap). +We define the variable `b` to have the value of a `Box` that points to the +value `5`, which is allocated on the heap. This program will print `b = 5`; in +this case, we can access the data in the box in a similar way as we would if +this data was on the stack. Just like any value that has ownership of data, +when a box goes out of scope like `b` does at the end of `main`, it will be +deallocated. The deallocation happens for both the box (stored on the stack) +and the data it points to (stored on the heap). Putting a single value on the heap isn’t very useful, so you won’t use boxes by -themselves in the way that Listing 15-1 does very often. A time when boxes are -useful is when you want to ensure that your type has a known size. For -example, consider Listing 15-2, which contains an enum definition for a *cons -list*, a type of data structure that comes from functional programming. - -A cons list is a list where each item contains a value and the next item until -the end of the list, which is signified by a value called `Nil`. Note that we -aren’t introducing the idea of “nil” or “null” that we discussed in Chapter 6, -this is just a regular enum variant name we’re using because it’s the canonical -name to use when describing the cons list data structure. Cons lists aren’t -used very often in Rust, `Vec` is a better choice most of the time, but -implementing this data structure is useful as an example. - -Here’s our first try at defining a cons list as an enum; note that this won’t -compile quite yet: +themselves in the way that Listing 15-1 does very often. Having values like a +single `i32` on the stack, where they’re stored by default is more appropriate +in the majority of cases. Let’s get into a case where boxes allow us to define +types that we wouldn’t be allowed to if we didn’t have boxes. + + + + +### Boxes Enable Recursive Types + + + + + + +Rust needs to know at compile time how much space a type takes up. One kind of +type whose size can’t be known at compile time is a *recursive type* where a +value can have as part of itself another value of the same type. 
This nesting +of values could theoretically continue infinitely, so Rust doesn’t know how +much space a value of a recursive type needs. Boxes have a known size, however, +so by inserting a box in a recursive type definition, we are allowed to have +recursive types. + +Let’s explore the *cons list*, a data type common in functional programming +languages, to illustrate this concept. The cons list type we’re going to define +is straightforward except for the recursion, so the concepts in this example +will be useful any time you get into more complex situations involving +recursive types. + + + + +A cons list is a list where each item in the list contains two things: the +value of the current item and the next item. The last item in the list contains +only a value called `Nil` without a next item. + +> #### More Information About the Cons List +> +> A *cons list* is a data structure that comes from the Lisp programming +> language and its dialects. In Lisp, the `cons` function (short for “construct +> function”) constructs a new list from its two arguments, which usually are a +> single value and another list. +> +> The cons function concept has made its way into more general functional +> programming jargon; “to cons x onto y” informally means to construct a new +> container instance by putting the element x at the start of this new +> container, followed by the container y. +> +> A cons list is produced by recursively calling the `cons` function. +> The canonical name to denote the base case of the recursion is `Nil`, which +> announces the end of the list. Note that this is not the same as the “null” +> or “nil” concept from Chapter 6, which is an invalid or absent value. + +Note that while functional programming languages use cons lists frequently, +this isn’t a commonly used data structure in Rust. Most of the time when you +have a list of items in Rust, `Vec` is a better choice. Other, more complex +recursive data types *are* useful in various situations in Rust, but by +starting with the cons list, we can explore how boxes let us define a recursive +data type without much distraction. + + + + +Listing 15-2 contains an enum definition for a cons list. Note that this +won’t compile quite yet because this is type doesn’t have a known size, which +we’ll demonstrate: + + + Filename: src/main.rs @@ -96,12 +261,21 @@ enum List { Listing 15-2: The first attempt of defining an enum to represent a cons list data structure of `i32` values -We’re choosing to implement a cons list that only holds `i32` values, but we -could have chosen to implement it using generics as we discussed in Chapter 10 -to define a cons list concept independent of the type of value stored in the -cons list. +> Note: We’re choosing to implement a cons list that only holds `i32` values +> for the purposes of this example. We could have implemented it using +> generics, as we discussed in Chapter 10, in order to define a cons list type +> that could store values of any type. + + + -Using a cons list to store the list `1, 2, 3` would look like this: +Using our cons list type to store the list `1, 2, 3` would look like the code +in Listing 15-3: + +Filename: src/main.rs ``` use List::{Cons, Nil}; @@ -111,35 +285,47 @@ fn main() { } ``` +Listing 15-3: Using the `List` enum to store the list `1, 2, 3` + The first `Cons` value holds `1` and another `List` value. This `List` value is another `Cons` value that holds `2` and another `List` value. 
This is one more `Cons` value that holds `3` and a `List` value, which is finally `Nil`, the non-recursive variant that signals the end of the list. -If we try to compile the above code, we get the error shown in Listing 15-3: +If we try to compile the above code, we get the error shown in Listing 15-4: ``` error[E0072]: recursive type `List` has infinite size --> | -1 | enum List { - | _^ starting here... -2 | | Cons(i32, List), -3 | | Nil, -4 | | } - | |_^ ...ending here: recursive type has infinite size +1 | enum List { + | ^^^^^^^^^ recursive type has infinite size +2 | Cons(i32, List), + | --------------- recursive without indirection | = help: insert indirection (e.g., a `Box`, `Rc`, or `&`) at some point to make `List` representable ``` -Listing 15-3: The error we get when attempting to define a recursive enum +Listing 15-4: The error we get when attempting to define a recursive enum + + + + +The error says this type ‘has infinite size’. The reason is the way we’ve +defined `List` is with a variant that is recursive: it holds another value of +itself directly. This means Rust can’t figure out how much space it needs in +order to store a `List` value. Let’s break this down a bit: first let’s look at +how Rust decides how much space it needs to store a value of a non-recursive +type. + +### Computing the Size of a Non-Recursive Type -The error says this type ‘has infinite size’. Why is that? It’s because we’ve -defined `List` to have a variant that is recursive: it holds another value of -itself. This means Rust can’t figure out how much space it needs in order to -store a `List` value. Let’s break this down a bit: first let’s look at how Rust -decides how much space it needs to store a value of a non-recursive type. Recall the `Message` enum we defined in Listing 6-2 when we discussed enum definitions in Chapter 6: @@ -152,28 +338,30 @@ enum Message { } ``` -When Rust needs to know how much space to allocate for a `Message` value, it -can go through each of the variants and see that `Message::Quit` does not need -any space, `Message::Move` needs enough space to store two `i32` values, and so -forth. Therefore, the most space a `Message` value will need is the space it -would take to store the largest of its variants. +To determine how much space to allocate for a `Message` value, Rust goes +through each of the variants to see which variant needs the most space. Rust +sees that `Message::Quit` doesn’t need any space, `Message::Move` needs enough +space to store two `i32` values, and so forth. Since only one variant will end +up being used, the most space a `Message` value will need is the space it would +take to store the largest of its variants. -Contrast this to what happens when the Rust compiler looks at a recursive type -like `List` in Listing 15-2. The compiler tries to figure out how much memory -is needed to store value of `List`, and starts by looking at the `Cons` +Contrast this to what happens when Rust tries to determine how much space a +recursive type like the `List` enum in Listing 15-2 needs. The compiler starts +by looking at the `Cons` variant, which holds a value of type `i32` and a value +of type `List`. Therefore, `Cons` needs an amount of space equal to the size of +an `i32` plus the size of a `List`. To figure out how much memory the `List` +type needs, the compiler looks at the variants, starting with the `Cons` variant. 
The `Cons` variant holds a value of type `i32` and a value of type -`List`, so `Cons` needs an amount of space equal to the size of an `i32` plus -the size of a `List`. To figure out how much memory a `List` needs, it looks at -its variants, starting with the `Cons` variant. The `Cons` variant holds a -value of type `i32` and a value of type `List`, and this continues infinitely, -as shown in Figure 15-4. +`List`, and this continues infinitely, as shown in Figure 15-5. An infinite Cons list -Figure 15-4: An infinite `List` consisting of infinite `Cons` variants +Figure 15-5: An infinite `List` consisting of infinite `Cons` variants + +### Using `Box` to Get a Recursive Type with a Known Size Rust can’t figure out how much space to allocate for recursively defined types, -so the compiler gives the error in Listing 15-3. The error did include this +so the compiler gives the error in Listing 15-4. The error does include this helpful suggestion: ``` @@ -181,13 +369,23 @@ helpful suggestion: make `List` representable ``` -Because a `Box` is a pointer, we always know how much space it needs: a -pointer takes up a `usize` amount of space. The value of the `usize` will be -the address of the heap data. The heap data can be any size, but the address to -the start of that heap data will always fit in a `usize`. So if we change our -definition from Listing 15-2 to look like the definition here in Listing 15-5, -and change `main` to use `Box::new` for the values inside the `Cons` variants -like so: +In this suggestion, “indirection” means that instead of storing a value +directly, we’re going to store the value indirectly by storing a pointer to +the value instead. + +Because a `Box` is a pointer, Rust always knows how much space a `Box` +needs: a pointer’s size doesn’t change based on the amount of data it’s +pointing to. + +So we can put a `Box` inside the `Cons` variant instead of another `List` value +directly. The `Box` will point to the next `List` value that will be on the +heap, rather than inside the `Cons` variant. Conceptually, we still have a list +created by lists “holding” other lists, but the way this concept is implemented +is now more like the items being next to one another rather than inside one +another. + +We can change the definition of the `List` enum from Listing 15-2 and the usage +of the `List` from Listing 15-3 to the code in Listing 15-6, which will compile: Filename: src/main.rs @@ -207,253 +405,540 @@ fn main() { } ``` -Listing 15-5: Definition of `List` that uses `Box` in order to have a -known size +Listing 15-6: Definition of `List` that uses `Box` in order to have a known +size -The compiler will be able to figure out the size it needs to store a `List` -value. Rust will look at `List`, and again start by looking at the `Cons` -variant. The `Cons` variant will need the size of `i32` plus the space to store -a `usize`, since a box always has the size of a `usize`, no matter what it’s -pointing to. Then Rust looks at the `Nil` variant, which does not store a -value, so `Nil` doesn’t need any space. We’ve broken the infinite, recursive -chain by adding in a box. Figure 15-6 shows what the `Cons` variant looks like -now: +The `Cons` variant will need the size of an `i32` plus the space to store the +box’s pointer data. The `Nil` variant stores no values, so it needs less space +than the `Cons` variant. We now know that any `List` value will take up the +size of an `i32` plus the size of a box’s pointer data. 
By using a box, we’ve +broken the infinite, recursive chain so the compiler is able to figure out the +size it needs to store a `List` value. Figure 15-7 shows what the `Cons` +variant looks like now: A finite Cons list -Figure 15-6: A `List` that is not infinitely sized since `Cons` holds a `Box` +Figure 15-7: A `List` that is not infinitely sized since `Cons` holds a `Box` + + + + +Boxes only provide the indirection and heap allocation; they don’t have any +other special abilities like those we’ll see with the other smart pointer +types. They also don’t have any performance overhead that these special +abilities incur, so they can be useful in cases like the cons list where the +indirection is the only feature we need. We’ll look at more use cases for boxes +in Chapter 17, too. + +The `Box` type is a smart pointer because it implements the `Deref` trait, +which allows `Box` values to be treated like references. When a `Box` +value goes out of scope, the heap data that the box is pointing to is cleaned +up as well because of the `Box` type’s `Drop` trait implementation. Let’s +explore these two types in more detail; these traits are going to be even more +important to the functionality provided by the other smart pointer types we’ll +be discussing in the rest of this chapter. + + + + +## Treating Smart Pointers like Regular References with the `Deref` Trait + +Implementing `Deref` trait allows us to customize the behavior of the +*dereference operator* `*`(as opposed to the multiplication or glob operator). +By implementing `Deref` in such a way that a smart pointer can be treated like +a regular reference, we can write code that operates on references and use that +code with smart pointers too. + + + + + + + +Let’s first take a look at how `*` works with regular references, then try and +define our own type like `Box` and see why `*` doesn’t work like a +reference. We’ll explore how implementing the `Deref` trait makes it possible +for smart pointers to work in a similar way as references. Finally, we’ll look +at the *deref coercion* feature of Rust and how that lets us work with either +references or smart pointers. + +### Following the Pointer to the Value with `*` + + + + + + + +A regular reference is a type of pointer, and one way to think of a pointer is +that it’s an arrow to a value stored somewhere else. In Listing 15-8, let’s +create a reference to an `i32` value then use the dereference operator to +follow the reference to the data: + + + + + + + +Filename: src/main.rs -This is the main area where boxes are useful: breaking up an infinite data -structure so that the compiler can know what size it is. We’ll look at another -case where Rust has data of unknown size in Chapter 17 when we discuss trait -objects. +``` +fn main() { + let x = 5; + let y = &x; -Even though you won’t be using boxes very often, they are a good way to -understand the smart pointer pattern. Two of the aspects of `Box` that are -commonly used with smart pointers are its implementations of the `Deref` trait -and the `Drop` trait. Let’s investigate how these traits work and how smart -pointers use them. + assert_eq!(5, x); + assert_eq!(5, *y); +} +``` -## The `Deref` Trait Allows Access to the Data Through a Reference +Listing 15-8: Using the dereference operator to follow a reference to an `i32` +value -The first important smart pointer-related trait is `Deref`, which allows us to -override `*`, the dereference operator (as opposed to the multiplication -operator or the glob operator). 
Overriding `*` for smart pointers makes -accessing the data behind the smart pointer convenient, and we’ll talk about -what we mean by convenient when we get to deref coercions later in this section. +The variable `x` holds an `i32` value, `5`. We set `y` equal to a reference to +`x`. We can assert that `x` is equal to `5`. However, if we want to make an +assertion about the value in `y`, we have to use `*y` to follow the reference +to the value that the reference is pointing to (hence *de-reference*). Once we +de-reference `y`, we have access to the integer value `y` is pointing to that +we can compare with `5`. -We briefly mentioned the dereference operator in Chapter 8, in the hash map -section titled “Update a Value Based on the Old Value”. We had a mutable -reference, and we wanted to change the value that the reference was pointing -to. In order to do that, first we had to dereference the reference. Here’s -another example using references to `i32` values: +If we try to write `assert_eq!(5, y);` instead, we’ll get this compilation +error: ``` -let mut x = 5; -{ - let y = &mut x; +error[E0277]: the trait bound `{integer}: std::cmp::PartialEq<&{integer}>` is +not satisfied + --> :5:19 + | +5 | if ! ( * left_val == * right_val ) { + | ^^ can't compare `{integer}` with `&{integer}` + | + = help: the trait `std::cmp::PartialEq<&{integer}>` is not implemented for + `{integer}` +``` - *y += 1 -} +Comparing a reference to a number with a number isn’t allowed because they’re +different types. We have to use `*` to follow the reference to the value it’s +pointing to. + +### Using `Box` Like a Reference + +We can rewrite the code in Listing 15-8 to use a `Box` instead of a +reference, and the de-reference operator will work the same way as shown in +Listing 15-9: + +Filename: src/main.rs -assert_eq!(6, x); ``` +fn main() { + let x = 5; + let y = Box::new(x); -We use `*y` to access the data that the mutable reference in `y` refers to, -rather than the mutable reference itself. We can then modify that data, in this -case by adding 1. + assert_eq!(5, x); + assert_eq!(5, *y); +} +``` -With references that aren’t smart pointers, there’s only one value that the -reference is pointing to, so the dereference operation is straightforward. -Smart pointers can also store metadata about the pointer or the data. When -dereferencing a smart pointer, we only want the data, not the metadata, since -dereferencing a regular reference only gives us data and not metadata. We want -to be able to use smart pointers in the same places that we can use regular -references. To enable that, we can override the behavior of the `*` operator by -implementing the `Deref` trait. +Listing 15-9: Using the dereference operator on a `Box` -Listing 15-7 has an example of overriding `*` using `Deref` on a struct we’ve -defined to hold mp3 data and metadata. `Mp3` is, in a sense, a smart pointer: -it owns the `Vec` data containing the audio. In addition, it holds some -optional metadata, in this case the artist and title of the song in the audio -data. We want to be able to conveniently access the audio data, not the -metadata, so we implement the `Deref` trait to return the audio data. -Implementing the `Deref` trait requires implementing one method named `deref` -that borrows `self` and returns the inner data: +The only part of Listing 15-8 that we changed was to set `y` to be an instance +of a box pointing to the value in `x` rather than a reference pointing to the +value of `x`. 
In the last assertion, we can use the dereference operator to +follow the box’s pointer in the same way that we did when `y` was a reference. +Let’s explore what is special about `Box` that enables us to do this by +defining our own box type. + +### Defining Our Own Smart Pointer + +Let’s build a smart pointer similar to the `Box` type that the standard +library has provided for us, in order to experience that smart pointers don’t +behave like references by default. Then we’ll learn about how to add the +ability to use the dereference operator. + +`Box` is ultimately defined as a tuple struct with one element, so Listing +15-10 defines a `MyBox` type in the same way. We’ll also define a `new` +function to match the `new` function defined on `Box`: Filename: src/main.rs ``` -use std::ops::Deref; +struct MyBox(T); -struct Mp3 { - audio: Vec, - artist: Option, - title: Option, +impl MyBox { + fn new(x: T) -> MyBox { + MyBox(x) + } } +``` -impl Deref for Mp3 { - type Target = Vec; +Listing 15-10: Defining a `MyBox` type - fn deref(&self) -> &Vec { - &self.audio - } -} +We define a struct named `MyBox` and declare a generic parameter `T`, since we +want our type to be able to hold values of any type. `MyBox` is a tuple struct +with one element of type `T`. The `MyBox::new` function takes one parameter of +type `T` and returns a `MyBox` instance that holds the value passed in. + +Let’s try adding the code from Listing 15-9 to the code in Listing 15-10 and +changing `main` to use the `MyBox` type we’ve defined instead of `Box`. +The code in Listing 15-11 won’t compile because Rust doesn’t know how to +dereference `MyBox`: + +Filename: src/main.rs +``` fn main() { - let my_favorite_song = Mp3 { - // we would read the actual audio data from an mp3 file - audio: vec![1, 2, 3], - artist: Some(String::from("Nirvana")), - title: Some(String::from("Smells Like Teen Spirit")), - }; - - assert_eq!(vec![1, 2, 3], *my_favorite_song); + let x = 5; + let y = MyBox::new(x); + + assert_eq!(5, x); + assert_eq!(5, *y); +} +``` + +Listing 15-11: Attempting to use `MyBox` in the same way we were able to use +references and `Box` + +The compilation error we get is: + +``` +error: type `MyBox<{integer}>` cannot be dereferenced + --> src/main.rs:14:19 + | +14 | assert_eq!(5, *y); + | ^^ +``` + +Our `MyBox` type can’t be dereferenced because we haven’t implemented that +ability on our type. To enable dereferencing with the `*` operator, we can +implement the `Deref` trait. + +### Implementing the `Deref` Trait Defines How To Treat a Type Like a Reference + +As we discussed in Chapter 10, in order to implement a trait, we need to +provide implementations for the trait’s required methods. The `Deref` trait, +provided by the standard library, requires implementing one method named +`deref` that borrows `self` and returns a reference to the inner data. Listing +15-12 contains an implementation of `Deref` to add to the definition of `MyBox`: + +Filename: src/main.rs + +``` +use std::ops::Deref; + +# struct MyBox(T); +impl Deref for MyBox { + type Target = T; + + fn deref(&self) -> &T { + &self.0 + } } ``` -Listing 15-7: An implementation of the `Deref` trait on a struct that holds mp3 -file data and metadata +Listing 15-12: Implementing `Deref` on `MyBox` -Most of this should look familiar: a struct, a trait implementation, and a -main function that creates an instance of the struct. 
There is one part we -haven’t explained thoroughly yet: similarly to Chapter 13 when we looked at the -Iterator trait with the `type Item`, the `type Target = T;` syntax is defining -an associated type, which is covered in more detail in Chapter 19. Don’t worry -about that part of the example too much; it is a slightly different way of -declaring a generic parameter. +The `type Target = T;` syntax defines an associated type for this trait to use. +Associated types are a slightly different way of declaring a generic parameter +that you don’t need to worry about too much for now; we’ll cover it in more +detail in Chapter 19. -In the `assert_eq!`, we’re verifying that `vec![1, 2, 3]` is the result we get -when dereferencing the `Mp3` instance with `*my_favorite_song`, which is what -happens since we implemented the `deref` method to return the audio data. If -we hadn’t implemented the `Deref` trait for `Mp3`, Rust wouldn’t compile the -code `*my_favorite_song`: we’d get an error saying type `Mp3` cannot be -dereferenced. + + -The reason this code works is that what the `*` operator is doing behind -the scenes when we call `*my_favorite_song` is: +We filled in the body of the `deref` method with `&self.0` so that `deref` +returns a reference to the value we want to access with the `*` operator. The +`main` function from Listing 15-11 that calls `*` on the `MyBox` value now +compiles and the assertions pass! + +Without the `Deref` trait, the compiler can only dereference `&` references. +The `Deref` trait’s `deref` method gives the compiler the ability to take a +value of any type that implements `Deref` and call the `deref` method in order +to get a `&` reference that it knows how to dereference. + +When we typed `*y` in Listing 15-11, what Rust actually ran behind the scenes +was this code: ``` -*(my_favorite_song.deref()) +*(y.deref()) ``` -This calls the `deref` method on `my_favorite_song`, which borrows -`my_favorite_song` and returns a reference to `my_favorite_song.audio`, since -that’s what we defined `deref` to do in Listing 15-5. `*` on references is -defined to just follow the reference and return the data, so the expansion of -`*` doesn’t recurse for the outer `*`. So we end up with data of type -`Vec`, which matches the `vec![1, 2, 3]` in the `assert_eq!` in Listing -15-5. + + + +Rust substitutes the `*` operator with a call to the `deref` method and then a +plain dereference so that we don’t have to think about when we have to call the +`deref` method or not. This feature of Rust lets us write code that functions +identically whether we have a regular reference or a type that implements +`Deref`. + +The reason the `deref` method returns a reference to a value, and why the plain +dereference outside the parentheses in `*(y.deref())` is still necessary, is +because of ownership. If the `deref` method returned the value directly instead +of a reference to the value, the value would be moved out of `self`. We don’t +want to take ownership of the inner value inside `MyBox` in this case and in +most cases where we use the dereference operator. -The reason that the return type of the `deref` method is still a reference and -why it’s necessary to dereference the result of the method is that if the -`deref` method returned just the value, using `*` would always take ownership. +Note that replacing `*` with a call to the `deref` method and then a call to +`*` happens once, each time we type a `*` in our code. The substitution of `*` +does not recurse infinitely. 
That’s how we end up with data of type `i32`, +which matches the `5` in the `assert_eq!` in Listing 15-11. ### Implicit Deref Coercions with Functions and Methods -Rust tends to favor explicitness over implicitness, but one case where this -does not hold true is *deref coercions* of arguments to functions and methods. -A deref coercion will automatically convert a reference to a pointer or a smart -pointer into a reference to that pointer’s contents. A deref coercion happens -when a value is passed to a function or method, and only happens if it’s needed -to get the type of the value passed in to match the type of the parameter -defined in the signature. Deref coercion was added to Rust to make calling -functions and methods not need as many explicit references and dereferences -with `&` and `*`. + + + +*Deref coercion* is a convenience that Rust performs on arguments to functions +and methods. Deref coercion converts a reference to a type that implements +`Deref` into a reference to a type that `Deref` can convert the original type +into. Deref coercion happens automatically when we pass a reference to a value +of a particular type as an argument to a function or method that doesn’t match +the type of the parameter in the function or method definition, and there’s a +sequence of calls to the `deref` method that will convert the type we provided +into the type that the parameter needs. + +Deref coercion was added to Rust so that programmers writing function and +method calls don’t need to add as many explicit references and dereferences +with `&` and `*`. This feature also lets us write more code that can work for +either references or smart pointers. + +To illustrate deref coercion in action, let’s use the `MyBox` type we +defined in Listing 15-10 as well as the implementation of `Deref` that we added +in Listing 15-12. Listing 15-13 shows the definition of a function that has a +string slice parameter: -Using our `Mp3` struct from Listing 15-5, here’s the signature of a function to -compress mp3 audio data that takes a slice of `u8`: +Filename: src/main.rs ``` -fn compress_mp3(audio: &[u8]) -> Vec { - // the actual implementation would go here +fn hello(name: &str) { + println!("Hello, {}!", name); } ``` -If Rust didn’t have deref coercion, in order to call this function with the -audio data in `my_favorite_song`, we’d have to write: +Listing 15-13: A `hello` function that has the parameter `name` of type `&str` + +We can call the `hello` function with a string slice as an argument, like +`hello("Rust");` for example. Deref coercion makes it possible for us to call +`hello` with a reference to a value of type `MyBox`, as shown in +Listing 15-14: + +Filename: src/main.rs ``` -compress_mp3(my_favorite_song.audio.as_slice()) +# use std::ops::Deref; +# +# struct MyBox(T); +# +# impl MyBox { +# fn new(x: T) -> MyBox { +# MyBox(x) +# } +# } +# +# impl Deref for MyBox { +# type Target = T; +# +# fn deref(&self) -> &T { +# &self.0 +# } +# } +# +# fn hello(name: &str) { +# println!("Hello, {}!", name); +# } +# +fn main() { + let m = MyBox::new(String::from("Rust")); + hello(&m); +} ``` -That is, we’d have to explicitly say that we want the data in the `audio` field -of `my_favorite_song` and that we want a slice referring to the whole -`Vec`. If there were a lot of places where we’d want process the `audio` -data in a similar manner, `.audio.as_slice()` would be wordy and repetitive. 
+Listing 15-14: Calling `hello` with a reference to a `MyBox`, which +works because of deref coercion + +Here we’re calling the `hello` function with the argument `&m`, which is a +reference to a `MyBox` value. Because we implemented the `Deref` trait +on `MyBox` in Listing 15-12, Rust can turn `&MyBox` into `&String` +by calling `deref`. The standard library provides an implementation of `Deref` +on `String` that returns a string slice, which we can see in the API +documentation for `Deref`. Rust calls `deref` again to turn the `&String` into +`&str`, which matches the `hello` function’s definition. -However, because of deref coercion and our implementation of the `Deref` trait -on `Mp3`, we can call this function with the data in `my_favorite_song` by -using this code: +If Rust didn’t implement deref coercion, in order to call `hello` with a value +of type `&MyBox`, we’d have to write the code in Listing 15-15 instead +of the code in Listing 15-14: + +Filename: src/main.rs ``` -let result = compress_mp3(&my_favorite_song); +# use std::ops::Deref; +# +# struct MyBox(T); +# +# impl MyBox { +# fn new(x: T) -> MyBox { +# MyBox(x) +# } +# } +# +# impl Deref for MyBox { +# type Target = T; +# +# fn deref(&self) -> &T { +# &self.0 +# } +# } +# +# fn hello(name: &str) { +# println!("Hello, {}!", name); +# } +# +fn main() { + let m = MyBox::new(String::from("Rust")); + hello(&(*m)[..]); +} ``` -Just an `&` and the instance, nice! We can treat our smart pointer as if it was -a regular reference. Deref coercion means that Rust can use its knowledge of -our `Deref` implementation, namely: Rust knows that `Mp3` implements the -`Deref` trait and returns `&Vec` from the `deref` method. Rust also knows -the standard library implements the `Deref` trait on `Vec` to return `&[T]` -from the `deref` method (and we can find that out too by looking at the API -documentation for `Vec`). So, at compile time, Rust will see that it can use -`Deref::deref` twice to turn `&Mp3` into `&Vec` and then into `&[T]` to -match the signature of `compress_mp3`. That means we get to do less typing! -Rust will analyze types through `Deref::deref` as many times as it needs to in -order to get a reference to match the parameter’s type, when the `Deref` trait -is defined for the types involved. The indirection is resolved at compile time, -so there is no run-time penalty for taking advantage of deref coercion. +Listing 15-15: The code we’d have to write if Rust didn’t have deref coercion + +The `(*m)` is dereferencing the `MyBox` into a `String`. Then the `&` +and `[..]` are taking a string slice of the `String` that is equal to the whole +string to match the signature of `hello`. The code without deref coercions is +harder to read, write, and understand with all of these symbols involved. Deref +coercion makes it so that Rust takes care of these conversions for us +automatically. + +When the `Deref` trait is defined for the types involved, Rust will analyze the +types and use `Deref::deref` as many times as it needs in order to get a +reference to match the parameter’s type. This is resolved at compile time, so +there is no run-time penalty for taking advantage of deref coercion! + +### How Deref Coercion Interacts with Mutability -There’s also a `DerefMut` trait for overriding `*` on `&mut T` for use in -assignment in the same fashion that we use `Deref` to override `*` on `&T`s. 
+ + + +Similar to how we use the `Deref` trait to override `*` on immutable +references, Rust provides a `DerefMut` trait for overriding `*` on mutable +references. Rust does deref coercion when it finds types and trait implementations in three cases: + + + * From `&T` to `&U` when `T: Deref`. * From `&mut T` to `&mut U` when `T: DerefMut`. * From `&mut T` to `&U` when `T: Deref`. -The first two are the same, except for mutability: if you have a `&T`, and -`T` implements `Deref` to some type `U`, you can get a `&U` transparently. Same -for mutable references. The last one is more tricky: if you have a mutable -reference, it will also coerce to an immutable one. The other case is _not_ -possible though: immutable references will never coerce to mutable ones. - -The reason that the `Deref` trait is important to the smart pointer pattern is -that smart pointers can then be treated like regular references and used in -places that expect regular references. We don’t have to redefine methods and -functions to take smart pointers explicitly, for example. +The first two cases are the same except for mutability. The first case says +that if you have a `&T`, and `T` implements `Deref` to some type `U`, you can +get a `&U` transparently. The second case states that the same deref coercion +happens for mutable references. + +The last case is trickier: Rust will also coerce a mutable reference to an +immutable one. The reverse is *not* possible though: immutable references will +never coerce to mutable ones. Because of the borrowing rules, if you have a +mutable reference, that mutable reference must be the only reference to that +data (otherwise, the program wouldn’t compile). Converting one mutable +reference to one immutable reference will never break the borrowing rules. +Converting an immutable reference to a mutable reference would require that +there was only one immutable reference to that data, and the borrowing rules +don’t guarantee that. Therefore, Rust can’t make the assumption that converting +an immutable reference to a mutable reference is possible. + + + ## The `Drop` Trait Runs Code on Cleanup -The other trait that’s important to the smart pointer pattern is the `Drop` -trait. `Drop` lets us run some code when a value is about to go out of scope. -Smart pointers perform important cleanup when being dropped, like deallocating -memory or decrementing a reference count. More generally, data types can manage -resources beyond memory, like files or network connections, and use `Drop` to -release those resources when our code is done with them. We’re discussing -`Drop` in the context of smart pointers, though, because the functionality of -the `Drop` trait is almost always used when implementing smart pointers. - -In some other languages, we have to remember to call code to free the memory or -resource every time we finish using an instance of a smart pointer. If we -forget, the system our code is running on might get overloaded and crash. In -Rust, we can specify that some code should be run when a value goes out of -scope, and the compiler will insert this code automatically. That means we don’t -need to remember to put this code everywhere we’re done with an instance of -these types, but we still won’t leak resources! - -The way we specify code should be run when a value goes out of scope is by -implementing the `Drop` trait. The `Drop` trait requires us to implement one -method named `drop` that takes a mutable reference to `self`. 
- -Listing 15-8 shows a `CustomSmartPointer` struct that doesn’t actually do -anything, but we’re printing out `CustomSmartPointer created.` right after we -create an instance of the struct and `Dropping CustomSmartPointer!` when the -instance goes out of scope so that we can see when each piece of code gets run. -Instead of a `println!` statement, you’d fill in `drop` with whatever cleanup -code your smart pointer needs to run: +The second trait important to the smart pointer pattern is `Drop`, which lets +us customize what happens when a value is about to go out of scope. We can +provide an implementation for the `Drop` trait on any type, and the code we +specify can be used to release resources like files or network connections. +We’re introducing `Drop` in the context of smart pointers because the +functionality of the `Drop` trait is almost always used when implementing a +smart pointer. For example, `Box` customizes `Drop` in order to deallocate +the space on the heap that the box points to. + +In some languages, the programmer must call code to free memory or resources +every time they finish using an instance of a smart pointer. If they forget, +the system might become overloaded and crash. In Rust, we can specify that a +particular bit of code should be run whenever a value goes out of scope, and +the compiler will insert this code automatically. + + + + +This means we don’t need to be careful about placing clean up code everywhere +in a program that an instance of a particular type is finished with, but we +still won’t leak resources! + +We specify the code to run when a value goes out of scope by implementing the +`Drop` trait. The `Drop` trait requires us to implement one method named `drop` +that takes a mutable reference to `self`. In order to be able to see when Rust +calls `drop`, let’s implement `drop` with `println!` statements for now. + + + + +Listing 15-8 shows a `CustomSmartPointer` struct whose only custom +functionality is that it will print out `Dropping CustomSmartPointer!` when the +instance goes out of scope. This will demonstrate when Rust runs the `drop` +function: + + + Filename: src/main.rs @@ -464,153 +949,233 @@ struct CustomSmartPointer { impl Drop for CustomSmartPointer { fn drop(&mut self) { - println!("Dropping CustomSmartPointer!"); + println!("Dropping CustomSmartPointer with data `{}`!", self.data); } } fn main() { - let c = CustomSmartPointer { data: String::from("some data") }; - println!("CustomSmartPointer created."); - println!("Wait for it..."); + let c = CustomSmartPointer { data: String::from("my stuff") }; + let d = CustomSmartPointer { data: String::from("other stuff") }; + println!("CustomSmartPointers created."); } ``` Listing 15-8: A `CustomSmartPointer` struct that implements the `Drop` trait, -where we could put code that would clean up after the `CustomSmartPointer`. +where we would put our clean up code. + +The `Drop` trait is included in the prelude, so we don’t need to import it. We +implement the `Drop` trait on `CustomSmartPointer`, and provide an +implementation for the `drop` method that calls `println!`. The body of the +`drop` function is where you’d put any logic that you wanted to run when an +instance of your type goes out of scope. We’re choosing to print out some text +here in order to demonstrate when Rust will call `drop`. + + + -The `Drop` trait is in the prelude, so we don’t need to import it. The `drop` -method implementation calls the `println!`; this is where you’d put the actual -code needed to close the socket. 
In `main`, we create a new instance of -`CustomSmartPointer` then print out `CustomSmartPointer created.` to be able to -see that our code got to that point at runtime. At the end of `main`, our -instance of `CustomSmartPointer` will go out of scope. Note that we didn’t call -the `drop` method explicitly. +In `main`, we create a new instance of `CustomSmartPointer` and then print out +`CustomSmartPointer created.`. At the end of `main`, our instance of +`CustomSmartPointer` will go out of scope, and Rust will call the code we put +in the `drop` method, printing our final message. Note that we didn’t need to +call the `drop` method explicitly. -When we run this program, we’ll see: +When we run this program, we’ll see the following output: ``` -CustomSmartPointer created. -Wait for it... -Dropping CustomSmartPointer! +CustomSmartPointers created. +Dropping CustomSmartPointer with data `other stuff`! +Dropping CustomSmartPointer with data `my stuff`! +``` + +Rust automatically called `drop` for us when our instance went out of scope, +calling the code we specified. Variables are dropped in the reverse order of +the order in which they were created, so `d` was dropped before `c`. This is +just to give you a visual guide to how the drop method works, but usually you +would specify the cleanup code that your type needs to run rather than a print +message. + + + + +#### Dropping a Value Early with `std::mem::drop` + + + + +Rust inserts the call to `drop` automatically when a value goes out of scope, +and it’s not straightforward to disable this functionality. Disabling `drop` +isn’t usually necessary; the whole point of the `Drop` trait is that it’s taken +care of automatically for us. Occasionally you may find that you want to clean +up a value early. One example is when using smart pointers that manage locks; +you may want to force the `drop` method that releases the lock to run so that +other code in the same scope can acquire the lock. First, let’s see what +happens if we try to call the `Drop` trait’s `drop` method ourselves by +modifying the `main` function from Listing 15-8 as shown in Listing 15-9: + + + + +Filename: src/main.rs + +``` +fn main() { + let c = CustomSmartPointer { data: String::from("some data") }; + println!("CustomSmartPointer created."); + c.drop(); + println!("CustomSmartPointer dropped before the end of main."); +} +``` + +Listing 15-9: Attempting to call the `drop` method from the `Drop` trait +manually to clean up early + +If we try to compile this, we’ll get this error: + ``` +error[E0040]: explicit use of destructor method + --> src/main.rs:15:7 + | +15 | c.drop(); + | ^^^^ explicit destructor calls not allowed +``` + +This error message says we’re not allowed to explicitly call `drop`. The error +message uses the term *destructor*, which is the general programming term for a +function that cleans up an instance. A *destructor* is analogous to a +*constructor* that creates an instance. The `drop` function in Rust is one +particular destructor. + +Rust doesn’t let us call `drop` explicitly because Rust would still +automatically call `drop` on the value at the end of `main`, and this would be +a *double free* error since Rust would be trying to clean up the same value +twice. -printed to the screen, which shows that Rust automatically called `drop` for us -when our instance went out of scope. 
+Because we can’t disable the automatic insertion of `drop` when a value goes +out of scope, and we can’t call the `drop` method explicitly, if we need to +force a value to be cleaned up early, we can use the `std::mem::drop` function. -We can use the `std::mem::drop` function to drop a value earlier than when it -goes out of scope. This isn’t usually necessary; the whole point of the `Drop` -trait is that it’s taken care of automatically for us. We’ll see an example of -a case when we’ll need to drop a value earlier than when it goes out of scope -in Chapter 16 when we’re talking about concurrency. For now, let’s just see -that it’s possible, and `std::mem::drop` is in the prelude so we can just call -`drop` as shown in Listing 15-9: +The `std::mem::drop` function is different than the `drop` method in the `Drop` +trait. We call it by passing the value we want to force to be dropped early as +an argument. `std::mem::drop` is in the prelude, so we can modify `main` from +Listing 15-8 to call the `drop` function as shown in Listing 15-10: Filename: src/main.rs ``` +# struct CustomSmartPointer { +# data: String, +# } +# +# impl Drop for CustomSmartPointer { +# fn drop(&mut self) { +# println!("Dropping CustomSmartPointer!"); +# } +# } +# fn main() { let c = CustomSmartPointer { data: String::from("some data") }; println!("CustomSmartPointer created."); drop(c); - println!("Wait for it..."); + println!("CustomSmartPointer dropped before the end of main."); } ``` -Listing 15-9: Calling `std::mem::drop` to explicitly drop a value before it +Listing 15-10: Calling `std::mem::drop` to explicitly drop a value before it goes out of scope -Running this code will print the following, showing that the destructor code is -called since `Dropping CustomSmartPointer!` is printed between -`CustomSmartPointer created.` and `Wait for it...`: +Running this code will print the following: ``` CustomSmartPointer created. Dropping CustomSmartPointer! -Wait for it... +CustomSmartPointer dropped before the end of main. ``` -Note that we aren’t allowed to call the `drop` method that we defined directly: -if we replaced `drop(c)` in Listing 15-9 with `c.drop()`, we’ll get a compiler -error that says `explicit destructor calls not allowed`. We’re not allowed to -call `Drop::drop` directly because when Rust inserts its call to `Drop::drop` -automatically when the value goes out of scope, then the value would get -dropped twice. Dropping a value twice could cause an error or corrupt memory, -so Rust doesn’t let us. Instead, we can use `std::mem::drop`, whose definition -is: + + -``` -pub mod std { - pub mod mem { - pub fn drop(x: T) { } - } -} -``` +The `Dropping CustomSmartPointer!` is printed between `CustomSmartPointer +created.` and `CustomSmartPointer dropped before the end of main.`, showing +that the `drop` method code is called to drop `c` at that point. -This function is generic over any type `T`, so we can pass any value to it. The -function doesn’t actually have anything in its body, so it doesn’t use its -parameter. The reason this empty function is useful is that `drop` takes -ownership of its parameter, which means the value in `x` gets dropped at the -end of this function when `x` goes out of scope. + + -Code specified in a `Drop` trait implementation can be used for many reasons to +Code specified in a `Drop` trait implementation can be used in many ways to make cleanup convenient and safe: we could use it to create our own memory -allocator, for instance! 
By using the `Drop` trait and Rust’s ownership system, -we don’t have to remember to clean up after ourselves since Rust takes care of -it automatically. We’ll get compiler errors if we write code that would clean -up a value that’s still in use, since the ownership system that makes sure +allocator, for instance! With the `Drop` trait and Rust’s ownership system, you +don’t have to remember to clean up after yourself; Rust takes care of it +automatically. + +We also don’t have to worry about accidentally cleaning up values still in use +because that would cause a compiler error: the ownership system that makes sure references are always valid will also make sure that `drop` only gets called -one time when the value is no longer being used. +once when the value is no longer being used. Now that we’ve gone over `Box<T>` and some of the characteristics of smart pointers, let’s talk about a few other smart pointers defined in the standard -library that add different kinds of useful functionality. +library. ## `Rc<T>`, the Reference Counted Smart Pointer -In the majority of cases, ownership is very clear: you know exactly which -variable owns a given value. However, this isn’t always the case; sometimes, -you may actually need multiple owners. For this, Rust has a type called -`Rc<T>`. Its name is an abbreviation for *reference counting*. Reference -counting means keeping track of the number of references to a value in order to -know if a value is still in use or not. If there are zero references to a -value, we know we can clean up the value without any references becoming -invalid. - -To think about this in terms of a real-world scenario, it’s like a TV in a -family room. When one person comes in the room to watch TV, they turn it on. -Others can also come in the room and watch the TV. When the last person leaves -the room, they’ll turn the TV off since it’s no longer being used. If someone -turns off the TV while others are still watching it, though, the people -watching the TV would get mad! - -`Rc<T>` is for use when we want to allocate some data on the heap for multiple +In the majority of cases, ownership is clear: you know exactly which variable +owns a given value. However, there are cases when a single value may have +multiple owners. For example, in graph data structures, multiple edges may +point to the same node, and that node is conceptually owned by all of the edges +that point to it. A node shouldn’t be cleaned up unless it doesn’t have any +edges pointing to it. + + + + +In order to enable multiple ownership, Rust has a type called `Rc<T>`. Its name +is an abbreviation for reference counting. *Reference counting* means keeping +track of the number of references to a value in order to know if a value is +still in use or not. If there are zero references to a value, the value can be +cleaned up without any references becoming invalid. + +Imagine it like a TV in a family room. When one person enters to watch TV, they +turn it on. Others can come into the room and watch the TV. When the last +person leaves the room, they turn the TV off because it’s no longer being used. +If someone turns the TV off while others are still watching it, there’d be +uproar from the remaining TV watchers! + +`Rc<T>` is used when we want to allocate some data on the heap for multiple parts of our program to read, and we can’t determine at compile time which part -of our program using this data will finish using it last.
If we knew which part -would finish last, we could make that part the owner of the data and the normal -ownership rules enforced at compile time would kick in. +will finish using the data last. If we did know which part would finish last, +we could just make that the owner of the data and the normal ownership rules +enforced at compile time would kick in. -Note that `Rc` is only for use in single-threaded scenarios; the next -chapter on concurrency will cover how to do reference counting in -multithreaded programs. If you try to use `Rc` with multiple threads, -you’ll get a compile-time error. +Note that `Rc` is only for use in single-threaded scenarios; Chapter 16 on +concurrency will cover how to do reference counting in multithreaded programs. ### Using `Rc` to Share Data -Let’s return to our cons list example from Listing 15-5. In Listing 15-11, we’re -going to try to use `List` as we defined it using `Box`. First we’ll create -one list instance that contains 5 and then 10. Next, we want to create two more -lists: one that starts with 3 and continues on to our first list containing 5 -and 10, then another list that starts with 4 and *also* continues on to our -first list containing 5 and 10. In other words, we want two lists that both -share ownership of the third list, which conceptually will be something like -Figure 15-10: +Let’s return to our cons list example from Listing 15-6, as we defined it using +`Box`. This time, we want to create two lists that both share ownership of a +third list, which conceptually will look something like Figure 15-11: Two lists that share ownership of a third list -Figure 15-10: Two lists, `b` and `c`, sharing ownership of a third list, `a` +Figure 15-11: Two lists, `b` and `c`, sharing ownership of a third list, `a` + +We’ll create list `a` that contains 5 and then 10, then make two more lists: +`b` that starts with 3 and `c` that starts with 4. Both `b` and `c` lists will +then continue on to the first `a` list containing 5 and 10. In other words, +both lists will try to share the first list containing 5 and 10. Trying to implement this using our definition of `List` with `Box` won’t -work, as shown in Listing 15-11: +work, as shown in Listing 15-12: Filename: src/main.rs @@ -631,8 +1196,8 @@ fn main() { } ``` -Listing 15-11: Having two lists using `Box` that try to share ownership of a -third list won’t work +Listing 15-12: Demonstrating we’re not allowed to have two lists using `Box` +that try to share ownership of a third list If we compile this, we get this error: @@ -649,17 +1214,32 @@ error[E0382]: use of moved value: `a` implement the `Copy` trait ``` -The `Cons` variants own the data they hold, so when we create the `b` list it -moves `a` to be owned by `b`. Then when we try to use `a` again when creating -`c`, we’re not allowed to since `a` has been moved. +The `Cons` variants own the data they hold, so when we create the `b` list, `a` +is moved into `b` and `b` owns `a`. Then, when we try to use `a` again when +creating `c`, we’re not allowed to because `a` has been moved. We could change the definition of `Cons` to hold references instead, but then -we’d have to specify lifetime parameters and we’d have to construct elements of -a list such that every element lives at least as long as the list itself. -Otherwise, the borrow checker won’t even let us compile the code. - -Instead, we can change our definition of `List` to use `Rc` instead of -`Box` as shown here in Listing 15-12: +we’d have to specify lifetime parameters. 
By specifying lifetime parameters, +we’d be specifying that every element in the list will live at least as long as +the list itself. The borrow checker wouldn’t let us compile `let a = Cons(10, +&Nil);` for example, since the temporary `Nil` value would be dropped before +`a` could take a reference to it. + +Instead, we’ll change our definition of `List` to use `Rc` in place of +`Box` as shown here in Listing 15-13. Each `Cons` variant now holds a value +and an `Rc` pointing to a `List`. When we create `b`, instead of taking +ownership of `a`, we clone the `Rc` that `a` is holding, which increases the +number of references from 1 to 2 and lets `a` and `b` share ownership of the +data in that `Rc`. We also clone `a` when creating `c`, which increases the +number of references from 2 to 3. Every time we call `Rc::clone`, the reference +count to the data within the `Rc` is increased, and the data won’t be cleaned +up unless there are zero references to it: + + + Filename: src/main.rs @@ -674,94 +1254,149 @@ use std::rc::Rc; fn main() { let a = Rc::new(Cons(5, Rc::new(Cons(10, Rc::new(Nil))))); - let b = Cons(3, a.clone()); - let c = Cons(4, a.clone()); + let b = Cons(3, Rc::clone(&a)); + let c = Cons(4, Rc::clone(&a)); } ``` -Listing 15-12: A definition of `List` that uses `Rc` +Listing 15-13: A definition of `List` that uses `Rc` -Note that we need to add a `use` statement for `Rc` because it’s not in the -prelude. In `main`, we create the list holding 5 and 10 and store it in a new -`Rc` in `a`. Then when we create `b` and `c`, we call the `clone` method on `a`. +We need to add a `use` statement to bring `Rc` into scope because it’s not in +the prelude. In `main`, we create the list holding 5 and 10 and store it in a +new `Rc` in `a`. Then when we create `b` and `c`, we call the `Rc::clone` +function and pass a reference to the `Rc` in `a` as an argument. + +We could have called `a.clone()` rather than `Rc::clone(&a)`, but Rust +convention is to use `Rc::clone` in this case. The implementation of `clone` +doesn’t make a deep copy of all the data like most types’ implementations of +`clone` do. `Rc::clone` only increments the reference count, which doesn’t take +very much time. Deep copies of data can take a lot of time, so by using +`Rc::clone` for reference counting, we can visually distinguish between the +deep copy kinds of clones that might have a large impact on runtime performance +and memory usage and the types of clones that increase the reference count that +have a comparatively small impact on runtime performance and don’t allocate new +memory. ### Cloning an `Rc` Increases the Reference Count -We’ve seen the `clone` method previously, where we used it for making a -complete copy of some data. With `Rc`, though, it doesn’t make a full copy. -`Rc` holds a *reference count*, that is, a count of how many clones exist. -Let’s change `main` as shown in Listing 15-13 to have an inner scope around -where we create `c`, and to print out the results of the `Rc::strong_count` -associated function at various points. `Rc::strong_count` returns the reference -count of the `Rc` value we pass to it, and we’ll talk about why this function -is named `strong_count` in the section later in this chapter about preventing -reference cycles. +Let’s change our working example from Listing 15-13 so that we can see the +reference counts changing as we create and drop references to the `Rc` in `a`. 
+ + + + +In Listing 15-14, we’ll change `main` so that it has an inner scope around list +`c`, so that we can see how the reference count changes when `c` goes out of +scope. At each point in the program where the reference count changes, we’ll +print out the reference count, which we can get by calling the +`Rc::strong_count` function. We’ll talk about why this function is named +`strong_count` rather than `count` in the section later in this chapter about +preventing reference cycles. + + + Filename: src/main.rs ``` +# enum List { +# Cons(i32, Rc), +# Nil, +# } +# +# use List::{Cons, Nil}; +# use std::rc::Rc; +# fn main() { let a = Rc::new(Cons(5, Rc::new(Cons(10, Rc::new(Nil))))); - println!("rc = {}", Rc::strong_count(&a)); - let b = Cons(3, a.clone()); - println!("rc after creating b = {}", Rc::strong_count(&a)); + println!("count after creating a = {}", Rc::strong_count(&a)); + let b = Cons(3, Rc::clone(&a)); + println!("count after creating b = {}", Rc::strong_count(&a)); { - let c = Cons(4, a.clone()); - println!("rc after creating c = {}", Rc::strong_count(&a)); + let c = Cons(4, Rc::clone(&a)); + println!("count after creating c = {}", Rc::strong_count(&a)); } - println!("rc after c goes out of scope = {}", Rc::strong_count(&a)); + println!("count after c goes out of scope = {}", Rc::strong_count(&a)); } ``` -Listing 15-13: Printing out the reference count +Listing 15-14: Printing out the reference count This will print out: ``` -rc = 1 -rc after creating b = 2 -rc after creating c = 3 -rc after c goes out of scope = 2 +count after creating a = 1 +count after creating b = 2 +count after creating c = 3 +count after c goes out of scope = 2 ``` -We’re able to see that `a` has an initial reference count of one. Then each -time we call `clone`, the count goes up by one. When `c` goes out of scope, the -count is decreased by one, which happens in the implementation of the `Drop` -trait for `Rc`. What we can’t see in this example is that when `b` and then -`a` go out of scope at the end of `main`, the count of references to the list -containing 5 and 10 is then 0, and the list is dropped. This strategy lets us -have multiple owners, as the count will ensure that the value remains valid as -long as any of the owners still exist. + + + +We’re able to see that the `Rc` in `a` has an initial reference count of one, +then each time we call `clone`, the count goes up by one. When `c` goes out of +scope, the count goes down by one. We don’t have to call a function to decrease +the reference count like we have to call `Rc::clone` to increase the reference +count; the implementation of the `Drop` trait decreases the reference count +automatically when an `Rc` value goes out of scope. -In the beginning of this section, we said that `Rc` only allows you to share -data for multiple parts of your program to read through immutable references to -the `T` value the `Rc` contains. If `Rc` let us have a mutable reference, -we’d run into the problem that the borrowing rules disallow that we discussed -in Chapter 4: two mutable borrows to the same place can cause data races and -inconsistencies. But mutating data is very useful! In the next section, we’ll -discuss the interior mutability pattern and the `RefCell` type that we can -use in conjunction with an `Rc` to work with this restriction on -immutability. +What we can’t see from this example is that when `b` and then `a` go out of +scope at the end of `main`, the count is then 0, and the `Rc` is cleaned up +completely at that point. 
Using `Rc<T>` allows a single value to have multiple +owners, and the count will ensure that the value remains valid as long as any +of the owners still exist. + +`Rc<T>` allows us to share data between multiple parts of our program for +reading only, via immutable references. If `Rc<T>` allowed us to have multiple +mutable references too, we’d be able to violate one of the borrowing rules +that we discussed in Chapter 4: multiple mutable borrows to the same place can +cause data races and inconsistencies. But being able to mutate data is very +useful! In the next section, we’ll discuss the interior mutability pattern and +the `RefCell<T>` type that we can use in conjunction with an `Rc<T>` to work +with this restriction on immutability. ## `RefCell<T>` and the Interior Mutability Pattern + + + + + + *Interior mutability* is a design pattern in Rust for allowing you to mutate -data even though there are immutable references to that data, which would -normally be disallowed by the borrowing rules. The interior mutability pattern -involves using `unsafe` code inside a data structure to bend Rust’s usual rules -around mutation and borrowing. We haven’t yet covered unsafe code; we will in -Chapter 19. The interior mutability pattern is used when you can ensure that -the borrowing rules will be followed at runtime, even though the compiler can’t +data even when there are immutable references to that data, normally disallowed +by the borrowing rules. To do so, the pattern uses `unsafe` code inside a data +structure to bend Rust’s usual rules around mutation and borrowing. We haven’t +yet covered unsafe code; we will in Chapter 19. We can choose to use types that +make use of the interior mutability pattern when we can ensure that the +borrowing rules will be followed at runtime, even though the compiler can’t ensure that. The `unsafe` code involved is then wrapped in a safe API, and the outer type is still immutable. Let’s explore this by looking at the `RefCell<T>` type that follows the interior mutability pattern. -### `RefCell<T>` has Interior Mutability +### Enforcing Borrowing Rules at Runtime with `RefCell<T>` Unlike `Rc<T>`, the `RefCell<T>` type represents single ownership over the data -that it holds. So, what makes `RefCell<T>` different than a type like `Box<T>`? +it holds. So, what makes `RefCell<T>` different than a type like `Box<T>`? Let’s recall the borrowing rules we learned in Chapter 4: 1. At any given time, you can have *either* but not both of: @@ -774,162 +1409,405 @@ compile time. With `RefCell<T>`, these invariants are enforced *at runtime*. With references, if you break these rules, you’ll get a compiler error. With `RefCell<T>`, if you break these rules, you’ll get a `panic!`. -Static analysis, like the Rust compiler performs, is inherently conservative. -There are properties of code that are impossible to detect by analyzing the -code: the most famous is the Halting Problem, which is out of scope of this -book but an interesting topic to research if you’re interested. - -Because some analysis is impossible, the Rust compiler does not try to even -guess if it can’t be sure, so it’s conservative and sometimes rejects correct -programs that would not actually violate Rust’s guarantees. Put another way, if -Rust accepts an incorrect program, people would not be able to trust in the -guarantees Rust makes. If Rust rejects a correct program, the programmer will -be inconvenienced, but nothing catastrophic can occur.
`RefCell` is useful -when you know that the borrowing rules are respected, but the compiler can’t -understand that that’s true. - -Similarly to `Rc`, `RefCell` is only for use in single-threaded -scenarios. We’ll talk about how to get the functionality of `RefCell` in a -multithreaded program in the next chapter on concurrency. For now, all you -need to know is that if you try to use `RefCell` in a multithreaded -context, you’ll get a compile time error. - -With references, we use the `&` and `&mut` syntax to create references and -mutable references, respectively. But with `RefCell`, we use the `borrow` -and `borrow_mut` methods, which are part of the safe API that `RefCell` has. -`borrow` returns the smart pointer type `Ref`, and `borrow_mut` returns the -smart pointer type `RefMut`. These two types implement `Deref` so that we can -treat them as if they’re regular references. `Ref` and `RefMut` track the -borrows dynamically, and their implementation of `Drop` releases the borrow -dynamically. - -Listing 15-14 shows what it looks like to use `RefCell` with functions that -borrow their parameters immutably and mutably. Note that the `data` variable is -declared as immutable with `let data` rather than `let mut data`, yet -`a_fn_that_mutably_borrows` is allowed to borrow the data mutably and make -changes to the data! - -Filename: src/main.rs + + + +The advantages to checking the borrowing rules at compile time are that errors +will be caught sooner in the development process and there is no impact on +runtime performance since all the analysis is completed beforehand. For those +reasons, checking the borrowing rules at compile time is the best choice for +the majority of cases, which is why this is Rust’s default. + +The advantage to checking the borrowing rules at runtime instead is that +certain memory safe scenarios are then allowed, whereas they are disallowed by +the compile time checks. Static analysis, like the Rust compiler, is inherently +conservative. Some properties of code are impossible to detect by analyzing the +code: the most famous example is the Halting Problem, which is out of scope of +this book but an interesting topic to research if you’re interested. + + + + +Because some analysis is impossible, if the Rust compiler can’t be sure the +code complies with the ownership rules, it may reject a correct program; in +this way, it is conservative. If Rust were to accept an incorrect program, +users would not be able to trust in the guarantees Rust makes. However, if Rust +rejects a correct program, the programmer will be inconvenienced, but nothing +catastrophic can occur. `RefCell` is useful when you yourself are sure that +your code follows the borrowing rules, but the compiler is not able to +understand and guarantee that. + +Similarly to `Rc`, `RefCell` is only for use in single-threaded scenarios +and will give you a compile time error if you try in a multithreaded context. +We’ll talk about how to get the functionality of `RefCell` in a +multithreaded program in Chapter 16. + + + + +To recap the reasons to choose `Box`, `Rc`, or `RefCell`: + +- `Rc` enables multiple owners of the same data; `Box` and `RefCell` + have single owners. +- `Box` allows immutable or mutable borrows checked at compile time; `Rc` + only allows immutable borrows checked at compile time; `RefCell` allows + immutable or mutable borrows checked at runtime. 
+- Because `RefCell` allows mutable borrows checked at runtime, we can mutate + the value inside the `RefCell` even when the `RefCell` is itself + immutable. + +The last reason is the *interior mutability* pattern. Let’s look at a case when +interior mutability is useful and discuss how this is possible. + +### Interior Mutability: A Mutable Borrow to an Immutable Value + +A consequence of the borrowing rules is that when we have an immutable value, +we can’t borrow it mutably. For example, this code won’t compile: ``` -use std::cell::RefCell; - -fn a_fn_that_immutably_borrows(a: &i32) { - println!("a is {}", a); +fn main() { + let x = 5; + let y = &mut x; } +``` + +If we try to compile this, we’ll get this error: -fn a_fn_that_mutably_borrows(b: &mut i32) { - *b += 1; +``` +error[E0596]: cannot borrow immutable local variable `x` as mutable + --> src/main.rs:3:18 + | +2 | let x = 5; + | - consider changing this to `mut x` +3 | let y = &mut x; + | ^ cannot borrow mutably +``` + +However, there are situations where it would be useful for a value to be able +to mutate itself in its methods, but to other code, the value would appear to +be immutable. Code outside the value’s methods would not be able to mutate the +value. `RefCell` is one way to get the ability to have interior mutability. +`RefCell` isn’t getting around the borrowing rules completely, but the +borrow checker in the compiler allows this interior mutability and the +borrowing rules are checked at runtime instead. If we violate the rules, we’ll +get a `panic!` instead of a compiler error. + +Let’s work through a practical example where we can use `RefCell` to make it +possible to mutate an immutable value and see why that’s useful. + +#### A Use Case for Interior Mutability: Mock Objects + +A *test double* is the general programming concept for a type that stands in +the place of another type during testing. *Mock objects* are specific types of +test doubles that record what happens during a test so that we can assert that +the correct actions took place. + +While Rust doesn’t have objects in the exact same sense that other languages +have objects, and Rust doesn’t have mock object functionality built into the +standard library like some other languages do, we can definitely create a +struct that will serve the same purposes as a mock object. + +Here’s the scenario we’d like to test: we’re creating a library that tracks a +value against a maximum value, and sends messages based on how close to the +maximum value the current value is. This could be used for keeping track of a +user’s quota for the number of API calls they’re allowed to make, for example. + +Our library is only going to provide the functionality of tracking how close to +the maximum a value is, and what the messages should be at what times. +Applications that use our library will be expected to provide the actual +mechanism for sending the messages: the application could choose to put a +message in the application, send an email, send a text message, or something +else. Our library doesn’t need to know about that detail; all it needs is +something that implements a trait we’ll provide called `Messenger`. 
Listing +15-15 shows our library code: + +Filename: src/lib.rs + +``` +pub trait Messenger { + fn send(&self, msg: &str); } -fn demo(r: &RefCell) { - a_fn_that_immutably_borrows(&r.borrow()); - a_fn_that_mutably_borrows(&mut r.borrow_mut()); - a_fn_that_immutably_borrows(&r.borrow()); +pub struct LimitTracker<'a, T: 'a + Messenger> { + messenger: &'a T, + value: usize, + max: usize, } -fn main() { - let data = RefCell::new(5); - demo(&data); +impl<'a, T> LimitTracker<'a, T> + where T: Messenger { + pub fn new(messenger: &T, max: usize) -> LimitTracker { + LimitTracker { + messenger, + value: 0, + max, + } + } + + pub fn set_value(&mut self, value: usize) { + self.value = value; + + let percentage_of_max = self.value as f64 / self.max as f64; + + if percentage_of_max >= 0.75 && percentage_of_max < 0.9 { + self.messenger.send("Warning: You've used up over 75% of your quota!"); + } else if percentage_of_max >= 0.9 && percentage_of_max < 1.0 { + self.messenger.send("Urgent warning: You've used up over 90% of your quota!"); + } else if percentage_of_max >= 1.0 { + self.messenger.send("Error: You are over your quota!"); + } + } } ``` -Listing 15-14: Using `RefCell`, `borrow`, and `borrow_mut` +Listing 15-15: A library to keep track of how close to a maximum value a value +is, and warn when the value is at certain levels -This example prints: +One important part of this code is that the `Messenger` trait has one method, +`send`, that takes an immutable reference to `self` and text of the message. +This is the interface our mock object will need to have. The other important +part is that we want to test the behavior of the `set_value` method on the +`LimitTracker`. We can change what we pass in for the `value` parameter, but +`set_value` doesn’t return anything for us to make assertions on. What we want +to be able to say is that if we create a `LimitTracker` with something that +implements the `Messenger` trait and a particular value for `max`, when we pass +different numbers for `value`, the messenger gets told to send the appropriate +messages. + +What we need is a mock object that, instead of actually sending an email or +text message when we call `send`, will only keep track of the messages it’s +told to send. We can create a new instance of the mock object, create a +`LimitTracker` that uses the mock object, call the `set_value` method on +`LimitTracker`, then check that the mock object has the messages we expect. +Listing 15-16 shows an attempt of implementing a mock object to do just that, +but that the borrow checker won’t allow: + +Filename: src/lib.rs ``` -a is 5 -a is 6 -``` +#[cfg(test)] +mod tests { + use super::*; -In `main`, we’ve created a new `RefCell` containing the value 5, and stored -in the variable `data`, declared without the `mut` keyword. We then call the -`demo` function with an immutable reference to `data`: as far as `main` is -concerned, `data` is immutable! + struct MockMessenger { + sent_messages: Vec, + } -In the `demo` function, we get an immutable reference to the value inside the -`RefCell` by calling the `borrow` method, and we call -`a_fn_that_immutably_borrows` with that immutable reference. More -interestingly, we can get a *mutable* reference to the value inside the -`RefCell` with the `borrow_mut` method, and the function -`a_fn_that_mutably_borrows` is allowed to change the value. We can see that the -next time we call `a_fn_that_immutably_borrows` that prints out the value, it’s -6 instead of 5. 
+ impl MockMessenger { + fn new() -> MockMessenger { + MockMessenger { sent_messages: vec![] } + } + } -### Borrowing Rules are Checked at Runtime on `RefCell` + impl Messenger for MockMessenger { + fn send(&self, message: &str) { + self.sent_messages.push(String::from(message)); + } + } -Recall from Chapter 4 that because of the borrowing rules, this code using -regular references that tries to create two mutable borrows in the same scope -won’t compile: + #[test] + fn it_sends_an_over_75_percent_warning_message() { + let mock_messenger = MockMessenger::new(); + let mut limit_tracker = LimitTracker::new(&mock_messenger, 100); -``` -let mut s = String::from("hello"); + limit_tracker.set_value(80); -let r1 = &mut s; -let r2 = &mut s; + assert_eq!(mock_messenger.sent_messages.len(), 1); + } +} ``` -We’ll get this compiler error: +Listing 15-16: An attempt to implement a `MockMessenger` that isn’t allowed by +the borrow checker + +This test code defines a `MockMessenger` struct that has a `sent_messages` +field with a `Vec` of `String` values to keep track of the messages it’s told +to send. We also defined an associated function `new` to make it convenient to +create new `MockMessenger` values that start with an empty list of messages. We +then implement the `Messenger` trait for `MockMessenger` so that we can give a +`MockMessenger` to a `LimitTracker`. In the definition of the `send` method, we +take the message passed in as a parameter and store it in the `MockMessenger` +list of `sent_messages`. + +In the test, we’re testing what happens when the `LimitTracker` is told to set +`value` to something that’s over 75% of the `max` value. First, we create a new +`MockMessenger`, which will start with an empty list of messages. Then we +create a new `LimitTracker` and give it a reference to the new `MockMessenger` +and a `max` value of 100. We call the `set_value` method on the `LimitTracker` +with a value of 80, which is more than 75% of 100. Then we assert that the list +of messages that the `MockMessenger` is keeping track of should now have one +message in it. + +There’s one problem with this test, however: ``` -error[E0499]: cannot borrow `s` as mutable more than once at a time - --> - | -5 | let r1 = &mut s; - | - first mutable borrow occurs here -6 | let r2 = &mut s; - | ^ second mutable borrow occurs here -7 | } - | - first borrow ends here +error[E0596]: cannot borrow immutable field `self.sent_messages` as mutable + --> src/lib.rs:46:13 + | +45 | fn send(&self, message: &str) { + | ----- use `&mut self` here to make mutable +46 | self.sent_messages.push(String::from(message)); + | ^^^^^^^^^^^^^^^^^^ cannot mutably borrow immutable field ``` -In contrast, using `RefCell` and calling `borrow_mut` twice in the same -scope *will* compile, but it’ll panic at runtime instead. This code: +We can’t modify the `MockMessenger` to keep track of the messages because the +`send` method takes an immutable reference to `self`. We also can’t take the +suggestion from the error text to use `&mut self` instead because then the +signature of `send` wouldn’t match the signature in the `Messenger` trait +definition (feel free to try and see what error message you get). + +This is where interior mutability can help! We’re going to store the +`sent_messages` within a `RefCell`, and then the `send` message will be able to +modify `sent_messages` to store the messages we’ve seen. 
Listing 15-17 shows +what that looks like: + +Filename: src/lib.rs ``` -use std::cell::RefCell; +#[cfg(test)] +mod tests { + use super::*; + use std::cell::RefCell; -fn main() { - let s = RefCell::new(String::from("hello")); + struct MockMessenger { + sent_messages: RefCell>, + } + + impl MockMessenger { + fn new() -> MockMessenger { + MockMessenger { sent_messages: RefCell::new(vec![]) } + } + } + + impl Messenger for MockMessenger { + fn send(&self, message: &str) { + self.sent_messages.borrow_mut().push(String::from(message)); + } + } + + #[test] + fn it_sends_an_over_75_percent_warning_message() { + // ...snip... +# let mock_messenger = MockMessenger::new(); +# let mut limit_tracker = LimitTracker::new(&mock_messenger, 100); +# limit_tracker.set_value(75); + + assert_eq!(mock_messenger.sent_messages.borrow().len(), 1); + } +} +``` + +Listing 15-17: Using `RefCell` to be able to mutate an inner value while the +outer value is considered immutable + +The `sent_messages` field is now of type `RefCell>` instead of +`Vec`. In the `new` function, we create a new `RefCell` instance around +the empty vector. + +For the implementation of the `send` method, the first parameter is still an +immutable borrow of `self`, which matches the trait definition. We call +`borrow_mut` on the `RefCell` in `self.sent_messages` to get a mutable +reference to the value inside the `RefCell`, which is the vector. Then we can +call `push` on the mutable reference to the vector in order to keep track of +the messages seen during the test. + +The last change we have to make is in the assertion: in order to see how many +items are in the inner vector, we call `borrow` on the `RefCell` to get an +immutable reference to the vector. + +Now that we’ve seen how to use `RefCell`, let’s dig into how it works! - let r1 = s.borrow_mut(); - let r2 = s.borrow_mut(); +#### `RefCell` Keeps Track of Borrows at Runtime + +When creating immutable and mutable references we use the `&` and `&mut` +syntax, respectively. With `RefCell`, we use the `borrow` and `borrow_mut` +methods, which are part of the safe API that belongs to `RefCell`. The +`borrow` method returns the smart pointer type `Ref`, and `borrow_mut` returns +the smart pointer type `RefMut`. Both types implement `Deref` so we can treat +them like regular references. + + + + +The `RefCell` keeps track of how many `Ref` and `RefMut` smart pointers are +currently active. Every time we call `borrow`, the `RefCell` increases its +count of how many immutable borrows are active. When a `Ref` value goes out of +scope, the count of immutable borrows goes down by one. Just like the compile +time borrowing rules, `RefCell` lets us have many immutable borrows or one +mutable borrow at any point in time. + +If we try to violate these rules, rather than getting a compiler error like we +would with references, the implementation of `RefCell` will `panic!` at +runtime. 
Listing 15-18 shows a modification to the implementation of `send` +from Listing 15-17 where we’re deliberately trying to create two mutable +borrows active for the same scope in order to illustrate that `RefCell` +prevents us from doing this at runtime: + +Filename: src/lib.rs + +``` +impl Messenger for MockMessenger { + fn send(&self, message: &str) { + let mut one_borrow = self.sent_messages.borrow_mut(); + let mut two_borrow = self.sent_messages.borrow_mut(); + + one_borrow.push(String::from(message)); + two_borrow.push(String::from(message)); + } } ``` -compiles but panics with the following error when we `cargo run`: +Listing 15-18: Creating two mutable references in the same scope to see that +`RefCell` will panic + +We create a variable `one_borrow` for the `RefMut` smart pointer returned from +`borrow_mut`. Then we create another mutable borrow in the same way in the +variable `two_borrow`. This makes two mutable references in the same scope, +which isn’t allowed. If we run the tests for our library, this code will +compile without any errors, but the test will fail: ``` - Finished dev [unoptimized + debuginfo] target(s) in 0.83 secs - Running `target/debug/refcell` -thread 'main' panicked at 'already borrowed: BorrowMutError', -/stable-dist-rustc/build/src/libcore/result.rs:868 +---- tests::it_sends_an_over_75_percent_warning_message stdout ---- + thread 'tests::it_sends_an_over_75_percent_warning_message' panicked at + 'already borrowed: BorrowMutError', src/libcore/result.rs:906:4 note: Run with `RUST_BACKTRACE=1` for a backtrace. ``` -This runtime `BorrowMutError` is similar to the compiler error: it says we’ve -already borrowed `s` mutably once, so we’re not allowed to borrow it again. We -aren’t getting around the borrowing rules, we’re just choosing to have Rust -enforce them at runtime instead of compile time. You could choose to use -`RefCell` everywhere all the time, but in addition to having to type -`RefCell` a lot, you’d find out about possible problems later (possibly in -production rather than during development). Also, checking the borrowing rules -while your program is running has a performance penalty. - -### Multiple Owners of Mutable Data by Combining `Rc` and `RefCell` - -So why would we choose to make the tradeoffs that using `RefCell` involves? -Well, remember when we said that `Rc` only lets you have an immutable -reference to `T`? Given that `RefCell` is immutable, but has interior -mutability, we can combine `Rc` and `RefCell` to get a type that’s both -reference counted and mutable. Listing 15-15 shows an example of how to do -that, again going back to our cons list from Listing 15-5. In this example, -instead of storing `i32` values in the cons list, we’ll be storing -`Rc>` values. We want to store that type so that we can have an -owner of the value that’s not part of the list (the multiple owners -functionality that `Rc` provides), and so we can mutate the inner `i32` -value (the interior mutability functionality that `RefCell` provides): +We can see that the code panicked with the message `already borrowed: +BorrowMutError`. This is how `RefCell` handles violations of the borrowing +rules at runtime. + +Catching borrowing errors at runtime rather than compile time means that we’d +find out that we made a mistake in our code later in the development process-- +and possibly not even until our code was deployed to production. 
There’s also a +small runtime performance penalty our code will incur as a result of keeping +track of the borrows at runtime rather than compile time. However, using +`RefCell` made it possible for us to write a mock object that can modify itself +to keep track of the messages it has seen while we’re using it in a context +where only immutable values are allowed. We can choose to use `RefCell` +despite its tradeoffs to get more abilities than regular references give us. + +### Having Multiple Owners of Mutable Data by Combining `Rc` and `RefCell` + +A common way to use `RefCell` is in combination with `Rc`. Recall that +`Rc` lets us have multiple owners of some data, but it only gives us +immutable access to that data. If we have an `Rc` that holds a `RefCell`, +then we can get a value that can have multiple owners *and* that we can mutate! + + + + +For example, recall the cons list example from Listing 15-13 where we used +`Rc` to let us have multiple lists share ownership of another list. Because +`Rc` only holds immutable values, we aren’t able to change any of the values +in the list once we’ve created them. Let’s add in `RefCell` to get the +ability to change the values in the lists. Listing 15-19 shows that by using a +`RefCell` in the `Cons` definition, we’re allowed to modify the value stored +in all the lists: Filename: src/main.rs @@ -947,83 +1825,89 @@ use std::cell::RefCell; fn main() { let value = Rc::new(RefCell::new(5)); - let a = Cons(value.clone(), Rc::new(Nil)); - let shared_list = Rc::new(a); + let a = Rc::new(Cons(Rc::clone(&value), Rc::new(Nil))); - let b = Cons(Rc::new(RefCell::new(6)), shared_list.clone()); - let c = Cons(Rc::new(RefCell::new(10)), shared_list.clone()); + let b = Cons(Rc::new(RefCell::new(6)), Rc::clone(&a)); + let c = Cons(Rc::new(RefCell::new(10)), Rc::clone(&a)); *value.borrow_mut() += 10; - println!("shared_list after = {:?}", shared_list); + println!("a after = {:?}", a); println!("b after = {:?}", b); println!("c after = {:?}", c); } ``` -Listing 15-15: Using `Rc>` to create a `List` that we can mutate +Listing 15-19: Using `Rc>` to create a `List` that we can mutate + +We create a value that’s an instance of `Rc` and store it in a +variable named `value` so we can access it directly later. Then we create a +`List` in `a` with a `Cons` variant that holds `value`. We need to clone +`value` so that both `a` and `value` have ownership of the inner `5` value, +rather than transferring ownership from `value` to `a` or having `a` borrow +from `value`. -We’re creating a value, which is an instance of `Rc>`. We’re -storing it in a variable named `value` because we want to be able to access it -directly later. Then we create a `List` in `a` that has a `Cons` variant that -holds `value`, and `value` needs to be cloned since we want `value` to also -have ownership in addition to `a`. Then we wrap `a` in an `Rc` so that we -can create lists `b` and `c` that start differently but both refer to `a`, -similarly to what we did in Listing 15-12. + + -Once we have the lists in `shared_list`, `b`, and `c` created, then we add 10 -to the 5 in `value` by dereferencing the `Rc` and calling `borrow_mut` on -the `RefCell`. +We wrap the list `a` in an `Rc` so that when we create lists `b` and +`c`, they can both refer to `a`, the same as we did in Listing 15-13. -When we print out `shared_list`, `b`, and `c`, we can see that they all have -the modified value of 15: +Once we have the lists in `a`, `b`, and `c` created, we add 10 to the value in +`value`. 
We do this by calling `borrow_mut` on `value`, which uses the +automatic dereferencing feature we discussed in Chapter 5 (“Where’s the `->` +Operator?”) to dereference the `Rc` to the inner `RefCell` value. The +`borrow_mut` method returns a `RefMut` smart pointer, and we use the +dereference operator on it and change the inner value. + +When we print out `a`, `b`, and `c`, we can see that they all have the modified +value of 15 rather than 5: ``` -shared_list after = Cons(RefCell { value: 15 }, Nil) +a after = Cons(RefCell { value: 15 }, Nil) b after = Cons(RefCell { value: 6 }, Cons(RefCell { value: 15 }, Nil)) c after = Cons(RefCell { value: 10 }, Cons(RefCell { value: 15 }, Nil)) ``` -This is pretty neat! By using `RefCell`, we can have an outwardly immutable +This is pretty neat! By using `RefCell`, we have an outwardly immutable `List`, but we can use the methods on `RefCell` that provide access to its -interior mutability to be able to modify our data when we need to. The runtime -checks of the borrowing rules that `RefCell` does protect us from data -races, and we’ve decided that we want to trade a bit of speed for the -flexibility in our data structures. - -`RefCell` is not the only standard library type that provides interior -mutability. `Cell` is similar but instead of giving references to the inner -value like `RefCell` does, the value is copied in and out of the `Cell`. -`Mutex` offers interior mutability that is safe to use across threads, and -we’ll be discussing its use in the next chapter on concurrency. Check out the -standard library docs for more details on the differences between these types. - -## Creating Reference Cycles and Leaking Memory is Safe - -Rust makes a number of guarantees that we’ve talked about, for example that -we’ll never have a null value, and data races will be disallowed at compile -time. Rust’s memory safety guarantees make it more difficult to create memory -that never gets cleaned up, which is known as a *memory leak*. Rust does not -make memory leaks *impossible*, however, preventing memory leaks is *not* one -of Rust’s guarantees. In other words, memory leaks are memory safe. - -By using `Rc` and `RefCell`, it is possible to create cycles of -references where items refer to each other in a cycle. This is bad because the -reference count of each item in the cycle will never reach 0, and the values -will never be dropped. Let’s take a look at how that might happen and how to -prevent it. - -In Listing 15-16, we’re going to use another variation of the `List` definition -from Listing 15-5. We’re going back to storing an `i32` value as the first -element in the `Cons` variant. The second element in the `Cons` variant is now -`RefCell>`: instead of being able to modify the `i32` value this time, -we want to be able to modify which `List` a `Cons` variant is pointing to. -We’ve also added a `tail` method to make it convenient for us to access the -second item, if we have a `Cons` variant: +interior mutability so we can modify our data when we need to. The runtime +checks of the borrowing rules protect us from data races, and it’s sometimes +worth trading a bit of speed for this flexibility in our data structures. + +The standard library has other types that provide interior mutability, too, +like `Cell`, which is similar except that instead of giving references to +the inner value, the value is copied in and out of the `Cell`. 
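As a rough illustration (not one of the book's listings), a `Cell<i32>` used as a simple counter might look like this; `get` copies the value out and `set` stores a new one, so no reference to the interior is ever handed out:

```
use std::cell::Cell;

fn main() {
    // The Cell itself is not declared `mut`, yet its contents can still change.
    let count = Cell::new(0);

    count.set(count.get() + 1); // copy the value out, add one, store the result back
    count.set(count.get() + 1);

    assert_eq!(count.get(), 2);
}
```

Because values are copied in and out rather than borrowed, `Cell<T>` has no borrow tracking and can never hit the runtime `panic!` that `RefCell<T>` can.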
There’s also +`Mutex`, which offers interior mutability that’s safe to use across threads, +and we’ll be discussing its use in the next chapter on concurrency. Check out +the standard library docs for more details on the differences between these +types. + +## Reference Cycles Can Leak Memory + +Rust’s memory safety guarantees make it *difficult* to accidentally create +memory that’s never cleaned up, known as a *memory leak*, but not impossible. +Entirely preventing memory leaks is not one of Rust’s guarantees in the same +way that disallowing data races at compile time is, meaning memory leaks are +memory safe in Rust. We can see this with `Rc` and `RefCell`: it’s +possible to create references where items refer to each other in a cycle. This +creates memory leaks because the reference count of each item in the cycle will +never reach 0, and the values will never be dropped. + +### Creating a Reference Cycle + +Let’s take a look at how a reference cycle might happen and how to prevent it, +starting with the definition of the `List` enum and a `tail` method in Listing +15-20: Filename: src/main.rs ``` +use std::rc::Rc; +use std::cell::RefCell; +use List::{Cons, Nil}; + #[derive(Debug)] enum List { Cons(i32, RefCell>), @@ -1040,37 +1924,71 @@ impl List { } ``` -Listing 15-16: A cons list definition that holds a `RefCell` so that we can +Listing 15-20: A cons list definition that holds a `RefCell` so that we can modify what a `Cons` variant is referring to -Next, in Listing 15-17, we’re going to create a `List` value in the variable -`a` that initially is a list of `5, Nil`. Then we’ll create a `List` value in -the variable `b` that is a list of the value 10 and then points to the list in -`a`. Finally, we’ll modify `a` so that it points to `b` instead of `Nil`, which -will then create a cycle: +We’re using another variation of the `List` definition from Listing 15-6. The +second element in the `Cons` variant is now `RefCell>`, meaning that +instead of having the ability to modify the `i32` value like we did in Listing +15-19, we want to be able to modify which `List` a `Cons` variant is pointing +to. We’ve also added a `tail` method to make it convenient for us to access the +second item, if we have a `Cons` variant. + + + + +In listing 15-21, we’re adding a `main` function that uses the definitions from +Listing 15-20. This code creates a list in `a`, a list in `b` that points to +the list in `a`, and then modifies the list in `a` to point to `b`, which +creates a reference cycle. There are `println!` statements along the way to +show what the reference counts are at various points in this process. 
+ + + Filename: src/main.rs ``` -use List::{Cons, Nil}; -use std::rc::Rc; -use std::cell::RefCell; - +# use List::{Cons, Nil}; +# use std::rc::Rc; +# use std::cell::RefCell; +# #[derive(Debug)] +# enum List { +# Cons(i32, RefCell>), +# Nil, +# } +# +# impl List { +# fn tail(&self) -> Option<&RefCell>> { +# match *self { +# Cons(_, ref item) => Some(item), +# Nil => None, +# } +# } +# } +# fn main() { - let a = Rc::new(Cons(5, RefCell::new(Rc::new(Nil)))); println!("a initial rc count = {}", Rc::strong_count(&a)); println!("a next item = {:?}", a.tail()); - let b = Rc::new(Cons(10, RefCell::new(a.clone()))); + let b = Rc::new(Cons(10, RefCell::new(Rc::clone(&a)))); println!("a rc count after b creation = {}", Rc::strong_count(&a)); println!("b initial rc count = {}", Rc::strong_count(&b)); println!("b next item = {:?}", b.tail()); if let Some(ref link) = a.tail() { - *link.borrow_mut() = b.clone(); + *link.borrow_mut() = Rc::clone(&b); } println!("b rc count after changing a = {}", Rc::strong_count(&b)); @@ -1082,72 +2000,153 @@ fn main() { } ``` -Listing 15-17: Creating a reference cycle of two `List` values pointing to -each other +Listing 15-21: Creating a reference cycle of two `List` values pointing to each +other + +We create an `Rc` instance holding a `List` value in the variable `a` with an +initial list of `5, Nil`. We then create an `Rc` instance holding another +`List` value in the variable `b` that contains the value 10, then points to the +list in `a`. + +Finally, we modify `a` so that it points to `b` instead of `Nil`, which creates +a cycle. We do that by using the `tail` method to get a reference to the +`RefCell` in `a`, which we put in the variable `link`. Then we use the +`borrow_mut` method on the `RefCell` to change the value inside from an `Rc` +that holds a `Nil` value to the `Rc` in `b`. + +If we run this code, keeping the last `println!` commented out for the moment, +we’ll get this output: + +``` +a initial rc count = 1 +a next item = Some(RefCell { value: Nil }) +a rc count after b creation = 2 +b initial rc count = 1 +b next item = Some(RefCell { value: Cons(5, RefCell { value: Nil }) }) +b rc count after changing a = 2 +a rc count after changing a = 2 +``` + +We can see that the reference count of the `Rc` instances in both `a` and `b` +are 2 after we change the list in `a` to point to `b`. At the end of `main`, +Rust will try and drop `b` first, which will decrease the count in each of the +`Rc` instances in `a` and `b` by one. + + + + + + -We use the `tail` method to get a reference to the `RefCell` in `a`, which we -put in the variable `link`. Then we use the `borrow_mut` method on the -`RefCell` to change the value inside from an `Rc` that holds a `Nil` value to -the `Rc` in `b`. We’ve created a reference cycle that looks like Figure 15-18: +However, because `a` is still referencing the `Rc` that was in `b`, that `Rc` +has a count of 1 rather than 0, so the memory the `Rc` has on the heap won’t be +dropped. The memory will just sit there with a count of one, forever. + +To visualize this, we’ve created a reference cycle that looks like Figure 15-22: Reference cycle of lists -Figure 15-18: A reference cycle of lists `a` and `b` pointing to each other - -If you uncomment the last `println!`, Rust will try and print this cycle out -with `a` pointing to `b` pointing to `a` and so forth until it overflows the -stack. 
- -Looking at the results of the `println!` calls before the last one, we’ll see -that the reference count of both `a` and `b` are 2 after we change `a` to point -to `b`. At the end of `main`, Rust will try and drop `b` first, which will -decrease the count of the `Rc` by one. However, because `a` is still -referencing that `Rc`, its count is 1 rather than 0, so the memory the `Rc` has -on the heap won’t be dropped. It’ll just sit there with a count of one, -forever. In this specific case, the program ends right away, so it’s not a -problem, but in a more complex program that allocates lots of memory in a cycle -and holds onto it for a long time, this would be a problem. The program would -be using more memory than it needs to be, and might overwhelm the system and -cause it to run out of memory available to use. - -Now, as you can see, creating reference cycles is difficult and inconvenient in -Rust. But it’s not impossible: preventing memory leaks in the form of reference -cycles is not one of the guarantees Rust makes. If you have `RefCell` values -that contain `Rc` values or similar nested combinations of types with -interior mutability and reference counting, be aware that you’ll have to ensure -that you don’t create cycles. In the example in Listing 15-14, the solution -would probably be to not write code that could create cycles like this, since -we do want `Cons` variants to own the list they point to. - -With data structures like graphs, it’s sometimes necessary to have references -that create cycles in order to have parent nodes point to their children and -children nodes point back in the opposite direction to their parents, for -example. If one of the directions is expressing ownership and the other isn’t, -one way of being able to model the relationship of the data without creating -reference cycles and memory leaks is using `Weak`. Let’s explore that next! - -### Prevent Reference Cycles: Turn an `Rc` into a `Weak` - -The Rust standard library provides `Weak`, a smart pointer type for use in -situations that have cycles of references but only one direction expresses -ownership. We’ve been showing how cloning an `Rc` increases the -`strong_count` of references; `Weak` is a way to reference an `Rc` that -does not increment the `strong_count`: instead it increments the `weak_count` -of references to an `Rc`. When an `Rc` goes out of scope, the inner value will -get dropped if the `strong_count` is 0, even if the `weak_count` is not 0. To -be able to get the value from a `Weak`, we first have to upgrade it to an -`Option>` by using the `upgrade` method. The result of upgrading a -`Weak` will be `Some` if the `Rc` value has not been dropped yet, and `None` -if the `Rc` value has been dropped. Because `upgrade` returns an `Option`, we -know Rust will make sure we handle both the `Some` case and the `None` case and -we won’t be trying to use an invalid pointer. - -Instead of the list in Listing 15-17 where each item knows only about the -next item, let’s say we want a tree where the items know about their children -items *and* their parent items. - -Let’s start just with a struct named `Node` that holds its own `i32` value as -well as references to its children `Node` values: +Figure 15-22: A reference cycle of lists `a` and `b` pointing to each other + +If you uncomment the last `println!` and run the program, Rust will try and +print this cycle out with `a` pointing to `b` pointing to `a` and so forth +until it overflows the stack. 
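If you want to see the leak itself rather than the stack overflow, one experiment (our own sketch built on the definitions from Listing 15-20, not something the chapter does) is to give `List` a `Drop` implementation and watch which values ever get dropped:

```
use std::rc::Rc;
use std::cell::RefCell;
use List::{Cons, Nil};

#[derive(Debug)]
enum List {
    Cons(i32, RefCell<Rc<List>>),
    Nil,
}

impl Drop for List {
    fn drop(&mut self) {
        println!("dropping a List!");
    }
}

impl List {
    fn tail(&self) -> Option<&RefCell<Rc<List>>> {
        match *self {
            Cons(_, ref item) => Some(item),
            Nil => None,
        }
    }
}

fn main() {
    let a = Rc::new(Cons(5, RefCell::new(Rc::new(Nil))));
    let b = Rc::new(Cons(10, RefCell::new(Rc::clone(&a))));

    if let Some(link) = a.tail() {
        // Completing the cycle drops the old `Rc` that pointed at `Nil`, so
        // one "dropping a List!" line prints here.
        *link.borrow_mut() = Rc::clone(&b);
    }

    println!("end of main");
    // Neither `Cons` value ever prints "dropping a List!": at the end of
    // `main`, each still has a strong count of 1 because of the cycle, so
    // `Drop` never runs for them and their memory is leaked.
}
```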
+ + + + +In this specific case, right after we create the reference cycle, the program +ends. The consequences of this cycle aren’t so dire. If a more complex program +allocates lots of memory in a cycle and holds onto it for a long time, the +program would be using more memory than it needs, and might overwhelm the +system and cause it to run out of available memory. + +Creating reference cycles is not easily done, but it’s not impossible either. +If you have `RefCell` values that contain `Rc` values or similar nested +combinations of types with interior mutability and reference counting, be aware +that you have to ensure you don’t create cycles yourself; you can’t rely on +Rust to catch them. Creating a reference cycle would be a logic bug in your +program that you should use automated tests, code reviews, and other software +development practices to minimize. + + + + +Another solution is reorganizing your data structures so that some references +express ownership and some references don’t. In this way, we can have cycles +made up of some ownership relationships and some non-ownership relationships, +and only the ownership relationships affect whether a value may be dropped or +not. In Listing 15-20, we always want `Cons` variants to own their list, so +reorganizing the data structure isn’t possible. Let’s look at an example using +graphs made up of parent nodes and child nodes to see when non-ownership +relationships are an appropriate way to prevent reference cycles. + +### Preventing Reference Cycles: Turn an `Rc` into a `Weak` + +So far, we’ve shown how calling `Rc::clone` increases the `strong_count` of an +`Rc` instance, and that an `Rc` instance is only cleaned up if its +`strong_count` is 0. We can also create a *weak reference* to the value within +an `Rc` instance by calling `Rc::downgrade` and passing a reference to the +`Rc`. When we call `Rc::downgrade`, we get a smart pointer of type `Weak`. +Instead of increasing the `strong_count` in the `Rc` instance by one, calling +`Rc::downgrade` increases the `weak_count` by one. The `Rc` type uses +`weak_count` to keep track of how many `Weak` references exist, similarly to +`strong_count`. The difference is the `weak_count` does not need to be 0 in +order for the `Rc` instance to be cleaned up. + + + + +Strong references are how we can share ownership of an `Rc` instance. Weak +references don’t express an ownership relationship. They won’t cause a +reference cycle since any cycle involving some weak references will be broken +once the strong reference count of values involved is 0. + + + + +Because the value that `Weak` references might have been dropped, in order +to do anything with the value that a `Weak` is pointing to, we have to check +to make sure the value is still around. We do this by calling the `upgrade` +method on a `Weak` instance, which will return an `Option>`. We’ll get +a result of `Some` if the `Rc` value has not been dropped yet, and `None` if +the `Rc` value has been dropped. Because `upgrade` returns an `Option`, we can +be sure that Rust will handle both the `Some` case and the `None` case, and +there won’t be an invalid pointer. + +As an example, rather than using a list whose items know only about the next +item, we’ll create a tree whose items know about their children items *and* +their parent items. 
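Before we build that tree, here's a minimal standalone sketch (ours, not one of the book's listings) of the `Rc::downgrade` and `upgrade` behavior just described:

```
use std::rc::Rc;

fn main() {
    let strong = Rc::new(5);
    let weak = Rc::downgrade(&strong);

    // Downgrading adds to `weak_count`, not `strong_count`.
    println!("strong count = {}, weak count = {}",
             Rc::strong_count(&strong), Rc::weak_count(&strong));

    // While a strong reference is alive, `upgrade` returns `Some`.
    println!("before drop: {:?}", weak.upgrade());

    drop(strong);

    // The value went away with the last strong reference, so `upgrade` now
    // returns `None`; the weak reference never kept the value alive.
    println!("after drop: {:?}", weak.upgrade());
}
```

This prints a strong count of 1 and a weak count of 1, then `Some(5)`, then `None`.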
+ +#### Creating a Tree Data Structure: a `Node` with Child Nodes + +To start building this tree, we’ll create a struct named `Node` that holds its +own `i32` value as well as references to its children `Node` values: Filename: src/main.rs @@ -1162,17 +2161,28 @@ struct Node { } ``` -We want to be able to have a `Node` own its children, and we also want to be -able to have variables own each node so we can access them directly. That’s why -the items in the `Vec` are `Rc` values. We want to be able to modify what -nodes are another node’s children, so that’s why we have a `RefCell` in -`children` around the `Vec`. In Listing 15-19, let’s create one instance of -`Node` named `leaf` with the value 3 and no children, and another instance -named `branch` with the value 5 and `leaf` as one of its children: +We want a `Node` to own its children, and we want to be able to share that +ownership with variables so we can access each `Node` in the tree directly. To +do this, we define the `Vec` items to be values of type `Rc`. We also +want to be able to modify which nodes are children of another node, so we have +a `RefCell` in `children` around the `Vec`. + +Next, let’s use our struct definition and create one `Node` instance named +`leaf` with the value 3 and no children, and another instance named `branch` +with the value 5 and `leaf` as one of its children, as shown in Listing 15-23: Filename: src/main.rs ``` +# use std::rc::Rc; +# use std::cell::RefCell; +# +# #[derive(Debug)] +# struct Node { +# value: i32, +# children: RefCell>>, +# } +# fn main() { let leaf = Rc::new(Node { value: 3, @@ -1181,29 +2191,41 @@ fn main() { let branch = Rc::new(Node { value: 5, - children: RefCell::new(vec![leaf.clone()]), + children: RefCell::new(vec![Rc::clone(&leaf)]), }); } ``` -Listing 15-19: Creating a `leaf` node and a `branch` node where `branch` has -`leaf` as one of its children but `leaf` has no reference to `branch` +Listing 15-23: Creating a `leaf` node with no children and a `branch` node with +`leaf` as one of its children + +We clone the `Rc` in `leaf` and store that in `branch`, meaning the `Node` in +`leaf` now has two owners: `leaf` and `branch`. We can get from `branch` to +`leaf` through `branch.children`, but there’s no way to get from `leaf` to +`branch`. `leaf` has no reference to `branch` and doesn’t know they are +related. We’d like `leaf` to know that `branch` is its parent. + +#### Adding a Reference from a Child to its Parent -The `Node` in `leaf` now has two owners: `leaf` and `branch`, since we clone -the `Rc` in `leaf` and store that in `branch`. The `Node` in `branch` knows -it’s related to `leaf` since `branch` has a reference to `leaf` in -`branch.children`. However, `leaf` doesn’t know that it’s related to `branch`, -and we’d like `leaf` to know that `branch` is its parent. +To make the child node aware of its parent, we need to add a `parent` field to +our `Node` struct definition. The trouble is in deciding what the type of +`parent` should be. We know it can’t contain an `Rc` because that would +create a reference cycle, with `leaf.parent` pointing to `branch` and +`branch.children` pointing to `leaf`, which would cause their `strong_count` +values to never be zero. -To do that, we’re going to add a `parent` field to our `Node` struct -definition, but what should the type of `parent` be? We know it can’t contain -an `Rc`, since `leaf.parent` would point to `branch` and `branch.children` -contains a pointer to `leaf`, which makes a reference cycle. 
Neither `leaf` nor -`branch` would get dropped since they would always refer to each other and -their reference counts would never be zero. +Thinking about the relationships another way, a parent node should own its +children: if a parent node is dropped, its child nodes should be dropped as +well. However, a child should not own its parent: if we drop a child node, the +parent should still exist. This is a case for weak references! -So instead of `Rc`, we’re going to make the type of `parent` use `Weak`, -specifically a `RefCell>`: +So instead of `Rc`, we’ll make the type of `parent` use `Weak`, specifically +a `RefCell>`. Now our `Node` struct definition looks like this: + + + Filename: src/main.rs @@ -1219,14 +2241,35 @@ struct Node { } ``` -This way, a node will be able to refer to its parent node if it has one, -but it does not own its parent. A parent node will be dropped even if -it has child nodes referring to it, as long as it doesn’t have a parent -node as well. Now let’s update `main` to look like Listing 15-20: + + + +This way, a node will be able to refer to its parent node, but does not own its +parent. In Listing 15-24, let’s update `main` to use this new definition so +that the `leaf` node will have a way to refer to its parent, `branch`: + + + Filename: src/main.rs ``` +# use std::rc::{Rc, Weak}; +# use std::cell::RefCell; +# +# #[derive(Debug)] +# struct Node { +# value: i32, +# parent: RefCell>, +# children: RefCell>>, +# } +# fn main() { let leaf = Rc::new(Node { value: 3, @@ -1239,7 +2282,7 @@ fn main() { let branch = Rc::new(Node { value: 5, parent: RefCell::new(Weak::new()), - children: RefCell::new(vec![leaf.clone()]), + children: RefCell::new(vec![Rc::clone(&leaf)]), }); *leaf.parent.borrow_mut() = Rc::downgrade(&branch); @@ -1248,30 +2291,45 @@ fn main() { } ``` -Listing 15-20: A `leaf` node and a `branch` node where `leaf` has a `Weak` -reference to its parent, `branch` +Listing 15-24: A `leaf` node with a `Weak` reference to its parent node, +`branch` -Creating the `leaf` node looks similar; since it starts out without a parent, -we create a new `Weak` reference instance. When we try to get a reference to -the parent of `leaf` by using the `upgrade` method, we’ll get a `None` value, -as shown by the first `println!` that outputs: + + +Creating the `leaf` node looks similar to how creating the `leaf` node looked +in Listing 15-23, with the exception of the `parent` field: `leaf` starts out +without a parent, so we create a new, empty `Weak` reference instance. + +At this point, when we try to get a reference to the parent of `leaf` by using +the `upgrade` method, we get a `None` value. We see this in the output from the +first `println!`: ``` leaf parent = None ``` -Similarly, `branch` will also have a new `Weak` reference, since `branch` does -not have a parent node. We still make `leaf` be one of the children of -`branch`. Once we have a new `Node` instance in `branch`, we can modify `leaf` -to have a `Weak` reference to `branch` for its parent. We use the `borrow_mut` -method on the `RefCell` in the `parent` field of `leaf`, then we use the -`Rc::downgrade` function to create a `Weak` reference to `branch` from the `Rc` -in `branch.` + + + +When we create the `branch` node, it will also have a new `Weak` reference, +since `branch` does not have a parent node. We still have `leaf` as one of the +children of `branch`. Once we have the `Node` instance in `branch`, we can +modify `leaf` to give it a `Weak` reference to its parent. 
We use the +`borrow_mut` method on the `RefCell` in the `parent` field of `leaf`, then we +use the `Rc::downgrade` function to create a `Weak` reference to `branch` from +the `Rc` in `branch.` + + + When we print out the parent of `leaf` again, this time we’ll get a `Some` -variant holding `branch`. Also notice we don’t get a cycle printed out that -eventually ends in a stack overflow like we did in Listing 15-14: the `Weak` -references are just printed as `(Weak)`: +variant holding `branch`: `leaf` can now access its parent! When we print out +`leaf`, we also avoid the cycle that eventually ended in a stack overflow like +we had in Listing 15-21: the `Weak` references are printed as `(Weak)`: ``` leaf parent = Some(Node { value: 5, parent: RefCell { value: (Weak) }, @@ -1279,12 +2337,17 @@ children: RefCell { value: [Node { value: 3, parent: RefCell { value: (Weak) }, children: RefCell { value: [] } }] } }) ``` -The fact that we don’t get infinite output (or at least until the stack -overflows) is one way we can see that we don’t have a reference cycle in this -case. Another way we can tell is by looking at the values we get from calling -`Rc::strong_count` and `Rc::weak_count`. In Listing 15-21, let’s create a new -inner scope and move the creation of `branch` in there, so that we can see what -happens when `branch` is created and then dropped when it goes out of scope: +The lack of infinite output indicates that this code didn’t create a reference +cycle. We can also tell this by looking at the values we get from calling +`Rc::strong_count` and `Rc::weak_count`. + +#### Visualizing Changes to `strong_count` and `weak_count` + +Let’s look at how the `strong_count` and `weak_count` values of the `Rc` +instances change by creating a new inner scope and moving the creation of +`branch` into that scope. This will let us see what happens when `branch` is +created and then dropped when it goes out of scope. The modifications are shown +in Listing 15-25: Filename: src/main.rs @@ -1306,7 +2369,7 @@ fn main() { let branch = Rc::new(Node { value: 5, parent: RefCell::new(Weak::new()), - children: RefCell::new(vec![leaf.clone()]), + children: RefCell::new(vec![Rc::clone(&leaf)]), }); *leaf.parent.borrow_mut() = Rc::downgrade(&branch); @@ -1332,53 +2395,60 @@ fn main() { } ``` -Listing 15-21: Creating `branch` in an inner scope and examining strong and -weak reference counts of `leaf` and `branch` +Listing 15-25: Creating `branch` in an inner scope and examining strong and +weak reference counts -Right after creating `leaf`, its strong count is 1 (for `leaf` itself) and its -weak count is 0. In the inner scope, after we create `branch` and associate -`leaf` and `branch`, `branch` will have a strong count of 1 (for `branch` -itself) and a weak count of 1 (for `leaf.parent` pointing to `branch` with a -`Weak`). `leaf` will have a strong count of 2, since `branch` now has a -clone the `Rc` of `leaf` stored in `branch.children`. `leaf` still has a weak -count of 0. +Once `leaf` is created, its `Rc` has a strong count of 1 and a weak count of 0. +In the inner scope we create `branch` and associate it with `leaf`, at which +point the `Rc` in `branch` will have a strong count of 1 and a weak count of 1 +(for `leaf.parent` pointing to `branch` with a `Weak`). Here `leaf` will +have a strong count of 2, because `branch` now has a clone of the `Rc` of +`leaf` stored in `branch.children`, but will still have a weak count of 0. 
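Those numbers can also be checked directly; the following sketch (ours, using assertions instead of the listing's `println!` calls) captures the state right after `branch` is created and linked:

```
use std::rc::{Rc, Weak};
use std::cell::RefCell;

#[derive(Debug)]
struct Node {
    value: i32,
    parent: RefCell<Weak<Node>>,
    children: RefCell<Vec<Rc<Node>>>,
}

fn main() {
    let leaf = Rc::new(Node {
        value: 3,
        parent: RefCell::new(Weak::new()),
        children: RefCell::new(vec![]),
    });

    // Only the `leaf` variable owns this `Node` so far.
    assert_eq!(Rc::strong_count(&leaf), 1);
    assert_eq!(Rc::weak_count(&leaf), 0);

    let branch = Rc::new(Node {
        value: 5,
        parent: RefCell::new(Weak::new()),
        children: RefCell::new(vec![Rc::clone(&leaf)]),
    });
    *leaf.parent.borrow_mut() = Rc::downgrade(&branch);

    // `branch.children` holds a second strong reference to `leaf`, while
    // `leaf.parent` holds the only (weak) reference to `branch`.
    assert_eq!(Rc::strong_count(&leaf), 2);
    assert_eq!(Rc::weak_count(&leaf), 0);
    assert_eq!(Rc::strong_count(&branch), 1);
    assert_eq!(Rc::weak_count(&branch), 1);
}
```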
-When the inner scope ends, `branch` goes out of scope, and its strong count -decreases to 0, so its `Node` gets dropped. The weak count of 1 from -`leaf.parent` has no bearing on whether `Node` gets dropped or not, so we don’t -have a memory leak! +When the inner scope ends, `branch` goes out of scope and the strong count of +the `Rc` decreases to 0, so its `Node` gets dropped. The weak count of 1 from +`leaf.parent` has no bearing on whether `Node` is dropped or not, so we don’t +get any memory leaks! If we try to access the parent of `leaf` after the end of the scope, we’ll get -`None` again like we did before `leaf` had a parent. At the end of the program, -`leaf` has a strong count of 1 and a weak count of 0, since `leaf` is now the -only thing pointing to it again. - -All of the logic managing the counts and whether a value should be dropped or -not was managed by `Rc` and `Weak` and their implementations of the `Drop` -trait. By specifying that the relationship from a child to its parent should be -a `Weak` reference in the definition of `Node`, we’re able to have parent -nodes point to child nodes and vice versa without creating a reference cycle -and memory leaks. +`None` again. At the end of the program, the `Rc` in `leaf` has a strong count +of 1 and a weak count of 0, because the variable `leaf` is now the only +reference to the `Rc` again. + + + + +All of the logic that manages the counts and value dropping is built in to +`Rc` and `Weak` and their implementations of the `Drop` trait. By specifying +that the relationship from a child to its parent should be a `Weak` +reference in the definition of `Node`, we’re able to have parent nodes point to +child nodes and vice versa without creating a reference cycle and memory leaks. + + + ## Summary -We’ve now covered how you can use different kinds of smart pointers to choose -different guarantees and tradeoffs than those Rust makes with regular +This chapter covered how you can use smart pointers to make different +guarantees and tradeoffs than those Rust makes by default with regular references. `Box` has a known size and points to data allocated on the heap. `Rc` keeps track of the number of references to data on the heap so that data can have multiple owners. `RefCell` with its interior mutability gives -us a type that can be used where we need an immutable type, and enforces the -borrowing rules at runtime instead of at compile time. +us a type that can be used when we need an immutable type but need the ability +to change an inner value of that type, and enforces the borrowing rules at +runtime instead of at compile time. -We’ve also discussed the `Deref` and `Drop` traits that enable a lot of smart -pointers’ functionality. We explored how it’s possible to create a reference -cycle that would cause a memory leak, and how to prevent reference cycles by -using `Weak`. +We also discussed the `Deref` and `Drop` traits that enable a lot of the +functionality of smart pointers. We explored reference cycles that can cause +memory leaks, and how to prevent them using `Weak`. -If this chapter has piqued your interest and you now want to implement your own -smart pointers, check out The Nomicon at -*https://doc.rust-lang.org/stable/nomicon/vec.html* for even more useful -information. +If this chapter has piqued your interest and you want to implement your own +smart pointers, check out “The Nomicon” at +*https://doc.rust-lang.org/stable/nomicon/* for even more useful information. Next, let’s talk about concurrency in Rust. 
We’ll even learn about a few new -smart pointers that can help us with it. +smart pointers. diff --git a/src/doc/book/second-edition/nostarch/chapter16.md b/src/doc/book/second-edition/nostarch/chapter16.md index dc2435ed39..86b3795049 100644 --- a/src/doc/book/second-edition/nostarch/chapter16.md +++ b/src/doc/book/second-edition/nostarch/chapter16.md @@ -3,39 +3,56 @@ # Fearless Concurrency -Ensuring memory safety isn’t Rust’s only goal: being a language that is better -equipped to handle concurrent and parallel programming has always been another -major goal of Rust. *Concurrent programming*, where different parts of a -program execute independently, and *parallel programming*, where different -parts of a program are executing at the same time, are becoming more important -as more computers have multiple processors for our programs to take advantage -of. Historically, programming in these contexts has been difficult and error -prone: Rust hopes to change that. - -Originally, we thought that memory safety and preventing concurrency problems -were two separate challenges to be solved with different methods. However, over -time, we discovered that ownership and the type system are a powerful set of -tools that help in dealing with both memory safety *and* concurrency problems! -By leveraging ownership and type checking, many concurrency errors are *compile -time* errors in Rust, rather than runtime errors. We’ve nicknamed this aspect -of Rust *fearless concurrency*. Fearless concurrency means Rust not only allows -you to have confidence that your code is free of subtle bugs, but also lets you -refactor this kind of code easily without worrying about introducing new bugs. - -> Note: given that Rust’s slogan is *fearless concurrency*, we’ll be referring -> to many of the problems here as *concurrent* rather than being more precise -> by saying *concurrent and/or parallel*, for simplicity’s sake. If this were a -> book specifically about concurrency and/or parallelism, we’d be sure to be -> more specific. For this chapter, please mentally substitute -> *concurrent and/or parallel* whenever we say *concurrent*. - -Many languages are strongly opinionated about the solutions they offer you to -deal with concurrent problems. That’s a very reasonable strategy, especially -for higher-level languages, but lower-level languages don’t have that luxury. -Lower-level languages are expected to enable whichever solution would provide -the best performance in a given situation, and they have fewer abstractions -over the hardware. Rust, therefore, gives us a variety of tools for modeling -our problems in whatever way is appropriate for our situation and requirements. +Handling concurrent programming safely and efficiently is another of Rust’s +major goals. *Concurrent programming*, where different parts of a program +execute independently, and *parallel programming*, where different parts of a +program are executing at the same time, are becoming increasingly important as +more computers have multiple processors to take advantage of. Historically, +programming in these contexts has been difficult and error prone: Rust hopes to +change that. + +Initially, the Rust team thought that ensuring memory safety and preventing +concurrency problems were two separate challenges to be solved with different +methods. Over time, they discovered that the ownership and type systems are a +powerful set of tools to help in dealing with both memory safety *and* +concurrency problems! 
By leveraging ownership and type checking, many +concurrency errors are *compile time* errors in Rust, rather than runtime +errors. Rather than spending lots of time trying to reproduce the exact +circumstances under which a runtime concurrency bug occurs, incorrect code will +refuse to compile with an error explaining the problem. This lets you fix your +code while you’re working on it, rather than potentially after it’s been +shipped to production. We’ve nicknamed this aspect of Rust *fearless +concurrency*. Fearless concurrency allows you to write code that’s free of +subtle bugs and is easy to refactor without introducing new bugs. + + + + +> Note: we’ll be referring to many of the problems here as *concurrent* rather +> than being more precise by saying *concurrent and/or parallel*, for +> simplicity’s sake. If this were a book specifically about concurrency and/or +> parallelism, we’d be sure to be more specific. For this chapter, please +> mentally substitute *concurrent and/or parallel* whenever we say *concurrent*. + + + + +Many languages are strongly opinionated about the solutions they offer for +dealing with concurrent problems. For example, Erlang has elegant functionality +for message passing concurrency, but only obscure ways to share state between +threads. Only supporting a subset of possible solutions is a reasonable +strategy for higher-level languages to take, because a higher-level language +promises benefits from giving up some control in order to gain abstractions. +However, lower-level languages are expected to provide the solution with the +best performance in any given situation, and have fewer abstractions over the +hardware. Rust, therefore, gives us a variety of tools for modeling your +problems in whatever way is appropriate for your situation and requirements. Here’s what we’ll cover in this chapter: @@ -44,76 +61,91 @@ Here’s what we’ll cover in this chapter: between threads. * *Shared state* concurrency, where multiple threads have access to some piece of data. -* The `Sync` and `Send` traits, which allow Rust’s concurrency guarantees to be - extended to user-defined types as well as types provided by the standard - library. +* The `Sync` and `Send` traits, which extend Rust’s concurrency guarantees to + user-defined types as well as types provided by the standard library. ## Using Threads to Run Code Simultaneously -In most operating systems in use today, when your program executes, the context -in which the operating system runs your code is called a *process*. The -operating system runs many processes, and the operating system managing these -processes is what lets multiple programs execute at the same time on your -computer. - -We can take the idea of processes each running a program down one level of -abstraction: your program can also have independent parts that run -simultaneously within the context of your program. The feature that enables -this functionality is called *threads*. - -Splitting up the computation your program needs to do into multiple threads can -improve performance, since the program will be doing multiple things at the -same time. Programming with threads can add complexity, however. Since threads -run simultaneously, there’s no inherent guarantee about the order in which the -parts of your code on different threads will run. 
This can lead to race -conditions where threads are accessing data or resources in an inconsistent -order, deadlocks where two threads both prevent each other from continuing, or -bugs that only happen in certain situations that are hard to reproduce -reliably. Rust lessens the effect of these and other downsides of using -threads, but programming in a multithreaded context still takes thought and -code structured differently than for programs only expected to run in a single -thread. - -There are a few different ways that programming languages implement threads. -Many operating systems provide an API for creating new threads. In addition, -many programming languages provide their own special implementation of threads. -Programming language provided threads are sometimes called *lightweight* or -*green* threads. These languages take a number of green threads and execute -them in the context of a different number of operating system threads. For this -reason, the model where a language calls the operating system APIs to create -threads is sometimes called *1:1*, one OS thread per one language thread. The -green threaded model is called the *M:N* model, `M` green threads per `N` OS -threads, where `M` and `N` are not necessarily the same number. - -Each model has its own advantages and tradeoffs. The tradeoff that’s most -important to Rust is runtime support. *Runtime* is a confusing term; it can -have different meaning in different contexts. Here, we mean some code included -by the language in every binary. For some languages, this code is large, and -for others, this code is small. Colloquially, “no runtime” is often what people -will say when they mean “small runtime”, since every non-assembly language has -some amount of runtime. Smaller runtimes have fewer features but have the -advantage of resulting in smaller binaries. Smaller binaries make it easier to -combine the language with other languages in more contexts. While many -languages are okay with increasing the runtime in exchange for more features, -Rust needs to have nearly no runtime, and cannot compromise on being able to -call into C in order to maintain performance. - -The green threading model is a feature that requires a larger language runtime -in order to manage the threads. As such, the Rust standard library only -provides an implementation of 1:1 threading. Because Rust is such a low-level -language, there are crates that implement M:N threading if you would rather -trade overhead for aspects such as more control over which threads run when and -lower costs of context switching, for example. - -Now that we’ve defined what threads are in Rust, let’s explore how to use the -thread-related API that the standard library provides for us. +In most operating systems today, an executed program’s code is run in a +*process*, and the operating system manages multiple process at once. Within +your program, you can also have independent parts that run simultaneously. The +feature that runs these independent parts is called *threads*. + + + + +Splitting the computation in your program up into multiple threads can improve +performance, since the program will be doing multiple things at the same time, +but it also adds complexity. Because threads may run simultaneously, there’s no +inherent guarantee about the order in which parts of your code on different +threads will run. 
This can lead to problems such as: + +- Race conditions, where threads are accessing data or resources in an + inconsistent order +- Deadlocks, where two threads are waiting for each other to finish using a + resource the other thread has, which prevents both threads from continuing +- Bugs that only happen in certain situations and are hard to reproduce and + fix reliably + + + + +Rust attempts to mitigate negative effects of using threads. Programming in a +multithreaded context still takes careful thought and requires a code structure +that’s different from programs that run in a single thread. + +Programming languages implement threads in a few different ways. Many operating +systems provide an API for creating new threads. This model where a language +calls the operating system APIs to create threads is sometimes called *1:1*, +one OS thread per one language thread. + +Many programming languages provide their own special implementation of threads. +Programming language-provided threads are known as *green* threads, and +languages that use these green threads will execute them in the context of a +different number of operating system threads. For this reason, the green +threaded model is called the *M:N* model, `M` green threads per `N` OS threads, +where `M` and `N` are not necessarily the same number. + +Each model has its own advantages and tradeoffs, and the tradeoff most +important to Rust is runtime support. *Runtime* is a confusing term and can +have different meanings in different contexts. + + + + +In this context, by runtime we mean code that’s included by the language in +every binary. This code can be large or small depending on the language, but +every non-assembly language will have some amount of runtime code. For that +reason, colloquially when people say a language has “no runtime” they often +mean “small runtime.” Smaller runtimes have fewer features but have the +advantage of resulting in smaller binaries, which make it easier to combine the +language with other languages in more contexts. While many languages are okay +with increasing the runtime size in exchange for more features, Rust needs to +have nearly no runtime, and cannot compromise on being able to call into C in +order to maintain performance. + +The green threading M:N model requires a larger language runtime to manage +threads. As such, the Rust standard library only provides an implementation of +1:1 threading. Because Rust is such a low-level language, there are crates that +implement M:N threading if you would rather trade overhead for aspects such as +more control over which threads run when and lower costs of context switching, +for example. + +Now that we’ve defined threads in Rust, let’s explore how to use the +thread-related API provided by the standard library. ### Creating a New Thread with `spawn` -To create a new thread, we call the `thread::spawn` function and pass it a -closure (we talked about closures in Chapter 13), containing the code we want -to run in the new thread. The example in Listing 16-1 prints some text from a -new thread and other text from the main thread: +To create a new thread, we call the `thread::spawn` function, and pass it a +closure (we talked about closures in Chapter 13) containing the code we want to +run in the new thread. 
The example in Listing 16-1 prints some text from a main +thread and other text from a new thread: Filename: src/main.rs @@ -133,12 +165,13 @@ fn main() { } ``` -Listing 16-1: Creating a new thread to print one thing while the main thread is -printing something else +Listing 16-1: Creating a new thread to print one thing while the main thread +prints something else -Note that the way this function is written, when the main thread ends, it will -stop the new thread too. The output from this program might be a little -different every time, but it will look similar to this: +Note that with this function, the new thread will be stopped when the main +thread ends, whether it has finished running or not. The output from this +program might be a little different every time, but it will look similar to +this: ``` hi number 1 from the main thread! @@ -152,21 +185,42 @@ hi number 4 from the spawned thread! hi number 5 from the spawned thread! ``` -The threads will probably take turns, but that’s not guaranteed. In this run, -the main thread printed first, even though the print statement from the spawned -thread appears first in the code we wrote. And even though we told the spawned -thread to print until `i` is 9, it only got to 5 before the main thread shut -down. If you always only see one thread, or if you don’t see any overlap, try + + + +The threads will probably take turns, but that’s not guaranteed: it depends on +how your operating system schedules the threads. In this run, the main thread +printed first, even though the print statement from the spawned thread appears +first in the code. And even though we told the spawned thread to print until +`i` is 9, it only got to 5 before the main thread shut down. + +If you run this code and only see one thread, or don’t see any overlap, try increasing the numbers in the ranges to create more opportunities for a thread to take a break and give the other thread a turn. #### Waiting for All Threads to Finish Using `join` Handles -Not only does the code in Listing 16-1 not allow the spawned thread to finish -most of the time since the main thread ends before the spawned thread is done, -there’s actualy no guarantee that the spawned thread will get to run at all! We -can fix this by saving the return value of `thread::spawn`, which is a -`JoinHandle`. That looks like Listing 16-2: +The code in Listing 16-1 not only stops the spawned thread prematurely most of +the time, because the main thread ends before the spawned thread is done, +there’s actually no guarantee that the spawned thread will get to run at all, +because there’s no guarantee on the order in which threads run! + + + + +We can fix this by saving the return value of `thread::spawn` in a variable. +The return type of `thread::spawn` is `JoinHandle`. A `JoinHandle` is an owned +value that, when we call the `join` method on it, will wait for its thread to +finish. Listing 16-2 shows how to use the `JoinHandle` of the thread we created +in Listing 16-1 and call `join` in order to make sure the spawned thread +finishes before the `main` exits: + + + Filename: src/main.rs @@ -191,11 +245,15 @@ fn main() { Listing 16-2: Saving a `JoinHandle` from `thread::spawn` to guarantee the thread is run to completion -A `JoinHandle` is an owned value that can wait for a thread to finish, which is -what the `join` method does. By calling `join` on the handle, the current -thread will block until the thread that the handle represents terminates. 
Since -we’ve put the call to `join` after the main thread’s `for` loop, running this -example should produce output that looks something like this: +Calling `join` on the handle blocks the thread currently running until the +thread represented by the handle terminates. *Blocking* a thread means that +thread is prevented from performing work or exiting. Because we’ve put the call +to `join` after the main thread’s `for` loop, running this example should +produce output that looks something like this: + + ``` hi number 1 from the main thread! @@ -238,9 +296,8 @@ fn main() { } ``` -The main thread will wait for the spawned thread to finish before the main -thread starts running its `for` loop, so the output won’t be interleaved -anymore: +The main thread will wait for the spawned thread to finish and then run its +`for` loop, so the output won’t be interleaved anymore: ``` hi number 1 from the spawned thread! @@ -263,21 +320,24 @@ your threads are actually running at the same time or not. ### Using `move` Closures with Threads -There’s a feature of closures that we didn’t cover in Chapter 13 that’s often -useful with `thread::spawn`: `move` closures. We said this in Chapter 13: +The `move` closure, which we didn’t cover in Chapter 13, is often used +alongside `thread::spawn`, as it allows us to use data from one thread in +another thread. + +In Chapter 13, we said that “Creating closures that capture values from their +environment is mostly used in the context of starting new threads.” -> Creating closures that capture values from their environment is mostly used -> in the context of starting new threads. + Now we’re creating new threads, so let’s talk about capturing values in closures! -Notice the closure that we pass to `thread::spawn` in Listing 16-1 takes no +Notice in Listing 16-1 that the closure we pass to `thread::spawn` takes no arguments: we’re not using any data from the main thread in the spawned -thread’s code. In order to use data in the spawned thread that comes from the -main thread, we need the spawned thread’s closure to capture the values it -needs. Listing 16-3 shows an attempt to create a vector in the main thread and -use it in the spawned thread, which won’t work the way this example is written: +thread’s code. In order to do so, the spawned thread’s closure must capture the +values it needs. Listing 16-3 shows an attempt to create a vector in the main +thread and use it in the spawned thread. However, this won’t yet work, as +you’ll see in a moment: Filename: src/main.rs @@ -295,12 +355,12 @@ fn main() { } ``` -Listing 16-3: Attempting to use a vector created by the main thread from -another thread +Listing 16-3: Attempting to use a vector created by the main thread in another +thread -The closure uses `v`, so the closure will capture `v` and make `v` part of the -closure’s environment. Because `thread::spawn` runs this closure in a new -thread, we can access `v` inside that new thread. +The closure uses `v`, so will capture `v` and make it part of the closure’s +environment. Because `thread::spawn` runs this closure in a new thread, we +should be able to access `v` inside that new thread. When we compile this example, however, we’ll get the following error: @@ -319,14 +379,13 @@ variables), use the `move` keyword, as shown: | let handle = thread::spawn(move || { ``` -When we capture something in a closure’s environment, Rust will try to infer -how to capture it. `println!` only needs a reference to `v`, so the closure -tries to borrow `v`. 
There’s a problem, though: we don’t know how long the -spawned thread will run, so we don’t know if the reference to `v` will always -be valid. +Rust *infers* how to capture `v`, and since `println!` only needs a reference +to `v`, the closure tries to borrow `v`. There’s a problem, though: Rust can’t +tell how long the spawned thread will run, so doesn’t know if the reference to +`v` will always be valid. -Consider the code in Listing 16-4 that shows a scenario where it’s more likely -that the reference to `v` won’t be valid: +Let’s look at a scenario that’s more likely to have a reference to `v` that +won’t be valid, shown Listing 16-4: Filename: src/main.rs @@ -349,14 +408,15 @@ fn main() { Listing 16-4: A thread with a closure that attempts to capture a reference to `v` from a main thread that drops `v` -This code could be run, and the spawned thread could immediately get put in the -background without getting a chance to run at all. The spawned thread has a -reference to `v` inside, but the main thread is still running: it immediately -drops `v`, using the `drop` function that we discussed in Chapter 15 that -explicitly drops its argument. Then, the spawned thread starts to execute. `v` -is now invalid, so a reference to it is also invalid. Oh no! +If we run this code, there’s a possibility the spawned thread will be +immediately put in the background without getting a chance to run at all. The +spawned thread has a reference to `v` inside, but the main thread immediately +drops `v`, using the `drop` function we discussed in Chapter 15. Then, when the +spawned thread starts to execute, `v` is no longer valid, so a reference to it +is also invalid. Oh no! -To fix this problem, we can listen to the advice of the error message: +To fix the problem in Listing 16-3, we can listen to the advice of the error +message: ``` help: to force the closure to take ownership of `v` (and any other referenced @@ -365,9 +425,9 @@ variables), use the `move` keyword, as shown: ``` By adding the `move` keyword before the closure, we force the closure to take -ownership of the values it’s using, rather than inferring borrowing. This -modification to the code from Listing 16-3 shown in Listing 16-5 will compile -and run as we intend: +ownership of the values it’s using, rather than allowing Rust to infer that it +should borrow. The modification to Listing 16-3 shown in Listing 16-5 will +compile and run as we intend: Filename: src/main.rs @@ -388,9 +448,15 @@ fn main() { Listing 16-5: Using the `move` keyword to force a closure to take ownership of the values it uses -What about the code in Listing 16-4 where the main thread called `drop`? If we -add `move` to the closure, we’ve moved `v` into the closure’s environment, and -we can no longer call `drop` on it. We get this compiler error instead: + + + +What would happen to the code in Listing 16-4 where the main thread called +`drop` if we use a `move` closure? Would `move` fix that case? Nope, we get a +different error, because what Listing 16-4 is trying to do isn’t allowed for a +different reason! If we add `move` to the closure, we’d move `v` into the +closure’s environment, and we could no longer call `drop` on it in the main +thread. We would get this compiler error instead: ``` error[E0382]: use of moved value: `v` @@ -406,33 +472,60 @@ error[E0382]: use of moved value: `v` not implement the `Copy` trait ``` -Rust’s ownership rules have saved us again! +Rust’s ownership rules have saved us again! 
We got an error from the code in +Listing 16-3 because Rust was being conservative and only borrowing `v` for the +thread, which meant the main thread could theoretically invalidate the spawned +thread’s reference. By telling Rust to move ownership of `v` to the spawned +thread, we’re guaranteeing to Rust that the main thread won’t use `v` anymore. +If we change Listing 16-4 in the same way, we’re then violating the ownership +rules when we try to use `v` in the main thread. The `move` keyword overrides +Rust’s conservative default of borrowing; it doesn’t let us violate the +ownership rules. + + + Now that we have a basic understanding of threads and the thread API, let’s talk about what we can actually *do* with threads. ## Message Passing to Transfer Data Between Threads -One approach to concurrency that’s seen a rise in popularity as of late is -*message passing*, where threads or actors communicate by sending each other -messages containing data. Here’s the idea in slogan form: +One increasingly popular approach to ensuring safe concurrency is *message +passing*, where threads or actors communicate by sending each other messages +containing data. Here’s the idea in slogan form from the Go language +documentation: > Do not communicate by sharing memory; instead, share memory by > communicating. > > --Effective Go at *http://golang.org/doc/effective_go.html* -A major tool to accomplish this goal is the *channel*. A channel has two -halves, a transmitter and a receiver. One part of our code can call methods on -the transmitter with the data we want to send, and another part can check the -receiving end for arriving messages. + + + +One major tool Rust has for accomplishing message sending concurrency is the +*channel*, a programming concept that Rust’s standard library provides an +implementation of. You can imagine a channel in programming like a channel of +water, such as a stream or a river. If you put something like a rubber duck or +a boat into a stream, it will travel downstream to the end of the river. -We’re going to work up to an example where we have one thread that will -generate values and send them down a channel. The main thread will receive the -values and print them out. +A channel in programming has two halves: a transmitter and a receiver. The +transmitter half is like the upstream location where we put rubber ducks into +the river, and the receiver half is the downstream place where the rubber duck +ends up. One part of our code calls methods on the transmitter with the data we +want to send, and another part checks the receiving end for arriving messages. -First, though, let’s start by creating a channel but not doing anything with it -in Listing 16-6: +Here we’ll work up to a program that has one thread to generate values and send +them down a channel, and another thread that will receive the values and print +them out. We’re going to be sending simple values between threads using a +channel for the purposes of illustration. Once you’re familiar with the +technique, you could use channels to implement a chat system, or a system where +many threads perform parts of a calculation and send the parts to one thread +that aggregates the results. + +First, we’ll create a channel but not do anything with it in Listing 16-6: Filename: src/main.rs @@ -446,21 +539,38 @@ fn main() { Listing 16-6: Creating a channel and assigning the two halves to `tx` and `rx` -The `mpsc::channel` function crates a new channel. `mpsc` stands for *multiple -producer, single consumer*. 
In short, we can have multiple *sending* ends of a -channel that produce values, but only one *receiving* end that consumes those -values. We’re going to start with a single producer for now, but we’ll add +We create a new channel using the `mpsc::channel` function; `mpsc` stands for +*multiple producer, single consumer*. In short, the way Rust’s standard library +has implemented channels is such that a channel can have multiple *sending* +ends that produce values, but only one *receiving* end that consumes those +values. Imagine multiple rivers and streams flowing together into one big +river: everything sent down any of the streams will end up in one river at the +end. We’re going to start with a single producer for now, but we’ll add multiple producers once we get this example working. -`mpsc::channel` returns a tuple: the first element is the sending end, and the -second element is the receiving end. For historical reasons, many people use -`tx` and `rx` to abbreviate *transmitter* and *receiver*, so those are the -names we’re using for the variables bound to each end. We’re using a `let` -statement with a pattern that destructures the tuples; we’ll be discussing the -use of patterns in `let` statements and destructuring in Chapter 18. +The `mpsc::channel` function returns a tuple, the first element of which is the +sending end and the second element the receiving end. The abbreviations `tx` +and `rx` are traditionally used in many fields for *transmitter* and *receiver* +respectively, so we give our variables those names to indicate each end. We’re +using a `let` statement with a pattern that destructures the tuples; we’ll be +discussing the use of patterns in `let` statements and destructuring in Chapter +18. Using a `let` statement in this way is a convenient way to extract the +pieces of the tuple returned by `mpsc::channel`. + + + Let’s move the transmitting end into a spawned thread and have it send one -string, shown in Listing 16-7: +string so that the spawned thread is communicating with the main thread, shown +in Listing 16-7. This is like putting a rubber duck in the river upstream or +sending a chat message from one thread to another: + + + Filename: src/main.rs @@ -480,20 +590,21 @@ fn main() { Listing 16-7: Moving `tx` to a spawned thread and sending “hi” -We’re using `thread::spawn` to create a new thread, just as we did in the -previous section. We use a `move` closure to make `tx` move into the closure so -that the thread owns it. +We’re again using `thread::spawn` to create a new thread, and then use `move` +to move `tx` into the closure so the spawned thread owns `tx`. The spawned +thread needs to own the transmitting end of the channel in order to be able to +send messages through the channel. -The transmitting end of a channel has the `send` method that takes the value we -want to send down the channel. The `send` method returns a `Result` type, -because if the receiving end has already been dropped, there’s nowhere to send -a value to, so the send operation would error. In this example, we’re simply -calling `unwrap` to ignore this error, but for a real application, we’d want to -handle it properly. Chapter 9 is where you’d go to review strategies for proper -error handling. +The transmitting end has a `send` method that takes the value we want to send. +The `send` method returns a `Result` type, so that if the receiving end +has already been dropped and there’s nowhere to send a value, the send +operation will error. 
In this example, we’re simply calling `unwrap` to panic +in case of error, but for a real application, we’d handle it properly--return +to Chapter 9 to review strategies for proper error handling. -In Listing 16-8, let’s get the value from the receiving end of the channel in -the main thread: +In Listing 16-8, we’ll get the value from the receiving end of the channel in +the main thread. This is like retrieving the rubber duck from the water at the +end of the river, or like getting a chat message: Filename: src/main.rs @@ -517,11 +628,32 @@ fn main() { Listing 16-8: Receiving the value “hi” in the main thread and printing it out The receiving end of a channel has two useful methods: `recv` and `try_recv`. -Here, we’re using `recv`, which is short for *receive*. This method will block -execution until a value is sent down the channel. Once a value is sent, `recv` -will return it in a `Result`. When the sending end of the channel closes, -`recv` will return an error. The `try_recv` method will not block; it instead -returns a `Result` immediately. +We’re using `recv`, short for *receive*, which will block the main thread’s +execution and wait until a value is sent down the channel. Once a value is +sent, `recv` will return it in a `Result`. When the sending end of the +channel closes, `recv` will return an error to signal that no more values will +be coming. + + + + +The `try_recv` method doesn’t block, but will instead return a `Result` +immediately: an `Ok` value holding a message if one is available, and an `Err` +value if there aren’t any messages this time. Using `try_recv` is useful if +this thread has other work to do while waiting for messages: we could write a +loop that calls `try_recv` every so often, handles a message if one is +available, and otherwise does other work for a little while until checking +again. + +We’ve chosen to use `recv` in this example for simplicity; we don’t have any +other work for the main thread to do other than wait for messages, so blocking +the main thread is appropriate. + + + If we run the code in Listing 16-8, we’ll see the value printed out from the main thread: @@ -530,11 +662,29 @@ main thread: Got: hi ``` -### How Channels Interact with Ownership +Perfect! + +### Channels and Ownership Transference -Let’s do an experiment at this point to see how channels and ownership work -together: we’ll try to use `val` in the spawned thread after we’ve sent it down -the channel. Try compiling the code in Listing 16-9: + + + +The ownership rules play a vital role in message sending as far as helping us +write safe, concurrent code. Preventing errors in concurrent programming is the +advantage we get by making the tradeoff of having to think about ownership +throughout our Rust programs. Let’s do an experiment to show how channels and +ownership work together to prevent problems: we’ll try to use a `val` value in +the spawned thread *after* we’ve sent it down the channel. Try compiling the +code in Listing 16-9: Filename: src/main.rs @@ -559,12 +709,12 @@ fn main() { Listing 16-9: Attempting to use `val` after we have sent it down the channel Here, we try to print out `val` after we’ve sent it down the channel via -`tx.send`. This is a bad idea: once we’ve sent the value to another thread, -that thread could modify it or drop it before we try to use the value again. -This could cause errors or unexpected results due to inconsistent or -nonexistent data. +`tx.send`. 
Allowing this would be a bad idea: once the value has been sent to +another thread, that thread could modify or drop it before we try to use the +value again, which would potentially cause errors or unexpected results due to +inconsistent or nonexistent data. -If we try to compile this code, Rust will error: +However, Rust gives us an error if we try to compile this code: ``` error[E0382]: use of moved value: `val` @@ -579,24 +729,18 @@ error[E0382]: use of moved value: `val` not implement the `Copy` trait ``` -Our concurrency mistake has caused a compile-time error! `send` takes ownership -of its parameter and moves the value so that the value is owned by the -receiver. This means we can’t accidentally use the value again after sending -it; the ownership system checks that everything is okay. - -In this regard, message passing is very similar to single ownership in Rust. -Message passing enthusiasts enjoy message passing for similar reasons that -Rustaceans enjoy Rust’s ownership: single ownership means certain classes of -problems go away. If only one thread at a time can use some memory, there’s no -chance of a data race. +Our concurrency mistake has caused a compile-time error! The `send` function +takes ownership of its parameter, and when the value is moved the receiver +takes ownership of it. This stops us from accidentally use the value again +after sending it; the ownership system checks that everything is okay. ### Sending Multiple Values and Seeing the Receiver Waiting -The code in Listing 16-8 compiled and ran, but it wasn’t very interesting: it’s -hard to see that we have two separate threads talking to each other over a -channel. Listing 16-10 has some modifications that will prove to us that this -code is running concurrently: the spawned thread will now send multiple -messages and pause for a second between each message. +The code in Listing 16-8 compiled and ran, but doesn’t show us very clearly +that two separate threads are talking to each other over the channel. In +Listing 16-10 we’ve made some modifications that will prove this code is +running concurrently: the spawned thread will now send multiple messages and +pause for a second between each message. Filename: src/main.rs @@ -618,7 +762,7 @@ fn main() { for val in vals { tx.send(val).unwrap(); - thread::sleep(Duration::new(1, 0)); + thread::sleep(Duration::from_secs(1)); } }); @@ -630,17 +774,17 @@ fn main() { Listing 16-10: Sending multiple messages and pausing between each one -This time, we have a vector of strings in the spawned thread that we want to -send to the main thread. We iterate over them, sending each individually and -then pausing by calling the `thread::sleep` function with a `Duration` value of +This time, the spawned thread has a vector of strings that we want to send to +the main thread. We iterate over them, sending each individually, and pause +between each by calling the `thread::sleep` function with a `Duration` value of one second. In the main thread, we’re not calling the `recv` function explicitly anymore: instead we’re treating `rx` as an iterator. For each value received, we’re printing it out. When the channel is closed, iteration will end. 
-When running the code in Listing 16-10, we’ll see this output, with a one second -pause in between each line: +When running the code in Listing 16-10, you should see the following output, +with a one second pause in between each line: ``` Got: hi @@ -649,16 +793,26 @@ Got: the Got: thread ``` -We don’t have any pausing or code that would take a while in the `for` loop in -the main thread, so we can tell that the main thread is waiting to receive -values from the spawned thread. +Because we don’t have any code that pauses or delays in the `for` loop in the +main thread, we can tell that the main thread is waiting to receive values from +the spawned thread. + + + -### Create Multiple Producers by Cloning the Transmitter +### Creating Multiple Producers by Cloning the Transmitter Near the start of this section, we mentioned that `mpsc` stood for *multiple -producer, single consumer*. We can expand the code from Listing 16-10 to create -multiple threads that all send values to the same receiver. We do that by -cloning the transmitting half of the channel, as shown in Listing 16-11: +producer, single consumer*. Let’s put that ability to use and expand the code +from Listing 16-10 to create multiple threads that all send values to the same +receiver. We can do that by cloning the transmitting half of the channel, as +shown in Listing 16-11: Filename: src/main.rs @@ -666,7 +820,7 @@ Filename: src/main.rs // ...snip... let (tx, rx) = mpsc::channel(); -let tx1 = tx.clone(); +let tx1 = mpsc::Sender::clone(&tx); thread::spawn(move || { let vals = vec![ String::from("hi"), @@ -677,7 +831,7 @@ thread::spawn(move || { for val in vals { tx1.send(val).unwrap(); - thread::sleep(Duration::new(1, 0)); + thread::sleep(Duration::from_secs(1)); } }); @@ -691,7 +845,7 @@ thread::spawn(move || { for val in vals { tx.send(val).unwrap(); - thread::sleep(Duration::new(1, 0)); + thread::sleep(Duration::from_secs(1)); } }); // ...snip... @@ -700,9 +854,9 @@ thread::spawn(move || { Listing 16-11: Sending multiple messages and pausing between each one This time, before we create the first spawned thread, we call `clone` on the -sending end of the channel. This will give us a new sending handle that we can -pass to the first spawned thread. We’ll pass the original sending end of the -channel to a second spawned thread, and each thread is sending different +sending end of the channel. This will give us a new sending handle we can pass +to the first spawned thread. We pass the original sending end of the channel to +a second spawned thread. This gives us two threads, each sending different messages to the receiving end of the channel. If you run this, you’ll *probably* see output like this: @@ -718,66 +872,81 @@ Got: thread Got: you ``` -You might see the values in a different order, though. It depends on your -system! This is what makes concurrency interesting as well as difficult. If you -play around with `thread::sleep`, giving it different values in the different -threads, you can make the runs more non-deterministic and create different -output each time. +You might see the values in a different order, it depends on your system! This +is what makes concurrency interesting as well as difficult. If you play around +with `thread::sleep`, giving it different values in the different threads, each +run will be more non-deterministic and create different output each time. -Now that we’ve seen how channels work, let’s look at shared-memory concurrency. 
+Now that we’ve seen how channels work, let’s look at a different method of +concurrency. ## Shared State Concurrency -While message passing is a fine way of dealing with concurrency, it’s not the +Message passing is a fine way of dealing with concurrency, but it’s not the only one. Consider this slogan again: > Do not communicate by sharing memory; instead, share memory by > communicating. What would “communicate by sharing memory” look like? And moreover, why would -message passing enthusiasts dislike it, and dislike it enough to invert it -entirely? - -Remember how channels are sort of like single ownership? Shared memory -concurrency is sort of like multiple ownership: multiple threads can access the -same memory location at the same time. As we saw with multiple ownership made -possible by smart pointers in Chapter 15, multiple ownership can add additional -complexity, since we need to manage these different owners somehow. - -Rust’s type system and ownership can help a lot here in getting this management +message passing enthusiasts choose not to use it and do the opposite instead? + + + + +In a way, channels in any programming language are sort of like single +ownership, because once you transfer a value down a channel, you shouldn’t use +that value any longer. Shared memory concurrency is sort of like multiple +ownership: multiple threads can access the same memory location at the same +time. As we saw in Chapter 15 where multiple ownership was made possible by +smart pointers, multiple ownership can add additional complexity because these +different owners need managing. + +Rust’s type system and ownership rules assist a lot in getting this management correct, though. For an example, let’s look at one of the more common concurrency primitives for shared memory: mutexes. ### Mutexes Allow Access to Data from One Thread at a Time A *mutex* is a concurrency primitive for sharing memory. It’s short for “mutual -exclusion”, that is, it only allows one thread to access some data at any given -time. Mutexes have a reputation for being hard to use, since there’s a lot you -have to remember: - -1. You have to remember to attempt to acquire the lock before using the data. -2. One you’re done with the data that’s being guarded by the mutex, you have - to remember to unlock the data so other threads can acquire the lock. - -For a real-world example of a mutex, imagine a panel discussion at a conference -where there is only one microphone. Before a panelist may speak, they have to +exclusion”, as in, it only allows one thread to access some data at any given +time. In order to access the data in a mutex, a thread must first signal that +it wants access by asking to acquire the mutex’s *lock*. The lock is a data +structure that is part of the mutex that keeps track of who currently has +exclusive access to the data. We therefore describe the mutex as *guarding* the +data it holds via the locking system. + +Mutexes have a reputation for being hard to use because there are some +rules you have to remember: + + + + +1. You must attempt to acquire the lock before using the data. +2. Once you’re done with the data that’s guarded by the mutex, you must unlock + the data so other threads can acquire the lock. + +For a real-world metaphor of a mutex, imagine a panel discussion at a +conference with only one microphone. Before a panelist may speak, they have to ask or signal that they would like to use the microphone. 
Once they get the microphone, they may talk for as long as they would like, then hand the -microphone to the next panelist who would like to speak. It would be rude for a -panelist to start shouting without having the microphone or to steal the -microphone before another panelist was finished. No one else would be able to -speak if a panelist forgot to hand the microphone to the next person when they -finished using it. If the management of the shared microphone went wrong in any -of these ways, the panel would not work as planned! +microphone to the next panelist who requests to speak. If a panelist forgets to +hand the microphone off when they’re finished with it, no one else is able to +speak. If management of the shared microphone goes wrong, the panel would not +work as planned! Management of mutexes can be incredibly tricky to get right, and that’s why so -many people are enthusiastic about channels. However, in Rust, we can’t get -locking and unlocking wrong, thanks to the type system and ownership. +many people are enthusiastic about channels. However, thanks to Rust’s type +system and ownership rules, we can’t get locking and unlocking wrong. #### The API of `Mutex` -Let’s look at an example of using a mutex in Listing 16-12, without involving -multiple threads for the moment: +Let’s start simply with an example of using a mutex in a single-threaded +context, shown in Listing 16-12: Filename: src/main.rs @@ -799,29 +968,39 @@ fn main() { Listing 16-12: Exploring the API of `Mutex` in a single threaded context for simplicity -Like many types, we create a `Mutex` through an associated function named -`new`. To access the data inside the mutex, we use the `lock` method to acquire -the lock. This call will block until it’s our turn to have the lock. This call -can fail if another thread was holding the lock and then that thread panicked. -In a similar way as we did in Listing 16-6 in the last section, we’re using -`unwrap()` for now, rather than better error handling. See Chapter 9 for better -tools. - -Once we have acquired the lock, we can treat the return value, named `num` in -this case, as a mutable reference to the data inside. The type system is how -Rust ensures that we acquire a lock before using this value: `Mutex` is -not an `i32`, so we *must* acquire the lock in order to be able to use the -`i32` value. We can’t forget; the type system won’t let us do otherwise. - -As you may have suspected, `Mutex` is a smart pointer. Well, more -accurately, the call to `lock` returns a smart pointer called `MutexGuard`. -This smart pointer implements `Deref` to point at our inner data, similar to -the other smart pointers we saw in Chapter 15. In addition, `MutexGuard` has a -`Drop` implementation that releases the lock. This way, we can’t forget to -release the lock. It happens for us automatically when the `MutexGuard` goes -out of scope, which it does at the end of the inner scope in Listing 16-12. We -can print out the mutex value and see that we were able to change the inner -`i32` to 6. +As with many types, we create a `Mutex` using the associated function `new`. +To access the data inside the mutex, we use the `lock` method to acquire the +lock. This call will block the current thread so that it can’t do any work +until it’s our turn to have the lock. + + + + +The call to `lock` would fail if another thread holding the lock panicked. In +that case, no one would ever be able to get the lock, so we’ve chosen to +`unwrap` and have this thread panic if we’re in that situation. 
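For reference, a minimal single-threaded sketch consistent with the description of Listing 16-12 (the listing itself lies outside this hunk’s context) looks like this:

```rust
use std::sync::Mutex;

fn main() {
    let m = Mutex::new(5);

    {
        // `lock` blocks until the lock is available; it returns a `Result`,
        // and `unwrap` panics if another thread panicked while holding the lock.
        let mut num = m.lock().unwrap();
        *num = 6;
    } // the `MutexGuard` is dropped here, releasing the lock

    println!("m = {:?}", m);
}
```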
+ + + + +Once we’ve acquired the lock, we can treat the return value, named `num` in +this case, as a mutable reference to the data inside. The type system ensures +that we acquire a lock before using this value: `Mutex` is not an `i32`, +so we *must* acquire the lock in order to be able to use the `i32` value. We +can’t forget; the type system won’t let us do it otherwise. + +As you may suspect, `Mutex` is a smart pointer. More accurately, the call to +`lock` *returns* a smart pointer called `MutexGuard`. This smart pointer +implements `Deref` to point at our inner data, and also has a `Drop` +implementation that releases the lock automatically when `MutexGuard` goes out +of scope, which happens at the end of the inner scope in Listing 16-12. This +way, we don’t risk forgetting to release the lock and blocking it from use by +other threads, because it happens automatically. + +After dropping the lock, we can print out the mutex value and see that we were +able to change the inner `i32` to 6. #### Sharing a `Mutex` Between Multiple Threads @@ -843,7 +1022,7 @@ fn main() { let mut handles = vec![]; for _ in 0..10 { - let handle = thread::spawn(|| { + let handle = thread::spawn(move || { let mut num = counter.lock().unwrap(); *num += 1; @@ -859,50 +1038,27 @@ fn main() { } ``` -Listing 16-13: The start of a program having 10 threads each increment a -counter guarded by a `Mutex` +Listing 16-13: Ten threads each increment a counter guarded by a `Mutex` We’re creating a `counter` variable to hold an `i32` inside a `Mutex`, like we did in Listing 16-12. Next, we’re creating 10 threads by mapping over a range of numbers. We use `thread::spawn` and give all the threads the same -closure: they’re each going to acquire a lock on the `Mutex` by calling the -`lock` method and then add 1 to the value in the mutex. When a thread finishes -running its closure, `num` will go out of scope and release the lock so that -another thread can acquire it. - -In the main thread, we’re collecting all the join handles like we did in -Listing 16-2, and then calling `join` on each of them to make sure all the -threads finish. At that point, the main thread will acquire the lock and print -out the result of this program. - -We hinted that this example won’t compile, let’s find out why! - -``` -error[E0373]: closure may outlive the current function, but it borrows -`counter`, which is owned by the current function - --> - | -9 | let handle = thread::spawn(|| { - | ^^ may outlive borrowed value `counter` -10 | let mut num = counter.lock().unwrap(); - | ------- `counter` is borrowed here - | -help: to force the closure to take ownership of `counter` (and any other -referenced variables), use the `move` keyword, as shown: - | let handle = thread::spawn(move || { -``` +closure, one that moves the counter into the thread, acquires a lock on the +`Mutex` by calling the `lock` method, and then adds 1 to the value in the +mutex. When a thread finishes running its closure, `num` will go out of scope +and release the lock so another thread can acquire it. -This is similar to the problem we solved in Listing 16-5. Given that we spin up -multiple threads, Rust can’t know how long the threads will run and whether -`counter` will still be valid when each thread tries to borrow it. The help -message has a reminder for how to solve this: we can use `move` to give -ownership to each thread. 
Let’s try it by making this change to the closure: +In the main thread, we collect all the join handles like we did in Listing +16-2, and then call `join` on each to make sure all the threads finish. At that +point, the main thread will acquire the lock and print out the result of this +program. -``` -thread::spawn(move || { -``` +We hinted that this example won’t compile, now let’s find out why! -And trying to compile again. We’ll get different errors this time! + + ``` error[E0382]: capture of moved value: `counter` @@ -931,14 +1087,14 @@ error[E0382]: use of moved value: `counter` error: aborting due to 2 previous errors ``` -`move` didn’t fix this program like it fixed Listing 16-5. Why not? This error -message is a little confusing to read, because it’s saying that the `counter` -value is moved into the closure, then is captured when we call `lock`. That -sounds like what we wanted, but it’s not allowed. +The error message is saying that the `counter` value is moved into the closure, +then is captured when we call `lock`. That sounds like what we wanted, but it’s +not allowed! -Let’s reason this out. Instead of making 10 threads in a `for` loop, let’s just -make two threads without a loop and see what happens then. Replace the first -`for` loop in Listing 16-13 with this code instead: +Let’s reason this out by simplifying the program. Instead of making 10 threads +in a `for` loop, let’s just make two threads without a loop and see what +happens then. Replace the first `for` loop in Listing 16-13 with this code +instead: ``` let handle = thread::spawn(move || { @@ -956,10 +1112,8 @@ let handle2 = thread::spawn(move || { handles.push(handle2); ``` -Here we’re making 2 threads, and we changed the variable names used with the -second thread to `handle2` and `num2`. We’re simplifying our example for the -moment to see if we can understand the error message we’re getting. This time, -compiling gives us: +We make two threads and change the variable names used with the second thread +to `handle2` and `num2`. When we run this time, compiling gives us: ``` error[E0382]: capture of moved value: `counter` @@ -989,23 +1143,23 @@ error[E0382]: use of moved value: `counter` error: aborting due to 2 previous errors ``` -Aha! In the first error message, Rust is showing us that `counter` is moved -into the closure for the thread that goes with `handle`. That move is -preventing us from capturing `counter` when we try to call `lock` on it and -store the result in `num2`, which is in the second thread! So Rust is telling -us that we can’t move ownership of `counter` into multiple threads. This was -hard to see before since we were creating multiple threads in a loop, and Rust -can’t point to different threads in different iterations of the loop. +Aha! The first error message tells us that `counter` is moved into the closure +for the thread associated with `handle`. That move is preventing us from +capturing `counter` when we try to call `lock` on it and store the result in +`num2` in the second thread! So Rust is telling us that we can’t move ownership +of `counter` into multiple threads. This was hard to see before because our +threads were in a loop, and Rust can’t point to different threads in different +iterations of the loop. Let’s try to fix this with a multiple-ownership method +we saw in Chapter 15. #### Multiple Ownership with Multiple Threads -In Chapter 15, we were able to have multiple ownership of a value by using the -smart pointer `Rc` to create a reference-counted value. 
We mentioned in -Chapter 15 that `Rc` was only for single-threaded contexts, but let’s try -using `Rc` in this case anyway and see what happens. We’ll wrap the -`Mutex` in `Rc` in Listing 16-14, and clone the `Rc` before moving -ownership to the thread. We’ll switch back to the `for` loop for creating the -threads, and keep the `move` keyword with the closure: +In Chapter 15, we were able to give a value multiple owners by using the smart +pointer `Rc` to create a reference-counted value. Let’s try to do the same +here and see what happens. We’ll wrap the `Mutex` in `Rc` in Listing +16-14, and clone the `Rc` before moving ownership to the thread. Now we’ve +seen the errors, we’ll also switch back to using the `for` loop, and we’ll keep +the `move` keyword with the closure: Filename: src/main.rs @@ -1019,7 +1173,7 @@ fn main() { let mut handles = vec![]; for _ in 0..10 { - let counter = counter.clone(); + let counter = Rc::clone(&counter); let handle = thread::spawn(move || { let mut num = counter.lock().unwrap(); @@ -1059,42 +1213,49 @@ std::marker::Send` is not satisfied = note: required by `std::thread::spawn` ``` -Wow, that’s quite wordy! Some important parts to pick out: the first note says -`Rc> cannot be sent between threads safely`. The reason for this is -in the error message, which, once distilled, says `the trait bound Send is not -satisfied`. We’re going to talk about `Send` in the next section; it’s one of -the traits that ensures the types we use with threads are meant for use in -concurrent situations. +Wow, that’s quite wordy! Here are some important parts to pick out: the first +note says `Rc> cannot be sent between threads safely`. The reason +for this is in the error message, which, once distilled, says `the trait bound +Send is not satisfied`. We’re going to talk about `Send` in the next section; +it’s one of the traits that ensures the types we use with threads are meant for +use in concurrent situations. + + + Unfortunately, `Rc` is not safe to share across threads. When `Rc` -manages the reference count, it has to add to the count for each call to -`clone` and subtract from the count when each clone is dropped. `Rc` doesn’t -use any concurrency primitives to make sure that changes to the count happen in -an operation that couldn’t be interrupted by another thread. This could lead to -subtle bugs where the counts are wrong, which could lead to memory leaks or -dropping a value before we’re done with it. So what if we had a type that was -exactly like `Rc`, but made changes to the reference count in a thread-safe -way? +manages the reference count, it adds to the count for each call to `clone` and +subtracts from the count when each clone is dropped, but it doesn’t use any +concurrency primitives to make sure that changes to the count can’t be +interrupted by another thread. This could lead to wrong counts: subtle bugs +that could in turn lead to memory leaks or a value being dropped before we’re +done with it. What we need is a type exactly like `Rc`, but that makes +changes to the reference count in a thread-safe way. #### Atomic Reference Counting with `Arc` -If you thought that question sounded like a leading one, you’d be right. There -is a type like `Rc` that’s safe to use in concurrent situations: `Arc`. -The ‘a’ stands for *atomic*, so it’s an *atomically reference counted* type. -Atomics are an additional kind of concurrency primitive that we won’t cover -here; see the standard library documentation for `std::sync::atomic` for more -details. 
The gist of it is this: atomics work like primitive types, but are -safe to share across threads. +Luckily for us, there *is* a type like `Rc` that’s safe to use in concurrent +situations: `Arc`. The ‘a’ stands for *atomic*, meaning it’s an *atomically +reference counted* type. Atomics are an additional kind of concurrency +primitive that we won’t cover in detail here; see the standard library +documentation for `std::sync::atomic` for more details. What you need to know +here is that atomics work like primitive types, but are safe to share across +threads. + +You might then wonder why all primitive types aren’t atomic, and why standard +library types aren’t implemented to use `Arc` by default. The reason is that +thread safety comes with a performance penalty that you only want to pay when +you really need to. If you’re only doing operations on values within a single +thread, your code can run faster if it doesn’t have to enforce the guarantees +atomics provide. + +Back to our example: `Arc` and `Rc` have the same API, so we fix our +program by changing the `use` line and the call to `new`. The code in Listing +16-15 will finally compile and run: -Why aren’t all primitive types atomic, and why aren’t all standard library -types implemented to use `Arc` by default? Thread safety comes with a -performance penalty that we only want to pay when we need it. If we’re only -doing operations on values within a single thread, our code can run faster -since it doesn’t need the guarantees that atomics give us. - -Back to our example: `Arc` and `Rc` are identical except for the atomic -internals of `Arc`. Their API is the same, so we can change the `use` line -and the call to `new`. The code in Listing 16-15 will finally compile and run: +Filename: src/main.rs ``` use std::sync::{Mutex, Arc}; @@ -1105,7 +1266,7 @@ fn main() { let mut handles = vec![]; for _ in 0..10 { - let counter = counter.clone(); + let counter = Arc::clone(&counter); let handle = thread::spawn(move || { let mut num = counter.lock().unwrap(); @@ -1131,122 +1292,126 @@ This will print: Result: 10 ``` -We did it! We counted from 0 to 10, which may not seem very impressive, but we -learned a lot about `Mutex` and thread safety along the way! The structure -that we’ve built in this example could be used to do more complicated -operations than just incrementing a counter. Calculations that can be divided -up into independent parts could be split across threads in this way, and we can -use a `Mutex` to allow each thread to update the final result with its part. - -You may have noticed that, since `counter` is immutable but we could get a -mutable reference to the value inside it, this means `Mutex` provides -interior mutability, like the `Cell` family does. In the same way that we used -`RefCell` in Chapter 15 to be able to mutate contents inside an `Rc`, we -use `Mutex` to be able to mutate contents inside of an `Arc`. - -Recall that `Rc` did not prevent every possible problem: we also talked -about the possibility of creating reference cycles where two `Rc` values -refer to each other, which would cause a memory leak. We have a similar problem -with `Mutex` that Rust also doesn’t prevent: deadlocks. A *deadlock* is a -situation in which an operation needs to lock two resources, and two threads -have each acquired one of the locks and will now wait for each other forever. 
-If you’re interested in this topic, try creating a Rust program that has a -deadlock, then research deadlock mitigation strategies that apply to the use of -mutexes in any language and try implementing them in Rust. The standard library -API documentation for `Mutex` and `MutexGuard` will have useful information. - -Rust’s type system and ownership has made sure that our threads have exclusive -access to the shared value when they’re updating it, so the threads won’t -overwrite each other’s answers in unpredictable ways. It took us a while to -work with the compiler to get everything right, but we’ve saved future time +We did it! We counted from 0 to 10, which may not seem very impressive, but it +did teach us a lot about `Mutex` and thread safety! This structure could +also be used to do more complicated operations than just incrementing a +counter: these methods allow us to divide calculations up into independent +parts, which we could split across threads, and then we can use a `Mutex` to +have each thread update the final result with its part. + +### Similarities between `RefCell`/`Rc` and `Mutex`/`Arc` + +You may have noticed that `counter` is immutable but we could get a mutable +reference to the value inside it; this means `Mutex` provides interior +mutability, like the `Cell` family does. In the same way we used `RefCell` +in Chapter 15 to allow us to mutate contents inside an `Rc`, we use +`Mutex` to mutate contents inside of an `Arc`. + +Another thing to note is that Rust can’t prevent us from all kinds of logic +errors when using `Mutex`. Recall from Chapter 15 that using `Rc` came +with the risk of creating reference cycles, where two `Rc` values refer to +each other, causing memory leaks. Similarly, `Mutex` comes the risk of +*deadlocks*. These occur when an operation needs to lock two resources and two +threads have each acquired one of the locks, causing them to wait for each +other forever. If you’re interested in this topic, try creating a Rust program +that has a deadlock, then research deadlock mitigation strategies for mutexes +in any language, and have a go at implementing them in Rust. The standard +library API documentation for `Mutex` and `MutexGuard` will have useful +information. + + + + Let’s round out this chapter by talking about the `Send` and `Sync` traits and how we could use them with custom types. ## Extensible Concurrency with the `Sync` and `Send` Traits -One interesting aspect of Rust’s concurrency model is that the language knows -*very* little about concurrency. Almost everything we’ve been talking about so -far has been part of the standard library, not the language itself. Because we -don’t need the language to provide everything we need to program in a -concurrent context, we’re not limited to the concurrency options that the -standard library or language provide: we can write our own or use ones others -have written. - -We said *almost* everything wasn’t in the language, so what is? There are two -traits, both in `std::marker`: `Sync` and `Send`. - -### `Send` for Indicating Ownership May Be Transferred to Another Thread - -The `Send` marker trait indicates that ownership of that type may be -transferred between threads. Almost every Rust type is `Send`, but there are -some exceptions. One type provided by the standard library that is not `Send` -is `Rc`: if we clone an `Rc` value and try to transfer ownership of the -clone to another thread, both threads might update the reference count at the -same time. 
As we mentioned in the previous section, `Rc` is implemented for -use in single-threaded situations where you don’t want to pay the performance -penalty of having a threadsafe reference count. - -Because `Rc` is not marked `Send`, Rust’s type system and trait bounds -ensure that we can never forget and accidentally send an `Rc` value across -threads unsafely. We tried to do this in Listing 16-14, and we got an error -that said `the trait Send is not implemented for Rc>`. When we -switched to `Arc`, which is `Send`, the code compiled. - -Any type that is composed entirely of `Send` types is automatically marked as -`Send` as well. Almost all primitive types are `Send`, aside from raw pointers, -which we’ll discuss in Chapter 19. Most standard library types are `Send`, -aside from `Rc`. - -### `Sync` for Indicating Access from Multiple Threads is Safe - -The `Sync` marker trait indicates that a type is safe to have references to a -value from multiple threads. Another way to say this is for any type `T`, `T` -is `Sync` if `&T` (a reference to `T`) is `Send` so that the reference can be -sent safely to another thread. In a similar manner as `Send`, primitive types -are `Sync` and types composed entirely of types that are `Sync` are also `Sync`. +Interestingly, the Rust language itself knows *very* little about concurrency. +Almost everything we’ve talked about so far in this chapter has been part of +the standard library, not the language. Our concurrency options are not limited +to the language or the standard library, meaning we can write our own +concurrency options or use ones others have written. + +There *are* two concurrency concepts embedded in the language, however: the +`std::marker` traits `Sync` and `Send`. + +### Allowing Transference of Ownership Between Threads with `Send` + +The `Send` marker trait indicates that ownership of the type implementing +`Send` may be transferred between threads. Almost every Rust type is `Send`, +but there are some exceptions, including `Rc`: this cannot be `Send` because +if we cloned an `Rc` value and tried to transfer ownership of the clone to +another thread, both threads might update the reference count at the same time. +For this reason, `Rc` is implemented for use in single-threaded situations +where you don’t want to pay the threadsafe performance penalty. + +In this way Rust’s type system and trait bounds ensure we can never +accidentally send an `Rc` value across threads unsafely. When we tried to do +this in Listing 16-14, we got an error that said `the trait Send is not +implemented for Rc>`. When we switched to `Arc`, which is `Send`, +the code compiled. + +Any type composed entirely of `Send` types is automatically marked as `Send` as +well. Almost all primitive types are `Send`, aside from raw pointers, which +we’ll discuss in Chapter 19. + +### Allowing Access from Multiple Threads with `Sync` + +The `Sync` marker trait indicates that it is safe for the type implementing +`Sync` to be referenced from multiple threads. Another way to say this is that +any type `T` is `Sync` if `&T` (a reference to `T`) is `Send`, meaning the +reference can be sent safely to another thread. In a similar manner as `Send`, +primitive types are `Sync` and types composed entirely of types that are `Sync` +are also `Sync`. `Rc` is also not `Sync`, for the same reasons that it’s not `Send`. `RefCell` (which we talked about in Chapter 15) and the family of related -`Cell` types are not `Sync`. 
The implementation of the borrow checking at -runtime that `RefCell` does is not threadsafe. `Mutex` is `Sync`, and can +`Cell` types are not `Sync`. The implementation of borrow checking that +`RefCell` does at runtime is not threadsafe. `Mutex` is `Sync`, and can be used to share access with multiple threads as we saw in the previous section. ### Implementing `Send` and `Sync` Manually is Unsafe -Usually, we don’t need to implement the `Send` and `Sync` traits, since types -that are made up of `Send` and `Sync` traits are automatically also `Send` and -`Sync`. Because they’re marker traits, they don’t even have any methods to -implement. They’re just useful for enforcing concurrency-related invariants. +Because types that are made up of `Send` and `Sync` traits are automatically +also `Send` and `Sync`, we don’t have to implement those traits ourselves. As +marker traits, they don’t even have any methods to implement. They’re just +useful for enforcing concurrency-related invariants. -Implementing the guarantees that these traits are markers for involves -implementing unsafe Rust code. We’re going to be talking about using unsafe -Rust code in Chapter 19; for now, the important information is that building -new concurrent types that aren’t made up of `Send` and `Sync` parts requires -careful thought to make sure the safety guarantees are upheld. The Nomicon at *https://doc.rust-lang.org/stable/nomicon/vec.html* +Manually implementing these traits involves implementing unsafe Rust code. +We’re going to be talking about using unsafe Rust code in Chapter 19; for now, +the important information is that building new concurrent types not made up of +`Send` and `Sync` parts requires careful thought, in order to uphold the safety +guarantees. The Nomicon at *https://doc.rust-lang.org/stable/nomicon/* has more information about these guarantees and how to uphold them. ## Summary This isn’t the last we’ll see of concurrency in this book; the project in Chapter 20 will use these concepts in a more realistic situation than the -smaller examples we discussed in this chapter. +smaller examples discussed here. -As we mentioned, since very little of how Rust deals with concurrency has to be -part of the language, there are many concurrency solutions implemnted as -crates. These evolve more quickly than the standard library; search online for -the current state-of-the-art crates for use in multithreaded situations. +As we mentioned, since very little of how Rust deals with concurrency is part +of the language, many concurrency solutions are implemented as crates. These +evolve more quickly than the standard library; search online for the current +state-of-the-art crates to use in multithreaded situations. Rust provides channels for message passing and smart pointer types like `Mutex` and `Arc` that are safe to use in concurrent contexts. The type -system and the borrow checker will make sure the code we write using these -solutions won’t have data races or invalid references. Once we get our code -compiling, we can rest assured that our code will happily run on multiple -threads without the kinds of hard-to-track-down bugs common in other -programming languages. Concurrent programming is no longer something to be -afraid of: go forth and make your programs concurrent, fearlessly! +system and the borrow checker will make sure the code using these solutions +won’t end up with data races or invalid references. 
Once we get our code +compiling, we can rest assured that it will happily run on multiple threads +without the kinds of hard-to-track-down bugs common in other languages. +Concurrent programming is no longer something to be afraid of: go forth and +make your programs concurrent, fearlessly! Next, let’s talk about idiomatic ways to model problems and structure solutions as your Rust programs get bigger, and how Rust’s idioms relate to those you diff --git a/src/doc/book/second-edition/src/appendix-02-operators.md b/src/doc/book/second-edition/src/appendix-02-operators.md index cbe0bb4f2d..ac36623be0 100644 --- a/src/doc/book/second-edition/src/appendix-02-operators.md +++ b/src/doc/book/second-edition/src/appendix-02-operators.md @@ -171,25 +171,25 @@ Any such expression always has the `unit` type. #### Operator precedence -The precedence of Rust binary operators is ordered as follows, going from -strong to weak: - -```text -as : -* / % -+ - -<< >> -& -^ -| -== != < > <= >= -&& -|| -.. ... -<- -= -``` - -Operators at the same precedence level are evaluated left-to-right. Unary -operators have the same precedence level and are stronger than any of the -binary operators. +The precedence of Rust operators is ordered as follows, going from strong to +weak. Binary Operators at the same precedence level are evaluated in the order +given by their associativity. + + +| Operator | Associativity | +|-----------------------------|---------------------| +| `?` | | +| Unary `-` `*` `!` `&` `&mut` | | +| `as` `:` | left to right | +| `*` `/` `%` | left to right | +| `+` `-` | left to right | +| `<<` `>>` | left to right | +| `&` | left to right | +| `^` | left to right | +| | | left to right | +| `==` `!=` `<` `>` `<=` `>=` | Require parentheses | +| `&&` | left to right | +| || | left to right | +| `..` `...` | Require parentheses | +| `<-` | right to left | +| `=` `+=` `-=` `*=` `/=` `%=`
`&=` |= `^=` `<<=` `>>=` | right to left | diff --git a/src/doc/book/second-edition/src/ch01-02-hello-world.md b/src/doc/book/second-edition/src/ch01-02-hello-world.md index 23f4558457..0635147487 100644 --- a/src/doc/book/second-edition/src/ch01-02-hello-world.md +++ b/src/doc/book/second-edition/src/ch01-02-hello-world.md @@ -92,10 +92,10 @@ says, “I’m declaring a function named `main` that has no parameters and retu nothing.” If there were parameters, their names would go inside the parentheses, `(` and `)`. -Also note that the function body is wrapped in curly braces, `{` and `}`. Rust -requires these around all function bodies. It’s considered good style to put -the opening curly brace on the same line as the function declaration, with one -space in between. +Also note that the function body is wrapped in curly brackets, `{` and `}`. +Rust requires these around all function bodies. It’s considered good style to +put the opening curly bracket on the same line as the function declaration, +with one space in between. Inside the `main` function: @@ -319,7 +319,7 @@ $ cargo build ``` This should have created an executable file in *target/debug/hello_cargo* (or -*target\debug\hello_cargo.exe* on Windows), which you can run with this command: +*target\\debug\\hello_cargo.exe* on Windows), which you can run with this command: ```text $ ./target/debug/hello_cargo # or .\target\debug\hello_cargo.exe on Windows diff --git a/src/doc/book/second-edition/src/ch02-00-guessing-game-tutorial.md b/src/doc/book/second-edition/src/ch02-00-guessing-game-tutorial.md index db3c5ed9d0..897e9415b6 100644 --- a/src/doc/book/second-edition/src/ch02-00-guessing-game-tutorial.md +++ b/src/doc/book/second-edition/src/ch02-00-guessing-game-tutorial.md @@ -60,6 +60,7 @@ using the `cargo run` command: ```text $ cargo run Compiling guessing_game v0.1.0 (file:///projects/guessing_game) + Finished dev [unoptimized + debuginfo] target(s) in 1.50 secs Running `target/debug/guessing_game` Hello, world! ``` @@ -134,8 +135,8 @@ println!("Guess the number!"); println!("Please input your guess."); ``` -This code is just printing a prompt stating what the game is and requesting -input from the user. +This code is printing a prompt stating what the game is and requesting input +from the user. ### Storing Values with Variables @@ -284,21 +285,24 @@ If we don’t call `expect`, the program will compile, but we’ll get a warning ```text $ cargo build Compiling guessing_game v0.1.0 (file:///projects/guessing_game) -src/main.rs:10:5: 10:39 warning: unused result which must be used, -#[warn(unused_must_use)] on by default -src/main.rs:10 io::stdin().read_line(&mut guess); - ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +warning: unused `std::result::Result` which must be used + --> src/main.rs:10:5 + | +10 | io::stdin().read_line(&mut guess); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: #[warn(unused_must_use)] on by default ``` Rust warns that we haven’t used the `Result` value returned from `read_line`, indicating that the program hasn’t handled a possible error. The right way to -suppress the warning is to actually write error handling, but since we just -want to crash this program when a problem occurs, we can use `expect`. You’ll -learn about recovering from errors in Chapter 9. +suppress the warning is to actually write error handling, but since we want to +crash this program when a problem occurs, we can use `expect`. You’ll learn +about recovering from errors in Chapter 9. 
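As a quick illustration of the pattern being discussed, here is a minimal sketch (not the tutorial’s full program) of reading a line and calling `expect`:

```rust
use std::io;

fn main() {
    let mut guess = String::new();

    // `read_line` returns an `io::Result`; `expect` crashes the program with
    // the given message if reading from standard input fails, which also
    // satisfies the `unused_must_use` warning shown above.
    io::stdin().read_line(&mut guess)
        .expect("Failed to read line");

    println!("You guessed: {}", guess);
}
```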
### Printing Values with `println!` Placeholders -Aside from the closing curly brace, there’s only one more line to discuss in +Aside from the closing curly brackets, there’s only one more line to discuss in the code added so far, which is the following: ```rust,ignore @@ -328,6 +332,7 @@ Let’s test the first part of the guessing game. You can run it using ```text $ cargo run Compiling guessing_game v0.1.0 (file:///projects/guessing_game) + Finished dev [unoptimized + debuginfo] target(s) in 2.53 secs Running `target/debug/guessing_game` Guess the number! Please input your guess. @@ -391,6 +396,7 @@ $ cargo build Compiling libc v0.2.14 Compiling rand v0.3.14 Compiling guessing_game v0.1.0 (file:///projects/guessing_game) + Finished dev [unoptimized + debuginfo] target(s) in 2.53 secs ``` Listing 2-2: The output from running `cargo build` after @@ -565,6 +571,7 @@ Try running the program a few times: ```text $ cargo run Compiling guessing_game v0.1.0 (file:///projects/guessing_game) + Finished dev [unoptimized + debuginfo] target(s) in 2.53 secs Running `target/debug/guessing_game` Guess the number! The secret number is: 7 @@ -758,13 +765,12 @@ We bind `guess` to the expression `guess.trim().parse()`. The `guess` in the expression refers to the original `guess` that was a `String` with the input in it. The `trim` method on a `String` instance will eliminate any whitespace at the beginning and end. `u32` can only contain numerical characters, but the -user must press the return key to satisfy -`read_line`. When the user presses return, a -newline character is added to the string. For example, if the user types -5 and presses -return, `guess` looks like this: `5\n`. The `\n` represents “newline,” -the return key. The `trim` method eliminates -`\n`, resulting in just `5`. +user must press the enter key to satisfy +`read_line`. When the user presses enter, a +newline character is added to the string. For example, if the user types 5 and presses enter, +`guess` looks like this: `5\n`. The `\n` represents “newline,” the enter key. +The `trim` method eliminates `\n`, resulting in just `5`. The [`parse` method on strings][parse] parses a string into some kind of number. Because this method can parse a variety of number types, we @@ -795,6 +801,7 @@ Let’s run the program now! ```text $ cargo run Compiling guessing_game v0.1.0 (file:///projects/guessing_game) + Finished dev [unoptimized + debuginfo] target(s) in 0.43 secs Running `target/guessing_game` Guess the number! 
The secret number is: 58 diff --git a/src/doc/book/second-edition/src/ch03-01-variables-and-mutability.md b/src/doc/book/second-edition/src/ch03-01-variables-and-mutability.md index fbad8096be..9878359239 100644 --- a/src/doc/book/second-edition/src/ch03-01-variables-and-mutability.md +++ b/src/doc/book/second-edition/src/ch03-01-variables-and-mutability.md @@ -83,6 +83,7 @@ When we run this program, we get the following: ```text $ cargo run Compiling variables v0.1.0 (file:///projects/variables) + Finished dev [unoptimized + debuginfo] target(s) in 0.30 secs Running `target/debug/variables` The value of x is: 5 The value of x is: 6 @@ -174,6 +175,7 @@ When you run this program, it will output the following: ```text $ cargo run Compiling variables v0.1.0 (file:///projects/variables) + Finished dev [unoptimized + debuginfo] target(s) in 0.31 secs Running `target/debug/variables` The value of x is: 12 ``` diff --git a/src/doc/book/second-edition/src/ch03-02-data-types.md b/src/doc/book/second-edition/src/ch03-02-data-types.md index f10c6bb252..788fc481b0 100644 --- a/src/doc/book/second-edition/src/ch03-02-data-types.md +++ b/src/doc/book/second-edition/src/ch03-02-data-types.md @@ -21,13 +21,14 @@ error, which means the compiler needs more information from us to know which possible type we want to use: ```text -error[E0282]: unable to infer enough type information about `_` +error[E0282]: type annotations needed --> src/main.rs:2:9 | 2 | let guess = "42".parse().expect("Not a number!"); - | ^^^^^ cannot infer type for `_` - | - = note: type annotations or generic parameter binding required + | ^^^^^ + | | + | cannot infer type for `_` + | consider giving `guess` a type ``` You’ll see different type annotations as we discuss the various data types. @@ -42,11 +43,11 @@ work in Rust. #### Integer Types An *integer* is a number without a fractional component. We used one integer -type earlier in this chapter, the `i32` type. This type declaration indicates -that the value it’s associated with should be a signed integer (hence the `i`, -as opposed to a `u` for unsigned) that takes up 32 bits of space. Table 3-1 -shows the built-in integer types in Rust. Each variant in the Signed and -Unsigned columns (for example, *i32*) can be used to declare the type of an +type earlier in this chapter, the `u32` type. This type declaration indicates +that the value it’s associated with should be an unsigned integer (signed +integer types start with `i` instead of `u`) that takes up 32 bits of space. +Table 3-1 shows the built-in integer types in Rust. Each variant in the Signed +and Unsigned columns (for example, *i16*) can be used to declare the type of an integer value. Table 3-1: Integer Types in Rust @@ -104,12 +105,8 @@ you’d use `isize` or `usize` is when indexing some sort of collection. Rust also has two primitive types for *floating-point numbers*, which are numbers with decimal points. Rust’s floating-point types are `f32` and `f64`, which are 32 bits and 64 bits in size, respectively. The default type is `f64` -because it’s roughly the same speed as `f32` but is capable of more precision. -It’s possible to use an `f64` type on 32-bit systems, but it will be slower -than using an `f32` type on those systems. Most of the time, trading potential -worse performance for better precision is a reasonable initial choice, and you -should benchmark your code if you suspect floating-point size is a problem in -your situation. 
+because on modern CPUs it’s roughly the same speed as `f32` but is capable of +more precision. Here’s an example that shows floating-point numbers in action: @@ -181,7 +178,8 @@ section. So far we’ve only worked with numbers, but Rust supports letters too. Rust’s `char` type is the language’s most primitive alphabetic type, and the following -code shows one way to use it: +code shows one way to use it. Note that the `char` type is specified with +single quotes, as opposed to strings that use double quotes: Filename: src/main.rs @@ -348,6 +346,7 @@ Running this code using `cargo run` produces the following result: ```text $ cargo run Compiling arrays v0.1.0 (file:///projects/arrays) + Finished dev [unoptimized + debuginfo] target(s) in 0.31 secs Running `target/debug/arrays` thread '
' panicked at 'index out of bounds: the len is 5 but the index is 10', src/main.rs:6 diff --git a/src/doc/book/second-edition/src/ch03-03-how-functions-work.md b/src/doc/book/second-edition/src/ch03-03-how-functions-work.md index f5f0beb2f0..3c18bfe5dd 100644 --- a/src/doc/book/second-edition/src/ch03-03-how-functions-work.md +++ b/src/doc/book/second-edition/src/ch03-03-how-functions-work.md @@ -24,8 +24,8 @@ fn another_function() { ``` Function definitions in Rust start with `fn` and have a set of parentheses -after the function name. The curly braces tell the compiler where the function -body begins and ends. +after the function name. The curly brackets tell the compiler where the +function body begins and ends. We can call any function we’ve defined by entering its name followed by a set of parentheses. Because `another_function` is defined in the program, it can be @@ -41,6 +41,7 @@ should see the following output: ```text $ cargo run Compiling functions v0.1.0 (file:///projects/functions) + Finished dev [unoptimized + debuginfo] target(s) in 0.28 secs Running `target/debug/functions` Hello, world! Another function. @@ -80,13 +81,14 @@ Try running this program; you should get the following output: ```text $ cargo run Compiling functions v0.1.0 (file:///projects/functions) + Finished dev [unoptimized + debuginfo] target(s) in 1.21 secs Running `target/debug/functions` The value of x is: 5 ``` The declaration of `another_function` has one parameter named `x`. The type of `x` is specified as `i32`. When `5` is passed to `another_function`, the -`println!` macro puts `5` where the pair of curly braces were in the format +`println!` macro puts `5` where the pair of curly brackets were in the format string. In function signatures, you *must* declare the type of each parameter. This is @@ -122,6 +124,7 @@ project’s *src/main.rs* file with the preceding example, and run it using ```text $ cargo run Compiling functions v0.1.0 (file:///projects/functions) + Finished dev [unoptimized + debuginfo] target(s) in 0.31 secs Running `target/debug/functions` The value of x is: 5 The value of y is: 6 @@ -226,19 +229,21 @@ This expression: ``` is a block that, in this case, evaluates to `4`. That value gets bound to `y` -as part of the `let` statement. Note the line without a semicolon at the end, -unlike most of the lines you’ve seen so far. Expressions do not include ending -semicolons. If you add a semicolon to the end of an expression, you turn it -into a statement, which will then not return a value. Keep this in mind as you -explore function return values and expressions next. +as part of the `let` statement. Note the `x + 1` line without a semicolon at +the end, unlike most of the lines you’ve seen so far. Expressions do not +include ending semicolons. If you add a semicolon to the end of an expression, +you turn it into a statement, which will then not return a value. Keep this in +mind as you explore function return values and expressions next. ### Functions with Return Values Functions can return values to the code that calls them. We don’t name return values, but we do declare their type after an arrow (`->`). In Rust, the return value of the function is synonymous with the value of the final expression in -the block of the body of a function. Here’s an example of a function that -returns a value: +the block of the body of a function. You can return early from a function by +using the `return` keyword and specifying a value, but most functions return +the last expression implicitly. 
Here’s an example of a function that returns a +value: Filename: src/main.rs @@ -262,6 +267,7 @@ running this code; the output should look like this: ```text $ cargo run Compiling functions v0.1.0 (file:///projects/functions) + Finished dev [unoptimized + debuginfo] target(s) in 0.30 secs Running `target/debug/functions` The value of x is: 5 ``` @@ -322,16 +328,12 @@ error[E0308]: mismatched types 7 | fn plus_one(x: i32) -> i32 { | ____________________________^ 8 | | x + 1; + | | - help: consider removing this semicolon 9 | | } | |_^ expected i32, found () | = note: expected type `i32` found type `()` -help: consider removing this semicolon: - --> src/main.rs:8:10 - | -8 | x + 1; - | ^ ``` The main error message, “mismatched types,” reveals the core issue with this diff --git a/src/doc/book/second-edition/src/ch03-04-comments.md b/src/doc/book/second-edition/src/ch03-04-comments.md index 96718915f9..389e8552fd 100644 --- a/src/doc/book/second-edition/src/ch03-04-comments.md +++ b/src/doc/book/second-edition/src/ch03-04-comments.md @@ -43,4 +43,5 @@ fn main() { } ``` -That’s all there is to comments. They’re not particularly complicated. +Rust also has another kind of comment, documentation comments, which we’ll +discuss in Chapter 14. diff --git a/src/doc/book/second-edition/src/ch03-05-control-flow.md b/src/doc/book/second-edition/src/ch03-05-control-flow.md index 5f27aa3c70..8b259e4ad9 100644 --- a/src/doc/book/second-edition/src/ch03-05-control-flow.md +++ b/src/doc/book/second-edition/src/ch03-05-control-flow.md @@ -35,7 +35,7 @@ All `if` expressions start with the keyword `if`, which is followed by a condition. In this case, the condition checks whether or not the variable `number` has a value less than 5. The block of code we want to execute if the condition is true is placed immediately after the condition inside curly -braces. Blocks of code associated with the conditions in `if` expressions are +brackets. Blocks of code associated with the conditions in `if` expressions are sometimes called *arms*, just like the arms in `match` expressions that we discussed in the “Comparing the Guess to the Secret Number” section of Chapter 2. Optionally, we can also include an `else` expression, which we chose @@ -49,6 +49,7 @@ Try running this code; you should see the following output: ```text $ cargo run Compiling branches v0.1.0 (file:///projects/branches) + Finished dev [unoptimized + debuginfo] target(s) in 0.31 secs Running `target/debug/branches` condition was true ``` @@ -65,6 +66,7 @@ Run the program again, and look at the output: ```text $ cargo run Compiling branches v0.1.0 (file:///projects/branches) + Finished dev [unoptimized + debuginfo] target(s) in 0.31 secs Running `target/debug/branches` condition was false ``` @@ -149,6 +151,7 @@ see the following output: ```text $ cargo run Compiling branches v0.1.0 (file:///projects/branches) + Finished dev [unoptimized + debuginfo] target(s) in 0.31 secs Running `target/debug/branches` number is divisible by 3 ``` @@ -193,6 +196,7 @@ expression. 
Run this code to see what happens: ```text $ cargo run Compiling branches v0.1.0 (file:///projects/branches) + Finished dev [unoptimized + debuginfo] target(s) in 0.30 secs Running `target/debug/branches` The value of number is: 5 ``` @@ -238,7 +242,7 @@ error[E0308]: if and else have incompatible types | |_____^ expected integral variable, found reference | = note: expected type `{integer}` - found type `&'static str` + found type `&str` ``` The expression in the `if` block evaluates to an integer, and the expression in @@ -285,6 +289,7 @@ continual loop. Give it a try: ```text $ cargo run Compiling loops v0.1.0 (file:///projects/loops) + Finished dev [unoptimized + debuginfo] target(s) in 0.29 secs Running `target/debug/loops` again! again! @@ -339,7 +344,7 @@ true, the code runs; otherwise, it exits the loop. #### Looping Through a Collection with `for` You could use the `while` construct to loop over the elements of a collection, -such as an array. For example: +such as an array. For example, let’s look at Listing 3-5: Filename: src/main.rs @@ -367,6 +372,7 @@ element in the array: ```text $ cargo run Compiling loops v0.1.0 (file:///projects/loops) + Finished dev [unoptimized + debuginfo] target(s) in 0.32 secs Running `target/debug/loops` the value is: 10 the value is: 20 @@ -385,7 +391,7 @@ code to perform the conditional check on every element on every iteration through the loop. As a more efficient alternative, you can use a `for` loop and execute some code -for each item in a collection. A `for` loop looks like this: +for each item in a collection. A `for` loop looks like this code in Listing 3-6: Filename: src/main.rs diff --git a/src/doc/book/second-edition/src/ch05-00-structs.md b/src/doc/book/second-edition/src/ch05-00-structs.md index 46bc5a834e..a2c6f46632 100644 --- a/src/doc/book/second-edition/src/ch05-00-structs.md +++ b/src/doc/book/second-edition/src/ch05-00-structs.md @@ -5,7 +5,7 @@ together multiple related values that make up a meaningful group. If you’re familiar with an object-oriented language, a *struct* is like an object’s data attributes. In this chapter, we’ll compare and contrast tuples with structs, demonstrate how to use structs, and discuss how to define methods and -associated functions on structs to specify behavior associated with a struct’s -data. The struct and *enum* (which is discussed in Chapter 6) concepts are the -building blocks for creating new types in your program’s domain to take full -advantage of Rust’s compile time type checking. +associated functions to specify behavior associated with a struct’s data. The +struct and *enum* (which is discussed in Chapter 6) concepts are the building +blocks for creating new types in your program’s domain to take full advantage +of Rust’s compile time type checking. diff --git a/src/doc/book/second-edition/src/ch05-01-defining-structs.md b/src/doc/book/second-edition/src/ch05-01-defining-structs.md index 71c9fb868c..d2c3f9dfaf 100644 --- a/src/doc/book/second-edition/src/ch05-01-defining-structs.md +++ b/src/doc/book/second-edition/src/ch05-01-defining-structs.md @@ -8,7 +8,7 @@ the data to specify or access the values of an instance. To define a struct, we enter the keyword `struct` and name the entire struct. A struct’s name should describe the significance of the pieces of data being -grouped together. Then, inside curly braces, we define the names and types of +grouped together. Then, inside curly brackets, we define the names and types of the pieces of data, which we call *fields*. 
For example, Listing 5-1 shows a struct to store information about a user account: @@ -25,7 +25,7 @@ struct User { To use a struct after we’ve defined it, we create an *instance* of that struct by specifying concrete values for each of the fields. We create an instance by -stating the name of the struct, and then add curly braces containing `key: +stating the name of the struct, and then add curly brackets containing `key: value` pairs where the keys are the names of the fields and the values are the data we want to store in those fields. We don’t have to specify the fields in the same order in which we declared them in the struct. In other words, the @@ -54,9 +54,9 @@ struct To get a specific value from a struct, we can use dot notation. If we wanted just this user’s email address, we can use `user1.email` wherever we want to -use this value. To change a value in a struct, if the instance is mutable, we -can use the dot notation and assign into a particular field. Listing 5-3 shows -how to change the value in the `email` field of a mutable `User` instance: +use this value. If the instance is mutable, we can change a value by using the +dot notation and assigning into a particular field. Listing 5-3 shows how to +change the value in the `email` field of a mutable `User` instance: ```rust # struct User { @@ -79,11 +79,14 @@ user1.email = String::from("anotheremail@example.com"); Listing 5-3: Changing the value in the `email` field of a `User` instance -Like any expression, we can implicitly return a new instance of a struct from a -function by constructing the new instance as the last expression in the -function body. Listing 5-4 shows a `build_user` function that returns a `User` -instance with the given `email` and `username`. The `active` field gets the -value of `true`, and the `sign_in_count` gets a value of `1`. +Note that the entire instance must be mutable; Rust doesn’t allow us to mark +only certain fields as mutable. Also note that as with any expression, we can +construct a new instance of the struct as the last expression in the function +body to implicitly return that new instance. + +Listing 5-4 shows a `build_user` function that returns a `User` instance with +the given email and username. The `active` field gets the value of `true`, and +the `sign_in_count` gets a value of `1`. ```rust # struct User { @@ -106,24 +109,17 @@ fn build_user(email: String, username: String) -> User { Listing 5-4: A `build_user` function that takes an email and username and returns a `User` instance -Repeating the `email` field name and `email` variable, and the same for -`username`, is a bit tedious, though. It makes sense to name the function -arguments with the same name as the struct fields, but if the struct had more -fields, repeating each name would get even more annoying. Luckily, there’s a -convenient shorthand! - -### Field Init Shorthand when Variables Have the Same Name as Fields +It makes sense to name the function arguments with the same name as the struct +fields, but having to repeat the `email` and `username` field names and +variables is a bit tedious. If the struct had more fields, repeating each name +would get even more annoying. Luckily, there's a convenient shorthand! -If you have variables with the same names as struct fields, you can use *field -init shorthand*. This can make functions that create new instances of structs -more concise. 
+### Using the Field Init Shorthand when Variables and Fields Have the Same Name -In Listing 5-4, the parameter names `email` and `username` are the same as the -`User` struct’s field names `email` and `username`. Because the names are -exactly the same, we can write `build_user` without the repetition of `email` -and `username` as shown in Listing 5-5. This version of `build_user` behaves -the same way as the one in Listing 5-4. The field init syntax can make cases -like this shorter to write, especially when structs have many fields. +Because the parameter names and the struct field names are exactly the same in +Listing 5-4, we can use the *field init shorthand* syntax to rewrite +`build_user` so that it behaves exactly the same but doesn’t have the +repetition of `email` and `username` in the way shown in Listing 5-5. ```rust # struct User { @@ -144,16 +140,23 @@ fn build_user(email: String, username: String) -> User { ``` Listing 5-5: A `build_user` function that uses field init -syntax since the `email` and `username` parameters have the same name as struct -fields +shorthand since the `email` and `username` parameters have the same name as +struct fields + +Here, we’re creating a new instance of the `User` struct, which has a field +named `email`. We want to set the `email` field’s value to the value in the +`email` parameter of the `build_user` function. Because the `email` field and +the `email` parameter have the same name, we only need to write `email` rather +than `email: email`. ### Creating Instances From Other Instances With Struct Update Syntax -It’s often useful to create a new instance from an old instance, using most of -the old instance’s values but changing some. Listing 5-6 shows an example of -creating a new `User` instance in `user2` by setting the values of `email` and -`username` but using the same values for the rest of the fields from the -`user1` instance we created in Listing 5-2: +It’s often useful to create a new instance of a struct that uses most of an old +instance’s values, but changes some. We do this using *struct update syntax*. + +First, Listing 5-6 shows how we create a new `User` instance in `user2` without +the update syntax. We set new values for `email` and `username`, but otherwise +use the same values from `user1` that we created in Listing 5-2: ```rust # struct User { @@ -178,15 +181,12 @@ let user2 = User { }; ``` -Listing 5-6: Creating a new `User` instance, `user2`, and -setting some fields to the values of the same fields from `user1` +Listing 5-6: Creating a new `User` instance using some of +the values from `user1` -The *struct update syntax* achieves the same effect as the code in Listing 5-6 -using less code. The struct update syntax uses `..` to specify that the -remaining fields not set explicitly should have the same value as the fields in -the given instance. The code in Listing 5-7 also creates an instance in `user2` -that has a different value for `email` and `username` but has the same values -for the `active` and `sign_in_count` fields that `user1` has: +Using struct update syntax, we can achieve the same effect with less code, +shown in Listing 5-7. The syntax `..` specifies that the remaining fields not +explicitly set should have the same value as the fields in the given instance. 
```rust # struct User { @@ -214,14 +214,22 @@ let user2 = User { `email` and `username` values for a `User` instance but use the rest of the values from the fields of the instance in the `user1` variable +The code in Listing 5-7 also creates an instance in `user2` that has a +different value for `email` and `username` but has the same values for the +`active` and `sign_in_count` fields from `user1`. + ### Tuple Structs without Named Fields to Create Different Types We can also define structs that look similar to tuples, called *tuple structs*, that have the added meaning the struct name provides, but don’t have names -associated with their fields, just the types of the fields. The definition of a -tuple struct still starts with the `struct` keyword and the struct name, which -are followed by the types in the tuple. For example, here are definitions and -usages of tuple structs named `Color` and `Point`: +associated with their fields, just the types of the fields. Tuple structs are +useful when you want to give the whole tuple a name and make the tuple be a +different type than other tuples, but naming each field as in a regular struct +would be verbose or redundant. + +To define a tuple struct you start with the `struct` keyword and the struct +name followed by the types in the tuple. For example, here are definitions and +usages of two tuple structs named `Color` and `Point`: ```rust struct Color(i32, i32, i32); @@ -233,8 +241,12 @@ let origin = Point(0, 0, 0); Note that the `black` and `origin` values are different types, since they’re instances of different tuple structs. Each struct we define is its own type, -even though the fields within the struct have the same types. Otherwise, tuple -struct instances behave like tuples, which we covered in Chapter 3. +even though the fields within the struct have the same types. For example, a +function that takes a parameter of type `Color` cannot take a `Point` as an +argument, even though both types are made up of three `i32` values. Otherwise, +tuple struct instances behave like tuples, which we covered in Chapter 3: you +can destructure them into their individual pieces, you can use a `.` followed +by the index to access an individual value, and so on. ### Unit-Like Structs without Any Fields diff --git a/src/doc/book/second-edition/src/ch05-02-example-structs.md b/src/doc/book/second-edition/src/ch05-02-example-structs.md index 70e4b1e21b..ce85e51547 100644 --- a/src/doc/book/second-edition/src/ch05-02-example-structs.md +++ b/src/doc/book/second-edition/src/ch05-02-example-structs.md @@ -5,7 +5,7 @@ calculates the area of a rectangle. We’ll start with single variables, and the refactor the program until we’re using structs instead. Let’s make a new binary project with Cargo called *rectangles* that will take -the length and width of a rectangle specified in pixels and will calculate the +the width and height of a rectangle specified in pixels and will calculate the area of the rectangle. 
Listing 5-8 shows a short program with one way of doing just that in our project’s *src/main.rs*: @@ -13,22 +13,22 @@ just that in our project’s *src/main.rs*: ```rust fn main() { - let length1 = 50; let width1 = 30; + let height1 = 50; println!( "The area of the rectangle is {} square pixels.", - area(length1, width1) + area(width1, height1) ); } -fn area(length: u32, width: u32) -> u32 { - length * width +fn area(width: u32, height: u32) -> u32 { + width * height } ``` Listing 5-8: Calculating the area of a rectangle -specified by its length and width in separate variables +specified by its width and height in separate variables Now, run this program using `cargo run`: @@ -39,20 +39,20 @@ The area of the rectangle is 1500 square pixels. ### Refactoring with Tuples Even though Listing 5-8 works and figures out the area of the rectangle by -calling the `area` function with each dimension, we can do better. The length -and the width are related to each other because together they describe one +calling the `area` function with each dimension, we can do better. The width +and the height are related to each other because together they describe one rectangle. The issue with this method is evident in the signature of `area`: ```rust,ignore -fn area(length: u32, width: u32) -> u32 { +fn area(width: u32, height: u32) -> u32 { ``` The `area` function is supposed to calculate the area of one rectangle, but the function we wrote has two parameters. The parameters are related, but that’s not expressed anywhere in our program. It would be more readable and more -manageable to group length and width together. We’ve already discussed one way +manageable to group width and height together. We’ve already discussed one way we might do that in the Grouping Values into Tuples section of Chapter 3 on page XX: by using tuples. Listing 5-9 shows another version of our program that uses tuples: @@ -61,7 +61,7 @@ uses tuples: ```rust fn main() { - let rect1 = (50, 30); + let rect1 = (30, 50); println!( "The area of the rectangle is {} square pixels.", @@ -74,7 +74,7 @@ fn area(dimensions: (u32, u32)) -> u32 { } ``` -Listing 5-8: Specifying the length and width of the +Listing 5-8: Specifying the width and height of the rectangle with a tuple In one way, this program is better. Tuples let us add a bit of structure, and @@ -82,9 +82,9 @@ we’re now passing just one argument. But in another way this version is less clear: tuples don’t name their elements, so our calculation has become more confusing because we have to index into the parts of the tuple. -It doesn’t matter if we mix up length and width for the area calculation, but +It doesn’t matter if we mix up width and height for the area calculation, but if we want to draw the rectangle on the screen, it would matter! We would have -to keep in mind that `length` is the tuple index `0` and `width` is the tuple +to keep in mind that `width` is the tuple index `0` and `height` is the tuple index `1`. If someone else worked on this code, they would have to figure this out and keep it in mind as well. 
It would be easy to forget or mix up these values and cause errors, because we haven’t conveyed the meaning of our data in @@ -100,12 +100,12 @@ parts, as shown in Listing 5-10: ```rust struct Rectangle { - length: u32, width: u32, + height: u32, } fn main() { - let rect1 = Rectangle { length: 50, width: 30 }; + let rect1 = Rectangle { width: 30, height: 50 }; println!( "The area of the rectangle is {} square pixels.", @@ -114,16 +114,16 @@ fn main() { } fn area(rectangle: &Rectangle) -> u32 { - rectangle.length * rectangle.width + rectangle.width * rectangle.height } ``` Listing 5-10: Defining a `Rectangle` struct Here we’ve defined a struct and named it `Rectangle`. Inside the `{}` we -defined the fields as `length` and `width`, both of which have type `u32`. Then -in `main` we create a particular instance of a `Rectangle` that has a length of -50 and a width of 30. +defined the fields as `width` and `height`, both of which have type `u32`. Then +in `main` we create a particular instance of a `Rectangle` that has a width of +30 and a height of 50. Our `area` function is now defined with one parameter, which we’ve named `rectangle`, whose type is an immutable borrow of a struct `Rectangle` @@ -132,10 +132,10 @@ take ownership of it. This way, `main` retains its ownership and can continue using `rect1`, which is the reason we use the `&` in the function signature and where we call the function. -The `area` function accesses the `length` and `width` fields of the `Rectangle` +The `area` function accesses the `width` and `height` fields of the `Rectangle` instance. Our function signature for `area` now indicates exactly what we mean: -calculate the area of a `Rectangle` using its `length` and `width` fields. This -conveys that the length and width are related to each other, and gives +calculate the area of a `Rectangle` using its `width` and `height` fields. This +conveys that the width and height are related to each other, and gives descriptive names to the values rather than using the tuple index values of `0` and `1`—a win for clarity. @@ -150,12 +150,12 @@ chapters: ```rust,ignore struct Rectangle { - length: u32, width: u32, + height: u32, } fn main() { - let rect1 = Rectangle { length: 50, width: 30 }; + let rect1 = Rectangle { width: 30, height: 50 }; println!("rect1 is {}", rect1); } @@ -176,7 +176,7 @@ direct end user consumption. The primitive types we’ve seen so far implement `Display` by default, because there’s only one way you’d want to show a `1` or any other primitive type to a user. But with structs, the way `println!` should format the output is less clear because there are more display possibilities: -do you want commas or not? Do you want to print the curly braces? Should all +do you want commas or not? Do you want to print the curly brackets? Should all the fields be shown? Due to this ambiguity, Rust doesn’t try to guess what we want and structs don’t have a provided implementation of `Display`. @@ -216,12 +216,12 @@ definition, as shown in Listing 5-12: ```rust #[derive(Debug)] struct Rectangle { - length: u32, width: u32, + height: u32, } fn main() { - let rect1 = Rectangle { length: 50, width: 30 }; + let rect1 = Rectangle { width: 30, height: 50 }; println!("rect1 is {:?}", rect1); } @@ -234,7 +234,7 @@ Now when we run the program, we won’t get any errors and we’ll see the following output: ```text -rect1 is Rectangle { length: 50, width: 30 } +rect1 is Rectangle { width: 30, height: 50 } ``` Nice! 
It’s not the prettiest output, but it shows the values of all the fields @@ -245,8 +245,8 @@ When we use the `{:#?}` style in the example, the output will look like this: ```text rect1 is Rectangle { - length: 50, - width: 30 + width: 30, + height: 50 } ``` diff --git a/src/doc/book/second-edition/src/ch05-03-method-syntax.md b/src/doc/book/second-edition/src/ch05-03-method-syntax.md index c2fd6b2ea0..e4606dfd9b 100644 --- a/src/doc/book/second-edition/src/ch05-03-method-syntax.md +++ b/src/doc/book/second-edition/src/ch05-03-method-syntax.md @@ -19,18 +19,18 @@ in Listing 5-13: ```rust #[derive(Debug)] struct Rectangle { - length: u32, width: u32, + height: u32, } impl Rectangle { fn area(&self) -> u32 { - self.length * self.width + self.width * self.height } } fn main() { - let rect1 = Rectangle { length: 50, width: 30 }; + let rect1 = Rectangle { width: 30, height: 50 }; println!( "The area of the rectangle is {} square pixels.", @@ -44,7 +44,7 @@ fn main() { To define the function within the context of `Rectangle`, we start an `impl` (*implementation*) block. Then we move the `area` function within the `impl` -curly braces and change the first (and in this case, only) parameter to be +curly brackets and change the first (and in this case, only) parameter to be `self` in the signature and everywhere within the body. In `main` where we called the `area` function and passed `rect1` as an argument, we can instead use *method syntax* to call the `area` method on our `Rectangle` instance. @@ -131,9 +131,9 @@ method: ```rust,ignore fn main() { - let rect1 = Rectangle { length: 50, width: 30 }; - let rect2 = Rectangle { length: 40, width: 10 }; - let rect3 = Rectangle { length: 45, width: 60 }; + let rect1 = Rectangle { width: 30, height: 50 }; + let rect2 = Rectangle { width: 10, height: 40 }; + let rect3 = Rectangle { width: 60, height: 45 }; println!("Can rect1 hold rect2? {}", rect1.can_hold(&rect2)); println!("Can rect1 hold rect3? {}", rect1.can_hold(&rect3)); @@ -161,8 +161,8 @@ parameter will be by looking at the code that calls the method: read `rect2` (rather than write, which would mean we’d need a mutable borrow), and we want `main` to retain ownership of `rect2` so we can use it again after calling the `can_hold` method. The return value of `can_hold` will be a -boolean, and the implementation will check whether the length and width of -`self` are both greater than the length and width of the other `Rectangle`, +boolean, and the implementation will check whether the width and height of +`self` are both greater than the width and height of the other `Rectangle`, respectively. Let’s add the new `can_hold` method to the `impl` block from Listing 5-13, shown in Listing 5-15: @@ -171,17 +171,17 @@ Listing 5-13, shown in Listing 5-15: ```rust # #[derive(Debug)] # struct Rectangle { -# length: u32, # width: u32, +# height: u32, # } # impl Rectangle { fn area(&self) -> u32 { - self.length * self.width + self.width * self.height } fn can_hold(&self, other: &Rectangle) -> bool { - self.length > other.length && self.width > other.width + self.width > other.width && self.height > other.height } } ``` @@ -205,7 +205,7 @@ function. Associated functions are often used for constructors that will return a new instance of the struct. 
For example, we could provide an associated function -that would have one dimension parameter and use that as both length and width, +that would have one dimension parameter and use that as both width and height, thus making it easier to create a square `Rectangle` rather than having to specify the same value twice: @@ -214,13 +214,13 @@ specify the same value twice: ```rust # #[derive(Debug)] # struct Rectangle { -# length: u32, # width: u32, +# height: u32, # } # impl Rectangle { fn square(size: u32) -> Rectangle { - Rectangle { length: size, width: size } + Rectangle { width: size, height: size } } } ``` @@ -239,19 +239,19 @@ in its own `impl` block: ```rust # #[derive(Debug)] # struct Rectangle { -# length: u32, # width: u32, +# height: u32, # } # impl Rectangle { fn area(&self) -> u32 { - self.length * self.width + self.width * self.height } } impl Rectangle { fn can_hold(&self, other: &Rectangle) -> bool { - self.length > other.length && self.width > other.width + self.width > other.width && self.height > other.height } } ``` diff --git a/src/doc/book/second-edition/src/ch06-01-defining-an-enum.md b/src/doc/book/second-edition/src/ch06-01-defining-an-enum.md index e7ee81b2e5..32b3c56067 100644 --- a/src/doc/book/second-edition/src/ch06-01-defining-an-enum.md +++ b/src/doc/book/second-edition/src/ch06-01-defining-an-enum.md @@ -249,7 +249,7 @@ m.call(); The body of the method would use `self` to get the value that we called the method on. In this example, we’ve created a variable `m` that has the value -`Message::Write("hello")`, and that is what `self` will be in the body of the +`Message::Write(String::from("hello"))`, and that is what `self` will be in the body of the `call` method when `m.call()` runs. Let’s look at another enum in the standard library that is very common and diff --git a/src/doc/book/second-edition/src/ch06-02-match.md b/src/doc/book/second-edition/src/ch06-02-match.md index 33d70dbfbb..7aac3ebd40 100644 --- a/src/doc/book/second-edition/src/ch06-02-match.md +++ b/src/doc/book/second-edition/src/ch06-02-match.md @@ -62,9 +62,9 @@ The code associated with each arm is an expression, and the resulting value of the expression in the matching arm is the value that gets returned for the entire `match` expression. -Curly braces typically aren’t used if the match arm code is short, as it is in -Listing 6-3 where each arm just returns a value. If you want to run multiple -lines of code in a match arm, you can use curly braces. For example, the +Curly brackets typically aren’t used if the match arm code is short, as it is +in Listing 6-3 where each arm just returns a value. If you want to run multiple +lines of code in a match arm, you can use curly brackets. For example, the following code would print out “Lucky penny!” every time the method was called with a `Coin::Penny` but would still return the last value of the block, `1`: @@ -99,7 +99,7 @@ As an example, let’s change one of our enum variants to hold data inside it. From 1999 through 2008, the United States minted quarters with different designs for each of the 50 states on one side. No other coins got state designs, so only quarters have this extra value. 
We can add this information to -our `enum` by changing the `Quarter` variant to include a `State` value stored +our `enum` by changing the `Quarter` variant to include a `UsState` value stored inside it, which we’ve done here in Listing 6-4: ```rust diff --git a/src/doc/book/second-edition/src/ch07-00-modules.md b/src/doc/book/second-edition/src/ch07-00-modules.md index 110da77f8f..a4b400b8ff 100644 --- a/src/doc/book/second-edition/src/ch07-00-modules.md +++ b/src/doc/book/second-edition/src/ch07-00-modules.md @@ -14,7 +14,7 @@ you can choose whether those definitions are visible outside their module (public) or not (private). Here’s an overview of how modules work: * The `mod` keyword declares a new module. Code within the module appears - either immediately following this declaration within curly braces or in + either immediately following this declaration within curly brackets or in another file. * By default, functions, types, constants, and modules are private. The `pub` keyword makes an item public and therefore visible outside its namespace. diff --git a/src/doc/book/second-edition/src/ch07-01-mod-and-the-filesystem.md b/src/doc/book/second-edition/src/ch07-01-mod-and-the-filesystem.md index 749db5983c..18c3e6ef79 100644 --- a/src/doc/book/second-edition/src/ch07-01-mod-and-the-filesystem.md +++ b/src/doc/book/second-edition/src/ch07-01-mod-and-the-filesystem.md @@ -63,7 +63,7 @@ mod network { ``` After the `mod` keyword, we put the name of the module, `network`, and then a -block of code in curly braces. Everything inside this block is inside the +block of code in curly brackets. Everything inside this block is inside the namespace `network`. In this case, we have a single function, `connect`. If we wanted to call this function from a script outside the `network` module, we would need to specify the module and use the namespace syntax `::`, like so: @@ -383,7 +383,7 @@ previously, we can do what the note suggests: 1. Make a new *directory* named *network*, the parent module’s name. 2. Move the *src/network.rs* file into the new *network* directory, and - rename *src/network/mod.rs*. + rename it to *src/network/mod.rs*. 3. Move the submodule file *src/server.rs* into the *network* directory. Here are commands to carry out these steps: diff --git a/src/doc/book/second-edition/src/ch07-03-importing-names-with-use.md b/src/doc/book/second-edition/src/ch07-03-importing-names-with-use.md index dc31695fe9..ad76a7057b 100644 --- a/src/doc/book/second-edition/src/ch07-03-importing-names-with-use.md +++ b/src/doc/book/second-edition/src/ch07-03-importing-names-with-use.md @@ -83,7 +83,7 @@ directly. Because enums also form a sort of namespace like modules, we can import an enum’s variants with `use` as well. For any kind of `use` statement, if you’re importing multiple items from one namespace, you can list them using curly -braces and commas in the last position, like so: +brackets and commas in the last position, like so: ```rust enum TrafficLight { diff --git a/src/doc/book/second-edition/src/ch08-00-common-collections.md b/src/doc/book/second-edition/src/ch08-00-common-collections.md index bdde3bf2bf..b45e49d994 100644 --- a/src/doc/book/second-edition/src/ch08-00-common-collections.md +++ b/src/doc/book/second-edition/src/ch08-00-common-collections.md @@ -1,18 +1,18 @@ # Common Collections -Rust’s standard library includes a number of really useful data structures -called *collections*. 
Most other data types represent one specific value, but +Rust’s standard library includes a number of very useful data structures called +*collections*. Most other data types represent one specific value, but collections can contain multiple values. Unlike the built-in array and tuple types, the data these collections point to is stored on the heap, which means the amount of data does not need to be known at compile time and can grow or shrink as the program runs. Each kind of collection has different capabilities -and costs, and choosing an appropriate one for the situation you’re in is a -skill you’ll develop over time. In this chapter, we’ll go over three -collections which are used very often in Rust programs: +and costs, and choosing an appropriate one for your current situation is a +skill you’ll develop over time. In this chapter, we’ll discuss three +collections that are used very often in Rust programs: * A *vector* allows us to store a variable number of values next to each other. -* A *string* is a collection of characters. We’ve seen the `String` type - before, but we’ll talk about it in depth now. +* A *string* is a collection of characters. We’ve discussed the `String` type + previously, but in this chapter we’ll talk about it in depth. * A *hash map* allows us to associate a value with a particular key. It’s a particular implementation of the more general data structure called a *map*. @@ -21,5 +21,5 @@ see [the documentation][collections]. [collections]: ../../std/collections/index.html -We’re going to discuss how to create and update vectors, strings, and hash -maps, as well as what makes each special. +We’ll discuss how to create and update vectors, strings, and hash maps, as well +as what makes each special. diff --git a/src/doc/book/second-edition/src/ch08-01-vectors.md b/src/doc/book/second-edition/src/ch08-01-vectors.md index f68a2c308d..c6d6fed7b8 100644 --- a/src/doc/book/second-edition/src/ch08-01-vectors.md +++ b/src/doc/book/second-edition/src/ch08-01-vectors.md @@ -1,46 +1,55 @@ ## Vectors -The first type we’ll look at is `Vec`, also known as a *vector*. Vectors -allow us to store more than one value in a single data structure that puts all -the values next to each other in memory. Vectors can only store values of the -same type. They are useful in situations where you have a list of items, such -as the lines of text in a file or the prices of items in a shopping cart. +The first collection type we’ll look at is `Vec`, also known as a *vector*. +Vectors allow us to store more than one value in a single data structure that +puts all the values next to each other in memory. Vectors can only store values +of the same type. They are useful in situations in which you have a list of +items, such as the lines of text in a file or the prices of items in a shopping +cart. ### Creating a New Vector -To create a new, empty vector, we can call the `Vec::new` function: +To create a new, empty vector, we can call the `Vec::new` function as shown in +Listing 8-1: ```rust let v: Vec = Vec::new(); ``` -Note that we added a type annotation here. Since we aren’t inserting any values -into this vector, Rust doesn’t know what kind of elements we intend to store. -This is an important point. Vectors are homogeneous: they may store many -values, but those values must all be the same type. Vectors are implemented -using generics, which Chapter 10 will cover how to use in your own types. 
For -now, all you need to know is that the `Vec` type provided by the standard -library can hold any type, and when a specific `Vec` holds a specific type, the -type goes within angle brackets. We’ve told Rust that the `Vec` in `v` will +Listing 8-1: Creating a new, empty vector to hold values +of type `i32` + +Note that we added a type annotation here. Because we aren’t inserting any +values into this vector, Rust doesn’t know what kind of elements we intend to +store. This is an important point. Vectors are implemented using generics; +we’ll cover how to use generics with your own types in Chapter 10. For now, +know that the `Vec` type provided by the standard library can hold any type, +and when a specific vector holds a specific type, the type is specified within +angle brackets. In Listing 8-1, we’ve told Rust that the `Vec` in `v` will hold elements of the `i32` type. -In real code, Rust can infer the type of value we want to store once we insert -values, so you rarely need to do this type annotation. It’s more common to -create a `Vec` that has initial values, and Rust provides the `vec!` macro for -convenience. The macro will create a new `Vec` that holds the values we give -it. This will create a new `Vec` that holds the values `1`, `2`, and `3`: +In more realistic code, Rust can often infer the type of value we want to store +once we insert values, so you rarely need to do this type annotation. It’s more +common to create a `Vec` that has initial values, and Rust provides the +`vec!` macro for convenience. The macro will create a new vector that holds the +values we give it. Listing 8-2 creates a new `Vec` that holds the values +`1`, `2`, and `3`: ```rust let v = vec![1, 2, 3]; ``` +Listing 8-2: Creating a new vector containing +values + Because we’ve given initial `i32` values, Rust can infer that the type of `v` -is `Vec`, and the type annotation isn’t necessary. Let’s look at how to -modify a vector next. +is `Vec`, and the type annotation isn’t necessary. Next, we’ll look at how +to modify a vector. ### Updating a Vector -To create a vector then add elements to it, we can use the `push` method: +To create a vector and then add elements to it, we can use the `push` method as +shown in Listing 8-3: ```rust let mut v = Vec::new(); @@ -51,14 +60,18 @@ v.push(7); v.push(8); ``` -As with any variable as we discussed in Chapter 3, if we want to be able to -change its value, we need to make it mutable with the `mut` keyword. The +Listing 8-3: Using the `push` method to add values to a +vector + +As with any variable, as discussed in Chapter 3, if we want to be able to +change its value, we need to make it mutable using the `mut` keyword. The numbers we place inside are all of type `i32`, and Rust infers this from the data, so we don’t need the `Vec` annotation. -### Dropping a Vector Drops its Elements +### Dropping a Vector Drops Its Elements -Like any other `struct`, a vector will be freed when it goes out of scope: +Like any other `struct`, a vector will be freed when it goes out of scope, as +annotated in Listing 8-4: ```rust { @@ -69,9 +82,12 @@ Like any other `struct`, a vector will be freed when it goes out of scope: } // <- v goes out of scope and is freed here ``` +Listing 8-4: Showing where the vector and its elements +are dropped + When the vector gets dropped, all of its contents will also be dropped, meaning those integers it holds will be cleaned up. 
This may seem like a -straightforward point, but can get a little more complicated once we start to +straightforward point but can get a bit more complicated when we start to introduce references to the elements of the vector. Let’s tackle that next! ### Reading Elements of Vectors @@ -81,7 +97,7 @@ read their contents is a good next step. There are two ways to reference a value stored in a vector. In the examples, we’ve annotated the types of the values that are returned from these functions for extra clarity. -This example shows both methods of accessing a value in a vector either with +Listing 8-5 shows both methods of accessing a value in a vector either with indexing syntax or the `get` method: ```rust @@ -91,17 +107,20 @@ let third: &i32 = &v[2]; let third: Option<&i32> = v.get(2); ``` -There are a few things to note here. First, that we use the index value of `2` -to get the third element: vectors are indexed by number, starting at zero. -Second, the two different ways to get the third element are: using `&` and -`[]`, which gives us a reference, or using the `get` method with the index -passed as an argument, which gives us an `Option<&T>`. +Listing 8-5: Using indexing syntax or the `get` method to +access an item in a vector + +Note two details here. First, we use the index value of `2` to get the third +element: vectors are indexed by number, starting at zero. Second, the two +different ways to get the third element are by using `&` and `[]`, which gives +us a reference, or by using the `get` method with the index passed as an +argument, which gives us an `Option<&T>`. -The reason Rust has two ways to reference an element is so that you can choose -how the program behaves when you try to use an index value that the vector -doesn’t have an element for. As an example, what should a program do if it has -a vector that holds five elements then tries to access an element at index 100 -like this: +The reason Rust has two ways to reference an element is so you can choose how +the program behaves when you try to use an index value that the vector doesn’t +have an element for. As an example, what should a program do if it has a vector +that holds five elements and then tries to access an element at index 100, as +shown in Listing 8-6: ```rust,should_panic let v = vec![1, 2, 3, 4, 5]; @@ -110,31 +129,32 @@ let does_not_exist = &v[100]; let does_not_exist = v.get(100); ``` -When you run this, you will find that with the first `[]` method, Rust will -cause a `panic!` when a non-existent element is referenced. This method would -be preferable if you want your program to consider an attempt to access an -element past the end of the vector to be a fatal error that should crash the -program. - -When the `get` method is passed an index that is outside the array, it will -return `None` without panicking. You would use this if accessing an element -beyond the range of the vector will happen occasionally under normal -circumstances. Your code can then have logic to handle having either -`Some(&element)` or `None`, as we discussed in Chapter 6. For example, the -index could be coming from a person entering a number. If they accidentally -enter a number that’s too large and your program gets a `None` value, you could -tell the user how many items are in the current `Vec` and give them another -chance to enter a valid value. That would be more user-friendly than crashing -the program for a typo! 
+Listing 8-6: Attempting to access the element at index +100 in a vector containing 5 elements + +When you run this code, the first `[]` method will cause a `panic!` because it +references a nonexistent element. This method is best used when you want your +program to consider an attempt to access an element past the end of the vector +to be a fatal error that crashes the program. + +When the `get` method is passed an index that is outside the vector, it returns +`None` without panicking. You would use this method if accessing an element +beyond the range of the vector happens occasionally under normal circumstances. +Your code will then have logic to handle having either `Some(&element)` or +`None`, as discussed in Chapter 6. For example, the index could be coming from +a person entering a number. If they accidentally enter a number that’s too +large and the program gets a `None` value, you could tell the user how many +items are in the current `Vec` and give them another chance to enter a valid +value. That would be more user-friendly than crashing the program due to a typo! #### Invalid References -Once the program has a valid reference, the borrow checker will enforce the -ownership and borrowing rules covered in Chapter 4 to ensure this reference and -any other references to the contents of the vector stay valid. Recall the rule -that says we can’t have mutable and immutable references in the same scope. -That rule applies in this example, where we hold an immutable reference to the -first element in a vector and try to add an element to the end: +When the program has a valid reference, the borrow checker enforces the +ownership and borrowing rules (covered in Chapter 4) to ensure this reference +and any other references to the contents of the vector remain valid. Recall the +rule that states we can’t have mutable and immutable references in the same +scope. That rule applies in Listing 8-7 where we hold an immutable reference to +the first element in a vector and try to add an element to the end: ```rust,ignore let mut v = vec![1, 2, 3, 4, 5]; @@ -144,7 +164,10 @@ let first = &v[0]; v.push(6); ``` -Compiling this will give us this error: +Listing 8-7: Attempting to add an element to a vector +while holding a reference to an item + +Compiling this code will result in this error: ```text error[E0502]: cannot borrow `v` as mutable because it is also borrowed as @@ -159,33 +182,67 @@ immutable | - immutable borrow ends here ``` -This code might look like it should work: why should a reference to the first -element care about what changes about the end of the vector? The reason why -this code isn’t allowed is due to the way vectors work. Adding a new element +The code in Listing 8-7 might look like it should work: why should a reference +to the first element care about what changes at the end of the vector? The +reason behind this error is due to the way vectors work: adding a new element onto the end of the vector might require allocating new memory and copying the -old elements over to the new space, in the circumstance that there isn’t enough -room to put all the elements next to each other where the vector was. In that -case, the reference to the first element would be pointing to deallocated -memory. The borrowing rules prevent programs from ending up in that situation. +old elements to the new space if there isn’t enough room to put all the +elements next to each other where the vector was. In that case, the reference +to the first element would be pointing to deallocated memory. 
The borrowing +rules prevent programs from ending up in that situation. + +> Note: For more on the implementation details of the `Vec` type, see “The +> Nomicon” at https://doc.rust-lang.org/stable/nomicon/vec.html. -> Note: For more on this, see The Nomicon at -*https://doc.rust-lang.org/stable/nomicon/vec.html*. +### Iterating Over the Values in a Vector + +If we want to access each element in a vector in turn, rather than using +indexing to access one element, we can iterate through all of the elements. +Listing 8-8 shows how to use a `for` loop to get immutable references to each +element in a vector of `i32` values and print them out: + +```rust +let v = vec![100, 32, 57]; +for i in &v { + println!("{}", i); +} +``` + +Listing 8-8: Printing each element in a vector by +iterating over the elements using a `for` loop + +We can also iterate over mutable references to each element in a mutable vector +if we want to make changes to all the elements. The `for` loop in Listing 8-9 +will add `50` to each element: + +```rust +let mut v = vec![100, 32, 57]; +for i in &mut v { + *i += 50; +} +``` + +Listing 8-9: Iterating over mutable references to +elements in a vector + +In order to change the value that the mutable reference refers to, before we +can use the `+=` operator with `i`, we have to use the dereference operator +(`*`) to get to the value. ### Using an Enum to Store Multiple Types At the beginning of this chapter, we said that vectors can only store values -that are all the same type. This can be inconvenient; there are definitely use -cases for needing to store a list of things of different types. Luckily, the -variants of an enum are all defined under the same enum type, so when we need -to store elements of a different type in a vector, we can define and use an -enum! - -For example, let’s say we want to get values from a row in a spreadsheet, where -some of the columns in the row contain integers, some floating point numbers, +that are the same type. This can be inconvenient; there are definitely use +cases for needing to store a list of items of different types. Fortunately, the +variants of an enum are defined under the same enum type, so when we need to +store elements of a different type in a vector, we can define and use an enum! + +For example, let’s say we want to get values from a row in a spreadsheet where +some of the columns in the row contain integers, some floating-point numbers, and some strings. We can define an enum whose variants will hold the different -value types, and then all of the enum variants will be considered the same -type, that of the enum. Then we can create a vector that holds that enum and -so, ultimately, holds different types: +value types, and then all the enum variants will be considered the same type, +that of the enum. Then we can create a vector that holds that enum and so, +ultimately, holds different types. We’ve demonstrated this in Listing 8-8: ```rust enum SpreadsheetCell { @@ -201,25 +258,24 @@ let row = vec![ ]; ``` -Listing 8-1: Defining an enum to be able to hold -different types of data in a vector - -The reason Rust needs to know exactly what types will be in the vector at -compile time is so that it knows exactly how much memory on the heap will be -needed to store each element. A secondary advantage to this is that we can be -explicit about what types are allowed in this vector. 
If Rust allowed a vector -to hold any type, there would be a chance that one or more of the types would -cause errors with the operations performed on the elements of the vector. Using -an enum plus a `match` means that Rust will ensure at compile time that we -always handle every possible case, as we discussed in Chapter 6. - -If you don’t know at the time that you’re writing a program the exhaustive set -of types the program will get at runtime to store in a vector, the enum -technique won’t work. Instead, you can use a trait object, which we’ll cover in -Chapter 17. - -Now that we’ve gone over some of the most common ways to use vectors, be sure -to take a look at the API documentation for all of the many useful methods -defined on `Vec` by the standard library. For example, in addition to `push` -there’s a `pop` method that will remove and return the last element. Let’s move -on to the next collection type: `String`! +Listing 8-8: Defining an `enum` to store values of +different types in one vector + +The reason Rust needs to know what types will be in the vector at compile time +is so it knows exactly how much memory on the heap will be needed to store each +element. A secondary advantage is that we can be explicit about what types are +allowed in this vector. If Rust allowed a vector to hold any type, there would +be a chance that one or more of the types would cause errors with the +operations performed on the elements of the vector. Using an enum plus a +`match` expression means that Rust will ensure at compile time that we always +handle every possible case, as discussed in Chapter 6. + +If you don’t know when you’re writing a program the exhaustive set of types the +program will get at runtime to store in a vector, the enum technique won’t +work. Instead, you can use a trait object, which we’ll cover in Chapter 17. + +Now that we’ve discussed some of the most common ways to use vectors, be sure +to review the API documentation for all the many useful methods defined on +`Vec` by the standard library. For example, in addition to `push`, a `pop` +method removes and returns the last element. Let’s move on to the next +collection type: `String`! diff --git a/src/doc/book/second-edition/src/ch08-02-strings.md b/src/doc/book/second-edition/src/ch08-02-strings.md index 4f9b6e1ca7..0b4d6e5c14 100644 --- a/src/doc/book/second-edition/src/ch08-02-strings.md +++ b/src/doc/book/second-edition/src/ch08-02-strings.md @@ -1,61 +1,62 @@ ## Strings -We’ve already talked about strings a bunch in Chapter 4, but let’s take a more -in-depth look at them now. Strings are an area that new Rustaceans commonly get -stuck on. This is due to a combination of three things: Rust’s propensity for -making sure to expose possible errors, strings being a more complicated data -structure than many programmers give them credit for, and UTF-8. These things -combine in a way that can seem difficult when coming from other languages. - -The reason strings are in the collections chapter is that strings are +We talked about strings in Chapter 4, but we’ll look at them in more depth now. +New Rustaceans commonly get stuck on strings due to a combination of three +concepts: Rust’s propensity for exposing possible errors, strings being a more +complicated data structure than many programmers give them credit for, and +UTF-8. These concepts combine in a way that can seem difficult when you’re +coming from other programming languages. 
+ +This discussion of strings is in the collections chapter because strings are implemented as a collection of bytes plus some methods to provide useful functionality when those bytes are interpreted as text. In this section, we’ll -talk about the operations on `String` that every collection type has, like +talk about the operations on `String` that every collection type has, such as creating, updating, and reading. We’ll also discuss the ways in which `String` is different than the other collections, namely how indexing into a `String` is complicated by the differences between how people and computers interpret `String` data. -### What is a String? +### What Is a String? -Before we can dig into those aspects, we need to talk about what exactly we -mean by the term *string*. Rust actually only has one string type in the core -language itself: `str`, the string slice, which is usually seen in its borrowed -form, `&str`. We talked about *string slices* in Chapter 4: these are a -reference to some UTF-8 encoded string data stored elsewhere. String literals, -for example, are stored in the binary output of the program, and are therefore -string slices. +We’ll first define what we mean by the term *string*. Rust has only one string +type in the core language, which is the string slice `str` that is usually seen +in its borrowed form `&str`. In Chapter 4, we talked about *string slices*, +which are references to some UTF-8 encoded string data stored elsewhere. String +literals, for example, are stored in the binary output of the program and are +therefore string slices. -The type called `String` is provided in Rust’s standard library rather than -coded into the core language, and is a growable, mutable, owned, UTF-8 encoded -string type. When Rustaceans talk about “strings” in Rust, they usually mean -both the `String` and the string slice `&str` types, not just one of those. -This section is largely about `String`, but both these types are used heavily -in Rust’s standard library. Both `String` and string slices are UTF-8 encoded. +The `String` type is provided in Rust’s standard library rather than coded into +the core language and is a growable, mutable, owned, UTF-8 encoded string type. +When Rustaceans refer to “strings” in Rust, they usually mean the `String` and +the string slice `&str` types, not just one of those types. Although this +section is largely about `String`, both types are used heavily in Rust’s +standard library and both `String` and string slices are UTF-8 encoded. Rust’s standard library also includes a number of other string types, such as -`OsString`, `OsStr`, `CString`, and `CStr`. Library crates may provide even +`OsString`, `OsStr`, `CString`, and `CStr`. Library crates can provide even more options for storing string data. Similar to the `*String`/`*Str` naming, they often provide an owned and borrowed variant, just like `String`/`&str`. -These string types may store different encodings or be represented in memory in -a different way, for example. We won’t be talking about these other string +These string types can store text in different encodings or be represented in +memory in a different way, for example. We won’t discuss these other string types in this chapter; see their API documentation for more about how to use them and when each is appropriate. 
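As a rough illustration of the distinction described above (this sketch is not one of the book’s numbered listings, and the `print_len` helper is invented purely for the example), the following code contrasts a borrowed `&str` with an owned, growable `String`; because a `&String` coerces to `&str`, a function that accepts `&str` works with both:

```rust
fn main() {
    // A string literal is a &str: a borrowed slice of UTF-8 data stored in the binary.
    let slice: &str = "hello";

    // String is an owned, growable, UTF-8 encoded buffer built from that data.
    let mut owned: String = String::from(slice);
    owned.push_str(", world");

    // &String coerces to &str, so one function signature serves both types.
    print_len(slice);
    print_len(&owned);
}

// Hypothetical helper used only for this illustration.
fn print_len(s: &str) {
    println!("{:?} is {} bytes long", s, s.len());
}
```

Note that `len` reports bytes rather than characters, which is the UTF-8 subtlety mentioned above.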
### Creating a New String Many of the same operations available with `Vec` are available with `String` as -well, starting with the `new` function to create a string, like so: +well, starting with the `new` function to create a string, shown in Listing 8-9: ```rust let mut s = String::new(); ``` -This creates a new empty string called `s` that we can then load data into. +Listing 8-9: Creating a new, empty `String` -Often, we’ll have some initial data that we’d like to start the string off +This line creates a new empty string called `s` that we can then load data +into. Often, we’ll have some initial data that we want to start the string with. For that, we use the `to_string` method, which is available on any type -that implements the `Display` trait, which string literals do: +that implements the `Display` trait, which string literals do. Listing 8-10 +shows two examples: ```rust let data = "initial contents"; @@ -66,78 +67,104 @@ let s = data.to_string(); let s = "initial contents".to_string(); ``` -This creates a string containing `initial contents`. +Listing 8-10: Using the `to_string` method to create a +`String` from a string literal + +This code creates a string containing `initial contents`. We can also use the function `String::from` to create a `String` from a string -literal. This is equivalent to using `to_string`: +literal. The code in Listing 8-11 is equivalent to the code from Listing 8-10 +that uses `to_string`: ```rust let s = String::from("initial contents"); ``` -Because strings are used for so many things, there are many different generic -APIs that can be used for strings, so there are a lot of options. Some of them -can feel redundant, but they all have their place! In this case, `String::from` -and `.to_string` end up doing the exact same thing, so which you choose is a -matter of style. +Listing 8-11: Using the `String::from` function to create +a `String` from a string literal + +Because strings are used for so many things, we can use many different generic +APIs for strings, providing us with a lot of options. Some of them can seem +redundant, but they all have their place! In this case, `String::from` and +`to_string` do the same thing, so which you choose is a matter of style. Remember that strings are UTF-8 encoded, so we can include any properly encoded -data in them: +data in them, as shown in Listing 8-12: ```rust -let hello = "السلام عليكم"; -let hello = "Dobrý den"; -let hello = "Hello"; -let hello = "שָׁלוֹם"; -let hello = "नमस्ते"; -let hello = "こんにちは"; -let hello = "안녕하세요"; -let hello = "你好"; -let hello = "Olá"; -let hello = "Здравствуйте"; -let hello = "Hola"; +let hello = String::from("السلام عليكم"); +let hello = String::from("Dobrý den"); +let hello = String::from("Hello"); +let hello = String::from("שָׁלוֹם"); +let hello = String::from("नमस्ते"); +let hello = String::from("こんにちは"); +let hello = String::from("안녕하세요"); +let hello = String::from("你好"); +let hello = String::from("Olá"); +let hello = String::from("Здравствуйте"); +let hello = String::from("Hola"); ``` +Listing 8-12: Storing greetings in different languages in +strings + +All of these are valid `String` values. + ### Updating a String -A `String` can grow in size and its contents can change just like the contents -of a `Vec`, by pushing more data into it. In addition, `String` has -concatenation operations implemented with the `+` operator for convenience. +A `String` can grow in size and its contents can change, just like the contents +of a `Vec`, by pushing more data into it. 
In addition, we can conveniently use +the `+` operator or the `format!` macro to concatenate `String` values together. -#### Appending to a String with Push +#### Appending to a String with `push_str` and `push` -We can grow a `String` by using the `push_str` method to append a string slice: +We can grow a `String` by using the `push_str` method to append a string slice, +as shown in Listing 8-13: ```rust let mut s = String::from("foo"); s.push_str("bar"); ``` -`s` will contain “foobar” after these two lines. The `push_str` method takes a +Listing 8-13: Appending a string slice to a `String` +using the `push_str` method + +After these two lines, `s` will contain `foobar`. The `push_str` method takes a string slice because we don’t necessarily want to take ownership of the -parameter. For example, it would be unfortunate if we weren’t able to use `s2` -after appending its contents to `s1`: +parameter. For example, the code in Listing 8-14 shows that it would be +unfortunate if we weren’t able to use `s2` after appending its contents to `s1`: ```rust let mut s1 = String::from("foo"); -let s2 = String::from("bar"); +let s2 = "bar"; s1.push_str(&s2); +println!("s2 is {}", s2); ``` -The `push` method is defined to have a single character as a parameter and add -it to the `String`: +Listing 8-14: Using a string slice after appending its +contents to a `String` + +If the `push_str` method took ownership of `s2`, we wouldn’t be able to print +out its value on the last line. However, this code works as we’d expect! + +The `push` method takes a single character as a parameter and adds it to the +`String`. Listing 8-15 shows code that adds an l to a `String` using the `push` +method: ```rust let mut s = String::from("lo"); s.push('l'); ``` -After this, `s` will contain “lol”. +Listing 8-15: Adding one character to a `String` value +using `push` + +As a result of this code, `s` will contain `lol`. -#### Concatenation with the + Operator or the `format!` Macro +#### Concatenation with the `+` Operator or the `format!` Macro -Often, we’ll want to combine two existing strings together. One way is to use -the `+` operator like this: +Often, we’ll want to combine two existing strings. One way is to use the `+` +operator, as shown in Listing 8-16: ```rust let s1 = String::from("Hello, "); @@ -145,8 +172,11 @@ let s2 = String::from("world!"); let s3 = s1 + &s2; // Note that s1 has been moved here and can no longer be used ``` -After this code the String `s3` will contain `Hello, world!`. The reason that -`s1` is no longer valid after the addition and the reason that we used a +Listing 8-16: Using the `+` operator to combine two +`String` values into a new `String` value + +As a result of this code, the string `s3` will contain `Hello, world!`. The +reason `s1` is no longer valid after the addition and the reason we used a reference to `s2` has to do with the signature of the method that gets called when we use the `+` operator. The `+` operator uses the `add` method, whose signature looks something like this: @@ -155,32 +185,32 @@ signature looks something like this: fn add(self, s: &str) -> String { ``` -This isn’t the exact signature that’s in the standard library; there `add` is -defined using generics. Here, we’re looking at the signature of `add` with -concrete types substituted for the generic ones, which is what happens when we -call this method with `String` values. We’ll be discussing generics in -Chapter 10. 
This signature gives us the clues we need to understand the tricky -bits of the `+` operator. - -First of all, `s2` has an `&`, meaning that we are adding a *reference* of the -second string to the first string. This is because of the `s` parameter in the -`add` function: we can only add a `&str` to a `String`, we can’t add two -`String` values together. But wait - the type of `&s2` is `&String`, not -`&str`, as specified in the second parameter to `add`. Why does our example -compile? We are able to use `&s2` in the call to `add` because a `&String` -argument can be *coerced* into a `&str` - when the `add` function is called, -Rust uses something called a *deref coercion*, which you could think of here as -turning `&s2` into `&s2[..]` for use in the `add` function. We’ll discuss deref -coercion in more depth in Chapter 15. Because `add` does not take ownership of -the parameter, `s2` will still be a valid `String` after this operation. +This isn’t the exact signature that’s in the standard library: in the standard +library, `add` is defined using generics. Here, we’re looking at the signature +of `add` with concrete types substituted for the generic ones, which is what +happens when we call this method with `String` values. We’ll discuss generics +in Chapter 10. This signature gives us the clues we need to understand the +tricky bits of the `+` operator. + +First, `s2` has an `&`, meaning that we’re adding a *reference* of the second +string to the first string because of the `s` parameter in the `add` function: +we can only add a `&str` to a `String`; we can’t add two `String` values +together. But wait - the type of `&s2` is `&String`, not `&str`, as specified +in the second parameter to `add`. Why does Listing 8-16 compile? We are able to +use `&s2` in the call to `add` because the compiler can *coerce* the `&String` +argument into a `&str`. When we call the `add` method, Rust uses something +called a *deref coercion*, which you could think of here as turning `&s2` into +`&s2[..]`. We’ll discuss deref coercion in more depth in Chapter 15. Because +`add` does not take ownership of the `s` parameter, `s2` will still be a valid +`String` after this operation. Second, we can see in the signature that `add` takes ownership of `self`, -because `self` does *not* have an `&`. This means `s1` in the above example -will be moved into the `add` call and no longer be valid after that. So while -`let s3 = s1 + &s2;` looks like it will copy both strings and create a new one, -this statement actually takes ownership of `s1`, appends a copy of the contents -of `s2`, then returns ownership of the result. In other words, it looks like -it’s making a lot of copies, but isn’t: the implementation is more efficient +because `self` does *not* have an `&`. This means `s1` in Listing 8-16 will be +moved into the `add` call and no longer be valid after that. So although `let +s3 = s1 + &s2;` looks like it will copy both strings and create a new one, this +statement actually takes ownership of `s1`, appends a copy of the contents of +`s2`, and then returns ownership of the result. In other words, it looks like +it’s making a lot of copies but isn’t: the implementation is more efficient than copying. If we need to concatenate multiple strings, the behavior of `+` gets unwieldy: @@ -193,8 +223,8 @@ let s3 = String::from("toe"); let s = s1 + "-" + &s2 + "-" + &s3; ``` -`s` will be “tic-tac-toe” at this point. With all of the `+` and `"` -characters, it gets hard to see what’s going on. 
For more complicated string +At this point, `s` will be `tic-tac-toe`. With all of the `+` and `"` +characters, it’s difficult to see what’s going on. For more complicated string combining, we can use the `format!` macro: ```rust @@ -205,24 +235,27 @@ let s3 = String::from("toe"); let s = format!("{}-{}-{}", s1, s2, s3); ``` -This code will also set `s` to “tic-tac-toe”. The `format!` macro works in the -same way as `println!`, but instead of printing the output to the screen, it -returns a `String` with the contents. This version is much easier to read, and -also does not take ownership of any of its parameters. +This code also sets `s` to `tic-tac-toe`. The `format!` macro works in the same +way as `println!`, but instead of printing the output to the screen, it returns +a `String` with the contents. The version of the code using `format!` is much +easier to read and also doesn’t take ownership of any of its parameters. ### Indexing into Strings -In many other languages, accessing individual characters in a string by -referencing them by index is a valid and common operation. In Rust, however, if -we try to access parts of a `String` using indexing syntax, we’ll get an error. -That is, this code: +In many other programming languages, accessing individual characters in a +string by referencing them by index is a valid and common operation. However, +if we try to access parts of a `String` using indexing syntax in Rust, we’ll +get an error. Consider the code in Listing 8-17: ```rust,ignore let s1 = String::from("hello"); let h = s1[0]; ``` -will result in this error: +Listing 8-17: Attempting to use indexing syntax with a +String + +This code will result in the following error: ```text error: the trait bound `std::string::String: std::ops::Index<_>` is not @@ -233,33 +266,31 @@ satisfied [--explain E0277] note: the type `std::string::String` cannot be indexed by `_` ``` -The error and the note tell the story: Rust strings don’t support indexing. So -the follow-up question is, why not? In order to answer that, we have to talk a -bit about how Rust stores strings in memory. +The error and the note tell the story: Rust strings don’t support indexing. But +why not? To answer that question, we need to discuss how Rust stores strings in +memory. #### Internal Representation -A `String` is a wrapper over a `Vec`. Let’s take a look at some of our -properly-encoded UTF-8 example strings from before. First, this one: +A `String` is a wrapper over a `Vec`. Let’s look at some of our properly +encoded UTF-8 example strings from Listing 8-12. First, this one: ```rust let len = String::from("Hola").len(); ``` In this case, `len` will be four, which means the `Vec` storing the string -“Hola” is four bytes long: each of these letters takes one byte when encoded in -UTF-8. What about this example, though? +“Hola” is four bytes long. Each of these letters takes one byte when encoded in +UTF-8. But what about the following line? ```rust let len = String::from("Здравствуйте").len(); ``` -A person asked how long the string is might say 12. However, Rust’s answer -is 24. This is the number of bytes that it takes to encode “Здравствуйте” in -UTF-8, since each Unicode scalar value takes two bytes of storage. Therefore, -an index into the string’s bytes will not always correlate to a valid Unicode -scalar value. - +Asked how long the string is, you might say 12. 
However, Rust’s answer is 24: +that’s the number of bytes it takes to encode “Здравствуйте” in UTF-8, because +each Unicode scalar value takes two bytes of storage. Therefore, an index into +the string’s bytes will not always correlate to a valid Unicode scalar value. To demonstrate, consider this invalid Rust code: ```rust,ignore @@ -270,19 +301,20 @@ let answer = &hello[0]; What should the value of `answer` be? Should it be `З`, the first letter? When encoded in UTF-8, the first byte of `З` is `208`, and the second is `151`, so `answer` should in fact be `208`, but `208` is not a valid character on its -own. Returning `208` is likely not what a person would want if they asked for -the first letter of this string, but that’s the only data that Rust has at byte -index 0. Returning the byte value is probably not what people want, even with -only Latin letters: `&"hello"[0]` would return `104`, not `h`. To avoid -returning an unexpected value and causing bugs that might not be discovered -immediately, Rust chooses to not compile this code at all and prevent -misunderstandings earlier. +own. Returning `208` is likely not what a user would want if they asked for the +first letter of this string; however, that’s the only data that Rust has at +byte index 0. Returning the byte value is probably not what users want, even if +the string contains only Latin letters: if `&"hello"[0]` was valid code that +returned the byte value, it would return `104`, not `h`. To avoid returning an +unexpected value and causing bugs that might not be discovered immediately, +Rust doesn’t compile this code at all and prevents misunderstandings earlier in +the development process. -#### Bytes and Scalar Values and Grapheme Clusters! Oh my! +#### Bytes and Scalar Values and Grapheme Clusters! Oh My! -This leads to another point about UTF-8: there are really three relevant ways -to look at strings, from Rust’s perspective: as bytes, scalar values, and -grapheme clusters (the closest thing to what people would call *letters*). +Another point about UTF-8 is that there are actually three relevant ways to +look at strings from Rust’s perspective: as bytes, scalar values, and grapheme +clusters (the closest thing to what we would call *letters*). If we look at the Hindi word “नमस्ते” written in the Devanagari script, it is ultimately stored as a `Vec` of `u8` values that looks like this: @@ -292,7 +324,7 @@ ultimately stored as a `Vec` of `u8` values that looks like this: 224, 165, 135] ``` -That’s 18 bytes, and is how computers ultimately store this data. If we look at +That’s 18 bytes and is how computers ultimately store this data. If we look at them as Unicode scalar values, which are what Rust’s `char` type is, those bytes look like this: @@ -300,10 +332,10 @@ bytes look like this: ['न', 'म', 'स', '्', 'त', 'े'] ``` -There are six `char` values here, but the fourth and sixth are not letters, +There are six `char` values here, but the fourth and sixth are not letters: they’re diacritics that don’t make sense on their own. Finally, if we look at them as grapheme clusters, we’d get what a person would call the four letters -that make up this word: +that make up the Hindi word: ```text ["न", "म", "स्", "ते"] @@ -313,19 +345,21 @@ Rust provides different ways of interpreting the raw string data that computers store so that each program can choose the interpretation it needs, no matter what human language the data is in. 
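
To make the first two of those views concrete, here is a small sketch (not one of the book's numbered listings) that counts the same word both ways using standard library methods:

```rust
fn main() {
    let namaste = "नमस्ते";

    // Viewed as raw UTF-8 data: 18 bytes.
    println!("bytes: {}", namaste.bytes().count());

    // Viewed as Unicode scalar values (Rust's `char` type): 6 values,
    // including the two diacritics that aren't letters on their own.
    println!("chars: {}", namaste.chars().count());

    // The third view, grapheme clusters (the four “letters”), isn't
    // provided by the standard library; it requires an external crate.
}
```
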
-A final reason Rust does not allow you to index into a `String` to get a +A final reason Rust doesn’t allow us to index into a `String` to get a character is that indexing operations are expected to always take constant time -(O(1)). It isn’t possible to guarantee that performance with a `String`, -though, since Rust would have to walk through the contents from the beginning -to the index to determine how many valid characters there were. +(O(1)). But it isn’t possible to guarantee that performance with a `String`, +because Rust would have to walk through the contents from the beginning to the +index to determine how many valid characters there were. ### Slicing Strings -Because it’s not clear what the return type of string indexing should be, and -it is often a bad idea to index into a string, Rust dissuades you from doing so -by asking you to be more specific if you really need it. The way you can be -more specific than indexing using `[]` with a single number is using `[]` with -a range to create a string slice containing particular bytes: +Indexing into a string is often a bad idea because it’s not clear what the +return type of the string indexing operation should be: a byte value, a +character, a grapheme cluster, or a string slice. Therefore, Rust asks you to +be more specific if you really need to use indices to create string slices. To +be more specific in your indexing and indicate that you want a string slice, +rather than indexing using `[]` with a single number, you can use `[]` with a +range to create a string slice containing particular bytes: ```rust let hello = "Здравствуйте"; @@ -334,27 +368,28 @@ let s = &hello[0..4]; ``` Here, `s` will be a `&str` that contains the first four bytes of the string. -Earlier, we mentioned that each of these characters was two bytes, so that -means that `s` will be “Зд”. +Earlier, we mentioned that each of these characters was two bytes, which means +`s` will be `Зд`. -What would happen if we did `&hello[0..1]`? The answer: it will panic at -runtime, in the same way that accessing an invalid index in a vector does: +What would happen if we used `&hello[0..1]`? The answer: Rust will panic at +runtime in the same way that accessing an invalid index in a vector does: ```text thread 'main' panicked at 'index 0 and/or 1 in `Здравствуйте` do not lie on character boundary', ../src/libcore/str/mod.rs:1694 ``` -You should use this with caution, since it can cause your program to crash. +You should use ranges to create string slices with caution, because it can +crash your program. ### Methods for Iterating Over Strings -Luckily, there are other ways we can access elements in a String. +Fortunately, we can access elements in a string in other ways. If we need to perform operations on individual Unicode scalar values, the best -way to do so is to use the `chars` method. Calling `chars` on “नमस्ते” -separates out and returns six values of type `char`, and you can iterate over -the result in order to access each element: +way to do so is to use the `chars` method. Calling `chars` on “नमस्ते” separates +out and returns six values of type `char`, and you can iterate over the result +in order to access each element: ```rust for c in "नमस्ते".chars() { @@ -362,7 +397,7 @@ for c in "नमस्ते".chars() { } ``` -This code will print: +This code will print the following: ```text न @@ -392,22 +427,22 @@ This code will print the 18 bytes that make up this `String`, starting with: // ... 
etc ``` -But make sure to remember that valid Unicode scalar values may be made up of -more than one byte. +But be sure to remember that valid Unicode scalar values may be made up of more +than one byte. Getting grapheme clusters from strings is complex, so this functionality is not -provided by the standard library. There are crates available on crates.io if -this is the functionality you need. +provided by the standard library. Crates are available on +[crates.io](https://crates.io) if this is the functionality you need. -### Strings are Not so Simple +### Strings Are Not So Simple To summarize, strings are complicated. Different programming languages make different choices about how to present this complexity to the programmer. Rust has chosen to make the correct handling of `String` data the default behavior -for all Rust programs, which does mean programmers have to put more thought -into handling UTF-8 data upfront. This tradeoff exposes more of the complexity -of strings than other programming languages do, but this will prevent you from -having to handle errors involving non-ASCII characters later in your -development lifecycle. +for all Rust programs, which means programmers have to put more thought into +handling UTF-8 data upfront. This trade-off exposes more of the complexity of +strings than other programming languages do but prevents you from having to +handle errors involving non-ASCII characters later in your development life +cycle. Let’s switch to something a bit less complex: hash maps! diff --git a/src/doc/book/second-edition/src/ch08-03-hash-maps.md b/src/doc/book/second-edition/src/ch08-03-hash-maps.md index d0695d2b1b..6ac862a32a 100644 --- a/src/doc/book/second-edition/src/ch08-03-hash-maps.md +++ b/src/doc/book/second-edition/src/ch08-03-hash-maps.md @@ -4,26 +4,25 @@ The last of our common collections is the *hash map*. The type `HashMap` stores a mapping of keys of type `K` to values of type `V`. It does this via a *hashing function*, which determines how it places these keys and values into memory. Many different programming languages support this kind of data -structure, but often with a different name: hash, map, object, hash table, or -associative array, just to name a few. +structure, but often use a different name, such as hash, map, object, hash +table, or associative array, just to name a few. -Hash maps are useful for when you want to be able to look up data not by an -index, as you can with vectors, but by using a key that can be of any type. For -example, in a game, you could keep track of each team’s score in a hash map -where each key is a team’s name and the values are each team’s score. Given a -team name, you can retrieve their score. +Hash maps are useful for when you want to look up data not by an index, as you +can with vectors, but by using a key that can be of any type. For example, in a +game, you could keep track of each team’s score in a hash map where each key is +a team’s name and the values are each team’s score. Given a team name, you can +retrieve its score. -We’ll go over the basic API of hash maps in this chapter, but there are many -more goodies hiding in the functions defined on `HashMap` by the standard -library. As always, check the standard library documentation for more -information. +We’ll go over the basic API of hash maps in this section, but many more goodies +are hiding in the functions defined on `HashMap` by the standard library. +As always, check the standard library documentation for more information. 
### Creating a New Hash Map -We can create an empty `HashMap` with `new`, and add elements with `insert`. -Here we’re keeping track of the scores of two teams whose names are Blue and -Yellow. The Blue team will start with 10 points and the Yellow team starts with -50: +We can create an empty hash map with `new` and add elements with `insert`. In +Listing 8-18, we’re keeping track of the scores of two teams whose names are +Blue and Yellow. The Blue team will start with 10 points, and the Yellow team +starts with 50: ```rust use std::collections::HashMap; @@ -34,6 +33,9 @@ scores.insert(String::from("Blue"), 10); scores.insert(String::from("Yellow"), 50); ``` +Listing 8-18: Creating a new hash map and inserting some +keys and values + Note that we need to first `use` the `HashMap` from the collections portion of the standard library. Of our three common collections, this one is the least often used, so it’s not included in the features imported automatically in the @@ -47,11 +49,11 @@ must have the same type. Another way of constructing a hash map is by using the `collect` method on a vector of tuples, where each tuple consists of a key and its value. The -`collect` method gathers up data into a number of collection types, including +`collect` method gathers data into a number of collection types, including `HashMap`. For example, if we had the team names and initial scores in two separate vectors, we can use the `zip` method to create a vector of tuples where “Blue” is paired with 10, and so forth. Then we can use the `collect` -method to turn that vector of tuples into a `HashMap`: +method to turn that vector of tuples into a `HashMap` as shown in Listing 8-19: ```rust use std::collections::HashMap; @@ -62,17 +64,20 @@ let initial_scores = vec![10, 50]; let scores: HashMap<_, _> = teams.iter().zip(initial_scores.iter()).collect(); ``` +Listing 8-19: Creating a hash map from a list of teams +and a list of scores + The type annotation `HashMap<_, _>` is needed here because it’s possible to `collect` into many different data structures, and Rust doesn’t know which you want unless you specify. For the type parameters for the key and value types, -however, we use underscores and Rust can infer the types that the hash map -contains based on the types of the data in the vector. +however, we use underscores, and Rust can infer the types that the hash map +contains based on the types of the data in the vectors. ### Hash Maps and Ownership For types that implement the `Copy` trait, like `i32`, the values are copied into the hash map. For owned values like `String`, the values will be moved and -the hash map will be the owner of those values: +the hash map will be the owner of those values as demonstrated in Listing 8-20: ```rust use std::collections::HashMap; @@ -82,20 +87,25 @@ let field_value = String::from("Blue"); let mut map = HashMap::new(); map.insert(field_name, field_value); -// field_name and field_value are invalid at this point +// field_name and field_value are invalid at this point, try using them and +// see what compiler error you get! ``` -We would not be able to use the bindings `field_name` and `field_value` after -they have been moved into the hash map with the call to `insert`. +Listing 8-20: Showing that keys and values are owned by +the hash map once they’re inserted -If we insert references to values into the hash map, the values themselves will -not be moved into the hash map. 
The values that the references point to must be -valid for at least as long as the hash map is valid, though. We will talk more -about these issues in the Lifetimes section of Chapter 10. +We aren’t able to use the variables `field_name` and `field_value` after +they’ve been moved into the hash map with the call to `insert`. + +If we insert references to values into the hash map, the values won’t be moved +into the hash map. The values that the references point to must be valid for at +least as long as the hash map is valid. We’ll talk more about these issues in +the “Validating References with Lifetimes” section in Chapter 10. ### Accessing Values in a Hash Map -We can get a value out of the hash map by providing its key to the `get` method: +We can get a value out of the hash map by providing its key to the `get` method +as shown in Listing 8-21: ```rust use std::collections::HashMap; @@ -109,11 +119,14 @@ let team_name = String::from("Blue"); let score = scores.get(&team_name); ``` +Listing 8-21: Accessing the score for the Blue team +stored in the hash map + Here, `score` will have the value that’s associated with the Blue team, and the result will be `Some(&10)`. The result is wrapped in `Some` because `get` -returns an `Option<&V>`; if there’s no value for that key in the hash map, `get` -will return `None`. The program will need to handle the `Option` in one of the -ways that we covered in Chapter 6. +returns an `Option<&V>`; if there’s no value for that key in the hash map, +`get` will return `None`. The program will need to handle the `Option` in one +of the ways that we covered in Chapter 6. We can iterate over each key/value pair in a hash map in a similar manner as we do with vectors, using a `for` loop: @@ -131,7 +144,7 @@ for (key, value) in &scores { } ``` -This will print each pair, in an arbitrary order: +This code will print each pair in an arbitrary order: ```text Yellow: 50 @@ -140,22 +153,22 @@ Blue: 10 ### Updating a Hash Map -While the number of keys and values is growable, each individual key can only -have one value associated with it at a time. When we want to change the data in -a hash map, we have to decide how to handle the case when a key already has a -value assigned. We could choose to replace the old value with the new value, -completely disregarding the old value. We could choose to keep the old value -and ignore the new value, and only add the new value if the key *doesn’t* -already have a value. Or we could combine the old value and the new value. -Let’s look at how to do each of these! +Although the number of keys and values is growable, each key can only have one +value associated with it at a time. When we want to change the data in a hash +map, we have to decide how to handle the case when a key already has a value +assigned. We could replace the old value with the new value, completely +disregarding the old value. We could keep the old value and ignore the new +value, and only add the new value if the key *doesn’t* already have a value. Or +we could combine the old value and the new value. Let’s look at how to do each +of these! #### Overwriting a Value -If we insert a key and a value into a hash map, then insert that same key with -a different value, the value associated with that key will be replaced. 
Even -though this following code calls `insert` twice, the hash map will only contain -one key/value pair because we’re inserting the value for the Blue team’s key -both times: +If we insert a key and a value into a hash map, and then insert that same key +with a different value, the value associated with that key will be replaced. +Even though the code in Listing 8-22 calls `insert` twice, the hash map will +only contain one key/value pair because we’re inserting the value for the Blue +team’s key both times: ```rust use std::collections::HashMap; @@ -168,18 +181,22 @@ scores.insert(String::from("Blue"), 25); println!("{:?}", scores); ``` -This will print `{"Blue": 25}`. The original value of 10 has been overwritten. +Listing 8-22: Replacing a value stored with a particular +key + +This code will print `{"Blue": 25}`. The original value of `10` has been +overwritten. #### Only Insert If the Key Has No Value -It’s common to want to check if a particular key has a value and, if it does -not, insert a value for it. Hash maps have a special API for this, called -`entry`, that takes the key we want to check as an argument. The return value -of the `entry` function is an enum, `Entry`, that represents a value that might -or might not exist. Let’s say that we want to check if the key for the Yellow +It’s common to check whether a particular key has a value, and if it doesn’t, +insert a value for it. Hash maps have a special API for this called `entry` +that takes the key we want to check as a parameter. The return value of the +`entry` function is an enum called `Entry` that represents a value that might +or might not exist. Let’s say we want to check whether the key for the Yellow team has a value associated with it. If it doesn’t, we want to insert the value -50, and the same for the Blue team. With the entry API, the code for this looks -like: +50, and the same for the Blue team. Using the `entry` API, the code looks like +Listing 8-23: ```rust use std::collections::HashMap; @@ -193,23 +210,29 @@ scores.entry(String::from("Blue")).or_insert(50); println!("{:?}", scores); ``` -The `or_insert` method on `Entry` returns the value for the corresponding -`Entry` key if it exists, and if not, inserts its argument as the new value for -this key and returns the modified `Entry`. This is much cleaner than writing -the logic ourselves, and in addition, plays more nicely with the borrow checker. +Listing 8-23: Using the `entry` method to only insert if +the key does not already have a value + +The `or_insert` method on `Entry` is defined to return the value for the +corresponding `Entry` key if that key exists, and if not, inserts the parameter +as the new value for this key and returns the modified `Entry`. This technique +is much cleaner than writing the logic ourselves, and in addition, plays more +nicely with the borrow checker. -This code will print `{"Yellow": 50, "Blue": 10}`. The first call to `entry` -will insert the key for the Yellow team with the value 50, since the Yellow -team doesn’t have a value already. The second call to `entry` will not change -the hash map since the Blue team already has the value 10. +Running the code in Listing 8-23 will print `{"Yellow": 50, "Blue": 10}`. The +first call to `entry` will insert the key for the Yellow team with the value +`50` because the Yellow team doesn’t have a value already. The second call to +`entry` will not change the hash map because the Blue team already has the +value `10`. 
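
For comparison, here is a rough editorial sketch (not from the book) of what that logic looks like without `entry`: check for the key with `contains_key`, then call `insert` only if it was absent. It needs an explicit branch and two lookups per key, which is what the `entry` API avoids:

```rust
use std::collections::HashMap;

fn main() {
    let mut scores = HashMap::new();
    scores.insert(String::from("Blue"), 10);

    // Roughly what `scores.entry(team).or_insert(50)` replaces.
    for team in vec![String::from("Yellow"), String::from("Blue")] {
        if !scores.contains_key(&team) {
            scores.insert(team, 50);
        }
    }

    // Prints {"Yellow": 50, "Blue": 10} (in some order): Blue keeps its
    // original value, Yellow gets the default of 50.
    println!("{:?}", scores);
}
```
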
-#### Update a Value Based on the Old Value +#### Updating a Value Based on the Old Value -Another common use case for hash maps is to look up a key’s value then update -it, based on the old value. For instance, if we wanted to count how many times -each word appeared in some text, we could use a hash map with the words as keys -and increment the value to keep track of how many times we’ve seen that word. -If this is the first time we’ve seen a word, we’ll first insert the value `0`. +Another common use case for hash maps is to look up a key’s value and then +update it based on the old value. For instance, Listing 8-24 shows code that +counts how many times each word appears in some text. We use a hash map with +the words as keys and increment the value to keep track of how many times we’ve +seen that word. If it’s the first time we’ve seen a word, we’ll first insert +the value `0`: ```rust use std::collections::HashMap; @@ -226,47 +249,51 @@ for word in text.split_whitespace() { println!("{:?}", map); ``` -This will print `{"world": 2, "hello": 1, "wonderful": 1}`. The `or_insert` -method actually returns a mutable reference (`&mut V`) to the value for this -key. Here we store that mutable reference in the `count` variable, so in order -to assign to that value we must first dereference `count` using the asterisk -(`*`). The mutable reference goes out of scope at the end of the `for` loop, so -all of these changes are safe and allowed by the borrowing rules. +Listing 8-24: Counting occurrences of words using a hash +map that stores words and counts + +This code will print `{"world": 2, "hello": 1, "wonderful": 1}`. The +`or_insert` method actually returns a mutable reference (`&mut V`) to the value +for this key. Here we store that mutable reference in the `count` variable, so +in order to assign to that value we must first dereference `count` using the +asterisk (`*`). The mutable reference goes out of scope at the end of the `for` +loop, so all of these changes are safe and allowed by the borrowing rules. ### Hashing Function By default, `HashMap` uses a cryptographically secure hashing function that can provide resistance to Denial of Service (DoS) attacks. This is not the fastest -hashing algorithm out there, but the tradeoff for better security that comes +hashing algorithm available, but the trade-off for better security that comes with the drop in performance is worth it. If you profile your code and find that the default hash function is too slow for your purposes, you can switch to another function by specifying a different *hasher*. A hasher is a type that -implements the `BuildHasher` trait. We’ll be talking about traits and how to +implements the `BuildHasher` trait. We’ll talk about traits and how to implement them in Chapter 10. You don’t necessarily have to implement your own -hasher from scratch; crates.io has libraries that others have shared that -provide hashers implementing many common hashing algorithms. +hasher from scratch; [crates.io](https://crates.io) has libraries shared by +other Rust users that provide hashers implementing many common hashing +algorithms. ## Summary -Vectors, strings, and hash maps will take you far in programs where you need to -store, access, and modify data. Here are some exercises you should now be -equipped to solve: +Vectors, strings, and hash maps will provide a large amount of functionality +that you need in programs where you need to store, access, and modify data. 
+Here are some exercises you should now be equipped to solve: * Given a list of integers, use a vector and return the mean (average), median (when sorted, the value in the middle position), and mode (the value that occurs most often; a hash map will be helpful here) of the list. -* Convert strings to Pig Latin, where the first consonant of each word is moved - to the end of the word with an added “ay”, so “first” becomes “irst-fay”. - Words that start with a vowel get “hay” added to the end instead (“apple” - becomes “apple-hay”). Remember about UTF-8 encoding! +* Convert strings to pig latin. The first consonant of each word is moved to + the end of the word and “ay” is added, so “first” becomes “irst-fay.” Words + that start with a vowel have “hay” added to the end instead (“apple” becomes + “apple-hay”). Keep in mind the details about UTF-8 encoding! * Using a hash map and vectors, create a text interface to allow a user to add - employee names to a department in the company. For example, “Add Sally to - Engineering” or “Add Amir to Sales”. Then let the user retrieve a list of all + employee names to a department in a company. For example, “Add Sally to + Engineering” or “Add Amir to Sales.” Then let the user retrieve a list of all people in a department or all people in the company by department, sorted alphabetically. -The standard library API documentation describes methods these types have that -will be helpful for these exercises! +The standard library API documentation describes methods that vectors, strings, +and hash maps have that will be helpful for these exercises! -We’re getting into more complex programs where operations can fail, which means -it’s a perfect time to go over error handling next! +We’re getting into more complex programs in which operations can fail; so, it’s +a perfect time to discuss error handling next! diff --git a/src/doc/book/second-edition/src/ch09-00-error-handling.md b/src/doc/book/second-edition/src/ch09-00-error-handling.md index 3fbaa31515..d8ead01c58 100644 --- a/src/doc/book/second-edition/src/ch09-00-error-handling.md +++ b/src/doc/book/second-edition/src/ch09-00-error-handling.md @@ -2,22 +2,23 @@ Rust’s commitment to reliability extends to error handling. Errors are a fact of life in software, so Rust has a number of features for handling situations -in which something goes wrong. In many cases, Rust will require you to -acknowledge the possibility of an error occurring and take some action before -your code will compile. This makes your program more robust by ensuring that -you won’t only discover errors after you’ve deployed your code to production. +in which something goes wrong. In many cases, Rust requires you to acknowledge +the possibility of an error occurring and take some action before your code +will compile. This requirement makes your program more robust by ensuring that +you’ll discover errors and handle them appropriately before you’ve deployed +your code to production! Rust groups errors into two major categories: *recoverable* and *unrecoverable* -errors. Recoverable errors are situations when it’s usually reasonable to -report the problem to the user and retry the operation, like a file not being -found. Unrecoverable errors are always symptoms of bugs, like trying to access -a location beyond the end of an array. +errors. Recoverable errors are situations in which it’s reasonable to report +the problem to the user and retry the operation, like a file not found error. 
+Unrecoverable errors are always symptoms of bugs, like trying to access a +location beyond the end of an array. -Most languages don’t distinguish between the two kinds of errors, and handle +Most languages don’t distinguish between these two kinds of errors and handle both in the same way using mechanisms like exceptions. Rust doesn’t have exceptions. Instead, it has the value `Result` for recoverable errors and the `panic!` macro that stops execution when it encounters unrecoverable -errors. This chapter will cover calling `panic!` first, then talk about -returning `Result` values. Finally, we’ll discuss considerations to take -into account when deciding whether to try to recover from an error or to stop -execution. +errors. This chapter covers calling `panic!` first and then talks about +returning `Result` values. Additionally, we’ll explore considerations to +take into account when deciding whether to try to recover from an error or to +stop execution. diff --git a/src/doc/book/second-edition/src/ch09-01-unrecoverable-errors-with-panic.md b/src/doc/book/second-edition/src/ch09-01-unrecoverable-errors-with-panic.md index 895f9e15fa..678a590e23 100644 --- a/src/doc/book/second-edition/src/ch09-01-unrecoverable-errors-with-panic.md +++ b/src/doc/book/second-edition/src/ch09-01-unrecoverable-errors-with-panic.md @@ -1,30 +1,31 @@ ## Unrecoverable Errors with `panic!` -Sometimes, bad things happen, and there’s nothing that you can do about it. For -these cases, Rust has the `panic!` macro. When this macro executes, your -program will print a failure message, unwind and clean up the stack, and then -quit. The most common situation this occurs in is when a bug of some kind has -been detected and it’s not clear to the programmer how to handle the error. - -> ### Unwinding the Stack Versus Aborting on Panic +Sometimes, bad things happen in your code, and there’s nothing you can do about +it. In these cases, Rust has the `panic!` macro. When the `panic!` macro +executes, your program will print a failure message, unwind and clean up the +stack, and then quit. The most common situation this occurs in is when a bug of +some kind has been detected, and it’s not clear to the programmer how to handle +the error. + +> ### Unwinding the Stack or Aborting in Response to a `panic!` > -> By default, when a `panic!` occurs, the program starts -> *unwinding*, which means Rust walks back up the stack and cleans up the data -> from each function it encounters, but this walking and cleanup is a lot of -> work. The alternative is to immediately *abort*, which ends the program -> without cleaning up. Memory that the program was using will then need to be -> cleaned up by the operating system. If in your project you need to make the -> resulting binary as small as possible, you can switch from unwinding to -> aborting on panic by adding `panic = 'abort'` to the appropriate `[profile]` -> sections in your *Cargo.toml*. For example, if you want to abort on panic in -> release mode: +> By default, when a `panic!` occurs, the program starts *unwinding*, which +> means Rust walks back up the stack and cleans up the data from each function +> it encounters. But this walking back and cleanup is a lot of work. The +> alternative is to immediately *abort*, which ends the program without +> cleaning up. Memory that the program was using will then need to be cleaned +> up by the operating system. 
If in your project you need to make the resulting +> binary as small as possible, you can switch from unwinding to aborting on +> panic by adding `panic = 'abort'` to the appropriate `[profile]` sections in +> your *Cargo.toml* file. For example, if you want to abort on panic in release +> mode, add this: > > ```toml > [profile.release] > panic = 'abort' > ``` -Let’s try calling `panic!` with a simple program: +Let’s try calling `panic!` in a simple program: Filename: src/main.rs @@ -34,7 +35,7 @@ fn main() { } ``` -If you run it, you’ll see something like this: +When you run the program, you’ll see something like this: ```text $ cargo run @@ -46,23 +47,26 @@ note: Run with `RUST_BACKTRACE=1` for a backtrace. error: Process didn't exit successfully: `target/debug/panic` (exit code: 101) ``` -The last three lines contain the error message caused by the call to `panic!`. -The first line shows our panic message and the place in our source code where -the panic occurred: *src/main.rs:2* indicates that it’s the second line of our -*src/main.rs* file. +The call to `panic!` causes the error message contained in the last three +lines. The first line shows our panic message and the place in our source code +where the panic occurred: *src/main.rs:2* indicates that it’s the second line +of our *src/main.rs* file. -In this case, the line indicated is part of our code, and if we go to that line -we see the `panic!` macro call. In other cases, the `panic!` call might be in -code that our code calls. The filename and line number reported by the error -message will be someone else’s code where the `panic!` macro is called, not the -line of our code that eventually led to the `panic!`. We can use the backtrace -of the functions the `panic!` call came from to figure this out. +In this case, the line indicated is part of our code, and if we go to that +line, we see the `panic!` macro call. In other cases, the `panic!` call might +be in code that our code calls. The filename and line number reported by the +error message will be someone else’s code where the `panic!` macro is called, +not the line of our code that eventually led to the `panic!` call. We can use +the backtrace of the functions the `panic!` call came from to figure out the +part of our code that is causing the problem. We’ll discuss what a backtrace is +in more detail next. ### Using a `panic!` Backtrace Let’s look at another example to see what it’s like when a `panic!` call comes from a library because of a bug in our code instead of from our code calling -the macro directly: +the macro directly. Listing 9-1 has some code that attempts to access an +element by index in a vector: Filename: src/main.rs @@ -74,22 +78,25 @@ fn main() { } ``` -We’re attempting to access the hundredth element of our vector, but it only has -three elements. In this situation, Rust will panic. Using `[]` is supposed to -return an element, but if you pass an invalid index, there’s no element that -Rust could return here that would be correct. +Listing 9-1: Attempting to access an element beyond the +end of a vector, which will cause a `panic!` + +Here, we’re attempting to access the hundredth element of our vector, but it +has only three elements. In this situation, Rust will panic. Using `[]` is +supposed to return an element, but if you pass an invalid index, there’s no +element that Rust could return here that would be correct. 
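
As an aside that isn't part of the book's listing: when an out-of-range index is something you expect and want to handle rather than a bug, the `get` method on vectors returns an `Option` instead of panicking:

```rust
fn main() {
    let v = vec![1, 2, 3];

    // Unlike `v[99]`, `v.get(99)` never panics; it returns `None` for an
    // invalid index so the caller can decide how to react.
    match v.get(99) {
        Some(value) => println!("the element is {}", value),
        None => println!("index 99 is out of bounds"),
    }
}
```
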
-Other languages like C will attempt to give you exactly what you asked for in +Other languages, like C, will attempt to give you exactly what you asked for in this situation, even though it isn’t what you want: you’ll get whatever is at the location in memory that would correspond to that element in the vector, even though the memory doesn’t belong to the vector. This is called a *buffer -overread*, and can lead to security vulnerabilities if an attacker can +overread* and can lead to security vulnerabilities if an attacker is able to manipulate the index in such a way as to read data they shouldn’t be allowed to that is stored after the array. -In order to protect your program from this sort of vulnerability, if you try to -read an element at an index that doesn’t exist, Rust will stop execution and -refuse to continue. Let’s try it and see: +To protect your program from this sort of vulnerability, if you try to read an +element at an index that doesn’t exist, Rust will stop execution and refuse to +continue. Let’s try it and see: ```text $ cargo run @@ -102,14 +109,21 @@ note: Run with `RUST_BACKTRACE=1` for a backtrace. error: Process didn't exit successfully: `target/debug/panic` (exit code: 101) ``` -This points at a file we didn’t write, *libcollections/vec.rs*. That’s the -implementation of `Vec` in the standard library. The code that gets run when -we use `[]` on our vector `v` is in *libcollections/vec.rs*, and that is where -the `panic!` is actually happening. +This error points at a file we didn’t write, *libcollections/vec.rs*. That’s +the implementation of `Vec` in the standard library. The code that gets run +when we use `[]` on our vector `v` is in *libcollections/vec.rs*, and that is +where the `panic!` is actually happening. The next note line tells us that we can set the `RUST_BACKTRACE` environment -variable to get a backtrace of exactly what happened to cause the error. Let’s -try that. Listing 9-1 shows the output: +variable to get a backtrace of exactly what happened to cause the error. A +*backtrace* is a list of all the functions that have been called to get to this +point. Backtraces in Rust work like they do in other languages: the key to +reading the backtrace is to start from the top and read until you see files you +wrote. That’s the spot where the problem originated. The lines above the lines +mentioning your files are code that your code called; the lines below are code +that called your code. These lines might include core Rust code, standard +library code, or crates that you’re using. Let’s try getting a backtrace: +Listing 9-2 shows output similar to what you’ll see: ```text $ RUST_BACKTRACE=1 cargo run @@ -151,29 +165,26 @@ stack backtrace: 17: 0x0 - ``` -Listing 9-1: The backtrace generated by a call to +Listing 9-2: The backtrace generated by a call to `panic!` displayed when the environment variable `RUST_BACKTRACE` is set -That’s a lot of output! Line 11 of the backtrace points to the line in our -project causing the problem: *src/main.rs*, line four. A backtrace is a list of -all the functions that have been called to get to this point. Backtraces in -Rust work like they do in other languages: the key to reading the backtrace is -to start from the top and read until you see files you wrote. That’s the spot -where the problem originated. The lines above the lines mentioning your files -are code that your code called; the lines below are code that called your code. 
-These lines might include core Rust code, standard library code, or crates that
-you’re using.
-
-If we don’t want our program to panic, the location pointed to by the first
-line mentioning a file we wrote is where we should start investigating in order
-to figure out how we got to this location with values that caused the panic. In
-our example where we deliberately wrote code that would panic in order to
-demonstrate how to use backtraces, the way to fix the panic is to not try to
-request an element at index 100 from a vector that only contains three items.
-When your code panics in the future, you’ll need to figure out for your
-particular case what action the code is taking with what values that causes the
-panic and what the code should do instead.
-
-We’ll come back to `panic!` and when we should and should not use these methods
-later in the chapter. Next, we’ll now look at how to recover from an error with
-`Result`.
+That’s a lot of output! The exact output you see might be different depending
+on your operating system and Rust version. In order to get backtraces with this
+information, debug symbols must be enabled. Debug symbols are enabled by
+default when using `cargo build` or `cargo run` without the `--release` flag, as we
+have here.
+
+In the output in Listing 9-2, line 11 of the backtrace points to the line in
+our project that’s causing the problem: *src/main.rs* in line 4. If we don’t
+want our program to panic, the location pointed to by the first line mentioning
+a file we wrote is where we should start investigating to figure out how we got
+to this location with values that caused the panic. In Listing 9-1 where we
+deliberately wrote code that would panic in order to demonstrate how to use
+backtraces, the way to fix the panic is to not request an element at index 100
+from a vector that only contains three items. When your code panics in the
+future, you’ll need to figure out what action the code is taking with what
+values that causes the panic and what the code should do instead.
+
+We’ll come back to `panic!` and when we should and should not use `panic!` to
+handle error conditions later in the chapter. Next, we’ll look at how to
+recover from an error using `Result`.
diff --git a/src/doc/book/second-edition/src/ch09-02-recoverable-errors-with-result.md b/src/doc/book/second-edition/src/ch09-02-recoverable-errors-with-result.md
index 17a47b5b19..f3c4abb84a 100644
--- a/src/doc/book/second-edition/src/ch09-02-recoverable-errors-with-result.md
+++ b/src/doc/book/second-edition/src/ch09-02-recoverable-errors-with-result.md
@@ -6,8 +6,8 @@ interpret and respond to. For example, if we try to open a file and that
 operation fails because the file doesn’t exist, we might want to create the
 file instead of terminating the process.
 
-Recall from Chapter 2 the section on “[Handling Potential Failure with the
-`Result` Type][handle_failure]” that the `Result` enum is defined
+Recall in Chapter 2 in the “[Handling Potential Failure with the `Result`
+Type][handle_failure]” section that the `Result` enum is defined
 as having two variants, `Ok` and `Err`, as follows:
 
 [handle_failure]: ch02-00-guessing-game-tutorial.html#handling-potential-failure-with-the-result-type
 
@@ -19,7 +19,7 @@ enum Result {
 }
 ```
 
-The `T` and `E` are generic type parameters; we’ll go into generics in more
+The `T` and `E` are generic type parameters: we’ll discuss generics in more
 detail in Chapter 10. 
What you need to know right now is that `T` represents the type of the value that will be returned in a success case within the `Ok` variant, and `E` represents the type of the error that will be returned in a @@ -29,7 +29,7 @@ library has defined on it in many different situations where the successful value and error value we want to return may differ. Let’s call a function that returns a `Result` value because the function could -fail: opening a file, shown in Listing 9-2. +fail: in Listing 9-3 we try to open a file: Filename: src/main.rs @@ -41,21 +41,21 @@ fn main() { } ``` -Listing 9-2: Opening a file +Listing 9-3: Opening a file How do we know `File::open` returns a `Result`? We could look at the standard library API documentation, or we could ask the compiler! If we give `f` a type -annotation of some type that we know the return type of the function is *not*, +annotation of a type that we know the return type of the function is *not* and then we try to compile the code, the compiler will tell us that the types don’t -match. The error message will then tell us what the type of `f` *is*! Let’s try +match. The error message will then tell us what the type of `f` *is*. Let’s try it: we know that the return type of `File::open` isn’t of type `u32`, so let’s -change the `let f` statement to: +change the `let f` statement to this: ```rust,ignore let f: u32 = File::open("hello.txt"); ``` -Attempting to compile now gives us: +Attempting to compile now gives us the following output: ```text error[E0308]: mismatched types @@ -76,9 +76,9 @@ error value is `std::io::Error`. This return type means the call to `File::open` might succeed and return to us a file handle that we can read from or write to. The function call also might -fail: for example, the file might not exist, or we might not have permission to +fail: for example, the file might not exist or we might not have permission to access the file. The `File::open` function needs to have a way to tell us -whether it succeeded or failed, and at the same time give us either the file +whether it succeeded or failed and at the same time give us either the file handle or error information. This information is exactly what the `Result` enum conveys. @@ -87,9 +87,9 @@ In the case where `File::open` succeeds, the value we will have in the variable it fails, the value in `f` will be an instance of `Err` that contains more information about the kind of error that happened. -We need to add to the code from Listing 9-2 to take different actions depending -on the value `File::open` returned. Listing 9-3 shows one way to handle the -`Result` with a basic tool: the `match` expression that we learned about in +We need to add to the code in Listing 9-3 to take different actions depending +on the value `File::open` returned. Listing 9-4 shows one way to handle the +`Result` using a basic tool: the `match` expression that we discussed in Chapter 6. Filename: src/main.rs @@ -109,7 +109,7 @@ fn main() { } ``` -Listing 9-3: Using a `match` expression to handle the +Listing 9-4: Using a `match` expression to handle the `Result` variants we might have Note that, like the `Option` enum, the `Result` enum and its variants have been @@ -131,19 +131,23 @@ thread 'main' panicked at 'There was a problem opening the file: Error { repr: Os { code: 2, message: "No such file or directory" } }', src/main.rs:8 ``` +As usual, this output tells us exactly what has gone wrong. 
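
The `match`-on-`Result` pattern isn't specific to `File::open`; any operation that returns a `Result` can be handled the same way. Here is a small editorial sketch (not one of the book's listings) using `str::parse`, which returns `Ok` with the number on success and `Err` with a parse error otherwise:

```rust
fn main() {
    let input = "42x";

    // `parse` returns Result<i32, std::num::ParseIntError> here.
    match input.parse::<i32>() {
        Ok(number) => println!("parsed the number {}", number),
        Err(error) => println!("could not parse {:?}: {}", input, error),
    }
}
```
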
+ ### Matching on Different Errors -The code in Listing 9-3 will `panic!` no matter the reason that `File::open` -failed. What we’d really like to do instead is take different actions for -different failure reasons: if `File::open` failed because the file doesn’t -exist, we want to create the file and return the handle to the new file. If -`File::open` failed for any other reason, for example because we didn’t have -permission to open the file, we still want to `panic!` in the same way as we -did in Listing 9-3. Let’s look at Listing 9-4, which adds another arm to the -`match`: +The code in Listing 9-4 will `panic!` no matter the reason that `File::open` +failed. What we want to do instead is take different actions for different +failure reasons: if `File::open` failed because the file doesn’t exist, we want +to create the file and return the handle to the new file. If `File::open` +failed for any other reason, for example because we didn’t have permission to +open the file, we still want the code to `panic!` in the same way as it did in +Listing 9-4. Look at Listing 9-5, which adds another arm to the `match`: Filename: src/main.rs + + ```rust,ignore use std::fs::File; use std::io::ErrorKind; @@ -174,7 +178,7 @@ fn main() { } ``` -Listing 9-4: Handling different kinds of errors in +Listing 9-5: Handling different kinds of errors in different ways The type of the value that `File::open` returns inside the `Err` variant is @@ -182,36 +186,38 @@ The type of the value that `File::open` returns inside the `Err` variant is has a method `kind` that we can call to get an `io::ErrorKind` value. `io::ErrorKind` is an enum provided by the standard library that has variants representing the different kinds of errors that might result from an `io` -operation. The variant we’re interested in is `ErrorKind::NotFound`, which -indicates the file we’re trying to open doesn’t exist yet. +operation. The variant we want to use is `ErrorKind::NotFound`, which indicates +the file we’re trying to open doesn’t exist yet. The condition `if error.kind() == ErrorKind::NotFound` is called a *match guard*: it’s an extra condition on a `match` arm that further refines the arm’s -pattern. This condition must be true in order for that arm’s code to get run; -otherwise, the pattern matching will move on to consider the next arm in the -`match`. The `ref` in the pattern is needed so that `error` is not moved into -the guard condition but is merely referenced by it. The reason `ref` is used to -take a reference in a pattern instead of `&` will be covered in detail in -Chapter 18. In short, in the context of a pattern, `&` matches a reference and -gives us its value, but `ref` matches a value and gives us a reference to it. +pattern. This condition must be true for that arm’s code to be run; otherwise, +the pattern matching will move on to consider the next arm in the `match`. The +`ref` in the pattern is needed so `error` is not moved into the guard condition +but is merely referenced by it. The reason `ref` is used to take a reference in +a pattern instead of `&` will be covered in detail in Chapter 18. In short, in +the context of a pattern, `&` matches a reference and gives us its value, but +`ref` matches a value and gives us a reference to it. The condition we want to check in the match guard is whether the value returned by `error.kind()` is the `NotFound` variant of the `ErrorKind` enum. If it is, -we try to create the file with `File::create`. 
However, since `File::create` -could also fail, we need to add an inner `match` statement as well! When the +we try to create the file with `File::create`. However, because `File::create` +could also fail, we need to add an inner `match` statement as well. When the file can’t be opened, a different error message will be printed. The last arm -of the outer `match` stays the same so that the program panics on any error -besides the missing file error. +of the outer `match` stays the same so the program panics on any error besides +the missing file error. ### Shortcuts for Panic on Error: `unwrap` and `expect` Using `match` works well enough, but it can be a bit verbose and doesn’t always communicate intent well. The `Result` type has many helper methods -defined on it to do various things. One of those methods, called `unwrap`, is a +defined on it to do various tasks. One of those methods, called `unwrap`, is a shortcut method that is implemented just like the `match` statement we wrote in -Listing 9-3. If the `Result` value is the `Ok` variant, `unwrap` will return +Listing 9-4. If the `Result` value is the `Ok` variant, `unwrap` will return the value inside the `Ok`. If the `Result` is the `Err` variant, `unwrap` will -call the `panic!` macro for us. +call the `panic!` macro for us. Here is an example of `unwrap` in action: + +Filename: src/main.rs ```rust,should_panic use std::fs::File; @@ -230,10 +236,12 @@ repr: Os { code: 2, message: "No such file or directory" } }', /stable-dist-rustc/build/src/libcore/result.rs:868 ``` -There’s another method similar to `unwrap` that lets us also choose the -`panic!` error message: `expect`. Using `expect` instead of `unwrap` and -providing good error messages can convey your intent and make tracking down the -source of a panic easier. The syntax of `expect` looks like this: +Another method, `expect`, which is similar to `unwrap`, lets us also choose the +`panic!` error message. Using `expect` instead of `unwrap` and providing good +error messages can convey your intent and make tracking down the source of a +panic easier. The syntax of `expect` looks like this: + +Filename: src/main.rs ```rust,should_panic use std::fs::File; @@ -244,8 +252,8 @@ fn main() { ``` We use `expect` in the same way as `unwrap`: to return the file handle or call -the `panic!` macro. The error message that `expect` uses in its call to -`panic!` will be the parameter that we pass to `expect` instead of the default +the `panic!` macro. The error message used by `expect` in its call to `panic!` +will be the parameter that we pass to `expect`, rather than the default `panic!` message that `unwrap` uses. Here’s what it looks like: ```text @@ -254,19 +262,27 @@ thread 'main' panicked at 'Failed to open hello.txt: Error { repr: Os { code: /stable-dist-rustc/build/src/libcore/result.rs:868 ``` +Because this error message starts with the text we specified, `Failed to open +hello.txt`, it will be easier to find where in the code this error message is +coming from. If we use `unwrap` in multiple places, it can take more time to +figure out exactly which `unwrap` is causing the panic because all `unwrap` +calls that panic print the same message. + ### Propagating Errors -When writing a function whose implementation calls something that might fail, -instead of handling the error within this function, you can choose to let your -caller know about the error so they can decide what to do. 
This is known as
-*propagating* the error, and gives more control to the calling code where there
+When you’re writing a function whose implementation calls something that might
+fail, instead of handling the error within this function, you can return the
+error to the calling code so that it can decide what to do. This is known as
+*propagating* the error and gives more control to the calling code where there
 might be more information or logic that dictates how the error should be
 handled than what you have available in the context of your code.
 
-For example, Listing 9-5 shows a function that reads a username from a file. If
+For example, Listing 9-6 shows a function that reads a username from a file. If
 the file doesn’t exist or can’t be read, this function will return those errors
 to the code that called this function:
 
+Filename: src/main.rs
+
 ```rust
 use std::io;
 use std::io::Read;
@@ -289,59 +305,61 @@ fn read_username_from_file() -> Result<String, io::Error> {
 }
 ```
 
-Listing 9-5: A function that returns errors to the
+Listing 9-6: A function that returns errors to the
 calling code using `match`
 
 Let’s look at the return type of the function first: `Result<String,
-io::Error>`. This means that the function is returning a value of the type
+io::Error>`. This means the function is returning a value of the type
 `Result<T, E>` where the generic parameter `T` has been filled in with the
 concrete type `String`, and the generic type `E` has been filled in with the
 concrete type `io::Error`. If this function succeeds without any problems, the
-caller of this function will receive an `Ok` value that holds a `String` — the
-username that this function read from the file. If this function encounters any
-problems, the caller of this function will receive an `Err` value that holds an
-instance of `io::Error` that contains more information about what the problems
-were. We chose `io::Error` as the return type of this function because that
-happens to be the type of the error value returned from both of the operations
-we’re calling in this function’s body that might fail: the `File::open`
-function and the `read_to_string` method.
+code that calls this function will receive an `Ok` value that holds a
+`String`—the username that this function read from the file. If this function
+encounters any problems, the code that calls this function will receive an
+`Err` value that holds an instance of `io::Error` that contains more
+information about what the problems were. We chose `io::Error` as the return
+type of this function because that happens to be the type of the error value
+returned from both of the operations we’re calling in this function’s body that
+might fail: the `File::open` function and the `read_to_string` method.
 
 The body of the function starts by calling the `File::open` function. Then we
 handle the `Result` value returned with a `match` similar to the `match` in
-Listing 9-3, only instead of calling `panic!` in the `Err` case, we return
+Listing 9-4, only instead of calling `panic!` in the `Err` case, we return
 early from this function and pass the error value from `File::open` back to the
-caller as this function’s error value. If `File::open` succeeds, we store the
-file handle in the variable `f` and continue.
+calling code as this function’s error value. If `File::open` succeeds, we store
+the file handle in the variable `f` and continue.
 
 Then we create a new `String` in variable `s` and call the `read_to_string`
-method on the file handle in `f` in order to read the contents of the file into
-`s`.
The `read_to_string` method also returns a `Result` because it might fail, -even though `File::open` succeeded. So we need another `match` to handle that +method on the file handle in `f` to read the contents of the file into `s`. The +`read_to_string` method also returns a `Result` because it might fail, even +though `File::open` succeeded. So we need another `match` to handle that `Result`: if `read_to_string` succeeds, then our function has succeeded, and we return the username from the file that’s now in `s` wrapped in an `Ok`. If `read_to_string` fails, we return the error value in the same way that we returned the error value in the `match` that handled the return value of -`File::open`. We don’t need to explicitly say `return`, however, since this is -the last expression in the function. +`File::open`. However, we don’t need to explicitly say `return`, because this +is the last expression in the function. The code that calls this code will then handle getting either an `Ok` value that contains a username or an `Err` value that contains an `io::Error`. We -don’t know what the caller will do with those values. If they get an `Err` -value, they could choose to call `panic!` and crash their program, use a +don’t know what the calling code will do with those values. If the calling code +gets an `Err` value, it could call `panic!` and crash the program, use a default username, or look up the username from somewhere other than a file, for -example. We don’t have enough information on what the caller is actually trying -to do, so we propagate all the success or error information upwards for them to -handle as they see fit. +example. We don’t have enough information on what the calling code is actually +trying to do, so we propagate all the success or error information upwards for +it to handle appropriately. -This pattern of propagating errors is so common in Rust that there is dedicated -syntax to make this easier: `?`. +This pattern of propagating errors is so common in Rust that Rust provides the +question mark operator `?` to make this easier. -### A Shortcut for Propagating Errors: `?` +#### A Shortcut for Propagating Errors: `?` -Listing 9-6 shows an implementation of `read_username_from_file` that has the -same functionality as it had in Listing 9-5, but this implementation uses the +Listing 9-7 shows an implementation of `read_username_from_file` that has the +same functionality as it had in Listing 9-6, but this implementation uses the question mark operator: +Filename: src/main.rs + ```rust use std::io; use std::io::Read; @@ -355,26 +373,42 @@ fn read_username_from_file() -> Result { } ``` -Listing 9-6: A function that returns errors to the +Listing 9-7: A function that returns errors to the calling code using `?` -The `?` placed after a `Result` value is defined to work the exact same way as -the `match` expressions we defined to handle the `Result` values in Listing -9-5. If the value of the `Result` is an `Ok`, the value inside the `Ok` will +The `?` placed after a `Result` value is defined to work in almost the same way +as the `match` expressions we defined to handle the `Result` values in Listing +9-6. If the value of the `Result` is an `Ok`, the value inside the `Ok` will get returned from this expression and the program will continue. If the value is an `Err`, the value inside the `Err` will be returned from the whole -function as if we had used the `return` keyword so that the error value gets -propagated to the caller. 
- -In the context of Listing 9-6, the `?` at the end of the `File::open` call will +function as if we had used the `return` keyword so the error value gets +propagated to the calling code. + +The one difference between the `match` expression from Listing 9-6 and what the +question mark operator does is that when using the question mark operator, +error values go through the `from` function defined in the `From` trait in the +standard library. Many error types implement the `from` function to convert an +error of one type into an error of another type. When used by the question mark +operator, the call to the `from` function converts the error type that the +question mark operator gets into the error type defined in the return type of +the current function that we’re using `?` in. This is useful when parts of a +function might fail for many different reasons, but the function returns one +error type that represents all the ways the function might fail. As long as +each error type implements the `from` function to define how to convert itself +to the returned error type, the question mark operator takes care of the +conversion automatically. + +In the context of Listing 9-7, the `?` at the end of the `File::open` call will return the value inside an `Ok` to the variable `f`. If an error occurs, `?` -will return early out of the whole function and give any `Err` value to our -caller. The same thing applies to the `?` at the end of the `read_to_string` -call. +will return early out of the whole function and give any `Err` value to the +calling code. The same thing applies to the `?` at the end of the +`read_to_string` call. The `?` eliminates a lot of boilerplate and makes this function’s implementation simpler. We could even shorten this code further by chaining -method calls immediately after the `?`: +method calls immediately after the `?` as shown in Listing 9-8: + +Filename: src/main.rs ```rust use std::io; @@ -390,21 +424,24 @@ fn read_username_from_file() -> Result { } ``` +Listing 9-8: Chaining method calls after the question +mark operator + We’ve moved the creation of the new `String` in `s` to the beginning of the function; that part hasn’t changed. Instead of creating a variable `f`, we’ve chained the call to `read_to_string` directly onto the result of `File::open("hello.txt")?`. We still have a `?` at the end of the `read_to_string` call, and we still return an `Ok` value containing the username in `s` when both `File::open` and `read_to_string` succeed rather than -returning errors. The functionality is again the same as in Listing 9-5 and -Listing 9-6, this is just a different, more ergonomic way to write it. +returning errors. The functionality is again the same as in Listing 9-6 and +Listing 9-7; this is just a different, more ergonomic way to write it. -### `?` Can Only Be Used in Functions That Return `Result` +#### `?` Can Only Be Used in Functions That Return Result The `?` can only be used in functions that have a return type of `Result`, -since it is defined to work in exactly the same way as the `match` expression -we defined in Listing 9-5. The part of the `match` that requires a return type -of `Result` is `return Err(e)`, so the return type of the function must be a +because it is defined to work in the same way as the `match` expression we +defined in Listing 9-6. The part of the `match` that requires a return type of +`Result` is `return Err(e)`, so the return type of the function must be a `Result` to be compatible with this `return`. 
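As a rough illustration of the `from` conversion described above, consider the following sketch. The `ConfigError` type and the `read_config` function are hypothetical names used only for this example; they are not part of the book’s code:

```rust
use std::fs::File;
use std::io::{self, Read};

// A hypothetical error type for our own function.
#[derive(Debug)]
enum ConfigError {
    Io(io::Error),
}

// Implementing `From<io::Error>` tells `?` how to convert the io::Error
// values produced by File::open and read_to_string into a ConfigError.
impl From<io::Error> for ConfigError {
    fn from(err: io::Error) -> ConfigError {
        ConfigError::Io(err)
    }
}

fn read_config() -> Result<String, ConfigError> {
    let mut s = String::new();
    // Each `?` returns early with an Err(ConfigError) if the call fails;
    // the conversion from io::Error happens through `from`.
    File::open("config.txt")?.read_to_string(&mut s)?;
    Ok(s)
}

fn main() {
    match read_config() {
        Ok(text) => println!("read {} bytes of configuration", text.len()),
        Err(ConfigError::Io(err)) => println!("could not read config: {:?}", err),
    }
}
```

The same mechanism is what lets `read_username_from_file` return its `io::Error` values directly: the standard library’s reflexive `From` implementation converts an `io::Error` into itself.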
Let’s look at what happens if we use `?` in the `main` function, which you’ll @@ -418,34 +455,28 @@ fn main() { } ``` - - -When we compile this, we get the following error message: +When we compile this code, we get the following error message: ```text -error[E0308]: mismatched types - --> +error[E0277]: the `?` operator can only be used in a function that returns +`Result` (or another type that implements `std::ops::Try`) + --> src/main.rs:4:13 | -3 | let f = File::open("hello.txt")?; - | ^^^^^^^^^^^^^^^^^^^^^^^^^ expected (), found enum -`std::result::Result` +4 | let f = File::open("hello.txt")?; + | ------------------------ + | | + | cannot use the `?` operator in a function that returns `()` + | in this macro invocation | - = note: expected type `()` - = note: found type `std::result::Result<_, _>` + = help: the trait `std::ops::Try` is not implemented for `()` + = note: required by `std::ops::Try::from_error` ``` -This error is pointing out that we have mismatched types: the `main` function -has a return type of `()`, but the `?` might return a `Result`. In functions -that don’t return `Result`, when you call other functions that return `Result`, -you’ll need to use a `match` or one of the `Result` methods to handle it, -instead of using `?` to potentially propagate the error to the caller. +This error points out that we’re only allowed to use the question mark operator +in a function that returns `Result`. In functions that don’t return `Result`, +when you call other functions that return `Result`, you’ll need to use a +`match` or one of the `Result` methods to handle it instead of using `?` to +potentially propagate the error to the calling code. Now that we’ve discussed the details of calling `panic!` or returning `Result`, let’s return to the topic of how to decide which is appropriate to use in which diff --git a/src/doc/book/second-edition/src/ch09-03-to-panic-or-not-to-panic.md b/src/doc/book/second-edition/src/ch09-03-to-panic-or-not-to-panic.md index c2acfdbe39..ef2df322f1 100644 --- a/src/doc/book/second-edition/src/ch09-03-to-panic-or-not-to-panic.md +++ b/src/doc/book/second-edition/src/ch09-03-to-panic-or-not-to-panic.md @@ -1,32 +1,31 @@ -## To `panic!` or Not To `panic!` +## To `panic!` or Not to `panic!` So how do you decide when you should `panic!` and when you should return -`Result`? When code panics, there’s no way to recover. You could choose to call -`panic!` for any error situation, whether there’s a possible way to recover or -not, but then you’re making the decision for your callers that a situation is -unrecoverable. When you choose to return a `Result` value, you give your caller -options, rather than making the decision for them. They could choose to attempt -to recover in a way that’s appropriate for their situation, or they could -decide that actually, an `Err` value in this case is unrecoverable, so they can -call `panic!` and turn your recoverable error into an unrecoverable one. -Therefore, returning `Result` is a good default choice when you’re defining a -function that might fail. - -There are a few situations in which it’s more appropriate to write code that -panics instead of returning a `Result`, but they are less common. Let’s discuss -why it’s appropriate to panic in examples, prototype code, and tests, then -situations where you as a human can know a method won’t fail that the compiler -can’t reason about, and conclude with some general guidelines on how to decide +`Result`? When code panics, there’s no way to recover. 
You could call `panic!` +for any error situation, whether there’s a possible way to recover or not, but +then you’re making the decision on behalf of the code calling your code that a +situation is unrecoverable. When you choose to return a `Result` value, you +give the calling code options rather than making the decision for it. The +calling code could choose to attempt to recover in a way that’s appropriate for +its situation, or it could decide that an `Err` value in this case is +unrecoverable, so it can call `panic!` and turn your recoverable error into an +unrecoverable one. Therefore, returning `Result` is a good default choice when +you’re defining a function that might fail. + +In a few situations it’s more appropriate to write code that panics instead of +returning a `Result`, but they are less common. Let’s explore why it’s +appropriate to panic in examples, prototype code, and tests; then in situations +where you as a human can know a method won’t fail that the compiler can’t +reason about; and conclude with some general guidelines on how to decide whether to panic in library code. -### Examples, Prototype Code, and Tests: Perfectly Fine to Panic +### Examples, Prototype Code, and Tests Are All Places it’s Perfectly Fine to Panic When you’re writing an example to illustrate some concept, having robust error handling code in the example as well can make the example less clear. In examples, it’s understood that a call to a method like `unwrap` that could -`panic!` is meant as a placeholder for the way that you’d actually like your -application to handle errors, which can differ based on what the rest of your -code is doing. +`panic!` is meant as a placeholder for the way that you’d want your application +to handle errors, which can differ based on what the rest of your code is doing. Similarly, the `unwrap` and `expect` methods are very handy when prototyping, before you’re ready to decide how to handle errors. They leave clear markers in @@ -34,10 +33,10 @@ your code for when you’re ready to make your program more robust. If a method call fails in a test, we’d want the whole test to fail, even if that method isn’t the functionality under test. Because `panic!` is how a test -gets marked as a failure, calling `unwrap` or `expect` is exactly what makes -sense to do. +is marked as a failure, calling `unwrap` or `expect` is exactly what should +happen. -### Cases When You Have More Information Than The Compiler +### Cases When You Have More Information Than the Compiler It would also be appropriate to call `unwrap` when you have some other logic that ensures the `Result` will have an `Ok` value, but the logic isn’t @@ -45,7 +44,7 @@ something the compiler understands. You’ll still have a `Result` value that yo need to handle: whatever operation you’re calling still has the possibility of failing in general, even though it’s logically impossible in your particular situation. If you can ensure by manually inspecting the code that you’ll never -have an `Err` variant, it is perfectly acceptable to call `unwrap`. Here’s an +have an `Err` variant, it’s perfectly acceptable to call `unwrap`. Here’s an example: ```rust @@ -59,62 +58,62 @@ that `127.0.0.1` is a valid IP address, so it’s acceptable to use `unwrap` here. 
However, having a hardcoded, valid string doesn’t change the return type of the `parse` method: we still get a `Result` value, and the compiler will still make us handle the `Result` as if the `Err` variant is still a -possibility since the compiler isn’t smart enough to see that this string is -always a valid IP address. If the IP address string came from a user instead of -being hardcoded into the program, and therefore *did* have a possibility of -failure, we’d definitely want to handle the `Result` in a more robust way +possibility because the compiler isn’t smart enough to see that this string is +always a valid IP address. If the IP address string came from a user rather +than being hardcoded into the program, and therefore *did* have a possibility +of failure, we’d definitely want to handle the `Result` in a more robust way instead. ### Guidelines for Error Handling -It’s advisable to have your code `panic!` when it’s possible that you could end -up in a bad state—in this context, bad state is when some assumption, -guarantee, contract, or invariant has been broken, such as when invalid values, -contradictory values, or missing values are passed to your code—plus one or -more of the following: +It’s advisable to have your code `panic!` when it’s possible that your code +could end up in a bad state. In this context, bad state is when some +assumption, guarantee, contract, or invariant has been broken, such as when +invalid values, contradictory values, or missing values are passed to your +code—plus one or more of the following: -* The bad state is not something that’s *expected* to happen occasionally -* Your code after this point needs to rely on not being in this bad state -* There’s not a good way to encode this information in the types you use +* The bad state is not something that’s *expected* to happen occasionally. +* Your code after this point needs to rely on not being in this bad state. +* There’s not a good way to encode this information in the types you use. If someone calls your code and passes in values that don’t make sense, the best -thing might be to `panic!` and alert the person using your library to the bug -in their code so that they can fix it during development. Similarly, `panic!` -is often appropriate if you’re calling external code that is out of your -control, and it returns an invalid state that you have no way of fixing. +choice might be to `panic!` and alert the person using your library to the bug +in their code so they can fix it during development. Similarly, `panic!` is +often appropriate if you’re calling external code that is out of your control, +and it returns an invalid state that you have no way of fixing. When a bad state is reached, but it’s expected to happen no matter how well you write your code, it’s still more appropriate to return a `Result` rather than -calling `panic!`. Examples of this include a parser being given malformed data, -or an HTTP request returning a status that indicates you have hit a rate limit. -In these cases, you should indicate that failure is an expected possibility by -returning a `Result` in order to propagate these bad states upwards so that the -caller can decide how they would like to handle the problem. To `panic!` -wouldn’t be the best way to handle these cases. +making a `panic!` call. Examples of this include a parser being given malformed +data or an HTTP request returning a status that indicates you have hit a rate +limit. 
In these cases, you should indicate that failure is an expected +possibility by returning a `Result` to propagate these bad states upwards so +the calling code can decide how to handle the problem. To `panic!` wouldn’t be +the best way to handle these cases. When your code performs operations on values, your code should verify the values are valid first, and `panic!` if the values aren’t valid. This is mostly for safety reasons: attempting to operate on invalid data can expose your code -to vulnerabilities. This is the main reason that the standard library will -`panic!` if you attempt an out-of-bounds array access: trying to access memory -that doesn’t belong to the current data structure is a common security problem. +to vulnerabilities. This is the main reason the standard library will `panic!` +if you attempt an out-of-bounds memory access: trying to access memory that +doesn’t belong to the current data structure is a common security problem. Functions often have *contracts*: their behavior is only guaranteed if the inputs meet particular requirements. Panicking when the contract is violated makes sense because a contract violation always indicates a caller-side bug, -and it is not a kind of error you want callers to have to explicitly handle. In -fact, there’s no reasonable way for calling code to recover: the calling -*programmers* need to fix the code. Contracts for a function, especially when a -violation will cause a panic, should be explained in the API documentation for -the function. - -Having lots of error checks in all of your functions would be verbose and -annoying, though. Luckily, you can use Rust’s type system (and thus the type -checking the compiler does) to do a lot of the checks for you. If your function +and it’s not a kind of error you want the calling code to have to explicitly +handle. In fact, there’s no reasonable way for calling code to recover: the +calling *programmers* need to fix the code. Contracts for a function, +especially when a violation will cause a panic, should be explained in the API +documentation for the function. + +However, having lots of error checks in all of your functions would be verbose +and annoying. Fortunately, you can use Rust’s type system (and thus the type +checking the compiler does) to do many of the checks for you. If your function has a particular type as a parameter, you can proceed with your code’s logic knowing that the compiler has already ensured you have a valid value. For example, if you have a type rather than an `Option`, your program expects to have *something* rather than *nothing*. Your code then doesn’t have to handle -two cases for the `Some` and `None` variants, it will only have one case for +two cases for the `Some` and `None` variants: it will only have one case for definitely having a value. Code trying to pass nothing to your function won’t even compile, so your function doesn’t have to check for that case at runtime. Another example is using an unsigned integer type like `u32`, which ensures the @@ -123,19 +122,19 @@ parameter is never negative. ### Creating Custom Types for Validation Let’s take the idea of using Rust’s type system to ensure we have a valid value -one step further, and look at creating a custom type for validation. Recall the -guessing game in Chapter 2, where our code asked the user to guess a number -between 1 and 100. We actually never validated that the user’s guess was -between those numbers before checking it against our secret number, only that -it was positive. 
In this case, the consequences were not very dire: our output -of “Too high” or “Too low” would still be correct. It would be a useful -enhancement to guide the user towards valid guesses, though, and have different -behavior when a user guesses a number that’s out of range versus when a user -types, for example, letters instead. +one step further and look at creating a custom type for validation. Recall the +guessing game in Chapter 2 where our code asked the user to guess a number +between 1 and 100. We never validated that the user’s guess was between those +numbers before checking it against our secret number; we only validated that +the guess was positive. In this case, the consequences were not very dire: our +output of “Too high” or “Too low” would still be correct. It would be a useful +enhancement to guide the user toward valid guesses and have different behavior +when a user guesses a number that’s out of range versus when a user types, for +example, letters instead. One way to do this would be to parse the guess as an `i32` instead of only a -`u32`, to allow potentially negative numbers, then add a check for the number -being in range: +`u32` to allow potentially negative numbers, and then add a check for the +number being in range, like so: ```rust,ignore loop { @@ -156,7 +155,7 @@ loop { } ``` -The `if` expression checks to see if our value is out of range, tells the user +The `if` expression checks whether our value is out of range, tells the user about the problem, and calls `continue` to start the next iteration of the loop and ask for another guess. After the `if` expression, we can proceed with the comparisons between `guess` and the secret number knowing that `guess` is @@ -170,7 +169,7 @@ to have a check like this in every function. Instead, we can make a new type and put the validations in a function to create an instance of the type rather than repeating the validations everywhere. That way, it’s safe for functions to use the new type in their signatures and -confidently use the values they receive. Listing 9-8 shows one way to define a +confidently use the values they receive. Listing 9-9 shows one way to define a `Guess` type that will only create an instance of `Guess` if the `new` function receives a value between 1 and 100: @@ -196,7 +195,7 @@ impl Guess { } ``` -Listing 9-8: A `Guess` type that will only continue with +Listing 9-9: A `Guess` type that will only continue with values between 1 and 100 First, we define a struct named `Guess` that has a field named `value` that @@ -205,35 +204,35 @@ holds a `u32`. This is where the number will be stored. Then we implement an associated function named `new` on `Guess` that creates instances of `Guess` values. The `new` function is defined to have one parameter named `value` of type `u32` and to return a `Guess`. The code in the -body of the `new` function tests `value` to make sure it is between 1 and 100. -If `value` doesn’t pass this test, we call `panic!`, which will alert the -programmer who is calling this code that they have a bug they need to fix, -since creating a `Guess` with a `value` outside this range would violate the -contract that `Guess::new` is relying on. The conditions in which `Guess::new` -might panic should be discussed in its public-facing API documentation; we’ll -cover documentation conventions around indicating the possibility of a `panic!` -in the API documentation that you create in Chapter 14. 
If `value` does pass -the test, we create a new `Guess` with its `value` field set to the `value` -parameter and return the `Guess`. +body of the `new` function tests `value` to make sure it’s between 1 and 100. +If `value` doesn’t pass this test, we make a `panic!` call, which will alert +the programmer who is writing the calling code that they have a bug they need +to fix, because creating a `Guess` with a `value` outside this range would +violate the contract that `Guess::new` is relying on. The conditions in which +`Guess::new` might panic should be discussed in its public-facing API +documentation; we’ll cover documentation conventions indicating the possibility +of a `panic!` in the API documentation that you create in Chapter 14. If +`value` does pass the test, we create a new `Guess` with its `value` field set +to the `value` parameter and return the `Guess`. Next, we implement a method named `value` that borrows `self`, doesn’t have any other parameters, and returns a `u32`. This is a kind of method sometimes -called a *getter*, since its purpose is to get some data from its fields and +called a *getter*, because its purpose is to get some data from its fields and return it. This public method is necessary because the `value` field of the `Guess` struct is private. It’s important that the `value` field is private so -that code using the `Guess` struct is not allowed to set `value` directly: -callers outside the module *must* use the `Guess::new` function to create an -instance of `Guess`, which ensures there’s no way for a `Guess` to have a -`value` that hasn’t been checked by the conditions in the `Guess::new` function. +code using the `Guess` struct is not allowed to set `value` directly: code +outside the module *must* use the `Guess::new` function to create an instance +of `Guess`, which ensures there’s no way for a `Guess` to have a `value` that +hasn’t been checked by the conditions in the `Guess::new` function. A function that has a parameter or returns only numbers between 1 and 100 could then declare in its signature that it takes or returns a `Guess` rather than a -`u32`, and wouldn’t need to do any additional checks in its body. +`u32` and wouldn’t need to do any additional checks in its body. ## Summary Rust’s error handling features are designed to help you write more robust code. -The `panic!` macro signals that your program is in a state it can’t handle, and +The `panic!` macro signals that your program is in a state it can’t handle and lets you tell the process to stop instead of trying to proceed with invalid or incorrect values. The `Result` enum uses Rust’s type system to indicate that operations might fail in a way that your code could recover from. You can use @@ -241,6 +240,7 @@ operations might fail in a way that your code could recover from. You can use success or failure as well. Using `panic!` and `Result` in the appropriate situations will make your code more reliable in the face of inevitable problems. -Now that we’ve seen useful ways that the standard library uses generics with -the `Option` and `Result` enums, let’s talk about how generics work and how you -can make use of them in your code. +Now that you’ve seen useful ways that the standard library uses generics with +the `Option` and `Result` enums, we’ll talk about how generics work and how you +can use them in your code in the next chapter. 
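Before moving on, here is a minimal sketch tying together the point made earlier in this section about `Guess` in function signatures: a function that accepts a `Guess` needs no range check of its own. The `describe` function and the `main` shown here are illustrative additions, not the book’s listing:

```rust
pub struct Guess {
    value: u32,
}

impl Guess {
    pub fn new(value: u32) -> Guess {
        if value < 1 || value > 100 {
            panic!("Guess value must be between 1 and 100, got {}.", value);
        }

        Guess { value }
    }

    pub fn value(&self) -> u32 {
        self.value
    }
}

// Because the parameter is a `Guess`, this function can rely on the value
// already being between 1 and 100 and needs no range check of its own.
fn describe(guess: &Guess) -> String {
    format!("You guessed {}", guess.value())
}

fn main() {
    let guess = Guess::new(42);
    println!("{}", describe(&guess));
}
```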
+ diff --git a/src/doc/book/second-edition/src/ch10-02-traits.md b/src/doc/book/second-edition/src/ch10-02-traits.md index 774d913d06..7a3c391882 100644 --- a/src/doc/book/second-edition/src/ch10-02-traits.md +++ b/src/doc/book/second-edition/src/ch10-02-traits.md @@ -42,14 +42,14 @@ pub trait Summarizable { consists of the behavior provided by a `summary` method We declare a trait with the `trait` keyword, then the trait’s name, in this -case `Summarizable`. Inside curly braces we declare the method signatures that -describe the behaviors that types that implement this trait will need to have, -in this case `fn summary(&self) -> String`. After the method signature, instead -of providing an implementation within curly braces, we put a semicolon. Each -type that implements this trait must then provide its own custom behavior for -the body of the method, but the compiler will enforce that any type that has -the `Summarizable` trait will have the method `summary` defined for it with -this signature exactly. +case `Summarizable`. Inside curly brackets we declare the method signatures +that describe the behaviors that types that implement this trait will need to +have, in this case `fn summary(&self) -> String`. After the method signature, +instead of providing an implementation within curly brackets, we put a +semicolon. Each type that implements this trait must then provide its own +custom behavior for the body of the method, but the compiler will enforce that +any type that has the `Summarizable` trait will have the method `summary` +defined for it with this signature exactly. A trait can have multiple methods in its body, with the method signatures listed one per line and each line ending in a semicolon. @@ -106,7 +106,7 @@ related to a trait. The difference is after `impl`, we put the trait name that we want to implement, then say `for` and the name of the type that we want to implement the trait for. Within the `impl` block, we put the method signatures that the trait definition has defined, but instead of putting a semicolon after -each signature, we put curly braces and fill in the method body with the +each signature, we put curly brackets and fill in the method body with the specific behavior that we want the methods of the trait to have for the particular type. @@ -187,7 +187,7 @@ behavior. When we implement the trait on a particular type, we can choose to keep or override each method’s default behavior. Listing 10-15 shows how we could have chosen to specify a default string for -the `summary` method of the `Summarize` trait instead of only choosing to only +the `summary` method of the `Summarize` trait instead of choosing to only define the method signature like we did in Listing 10-12: Filename: lib.rs @@ -240,7 +240,7 @@ implementation. Default implementations are allowed to call the other methods in the same trait, even if those other methods don’t have a default implementation. In this way, a trait can provide a lot of useful functionality and only require -implementers to specify a small part of it. We could choose to have the +implementors to specify a small part of it. We could choose to have the `Summarizable` trait also have an `author_summary` method whose implementation is required, then a `summary` method that has a default implementation that calls the `author_summary` method: @@ -519,7 +519,7 @@ let s = 3.to_string(); ``` Blanket implementations appear in the documentation for the trait in the -“Implementers” section. +“Implementors” section. 
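As a rough sketch of the blanket-implementation idea (illustrative only, not the book’s `impl<T: Display> ToString for T` example), a crate could implement its own trait for every type that implements `Display`:

```rust
use std::fmt::Display;

pub trait Summarizable {
    fn summary(&self) -> String;
}

// A blanket implementation: every type that implements `Display` also
// gets `Summarizable`, without a separate impl block per type.
impl<T: Display> Summarizable for T {
    fn summary(&self) -> String {
        format!("(Read more from {}...)", self)
    }
}

fn main() {
    // `i32` implements `Display`, so it picks up the blanket implementation.
    println!("{}", 3.summary());
}
```

This is the same mechanism the standard library uses for `to_string`: any `Display` type gets `ToString` for free.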
Traits and trait bounds let us write code that uses generic type parameters in order to reduce duplication, but still specify to the compiler exactly what diff --git a/src/doc/book/second-edition/src/ch10-03-lifetime-syntax.md b/src/doc/book/second-edition/src/ch10-03-lifetime-syntax.md index 42b3a1746f..33a4bf510b 100644 --- a/src/doc/book/second-edition/src/ch10-03-lifetime-syntax.md +++ b/src/doc/book/second-edition/src/ch10-03-lifetime-syntax.md @@ -66,7 +66,7 @@ error: `x` does not live long enough ``` The variable `x` doesn’t “live long enough.” Why not? Well, `x` is going to go -out of scope when we hit the closing curly brace on line 7, ending the inner +out of scope when we hit the closing curly bracket on line 7, ending the inner scope. But `r` is valid for the outer scope; its scope is larger and we say that it “lives longer.” If Rust allowed this code to work, `r` would be referencing memory that was deallocated when `x` went out of scope, and @@ -101,9 +101,9 @@ Listing 10-18 with annotations showing the lifetimes of the variables: correct? I want to leave a note for production, make sure we can make that clear --> +line and ends with the first closing curly bracket on the 7th line. Do you +think the text art comments work or should we make an SVG diagram that has +nicer looking arrows and labels? /Carol --> We’ve annotated the lifetime of `r` with `'a` and the lifetime of `x` with `'b`. As you can see, the inner `'b` block is much smaller than the outer `'a` @@ -184,7 +184,7 @@ and below). If these topics are confusing you in this context, I'd be interested to know if rereading Chapter 4 clears up that confusion. /Carol --> -Refer back to the “String Slices as Arguments” section of Chapter 4 for more +Refer back to the “String Slices as Parameters” section of Chapter 4 for more discussion about why these are the arguments we want. If we try to implement the `longest` function as shown in Listing 10-22, it diff --git a/src/doc/book/second-edition/src/ch11-01-writing-tests.md b/src/doc/book/second-edition/src/ch11-01-writing-tests.md index eef5fa2270..ca74faba26 100644 --- a/src/doc/book/second-edition/src/ch11-01-writing-tests.md +++ b/src/doc/book/second-edition/src/ch11-01-writing-tests.md @@ -211,8 +211,7 @@ helps us check that our code is functioning in the way we intend. Remember all the way back in Chapter 5, Listing 5-9, where we had a `Rectangle` struct and a `can_hold` method, repeated here in Listing 11-5. Let’s put this -code in *src/lib.rs* instead of *src/main.rs* and write some tests for it using -the `assert!` macro. +code in *src/lib.rs* and write some tests for it using the `assert!` macro. Filename: src/lib.rs @@ -592,7 +591,7 @@ Listing 11-8 shows how we’d write a test that checks the error conditions of Filename: src/lib.rs ```rust -struct Guess { +pub struct Guess { value: u32, } @@ -638,7 +637,7 @@ Looks good! Now let’s introduce a bug in our code, by removing the condition that the `new` function will panic if the value is greater than 100: ```rust -# struct Guess { +# pub struct Guess { # value: u32, # } # @@ -686,7 +685,7 @@ different messages depending on whether the value was too small or too large: Filename: src/lib.rs ```rust -struct Guess { +pub struct Guess { value: u32, } @@ -764,7 +763,7 @@ test result: FAILED. 
0 passed; 1 failed; 0 ignored; 0 measured ``` The failure message indicates that this test did indeed panic as we expected, -but the panic message `did not include expected string 'Guess value must be +but the panic message did not include expected string `'Guess value must be less than or equal to 100'`. We can see the panic message that we did get, which in this case was `Guess value must be greater than or equal to 1, got 200.` We could then start figuring out where our bug was! diff --git a/src/doc/book/second-edition/src/ch11-02-running-tests.md b/src/doc/book/second-edition/src/ch11-02-running-tests.md index 471f429b70..23c1cde225 100644 --- a/src/doc/book/second-edition/src/ch11-02-running-tests.md +++ b/src/doc/book/second-edition/src/ch11-02-running-tests.md @@ -147,7 +147,7 @@ test result: FAILED. 1 passed; 1 failed; 0 ignored; 0 measured Note that the output for the tests and the test results is interleaved; this is because the tests are running in parallel as we talked about in the previous section. Try using both the `--test-threads=1` option and the `--nocapture` -function and see what the output looks like then! +flag and see what the output looks like then! ### Running a Subset of Tests by Name diff --git a/src/doc/book/second-edition/src/ch12-04-testing-the-librarys-functionality.md b/src/doc/book/second-edition/src/ch12-04-testing-the-librarys-functionality.md index 8f7279e934..49a614792d 100644 --- a/src/doc/book/second-edition/src/ch12-04-testing-the-librarys-functionality.md +++ b/src/doc/book/second-edition/src/ch12-04-testing-the-librarys-functionality.md @@ -80,7 +80,7 @@ vector doesn’t match a vector containing the line `"safe, fast, productive."`. Filename: src/lib.rs -``` +```rust pub fn search<'a>(query: &str, contents: &'a str) -> Vec<&'a str> { vec![] } diff --git a/src/doc/book/second-edition/src/ch12-06-writing-to-stderr-instead-of-stdout.md b/src/doc/book/second-edition/src/ch12-06-writing-to-stderr-instead-of-stdout.md index 020b573ffb..b7c8f89f96 100644 --- a/src/doc/book/second-edition/src/ch12-06-writing-to-stderr-instead-of-stdout.md +++ b/src/doc/book/second-edition/src/ch12-06-writing-to-stderr-instead-of-stdout.md @@ -4,7 +4,7 @@ At the moment we’re writing all of our output to the terminal with the `println!` function. Most terminals provide two kinds of output: *standard output* for general information (sometimes abbreviated as `stdout` in code), and *standard error* for error messages (`stderr`). This distinction enables -users to choose whether to direct a the successful output of a program to a +users to choose to direct the successful output of a program to a file but still print error messages to the screen. The `println!` function is only capable of printing to standard output, though, diff --git a/src/doc/book/second-edition/src/ch13-00-functional-features.md b/src/doc/book/second-edition/src/ch13-00-functional-features.md index 7ee07f231f..77fd72a412 100644 --- a/src/doc/book/second-edition/src/ch13-00-functional-features.md +++ b/src/doc/book/second-edition/src/ch13-00-functional-features.md @@ -1,18 +1,13 @@ # Functional Language features in Rust: Iterators and Closures - - - Rust’s design has taken inspiration from a lot of existing languages and techniques, and one significant influence is *functional programming*. -Programming in a functional style often includes using functions as values in -arguments or return values of other functions, assigning functions to variables -for later execution, and so forth. 
We won’t debate here the issue of what, -exactly, functional programming is or is not, but will instead show off some -features of Rust that are similar to features in many languages often referred -to as functional. +Programming in a functional style often includes using functions as values, by +passing them in arguments, returning them from other functions, assigning them +to variables for later execution, and so forth. We won’t debate here the issue +of what, exactly, functional programming is or is not, but will instead show +off some features of Rust that are similar to features in many languages often +referred to as functional. More specifically, we’re going to cover: diff --git a/src/doc/book/second-edition/src/ch13-01-closures.md b/src/doc/book/second-edition/src/ch13-01-closures.md index fb03d0e552..2d057add29 100644 --- a/src/doc/book/second-edition/src/ch13-01-closures.md +++ b/src/doc/book/second-edition/src/ch13-01-closures.md @@ -1,48 +1,31 @@ ## Closures: Anonymous Functions that can Capture their Environment - - - -Rust’s *closures* are anonymous functions that you can save in a variable or -pass as arguments to other functions. You can create the closure in one place, -and then call the closure to evaluate it in a different context. Unlike -functions, closures are allowed to capture values from the scope in which they -are called. We’re going to demonstrate how these features of closures allow for -code reuse and customization of behavior. - - - +Rust’s *closures* are anonymous functions you can save in a variable or pass as +arguments to other functions. You can create the closure in one place, and then +call the closure to evaluate it in a different context. Unlike functions, +closures are able to capture values from the scope in which they are called. +We’re going to demonstrate how these features of closures allow for code reuse +and customization of behavior. ### Creating an Abstraction of Behavior Using a Closure -Let’s work on an example that will show a situation where storing a closure to -be executed at a later time is useful. We’ll talk about the syntax of closures, -type inference, and traits along the way. - -The hypothetical situation is this: we’re working at a startup that’s making an -app to generate custom exercise workout plans. The backend is written in Rust, -and the algorithm that generates the workout plan takes into account many -different factors like the app user’s age, their Body Mass Index, their -preferences, their recent workouts, and an intensity number they specify. The -actual algorithm used isn’t important in this example; what’s important is that -this calculation takes a few seconds. We only want to call this algorithm if we -need to, and we only want to call it once, so that we aren’t making the user -wait more than they need to. We’re going to simulate calling this hypothetical -algorithm by calling the `simulated_expensive_calculation` function shown in -Listing 13-1 instead, which will print `calculating slowly...`, wait for two -seconds, and then return whatever number we passed in: +Let’s work on an example of a situation in which it’s useful to store a closure +to be executed at a later time. We’ll talk about the syntax of closures, type +inference, and traits along the way. + +The hypothetical situation is this: we work at a startup that’s making an app +to generate custom exercise workout plans. 
The backend is written in Rust, and +the algorithm that generates the workout plan takes into account many different +factors, like the app user’s age, Body Mass Index, preferences, recent +workouts, and an intensity number they specify. The actual algorithm used isn’t +important in this example; what’s important is that this calculation takes a +few seconds. We only want to call this algorithm when we need to, and only call +it once, so we aren’t making the user wait more than necessary. + +We’ll simulate calling this hypothetical algorithm with the +`simulated_expensive_calculation` function shown in Listing 13-1, which will +print `calculating slowly...`, wait for two seconds, and then return whatever +number we passed in: Filename: src/main.rs @@ -50,38 +33,32 @@ seconds, and then return whatever number we passed in: use std::thread; use std::time::Duration; -fn simulated_expensive_calculation(intensity: i32) -> i32 { +fn simulated_expensive_calculation(intensity: u32) -> u32 { println!("calculating slowly..."); thread::sleep(Duration::from_secs(2)); intensity } ``` -Listing 13-1: A function we’ll use to stand in for a -hypothetical calculation that takes about two seconds to run +Listing 13-1: A function to stand in for a hypothetical +calculation that takes about two seconds to run -Next, we have a `main` function that contains the parts of the workout app that -are important for this example. This represents the code that the app would -call when a user asks for a workout plan. Because the interaction with the -app’s frontend isn’t relevant to the use of closures, we’re going to hardcode -values representing inputs to our program and print the outputs. +Next, we have a `main` function that contains the parts of the workout app +important for this example. This represents the code that the app would call +when a user asks for a workout plan. Because the interaction with the app’s +frontend isn’t relevant to the use of closures, we’re going to hardcode values +representing inputs to our program and print the outputs. -The inputs to the program are: +The required inputs are: -- An `intensity` number from the user, specified when they request a workout, - so they can indicate whether they’d like a low intensity workout or a high +* **An intensity number from the user**, specified when they request a + workout to indicate whether they’d like a low intensity workout or a high intensity workout -- A random number that will generate some variety in the workout plans +* **A random number** that will generate some variety in the workout plans -The output the program prints will be the recommended workout plan. +The output will be the recommended workout plan. -Listing 13-2 shows the `main` function we’re going to use. We’ve hardcoded the -variable `simulated_user_specified_value` to 10 and the variable -`simulated_random_number` to 7 for simplicity’s sake; in an actual program we’d -get the intensity number from the app frontend and we’d use the `rand` crate to -generate a random number like we did in the Guessing Game example in Chapter 2. -The `main` function calls a `generate_workout` function with the simulated -input values: +Listing 13-2 shows the `main` function we’re going to use. 
Filename: src/main.rs @@ -90,19 +67,28 @@ fn main() { let simulated_user_specified_value = 10; let simulated_random_number = 7; - generate_workout(simulated_user_specified_value, simulated_random_number); + generate_workout( + simulated_user_specified_value, + simulated_random_number + ); } -# fn generate_workout(intensity: i32, random_number: i32) {} +# fn generate_workout(intensity: u32, random_number: u32) {} ``` -Listing 13-2: A `main` function containing hardcoded -values to simulate user input and random number generation inputs to the -`generate_workout` function +Listing 13-2: A `main` function with hardcoded values to +simulate user input and random number generation + +We’ve hardcoded the variable `simulated_user_specified_value` to 10 and the +variable `simulated_random_number` to 7 for simplicity’s sake; in an actual +program we’d get the intensity number from the app frontend and we’d use the +`rand` crate to generate a random number like we did in the Guessing Game +example in Chapter 2. The `main` function calls a `generate_workout` function +with the simulated input values. -That’s the context of what we’re working on. The `generate_workout` function in -Listing 13-3 contains the business logic of the app that we’re most concerned -with in this example. The rest of the code changes in this example will be made -to this function: +There’s the context, so let’s get to the algorithm. The `generate_workout` +function in Listing 13-3 contains the business logic of the app that we’re most +concerned with in this example. The rest of the code changes in this example +will be made to this function: Filename: src/main.rs @@ -110,13 +96,13 @@ to this function: # use std::thread; # use std::time::Duration; # -# fn simulated_expensive_calculation(num: i32) -> i32 { +# fn simulated_expensive_calculation(num: u32) -> u32 { # println!("calculating slowly..."); # thread::sleep(Duration::from_secs(2)); # num # } # -fn generate_workout(intensity: i32, random_number: i32) { +fn generate_workout(intensity: u32, random_number: u32) { if intensity < 25 { println!( "Today, do {} pushups!", @@ -133,48 +119,46 @@ fn generate_workout(intensity: i32, random_number: i32) { println!( "Today, run for {} minutes!", simulated_expensive_calculation(intensity) - ) + ); } } } ``` -Listing 13-3: The business logic of the program that -prints the workout plans based on the inputs and calls to the -`simulated_expensive_calculation` function +Listing 13-3: The business logic that prints the workout +plans based on the inputs and calls to the `simulated_expensive_calculation` +function The code in Listing 13-3 has multiple calls to the slow calculation function. The first `if` block calls `simulated_expensive_calculation` twice, the `if` -inside the outer `else` doesn’t call it at all, and the code inside the `else` -case inside the outer `else` calls it once. - - +inside the outer `else` doesn’t call it at all, and the code inside the +second `else` case calls it once. The desired behavior of the `generate_workout` function is to first check if the user wants a low intensity workout (indicated by a number less than 25) or -a high intensity workout (25 or more). Low intensity workout plans will -recommend a number of pushups and situps based on the complex algorithm we’re -simulating with the `simulated_expensive_calculation` function, which needs the -intensity number as an input. +a high intensity workout (25 or more). 
+ +Low intensity workout plans will recommend a number of pushups and situps based +on the complex algorithm we’re simulating. If the user wants a high intensity workout, there’s some additional logic: if the value of the random number generated by the app happens to be 3, the app -will recommend a break and hydration instead. If not, the user will get a high -intensity workout of a number of minutes of running that comes from the complex -algorithm. - -The data science team has let us know that there are going to be some changes -to the way we have to call the algorithm. To simplify the update when those -changes happen, we would like to refactor this code to have only a single call -to the `simulated_expensive_calculation` function. We also want to get rid of -the spot where we’re currently calling the function twice unnecessarily, and -we don’t want to add any other calls to that function in the process. That is, -we don’t want to call it if we’re in the case where the result isn’t needed at -all, and we still want to call it only once in the last case. - -There are many ways we could restructure this program. The way we’re going to -try first is extracting the duplicated call to the expensive calculation -function into a variable, as shown in Listing 13-4: +will recommend a break and hydration. If not, the user will get a number of +minutes of running based on the complex algorithm. + +The data science team has let us know that we’ll have to make some changes to +the way we call the algorithm in the future. To simplify the update when those +changes happen, we want to refactor this code so it only calls the +`simulated_expensive_calculation` function once. We also want to cut the place +where we’re currently calling the function twice unnecessarily without adding +any other calls to that function in the process. That is, we don’t want to call +it if the result isn’t needed, and we still want to call it only once. + +#### Refactoring Using Functions + +There are many ways we could restructure this program. First we’ll try +extracting the duplicated call to the expensive calculation function into a +variable, as shown in Listing 13-4: Filename: src/main.rs @@ -182,13 +166,13 @@ function into a variable, as shown in Listing 13-4: # use std::thread; # use std::time::Duration; # -# fn simulated_expensive_calculation(num: i32) -> i32 { +# fn simulated_expensive_calculation(num: u32) -> u32 { # println!("calculating slowly..."); # thread::sleep(Duration::from_secs(2)); # num # } # -fn generate_workout(intensity: i32, random_number: i32) { +fn generate_workout(intensity: u32, random_number: u32) { let expensive_result = simulated_expensive_calculation(intensity); @@ -208,17 +192,15 @@ fn generate_workout(intensity: i32, random_number: i32) { println!( "Today, run for {} minutes!", expensive_result - ) + ); } } } ``` Listing 13-4: Extracting the calls to -`simulated_expensive_calculation` to one place before the `if` blocks and -storing the result in the `expensive_result` variable - - +`simulated_expensive_calculation` to one place and storing the result in the +`expensive_result` variable This change unifies all the calls to `simulated_expensive_calculation` and solves the problem of the first `if` block calling the function twice @@ -226,15 +208,14 @@ unnecessarily. Unfortunately, we’re now calling this function and waiting for the result in all cases, which includes the inner `if` block that doesn’t use the result value at all. 
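
To make the cost of that last point concrete, here is a small, self-contained sketch (an editorial illustration, not part of the patch above) that reconstructs the shape of Listing 13-4 and times the branch that never uses the result; the break message, the chosen inputs, and the timing harness are illustrative additions rather than text from the listing:

```rust
use std::thread;
use std::time::{Duration, Instant};

fn simulated_expensive_calculation(intensity: u32) -> u32 {
    println!("calculating slowly...");
    thread::sleep(Duration::from_secs(2));
    intensity
}

fn generate_workout(intensity: u32, random_number: u32) {
    // The Listing 13-4 refactor: one unconditional call before the branches.
    let expensive_result = simulated_expensive_calculation(intensity);

    if intensity < 25 {
        println!("Today, do {} pushups!", expensive_result);
        println!("Next, do {} situps!", expensive_result);
    } else {
        if random_number == 3 {
            // This branch never reads `expensive_result`, yet we already waited for it.
            println!("Take a break today!");
        } else {
            println!("Today, run for {} minutes!", expensive_result);
        }
    }
}

fn main() {
    let start = Instant::now();
    generate_workout(26, 3); // inputs chosen to hit the branch that ignores the result
    println!("elapsed: {:?}", start.elapsed());
}
```

Running this shows roughly a two-second delay before the break recommendation prints, which is exactly the waste the closure-based refactoring that follows removes.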
-We want to be able to specify some code in one place in our program, but then -only execute that code if we actually need the result in some other place in -our program. This is a use case for closures! +We want to define code in one place in our program, but only *execute* that +code where we actually need the result. This is a use case for closures! -### Closures Store Code to be Executed Later +#### Refactoring with Closures to Store Code for Later Execution Instead of always calling the `simulated_expensive_calculation` function before -the `if` blocks, we can define a closure and store the closure in a variable -instead of the result as shown in Listing 13-5. We can actually choose to move +the `if` blocks, we can define a closure and store the *closure* in a variable +rather than storing the result, as shown in Listing 13-5. We can actually move the whole body of `simulated_expensive_calculation` within the closure we’re introducing here: @@ -252,43 +233,34 @@ let expensive_closure = |num| { # expensive_closure(5); ``` -Listing 13-5: Defining a closure with the body that was -in the expensive function and store the closure in the `expensive_closure` -variable - - - - -The closure definition is the part after the `=` that we’re assigning to the -variable `expensive_closure`. To define a closure, we start with a pair of -vertical pipes (`|`). Inside the pipes is where we specify the parameters to -the closure; this syntax was chosen because of its similarity to closure -definitions in Smalltalk and Ruby. This closure has one parameter named `num`; -if we had more than one parameter, we would separate them with commas, like -`|param1, param2|`. - -After the parameters, we put curly braces that hold the body of the closure. -The curly braces are optional if the closure body only has one line. After the -curly braces, we need a semicolon to go with the `let` statement. The value -returned from the last line in the closure body (`num`), since that line -doesn’t end in a semicolon, will be the value returned from the closure when -it’s called, just like in function bodies. +Listing 13-5: Defining a closure and storing it in the +`expensive_closure` variable + +The closure definition comes after the `=` to assign it to the variable +`expensive_closure`. To define a closure, we start with a pair of vertical +pipes (`|`), inside which we specify the parameters to the closure; this syntax +was chosen because of its similarity to closure definitions in Smalltalk and +Ruby. This closure has one parameter named `num`; if we had more than one +parameter, we would separate them with commas, like `|param1, param2|`. + +After the parameters, we place curly brackets that hold the body of the +closure—these are optional if the closure body is a single expression. The end +of the closure, after the curly brackets, needs a semicolon to complete the +`let` statement. The value returned from the last line in the closure body +(`num`) will be the value returned from the closure when it’s called, since +that line doesn’t end in a semicolon; just like in function bodies. Note that this `let` statement means `expensive_closure` contains the *definition* of an anonymous function, not the *resulting value* of calling the -anonymous function. Recall the reason we’re using a closure is because we want -to define the code to call at one point, store that code, and actually call it -at a later point; the code we want to call is now stored in `expensive_closure`. +anonymous function. 
Recall that we’re using a closure because we want to define +the code to call at one point, store that code, and actually call it at a later +point; the code we want to call is now stored in `expensive_closure`. Now that we have the closure defined, we can change the code in the `if` blocks -to call the closure in order to execute the code and get the resulting value. -Calling a closure looks very similar to calling a function; we specify the -variable name that holds the closure definition and follow it with parentheses -containing the argument values we want to use for that call as shown in Listing -13-6: +to call the closure, in order to execute the code and get the resulting value. +We call a closure like we do a function: we specify the variable name that +holds the closure definition and follow it with parentheses containing the +argument values we want to use, as shown in Listing 13-6: Filename: src/main.rs @@ -296,7 +268,7 @@ containing the argument values we want to use for that call as shown in Listing # use std::thread; # use std::time::Duration; # -fn generate_workout(intensity: i32, random_number: i32) { +fn generate_workout(intensity: u32, random_number: u32) { let expensive_closure = |num| { println!("calculating slowly..."); thread::sleep(Duration::from_secs(2)); @@ -319,7 +291,7 @@ fn generate_workout(intensity: i32, random_number: i32) { println!( "Today, run for {} minutes!", expensive_closure(intensity) - ) + ); } } } @@ -328,56 +300,44 @@ fn generate_workout(intensity: i32, random_number: i32) { Listing 13-6: Calling the `expensive_closure` we’ve defined -Now we’ve achieved the goal of unifying where the expensive calculation is -called to one place, and we’re only executing that code where we need the -results. However, we’ve reintroduced one of the problems from Listing 13-3: -we’re still calling the closure twice in the first `if` block, which will call -the expensive code twice and make the user wait twice as long as they need to. -We could fix this problem by creating a variable local to that `if` block to -hold the result of calling the closure, but there’s another solution we can use -since we have a closure. We’ll get back to that solution in a bit; let’s first -talk about why there aren’t type annotations in the closure definition and the -traits involved with closures. +Now the expensive calculation is called in only one place, and we’re only +executing that code where we need the results. + +We have, however, reintroduced one of the problems from Listing 13-3: we’re +still calling the closure twice in the first `if` block, which will call the +expensive code twice and make the user wait twice as long as they need to. We +could fix this problem by creating a variable local to that `if` block to hold +the result of calling the closure, but closures provide us with another +solution. We’ll get back to that solution in a bit; let’s first talk about why +there aren’t type annotations in the closure definition and the traits involved +with closures. ### Closure Type Inference and Annotation -Closures differ from functions defined with the `fn` keyword in a few -ways. The first is that closures don’t require you to annotate the types of the +Closures differ from functions defined with the `fn` keyword in a few ways. The +first is that closures don’t require you to annotate the types of the parameters or the return value like `fn` functions do. - - -Type annotations are required on functions because they are part of an -explicit interface exposed to your users. 
Defining this interface rigidly is -important for ensuring that everyone agrees on what types of values a function -uses and returns. Closures aren’t used in an exposed interface like this, -though: they’re stored in variables and used without naming them and exposing -them to be invoked by users of our library. +Type annotations are required on functions because they are part of an explicit +interface exposed to your users. Defining this interface rigidly is important +for ensuring that everyone agrees on what types of values a function uses and +returns. Closures aren’t used in an exposed interface like this, though: +they’re stored in variables and used without naming them and exposing them to +users of our library. Additionally, closures are usually short and only relevant within a narrow context rather than in any arbitrary scenario. Within these limited contexts, the compiler is reliably able to infer the types of the parameters and return -type similarly to how it’s able to infer the types of most variables. Being -forced to annotate the types in these small, anonymous functions would be -annoying and largely redundant with the information the compiler already has -available. +type, similar to how it’s able to infer the types of most variables. - - +Making programmers annotate the types in these small, anonymous functions would +be annoying and largely redundant with the information the compiler already has +available. Like variables, we can choose to add type annotations if we want to increase -explicitness and clarity in exchange for being more verbose than is strictly +explicitness and clarity at the cost of being more verbose than is strictly necessary; annotating the types for the closure we defined in Listing 13-4 -would look like the definition shown here in Listing 13-7: +would look like the definition shown in Listing 13-7: Filename: src/main.rs @@ -385,7 +345,7 @@ would look like the definition shown here in Listing 13-7: # use std::thread; # use std::time::Duration; # -let expensive_closure = |num: i32| -> i32 { +let expensive_closure = |num: u32| -> u32 { println!("calculating slowly..."); thread::sleep(Duration::from_secs(2)); num @@ -395,61 +355,35 @@ let expensive_closure = |num: i32| -> i32 { Listing 13-7: Adding optional type annotations of the parameter and return value types in the closure - - - - - - The syntax of closures and functions looks more similar with type annotations. Here’s a vertical comparison of the syntax for the definition of a function that adds one to its parameter, and a closure that has the same behavior. We’ve added some spaces here to line up the relevant parts). This illustrates how -closure syntax is similar to function syntax except for the use of pipes rather -than parentheses and the amount of syntax that is optional: - - - +closure syntax is similar to function syntax, except for the use of pipes and +the amount of syntax that is optional: ```rust,ignore -fn add_one_v1 (x: i32) -> i32 { x + 1 } -let add_one_v2 = |x: i32| -> i32 { x + 1 }; +fn add_one_v1 (x: u32) -> u32 { x + 1 } +let add_one_v2 = |x: u32| -> u32 { x + 1 }; let add_one_v3 = |x| { x + 1 }; let add_one_v4 = |x| x + 1 ; ``` - - - - - The first line shows a function definition, and the second line shows a fully annotated closure definition. The third line removes the type annotations from -the closure definition, and the fourth line removes the braces that are -optional since the closure body only has one line. 
These are all valid +the closure definition, and the fourth line removes the brackets that are +optional, since the closure body only has one expression. These are all valid definitions that will produce the same behavior when they’re called. - - - Closure definitions will have one concrete type inferred for each of their parameters and for their return value. For instance, Listing 13-8 shows the -definition of a short closure that just returns the value it gets as a -parameter. This closure isn’t very useful except for the purposes of this -example. Note that we haven’t added any type annotations to the definition: if -we then try to call the closure twice, using a `String` as an argument the -first time and an `i32` the second time, we’ll get an error: +definition of a short closure that just returns the value it receives as a +parameter. + +This closure isn’t very useful except for the purposes of this example. Note +that we haven’t added any type annotations to the definition: if we then try to +call the closure twice, using a `String` as an argument the first time and an +`u32` the second time, we’ll get an error: Filename: src/main.rs @@ -477,40 +411,34 @@ error[E0308]: mismatched types found type `{integer}` ``` - - The first time we call `example_closure` with the `String` value, the compiler infers the type of `x` and the return type of the closure to be `String`. Those types are then locked in to the closure in `example_closure`, and we get a type error if we try to use a different type with the same closure. -### Using Closures with Generic Parameters and the `Fn` Traits +### Storing Closures Using Generic Parameters and the `Fn` Traits Returning to our workout generation app, in Listing 13-6 we left our code still -calling the expensive calculation closure more times than it needs to. In each -place throughout our code, if we need the results of the expensive closure more -than once, we could save the result in a variable for reuse and use the -variable instead of calling the closure again. This could be a lot of repeated -code saving the results in a variety of places. - -However, because we have a closure for the expensive calculation, we have -another solution available to us. We can create a struct that will hold the -closure and the resulting value of calling the closure. The struct will only -execute the closure if we need the resulting value, and it will cache the -resulting value so that the rest of our code doesn’t have to be responsible for -saving and reusing the result. You may know this pattern as *memoization* or -*lazy evaluation*. +calling the expensive calculation closure more times than it needs to. One +option to solve this issue is to save the result of the expensive closure in a +variable for reuse and use the variable instead in each place we need the +result instead of calling the closure again. This method, though, could result +in a lot of repeated code. + +Fortunately, we have another solution available to us. We can create a struct +that will hold the closure and the resulting value of calling the closure. The +struct will only execute the closure if we need the resulting value, and it +will cache the resulting value so that the rest of our code doesn’t have to be +responsible for saving and reusing the result. You may know this pattern as +*memoization* or *lazy evaluation*. In order to make a struct that holds a closure, we need to be able to specify -the type of the closure. 
Each closure instance has its own unique anonymous -type: that is, even if two closures have the same signature, their types are -still considered to be different. In order to define structs, enums, or -function parameters that use closures, we use generics and trait bounds like we -discussed in Chapter 10. - - - +the type of the closure, because a struct definition needs to know the types of +each of its fields. Each closure instance has its own unique anonymous type: +that is, even if two closures have the same signature, their types are still +considered different. In order to define structs, enums, or function parameters +that use closures, we use generics and trait bounds like we discussed in +Chapter 10. The `Fn` traits are provided by the standard library. All closures implement one of the traits `Fn`, `FnMut`, or `FnOnce`. We’ll discuss the difference @@ -518,9 +446,9 @@ between these traits in the next section on capturing the environment; in this example, we can use the `Fn` trait. We add types to the `Fn` trait bound to represent the types of the parameters -and return values that the closures must have in order to match this trait -bound. In this case, our closure has a parameter of type `i32` and returns an -`i32`, so the trait bound we specify is `Fn(i32) -> i32`. +and return values the closures must have in order to match this trait bound. In +this case, our closure has a parameter of type `u32` and returns an `u32`, so +the trait bound we specify is `Fn(u32) -> u32`. Listing 13-9 shows the definition of the `Cacher` struct that holds a closure and an optional result value: @@ -529,10 +457,10 @@ and an optional result value: ```rust struct Cacher - where T: Fn(i32) -> i32 + where T: Fn(u32) -> u32 { calculation: T, - value: Option, + value: Option, } ``` @@ -540,33 +468,38 @@ struct Cacher closure in `calculation` and an optional result in `value` The `Cacher` struct has a `calculation` field of the generic type `T`. The -trait bounds on `T` specify that `T` is a closure by using the `Fn` trait. Any -closure we want to store in the `calculation` field of a `Cacher` instance must -have one `i32` parameter (specified within the parentheses after `Fn`) and must -return an `i32` (specified after the `->`). - -The `value` field is of type `Option`. Before we execute the closure, -`value` will be `None`. If the code using a `Cacher` asks for the result of the -closure, we’ll execute the closure at that time and store the result within a -`Some` variant in the `value` field. Then if the code asks for the result of -the closure again, instead of executing the closure again, we’ll return the -result that we’re holding in the `Some` variant. - -The logic around the `value` field that we’ve just described is defined in -Listing 13-10: +trait bounds on `T` specify that it’s a closure by using the `Fn` trait. Any +closure we want to store in the `calculation` field must have one `u32` +parameter (specified within the parentheses after `Fn`) and must return an +`u32` (specified after the `->`). + +> Note: Functions implement all three of the `Fn` traits too. If what we want to +> do doesn’t require capturing a value from the environment, we can use a +> function rather than a closure where we need something that implements an `Fn` +> trait. + +The `value` field is of type `Option`. Before we execute the closure, +`value` will be `None`. 
When code using a `Cacher` asks for the *result* of the +closure, the `Cacher` will execute the closure at that time and store the +result within a `Some` variant in the `value` field. Then if the code asks for +the result of the closure again, instead of executing the closure again, the +`Cacher` will return the result held in the `Some` variant. + +The logic around the `value` field we’ve just described is defined in Listing +13-10: Filename: src/main.rs ```rust # struct Cacher -# where T: Fn(i32) -> i32 +# where T: Fn(u32) -> u32 # { # calculation: T, -# value: Option, +# value: Option, # } # impl Cacher - where T: Fn(i32) -> i32 + where T: Fn(u32) -> u32 { fn new(calculation: T) -> Cacher { Cacher { @@ -575,7 +508,7 @@ impl Cacher } } - fn value(&mut self, arg: i32) -> i32 { + fn value(&mut self, arg: u32) -> u32 { match self.value { Some(v) => v, None => { @@ -588,24 +521,17 @@ impl Cacher } ``` - - -Listing 13-10: Implementations on `Cacher` of an -associated function named `new` and a method named `value` that manage the -caching logic - -The fields on the `Cacher` struct are private since we want `Cacher` to manage -their values rather than letting the calling code potentially change the values -in these fields directly. The `Cacher::new` function takes a generic parameter -`T`, which we’ve defined in the context of the `impl` block to have the same -trait bound as the `Cacher` struct. `Cacher::new` returns a `Cacher` instance -that holds the closure specified in the `calculation` field and a `None` value -in the `value` field, since we haven’t executed the closure yet. +Listing 13-10: The caching logic of `Cacher` + +We want `Cacher` to manage the struct fields’ values, rather than letting the +calling code potentially change the values in these fields directly, so these +fields are private. + +The `Cacher::new` function takes a generic parameter `T`, which we’ve defined +as having the same trait bound as the `Cacher` struct. Then `Cacher::new` +returns a `Cacher` instance that holds the closure specified in the +`calculation` field and a `None` value in the `value` field, since we haven’t +executed the closure yet. When the calling code wants the result of evaluating the closure, instead of calling the closure directly, it will call the `value` method. 
This method @@ -626,14 +552,14 @@ Listing 13-11 shows how we can use this `Cacher` struct in the # use std::time::Duration; # # struct Cacher -# where T: Fn(i32) -> i32 +# where T: Fn(u32) -> u32 # { # calculation: T, -# value: Option, +# value: Option, # } # # impl Cacher -# where T: Fn(i32) -> i32 +# where T: Fn(u32) -> u32 # { # fn new(calculation: T) -> Cacher { # Cacher { @@ -642,7 +568,7 @@ Listing 13-11 shows how we can use this `Cacher` struct in the # } # } # -# fn value(&mut self, arg: i32) -> i32 { +# fn value(&mut self, arg: u32) -> u32 { # match self.value { # Some(v) => v, # None => { @@ -654,7 +580,7 @@ Listing 13-11 shows how we can use this `Cacher` struct in the # } # } # -fn generate_workout(intensity: i32, random_number: i32) { +fn generate_workout(intensity: u32, random_number: u32) { let mut expensive_result = Cacher::new(|num| { println!("calculating slowly..."); thread::sleep(Duration::from_secs(2)); @@ -677,7 +603,7 @@ fn generate_workout(intensity: i32, random_number: i32) { println!( "Today, run for {} minutes!", expensive_result.value(intensity) - ) + ); } } } @@ -686,25 +612,26 @@ fn generate_workout(intensity: i32, random_number: i32) { Listing 13-11: Using `Cacher` in the `generate_workout` function to abstract away the caching logic - - Instead of saving the closure in a variable directly, we save a new instance of `Cacher` that holds the closure. Then, in each place we want the result, we call the `value` method on the `Cacher` instance. We can call the `value` method as many times as we want, or not call it at all, and the expensive -calculation will be run a maximum of once. Try running this program with the -`main` function from Listing 13-2, and change the values in the -`simulated_user_specified_value` and `simulated_random_number` variables to -verify that in all of the cases in the various `if` and `else` blocks, -`calculating slowly...` printed by the closure only shows up once and only when -needed. - -The `Cacher` takes care of the logic necessary to ensure we aren’t calling the +calculation will be run a maximum of once. + +Try running this program with the `main` function from Listing 13-2. Change the +values in the `simulated_user_specified_value` and `simulated_random_number` +variables to verify that in all of the cases in the various `if` and `else` +blocks, `calculating slowly...` only shows up once and only when needed. The +`Cacher` takes care of the logic necessary to ensure we aren’t calling the expensive calculation more than we need to, so that `generate_workout` can -focus on the business logic. Caching values is a more generally useful behavior -that we might want to use in other parts of our code with other closures as -well. However, there are a few problems with the current implementation of -`Cacher` that would make reusing it in different contexts difficult. +focus on the business logic. + +### Limitations of the `Cacher` Implementation + +Caching values is a generally useful behavior that we might want to use in +other parts of our code with different closures. However, there are a few +problems with the current implementation of `Cacher` that would make reusing it +in different contexts difficult. The first problem is a `Cacher` instance assumes it will always get the same value for the parameter `arg` to the `value` method. That is, this test of @@ -723,9 +650,9 @@ fn call_with_different_values() { ``` This test creates a new `Cacher` instance with a closure that returns the value -passed into it. 
We call the `value` method on this `Cacher` instance with -an `arg` value of 1 and then an `arg` value of 2, and we expect that the call -to `value` with the `arg` value of 2 returns 2. +passed into it. We call the `value` method on this `Cacher` instance with an +`arg` value of 1 and then an `arg` value of 2, and we expect that the call to +`value` with the `arg` value of 2 should return 2. Run this with the `Cacher` implementation from Listing 13-9 and Listing 13-10 and the test will fail on the `assert_eq!` with this message: @@ -736,50 +663,33 @@ thread 'call_with_different_arg_values' panicked at 'assertion failed: ``` The problem is that the first time we called `c.value` with 1, the `Cacher` -instance saved `Some(1)` in `self.value`. After that, no matter what we pass -in to the `value` method, it will always return 1. +instance saved `Some(1)` in `self.value`. After that, no matter what we pass in +to the `value` method, it will always return 1. Try modifying `Cacher` to hold a hash map rather than a single value. The keys of the hash map will be the `arg` values that are passed in, and the values of the hash map will be the result of calling the closure on that key. Instead of looking at whether `self.value` directly has a `Some` or a `None` value, the -`value` function will look up the `arg` in the hash map and return the value if -it’s present. If it’s not present, the `Cacher` will call the closure and save -the resulting value in the hash map associated with its `arg` value. +`value` function will look up the `arg` in the hash map and return the value, +if it’s present. If it’s not present, the `Cacher` will call the closure and +save the resulting value in the hash map associated with its `arg` value. -Another problem with the current `Cacher` implementation that restricts its use -is that it only accepts closures that take one parameter of type `i32` and -return an `i32`. We might want to be able to cache the results of closures that -take a string slice as an argument and return `usize` values, for example. Try -introducing more generic parameters to increase the flexibility of the `Cacher` -functionality. +Another problem with the current `Cacher` implementation is that it only +accepts closures that take one parameter of type `u32` and return an `u32`. We +might want to cache the results of closures that take a string slice and return +`usize` values, for example. To fix this issue, try introducing more generic +parameters to increase the flexibility of the `Cacher` functionality. ### Closures Can Capture Their Environment In the workout generator example, we only used closures as inline anonymous -functions. Closures have an additional ability we can use that functions don’t -have, however: they can capture their environment and access variables from the -scope in which they’re defined. - - - +functions. Closures have an additional ability that functions don’t have, +however: they can capture their environment and access variables from the scope +in which they’re defined. Listing 13-12 has an example of a closure stored in the variable `equal_to_x` that uses the variable `x` from the closure’s surrounding environment: - - - Filename: src/main.rs ```rust @@ -801,15 +711,6 @@ Here, even though `x` is not one of the parameters of `equal_to_x`, the `equal_to_x` closure is allowed to use the `x` variable that’s defined in the same scope that `equal_to_x` is defined in. 
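
The body of Listing 13-12 is unchanged in this patch, so it only appears as elided context above. For reference, a minimal sketch of the capturing closure the prose describes (reconstructed here, so treat the exact names and values as illustrative) looks like this:

```rust
fn main() {
    let x = 4;

    // `x` is not a parameter of `equal_to_x`; the closure captures it
    // (borrows it immutably) from the enclosing scope.
    let equal_to_x = |z| z == x;

    let y = 4;

    assert!(equal_to_x(y));
}
```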
- - - We can’t do the same with functions; let’s see what happens if we try: Filename: src/main.rs @@ -839,43 +740,40 @@ closure form instead The compiler even reminds us that this only works with closures! -When a closure captures a value from its environment, the closure uses memory -to store the values for use in the closure body. This use of memory is overhead -that we don’t want to pay for in the more common case where we want to execute -code that doesn’t capture its environment. Because functions are never allowed -to capture their environment, defining and using functions will never incur -this overhead. - - - +When a closure captures a value from its environment, it uses memory to store +the values for use in the closure body. This use of memory is overhead that we +don’t want to pay in more common cases, where we want to execute code that +doesn’t capture its environment. Because functions are never allowed to capture +their environment, defining and using functions will never incur this overhead. Closures can capture values from their environment in three ways, which directly map to the three ways a function can take a parameter: taking -ownership, borrowing immutably, and borrowing mutably. These ways of capturing -values are encoded in the three `Fn` traits as follows: - -* `FnOnce` consumes the variables it captures from its enclosing scope (the - enclosing scope is called the closure’s *environment*). In order to consume - the captured variables, the closure must therefore take ownership of these - variables and moves them into the closure when the closure is defined. The - `Once` part of the name is because the closure can’t take ownership of the - same variables more than once, so it can only be called one time. +ownership, borrowing immutably, and borrowing mutably. These are encoded in the +three `Fn` traits as follows: + +* `FnOnce` consumes the variables it captures from its enclosing scope, known + as the closure’s *environment*. In order to consume the captured variables, + the closure must take ownership of these variables and move them into the + closure when it is defined. The `Once` part of the name is because the + closure can’t take ownership of the same variables more than once, so it can + only be called one time. * `Fn` borrows values from the environment immutably. * `FnMut` can change the environment since it mutably borrows values. -When we create a closure, Rust infers how we want to reference the environment -based on how the closure uses the values from the environment. In Listing -13-12, the `equal_to_x` closure borrows `x` immutably (so `equal_to_x` has the -`Fn` trait) since the body of the closure only needs to read the value in `x`. +When we create a closure, Rust infers which to use based on how the closure +uses the values from the environment. In Listing 13-12, the `equal_to_x` +closure borrows `x` immutably (so `equal_to_x` has the `Fn` trait) since the +body of the closure only needs to read the value in `x`. If we want to force the closure to take ownership of the values it uses in the environment, we can use the `move` keyword before the parameter list. This is mostly useful when passing a closure to a new thread in order to move the data -to be owned by the new thread. 
We’ll have more examples of `move` closures in -Chapter 16 when we talk about concurrency, but for now here’s the code from -Listing 13-12 with the `move` keyword added to the closure definition and using -vectors instead of integers, since integers can be copied rather than moved: +so that it’s owned by the new thread. + +We’ll have more examples of `move` closures in Chapter 16 when we talk about +concurrency, but for now here’s the code from Listing 13-12 with the `move` +keyword added to the closure definition and using vectors instead of integers, +since integers can be copied rather than moved: Filename: src/main.rs @@ -909,9 +807,10 @@ error[E0382]: use of moved value: `x` implement the `Copy` trait ``` -The `x` value is moved into the closure when the closure is defined because of -the `move` keyword. The closure then has ownership of `x`, and `main` isn’t -allowed to use `x` anymore. Removing the `println!` will fix this example. +The `x` value is moved into the closure when the closure is defined, because we +added the `move` keyword. The closure then has ownership of `x`, and `main` +isn’t allowed to use `x` anymore in the `println!` statement. Removing +`println!` will fix this example. Most of the time when specifying one of the `Fn` trait bounds, you can start with `Fn` and the compiler will tell you if you need `FnMut` or `FnOnce` based diff --git a/src/doc/book/second-edition/src/ch13-02-iterators.md b/src/doc/book/second-edition/src/ch13-02-iterators.md index 29f56efe50..f41a69536c 100644 --- a/src/doc/book/second-edition/src/ch13-02-iterators.md +++ b/src/doc/book/second-edition/src/ch13-02-iterators.md @@ -1,25 +1,15 @@ ## Processing a Series of Items with Iterators - - - - The iterator pattern allows you to perform some task on a sequence of items in -turn. An *iterator* is responsible for the logic around iterating over each item -in the sequence and determining when the sequence has finished. When we use -iterators, we don’t have to reimplement that logic ourselves. +turn. An *iterator* is responsible for the logic of iterating over each item +and determining when the sequence has finished. When we use iterators, we don’t +have to reimplement that logic ourselves. -In Rust, iterators are *lazy*, which means they have no effect until we call -methods on them that consume the iterator to use it up. For example, the code -in Listing 13-13 creates an iterator over the items in the vector `v1` by -calling the `iter` method defined on `Vec`. This code by itself doesn’t do -anything useful: +In Rust, iterators are *lazy*, meaning they have no effect until we call +methods that consume the iterator to use it up. For example, the code in +Listing 13-13 creates an iterator over the items in the vector `v1` by calling +the `iter` method defined on `Vec`. This code by itself doesn’t do anything +useful: ```rust let v1 = vec![1, 2, 3]; @@ -29,11 +19,13 @@ let v1_iter = v1.iter(); Listing 13-13: Creating an iterator -After creating an iterator, we can choose to use it in a variety of ways. In -Listing 3-6, we actually used iterators with `for` loops to execute some code -on each item, though we glossed over what the call to `iter` did until now. The -example in Listing 13-14 separates the creation of the iterator from the use of -the iterator in the `for` loop. The iterator is stored in the `v1_iter` +Once we’ve created an iterator, we can choose to use it in a variety of ways. 
+In Listing 3-6 from Chapter 3, we actually used iterators with `for` loops to +execute some code on each item, though we glossed over what the call to `iter` +did until now. + +The example in Listing 13-14 separates the creation of the iterator from the +use of the iterator in the `for` loop. The iterator is stored in the `v1_iter` variable, and no iteration takes place at that time. Once the `for` loop is called using the iterator in `v1_iter`, then each element in the iterator is used in one iteration of the loop, which prints out each value: @@ -54,12 +46,13 @@ loop In languages that don’t have iterators provided by their standard libraries, we would likely write this same functionality by starting a variable at index 0, using that variable to index into the vector to get a value, and incrementing -the variable value in a loop until its value gets up to the total number of -items in the vector. Iterators take care of all of that logic for us, which -cuts down on the repetitive code we would have to write and potentially mess up. -In addition, the way iterators are implemented gives us more flexibility to -use the same logic with many different kinds of sequences, not just data -structures that we can index into like vectors. Let’s see how iterators do that. +the variable value in a loop until it gets to the total number of items in the +vector. + +Iterators take care of all of that logic for us, cutting down on repetitive +code we could potentially mess up. Iterators give us more flexibility to use +the same logic with many different kinds of sequences, not just data structures +we can index into like vectors. Let’s see how iterators do that. ### The `Iterator` trait and the `next` method @@ -79,17 +72,18 @@ trait Iterator { You’ll notice some new syntax that we haven’t covered yet: `type Item` and `Self::Item`, which are defining an *associated type* with this trait. We’ll talk about associated types in depth in Chapter 19, but for now, all you need -to know is that this code says implementing `Iterator` trait requires that you -also define an `Item` type, and this `Item` type is used in the return type of -the `next` method. In other words, the `Item` type will be the type of element -that’s returned from the iterator. - -The `next` method is the only method that the `Iterator` trait requires -implementers of the trait to define. `next` returns one item of the iterator -at a time wrapped in `Some`, and when iteration is over, it returns `None`. -We can call the `next` method on iterators directly if we’d like; Listing 13-15 -has a test that demonstrates the values we’d get on repeated calls to `next` -on the iterator created from the vector: +to know is that this code says implementing the `Iterator` trait requires that +you also define an `Item` type, and this `Item` type is used in the return type +of the `next` method. In other words, the `Item` type will be the type returned +from the iterator. + +The `Iterator` trait only requires implementors to define one method: the +`next` method, which returns one item of the iterator at a time wrapped in +`Some` and, when iteration is over, it returns `None`. 
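
The trait definition itself is unchanged context in this hunk, so it isn’t visible above; its shape in the standard library is roughly the following, with the comment standing in for the many provided methods that are elided:

```rust
pub trait Iterator {
    // The type of element this iterator yields.
    type Item;

    // The only required method: return the next element wrapped in `Some`,
    // or `None` once the sequence is finished.
    fn next(&mut self) -> Option<Self::Item>;

    // ...methods with default implementations elided...
}
```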
+ +We can call the `next` method on iterators directly; Listing 13-15 demonstrates +what values are returned from repeated calls to `next` on the iterator created +from the vector: Filename: src/lib.rs @@ -111,29 +105,21 @@ fn iterator_demonstration() { iterator Note that we needed to make `v1_iter` mutable: calling the `next` method on an -iterator changes the iterator’s state that keeps track of where it is in the -sequence. Put another way, this code *consumes*, or uses up, the iterator. Each -call to `next` eats up an item from the iterator. We didn’t need to make -`v1_iter` mutable when we used a `for` loop because the `for` loop took -ownership of `v1_iter` and made `v1_iter` mutable behind the scenes. +iterator changes state that keeps track of where it is in the sequence. Put +another way, this code *consumes*, or uses up, the iterator. Each call to +`next` eats up an item from the iterator. We didn’t need to make `v1_iter` +mutable when we used a `for` loop because the loop took ownership of `v1_iter` +and made it mutable behind the scenes. Also note that the values we get from the calls to `next` are immutable references to the values in the vector. The `iter` method produces an iterator -over immutable references. If we wanted to create an iterator that takes +over immutable references. If we want to create an iterator that takes ownership of `v1` and returns owned values, we can call `into_iter` instead of `iter`. Similarly, if we want to iterate over mutable references, we can call `iter_mut` instead of `iter`. ### Methods in the `Iterator` Trait that Consume the Iterator - - - - The `Iterator` trait has a number of different methods with default implementations provided for us by the standard library; you can find out all about these methods by looking in the standard library API documentation for @@ -141,25 +127,12 @@ the `Iterator` trait. Some of these methods call the `next` method in their definition, which is why we’re required to implement the `next` method when implementing the `Iterator` trait. - - - -The methods that call the `next` method are called *consuming adaptors*, since -calling them uses up the iterator. An example of a consuming adaptor is the -`sum` method. This method takes ownership of the iterator and iterates through -the items by repeatedly calling `next`, thus consuming the iterator. As it -iterates through each item, it adds each item to a running total and returns -the total when iteration has completed. Listing 13-16 has a test illustrating a -use of the `sum` method: +Methods that call `next` are called *consuming adaptors*, because calling them +uses up the iterator. One example is the `sum` method, which takes ownership of +the iterator and iterates through the items by repeatedly calling `next`, thus +consuming the iterator. As it iterates through, it adds each item to a running +total and returns the total when iteration is complete. Listing 13-16 has a +test illustrating a use of the `sum` method: Filename: src/lib.rs @@ -184,14 +157,16 @@ ownership of the iterator we call it on. ### Methods in the `Iterator` Trait that Produce Other Iterators -Another kind of method defined on the `Iterator` trait are methods that produce -other iterators. These methods are called *iterator adaptors* and allow us to -change iterators into different kind of iterators. We can chain multiple calls -to iterator adaptors. 
Because all iterators are lazy, however, we have to -call one of the consuming adaptor methods in order to get results from calls -to iterator adaptors. Listing 13-17 shows an example of calling the iterator -adaptor method `map`, which takes a closure that `map` will call on each -item in order to produce a new iterator in which each item from the vector has +Other methods defined on the `Iterator` trait, known as *iterator adaptors*, +allow us to change iterators into different kind of iterators. We can chain +multiple calls to iterator adaptors to perform complex actions in a readable +way. Because all iterators are lazy, however, we have to call one of the +consuming adaptor methods in order to get results from calls to iterator +adaptors. + +Listing 13-17 shows an example of calling the iterator adaptor method `map` +which takes a closure to call on each item in order to produce a new iterator. +The closure here creates a new iterator in which each item from the vector has been incremented by 1. This code produces a warning, though: Filename: src/main.rs @@ -220,14 +195,15 @@ nothing unless consumed The code in Listing 13-17 isn’t actually doing anything; the closure we’ve specified never gets called. The warning reminds us why: iterator adaptors are -lazy, and we probably meant to consume the iterator here. +lazy, and we need to consume the iterator here. + +To fix this and consume the iterator, we’re going to use the `collect` method, +which we saw briefly in Chapter 12. This method consumes the iterator and +collects the resulting values into a collection data type. -In order to fix this warning and consume the iterator to get a useful result, -we’re going to use the `collect` method, which we saw briefly in Chapter 12. -This method consumes the iterator and collects the resulting values into a -data structure. In Listing 13-18, we’re going to collect the results of -iterating over the iterator returned from the call to `map` into a vector that -will contain each item from the original vector incremented by 1: +In Listing 13-18, we collect the results of iterating over the iterator that’s +returned from the call to `map` into a vector. This vector will end up +containing each item from the original vector incremented by 1: Filename: src/main.rs @@ -243,24 +219,10 @@ assert_eq!(v2, vec![2, 3, 4]); iterator, then calling the `collect` method to consume the new iterator and create a vector -Because `map` takes a closure, we can specify any operation that we want to -perform on each item that we iterate over. This is a great example of how using -closures lets us customize some behavior while reusing the iteration behavior -that the `Iterator` trait provides. - - - - +Because `map` takes a closure, we can specify any operation we want to perform +on each item. This is a great example of how closures let us customize some +behavior while reusing the iteration behavior that the `Iterator` trait +provides. ### Using Closures that Capture their Environment with Iterators @@ -269,21 +231,22 @@ closures that capture their environment by using the `filter` iterator adapter. The `filter` method on an iterator takes a closure that takes each item from the iterator and returns a boolean. If the closure returns `true`, the value will be included in the iterator produced by `filter`. If the closure returns -`false`, the value won’t be included in the resulting iterator. 
Listing 13-19 -demonstrates using `filter` with a closure that captures the `shoe_size` -variable from its environment in order to iterate over a collection of `Shoe` -struct instances in order to return only shoes that are the specified size: +`false`, the value won’t be included in the resulting iterator. + +In Listing 13-19 we use `filter` with a closure that captures the `shoe_size` +variable from its environment, in order to iterate over a collection of `Shoe` +struct instances. It will return only shoes that are the specified size: Filename: src/lib.rs ```rust,test_harness #[derive(PartialEq, Debug)] struct Shoe { - size: i32, + size: u32, style: String, } -fn shoes_in_my_size(shoes: Vec, shoe_size: i32) -> Vec { +fn shoes_in_my_size(shoes: Vec, shoe_size: u32) -> Vec { shoes.into_iter() .filter(|s| s.size == shoe_size) .collect() @@ -312,47 +275,37 @@ fn filters_by_size() { Listing 13-19: Using the `filter` method with a closure that captures `shoe_size` - - The `shoes_in_my_size` function takes ownership of a vector of shoes and a shoe size as parameters. It returns a vector containing only shoes of the specified -size. In the body of `shoes_in_my_size`, we call `into_iter` to create an -iterator that takes ownership of the vector. Then we call `filter` to adapt -that iterator into a new iterator that only contains elements for which the -closure returns `true`. The closure we’ve specified captures the `shoe_size` -parameter from the environment and uses the value to compare with each shoe’s -size to only keep shoes that are of the size specified. Finally, calling -`collect` gathers the values returned by the adapted iterator into a vector -that the function returns. +size. + +In the body of `shoes_in_my_size`, we call `into_iter` to create an iterator +that takes ownership of the vector. Then we call `filter` to adapt that +iterator into a new iterator that only contains elements for which the closure +returns `true`. + +The closure captures the `shoe_size` parameter from the environment and +compares the value with each shoe’s size, keeping only shoes of the size +specified. Finally, calling `collect` gathers the values returned by the +adapted iterator into a vector that’s returned by the function. The test shows that when we call `shoes_in_my_size`, we only get back shoes that have the same size as the value we specified. ### Implementing the `Iterator` Trait to Create Our Own Iterators - - - We’ve shown that we can create an iterator by calling `iter`, `into_iter`, or -`iter_mut` on a vector. We can also create iterators from the other collection -types in the standard library, such as hash map. Additionally, we can implement -the `Iterator` trait in order to create iterators that do anything we want. -As previously mentioned, the only method we’re required to provide a definition -for is the `next` method. Once we’ve done that, we can use all the other -methods that have default implementations provided by the `Iterator` trait on -our iterator! - - - -The iterator we’re going to create is one that will only ever count from 1 -to 5. First, we’ll create a struct to hold on to some values, and then we’ll -make this struct into an iterator by implementing the `Iterator` trait and use -the values in that implementation. +`iter_mut` on a vector. We can create iterators from the other collection types +in the standard library, such as hash map. We can also create iterators that do +anything we want by implementing the `Iterator` trait on our own types. 
As +previously mentioned, the only method we’re required to provide a definition +for is the `next` method. Once we’ve done that, we can use all other methods +that have default implementations provided by the `Iterator` trait! + +To demonstrate, let’s create an iterator that will only ever count from 1 to 5. +First, we’ll create a struct to hold some values, and then we’ll make this +struct into an iterator by implementing the `Iterator` trait and use the values +in that implementation. Listing 13-20 has the definition of the `Counter` struct and an associated `new` function to create instances of `Counter`: @@ -375,26 +328,14 @@ impl Counter { function that creates instances of `Counter` with an initial value of 0 for `count` - - - -The `Counter` struct has one field named `count`. This field holds a `u32` -value that will keep track of where we are in the process of iterating from 1 -to 5. The `count` field is private since we want the implementation of -`Counter` to manage its value. The `new` function enforces the behavior we want -of always starting new instances with a value of 0 in the `count` field. - - - - +The `Counter` struct has one field named `count`. This holds a `u32` value that +will keep track of where we are in the process of iterating from 1 to 5. The +`count` field is private since we want the implementation of `Counter` to +manage its value. The `new` function enforces the behavior of always starting +new instances with a value of 0 in the `count` field. Next, we’re going to implement the `Iterator` trait for our `Counter` type by -defining the body of the `next` method to specify what we want to happen when +defining the body of the `next` method, to specify what we want to happen when this iterator is used, as shown in Listing 13-21: Filename: src/lib.rs @@ -422,22 +363,21 @@ impl Iterator for Counter { Listing 13-21: Implementing the `Iterator` trait on our `Counter` struct - - We set the associated `Item` type for our iterator to `u32`, meaning the iterator will return `u32` values. Again, don’t worry about associated types -yet, we’ll be covering them in Chapter 19. We want our iterator to add one to -the current state, which is why we initialized `count` to 0: we want our -iterator to return one first. If the value of `count` is less than six, `next` -will return the current value wrapped in `Some`, but if `count` is six or -higher, our iterator will return `None`. +yet, we’ll be covering them in Chapter 19. + +We want our iterator to add one to the current state, so we initialized `count` +to 0 so it would return one first. If the value of `count` is less than six, +`next` will return the current value wrapped in `Some`, but if `count` is six +or higher, our iterator will return `None`. #### Using Our `Counter` Iterator’s `next` Method Once we’ve implemented the `Iterator` trait, we have an iterator! 
Listing 13-22 -shows a test demonstrating that we can use the iterator functionality our -`Counter` struct now has by calling the `next` method on it directly, just like -we did with the iterator created from a vector in Listing 13-15: +shows a test demonstrating that we can use the iterator functionality of our +`Counter` struct by calling the `next` method on it directly, just like we did +with the iterator created from a vector in Listing 13-15: Filename: src/lib.rs @@ -478,41 +418,19 @@ method implementation This test creates a new `Counter` instance in the `counter` variable and then calls `next` repeatedly, verifying that we have implemented the behavior we -want this iterator to have of returning the values from 1 to 5. - - - +want this iterator to have: returning the values from 1 to 5. #### Using Other `Iterator` Trait Methods on Our Iterator Because we implemented the `Iterator` trait by defining the `next` method, we -can now use any `Iterator` trait method’s default implementations that the -standard library has defined, since they all use the `next` method’s -functionality. - - - - - - - -For example, if for some reason we wanted to take the values that an instance -of `Counter` produces, pair those values with values produced by another -`Counter` instance after skipping the first value that instance produces, -multiply each pair together, keep only those results that are divisible by -three, and add all the resulting values together, we could do so as shown in -the test in Listing 13-23: +can now use any `Iterator` trait method’s default implementations as defined in +the standard library, since they all use the `next` method’s functionality. + +For example, if for some reason we wanted to take the values produced by an +instance of `Counter`, pair them with values produced by another `Counter` +instance after skipping the first value, multiply each pair together, keep only +those results that are divisible by three, and add all the resulting values +together, we could do so as shown in the test in Listing 13-23: Filename: src/lib.rs @@ -561,6 +479,6 @@ Note that `zip` produces only four pairs; the theoretical fifth pair `(5, None)` is never produced because `zip` returns `None` when either of its input iterators return `None`. -All of these method calls are possible because we implemented the `Iterator` -trait by specifying how the `next` method works and the standard library -provides default implementations for other methods that call `next`. +All of these method calls are possible because we specified how the `next` +method works, and the standard library provides default implementations for +other methods that call `next`. diff --git a/src/doc/book/second-edition/src/ch13-03-improving-our-io-project.md b/src/doc/book/second-edition/src/ch13-03-improving-our-io-project.md index 126ed98d14..91d42fdafd 100644 --- a/src/doc/book/second-edition/src/ch13-03-improving-our-io-project.md +++ b/src/doc/book/second-edition/src/ch13-03-improving-our-io-project.md @@ -1,6 +1,6 @@ ## Improving our I/O Project -We can improve our implementation of the I/O project in Chapter 12 by using +With this new knowledge, we can improve the I/O project in Chapter 12 by using iterators to make places in the code clearer and more concise. Let’s take a look at how iterators can improve our implementation of both the `Config::new` function and the `search` function. @@ -9,7 +9,7 @@ function and the `search` function. 
In Listing 12-6, we added code that took a slice of `String` values and created an instance of the `Config` struct by indexing into the slice and cloning the -values so that the `Config` struct could own those values. We’ve reproduced the +values, allowing the `Config` struct to own those values. We’ve reproduced the implementation of the `Config::new` function as it was at the end of Chapter 12 in Listing 13-24: @@ -35,42 +35,32 @@ impl Config { Listing 13-24: Reproduction of the `Config::new` function from the end of Chapter 12 - - - At the time, we said not to worry about the inefficient `clone` calls here because we would remove them in the future. Well, that time is now! -The reason we needed `clone` here in the first place is that we have a slice -with `String` elements in the parameter `args`, but the `new` function does not -own `args`. In order to be able to return ownership of a `Config` instance, we -need to clone the values that we put in the `query` and `filename` fields of -`Config`, so that the `Config` instance can own its values. +We needed `clone` here because we have a slice with `String` elements in the +parameter `args`, but the `new` function doesn’t own `args`. In order to be +able to return ownership of a `Config` instance, we had to clone the values +from the `query` and `filename` fields of `Config`, so that the `Config` +instance can own its values. With our new knowledge about iterators, we can change the `new` function to take ownership of an iterator as its argument instead of borrowing a slice. -We’ll use the iterator functionality instead of the code we had that checks the -length of the slice and indexes into specific locations. This will clear up -what the `Config::new` function is doing since the iterator will take care of -accessing the values. - - - +We’ll use the iterator functionality instead of the code that checks the length +of the slice and indexes into specific locations. This will clear up what the +`Config::new` function is doing since the iterator will take care of accessing +the values. -Once `Config::new` taking ownership of the iterator and not using indexing +Once `Config::new` takes ownership of the iterator and stops using indexing operations that borrow, we can move the `String` values from the iterator into `Config` rather than calling `clone` and making a new allocation. - - - #### Using the Iterator Returned by `env::args` Directly -In your I/O project’s *src/main.rs*, let’s change the start of the `main` -function from this code that we had at the end of Chapter 12: +Open your I/O project’s *src/main.rs*, and we’ll change the start of the `main` +function that we had at the end of Chapter 12: + +Filename: src/main.rs ```rust,ignore fn main() { @@ -103,14 +93,6 @@ fn main() { Listing 13-25: Passing the return value of `env::args` to `Config::new` - - - The `env::args` function returns an iterator! Rather than collecting the iterator values into a vector and then passing a slice to `Config::new`, now we’re passing ownership of the iterator returned from `env::args` to @@ -120,14 +102,11 @@ Next, we need to update the definition of `Config::new`. In your I/O project’s *src/lib.rs*, let’s change the signature of `Config::new` to look like Listing 13-26: - - - Filename: src/lib.rs ```rust,ignore impl Config { - pub fn new(args: std::env::Args) -> Result { + pub fn new(mut args: std::env::Args) -> Result { // ...snip... 
``` @@ -137,14 +116,17 @@ expect an iterator The standard library documentation for the `env::args` function shows that the type of the iterator it returns is `std::env::Args`. We’ve updated the signature of the `Config::new` function so that the parameter `args` has the -type `std::env::Args` instead of `&[String]`. +type `std::env::Args` instead of `&[String]`. Because we’re taking ownership of +`args`, and we’re going to be mutating `args` by iterating over it, we can add +the `mut` keyword into the specification of the `args` parameter to make it +mutable. #### Using `Iterator` Trait Methods Instead of Indexing Next, we’ll fix the body of `Config::new`. The standard library documentation also mentions that `std::env::Args` implements the `Iterator` trait, so we know -we can call the `next` method on it! Listing 13-27 has updated the code -from Listing 12-23 to use the `next` method: +we can call the `next` method on it! Listing 13-27 has updated the code from +Listing 12-23 to use the `next` method: Filename: src/lib.rs @@ -159,7 +141,7 @@ from Listing 12-23 to use the `next` method: # impl Config { pub fn new(mut args: std::env::Args) -> Result { - args.next(); + args.next(); let query = match args.next() { Some(arg) => arg, @@ -173,9 +155,7 @@ impl Config { let case_sensitive = env::var("CASE_INSENSITIVE").is_err(); - Ok(Config { - query, filename, case_sensitive - }) + Ok(Config { query, filename, case_sensitive }) } } ``` @@ -183,12 +163,6 @@ impl Config { Listing 13-27: Changing the body of `Config::new` to use iterator methods - - - Remember that the first value in the return value of `env::args` is the name of the program. We want to ignore that and get to the next value, so first we call `next` and do nothing with the return value. Second, we call `next` on the @@ -197,12 +171,6 @@ value we want to put in the `query` field of `Config`. If `next` returns a not enough arguments were given and we return early with an `Err` value. We do the same thing for the `filename` value. - - - ### Making Code Clearer with Iterator Adaptors The other place in our I/O project we could take advantage of iterators is in @@ -228,17 +196,14 @@ pub fn search<'a>(query: &str, contents: &'a str) -> Vec<&'a str> { Listing 13-28: The implementation of the `search` function from Chapter 12 -We can write this code in a much shorter way by using iterator adaptor methods -instead. This also lets us avoid having a mutable intermediate `results` +We can write this code in a much more concise way using iterator adaptor +methods. This also lets us avoid having a mutable intermediate `results` vector. The functional programming style prefers to minimize the amount of mutable state to make code clearer. Removing the mutable state might make it easier for us to make a future enhancement to make searching happen in parallel, since we wouldn’t have to manage concurrent access to the `results` vector. Listing 13-29 shows this change: - - - Filename: src/lib.rs ```rust,ignore @@ -253,23 +218,18 @@ pub fn search<'a>(query: &str, contents: &'a str) -> Vec<&'a str> { implementation of the `search` function Recall that the purpose of the `search` function is to return all lines in -`contents` that contain the `query`. Similarly to the `filter` example in -Listing 13-19, we can use the `filter` adaptor to keep only the lines that +`contents` that contain the `query`. Similar to the `filter` example in Listing +13-19, we can use the `filter` adaptor to keep only the lines that `line.contains(query)` returns true for. 
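A minimal sketch of what this filter-based `search` can look like, keeping the signature from Listing 13-28 (the body here is reconstructed from the description above rather than quoted from the listing):

```rust
pub fn search<'a>(query: &str, contents: &'a str) -> Vec<&'a str> {
    // Keep only the lines for which `line.contains(query)` is true,
    // then gather the matching lines into a vector.
    contents.lines()
        .filter(|line| line.contains(query))
        .collect()
}
```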
We then collect the matching lines up into another vector with `collect`. Much simpler! Feel free to make the same change to use iterator methods in the `search_case_insensitive` function as well. - - - -The next logical question is which style you should choose in your own code: -the original implementation in Listing 13-28, or the version using iterators in -Listing 13-29. Most Rust programmers prefer to use the iterator style. It’s a -bit tougher to get the hang of at first, but once you get a feel for the -various iterator adaptors and what they do, iterators can be easier to +The next logical question is which style you should choose in your own code and +why: the original implementation in Listing 13-28, or the version using +iterators in Listing 13-29. Most Rust programmers prefer to use the iterator +style. It’s a bit tougher to get the hang of at first, but once you get a feel +for the various iterator adaptors and what they do, iterators can be easier to understand. Instead of fiddling with the various bits of looping and building new vectors, the code focuses on the high-level objective of the loop. This abstracts away some of the commonplace code so that it’s easier to see the diff --git a/src/doc/book/second-edition/src/ch13-04-performance.md b/src/doc/book/second-edition/src/ch13-04-performance.md index 11a0c46570..c7a1efe01e 100644 --- a/src/doc/book/second-edition/src/ch13-04-performance.md +++ b/src/doc/book/second-edition/src/ch13-04-performance.md @@ -17,14 +17,16 @@ test bench_search_iter ... bench: 19,234,900 ns/iter (+/- 657,200) The iterator version ended up slightly faster! We’re not going to go through the benchmark code here, as the point is not to prove that they’re exactly equivalent, but to get a general sense of how these two implementations compare -performance-wise. For a more comprehensive benchmark, you’d want to check -various texts of various sizes, different words, words of different lengths, -and all kinds of other variations. The point is this: iterators, while a -high-level abstraction, get compiled down to roughly the same code as if you’d -written the lower-level code yourself. Iterators are one of Rust’s *zero-cost -abstractions*, by which we mean using the abstraction imposes no additional -runtime overhead in the same way that Bjarne Stroustrup, the original designer -and implementer of C++, defines *zero-overhead*: +performance-wise. + +For a more comprehensive benchmark, you’d want to check various texts of +various sizes, different words, words of different lengths, and all kinds of +other variations. The point is this: iterators, while a high-level abstraction, +get compiled down to roughly the same code as if you’d written the lower-level +code yourself. Iterators are one of Rust’s *zero-cost* *abstractions*, by which +we mean using the abstraction imposes no additional runtime overhead, in the +same way that Bjarne Stroustrup, the original designer and implementor of C++, +defines *zero-overhead*: > In general, C++ implementations obey the zero-overhead principle: What you > don’t use, you don’t pay for. And further: What you do use, you couldn’t hand @@ -32,21 +34,10 @@ and implementer of C++, defines *zero-overhead*: > > - Bjarne Stroustrup “Foundations of C++” - - - As another example, here is some code taken from an audio decoder. The decoding algorithm uses the linear prediction mathematical operation to estimate future values based on a linear function of the previous samples. 
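As a self-contained sketch of the kind of chain being described (the variable names `buffer`, `coefficients`, and `qlp_shift` follow the discussion below; the concrete values and the use of a `Vec` are only illustrative, since a real decoder would read its samples from the audio stream):

```rust
fn main() {
    // Illustrative inputs; a real decoder gets these from the encoded audio.
    let mut buffer: Vec<i32> = (0..32).collect();
    let coefficients: [i64; 12] = [1; 12];
    let qlp_shift: i16 = 2;

    for i in 12..buffer.len() {
        // Predict each sample as a shifted, weighted sum of the previous 12.
        let prediction = coefficients.iter()
                                     .zip(&buffer[i - 12..i])
                                     .map(|(&c, &s)| c * s as i64)
                                     .sum::<i64>() >> qlp_shift;
        let delta = buffer[i];
        buffer[i] = prediction as i32 + delta;
    }

    println!("{:?}", buffer);
}
```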
- - - This code uses an iterator chain to do some math on three variables in scope: a `buffer` slice of data, an array of 12 `coefficients`, and an amount by which to shift data in `qlp_shift`. We’ve declared the variables within this example @@ -81,13 +72,10 @@ consuming the value. What assembly code would this Rust code compile to? Well, as of this writing, it compiles down to the same assembly you’d write by hand. There’s no loop at all corresponding to the iteration over the values in `coefficients`: Rust knows that there are twelve iterations, so it “unrolls” -the loop. Unrolling is an optimization that removes the overhead of the loop +the loop. *Unrolling* is an optimization that removes the overhead of the loop controlling code and instead generates repetitive code for each iteration of the loop. - - - All of the coefficients get stored in registers, which means it’s very fast to access the values. There are no bounds checks on the array access at runtime. All these optimizations Rust is able to apply make the resulting code extremely @@ -105,15 +93,6 @@ ideas, at low level performance. The implementations of closures and iterators are such that runtime performance is not affected. This is part of Rust’s goal to strive to provide zero-cost abstractions. - - - Now that we’ve improved the expressiveness of our I/O project, let’s look at some more features of `cargo` that would help us get ready to share the project with the world. diff --git a/src/doc/book/second-edition/src/ch14-00-more-about-cargo.md b/src/doc/book/second-edition/src/ch14-00-more-about-cargo.md index 192ea7f4f5..3a6da6ace2 100644 --- a/src/doc/book/second-edition/src/ch14-00-more-about-cargo.md +++ b/src/doc/book/second-edition/src/ch14-00-more-about-cargo.md @@ -11,7 +11,4 @@ advanced features to show you how to: * Extend Cargo with your own custom commands Cargo can do even more than what we can cover in this chapter too, so for a -full explanation, see [its documentation](http://doc.rust-lang.org/cargo/). - - - +full explanation, see [its documentation](https://doc.rust-lang.org/cargo/). diff --git a/src/doc/book/second-edition/src/ch14-01-release-profiles.md b/src/doc/book/second-edition/src/ch14-01-release-profiles.md index 359fab0b93..e269cf517f 100644 --- a/src/doc/book/second-edition/src/ch14-01-release-profiles.md +++ b/src/doc/book/second-edition/src/ch14-01-release-profiles.md @@ -5,37 +5,14 @@ different configurations, to allow the programmer more control over various options for compiling your code. Each profile is configured independently of the others. - - - -Cargo has four profiles defined with good default configurations for each use -case. Cargo uses the different profiles based on which command you’re running. -The commands correspond to the profiles as shown in Table 14-1: - - - - -| Command | Profile | -|-------------------------|-----------| -| `cargo build` | `dev` | -| `cargo build --release` | `release` | -| `cargo test` | `test` | -| `cargo doc` | `doc` | - -Table 14-1: Which profile is used when you run different -Cargo commands - -This may be familiar from the output of your builds, which shows the profile -used in the build: - - - +Cargo has two main profiles you should know about: the `dev` profile Cargo uses +when you run `cargo build`, and the `release` profile Cargo uses when you run +`cargo build --release`. The `dev` profile is defined with good defaults for +developing, and likewise the `release` profile has good defaults for release +builds. 
+ +These names may be familiar from the output of your builds, which shows the +profile used in the build: ```text $ cargo build @@ -44,31 +21,19 @@ $ cargo build --release Finished release [optimized] target(s) in 0.0 secs ``` -The “dev” and “release” notifications here indicate that the compiler is -using different profiles. - - - +The “dev” and “release” notifications here indicate that the compiler is using +different profiles. ### Customizing Release Profiles - - - Cargo has default settings for each of the profiles that apply when there aren’t any `[profile.*]` sections in the project’s *Cargo.toml* file. By adding `[profile.*]` sections for any profile we want to customize, we can choose to override any subset of the default settings. For example, here are the default values for the `opt-level` setting for the `dev` and `release` profiles: +Filename: Cargo.toml + ```toml [profile.dev] opt-level = 0 @@ -92,15 +57,6 @@ them in *Cargo.toml*. If we wanted to use optimization level 1 in the development profile, for example, we can add these two lines to our project’s *Cargo.toml*: - - - Filename: Cargo.toml ```toml @@ -114,4 +70,4 @@ will use the defaults for the `dev` profile plus our customization to optimizations than the default, but not as many as a release build. For the full list of configuration options and defaults for each profile, see -[Cargo’s documentation](http://doc.rust-lang.org/cargo/). +[Cargo’s documentation](https://doc.rust-lang.org/cargo/). diff --git a/src/doc/book/second-edition/src/ch14-02-publishing-to-crates-io.md b/src/doc/book/second-edition/src/ch14-02-publishing-to-crates-io.md index 838bccaee5..b67c086ebb 100644 --- a/src/doc/book/second-edition/src/ch14-02-publishing-to-crates-io.md +++ b/src/doc/book/second-edition/src/ch14-02-publishing-to-crates-io.md @@ -20,13 +20,9 @@ contents of documentation comments for public API items, intended for programmers interested in knowing how to *use* your crate, as opposed to how your crate is *implemented*. - - - Documentation comments use `///` instead of `//` and support Markdown notation for formatting the text if you’d like. You place documentation comments just -before the item they are documenting. Listing 14-2 shows documentation comments +before the item they are documenting. Listing 14-1 shows documentation comments for an `add_one` function in a crate named `my_crate`: Filename: src/lib.rs @@ -46,11 +42,8 @@ pub fn add_one(x: i32) -> i32 { } ``` -Listing 14-2: A documentation comment for a function - - - +Listing 14-1: A documentation comment for a +function Here, we give a description of what the `add_one` function does, then start a section with the heading “Examples”, and code that demonstrates how to use the @@ -63,37 +56,29 @@ For convenience, running `cargo doc --open` will build the HTML for your current crate’s documentation (as well as the documentation for all of your crate’s dependencies) and open the result in a web browser. 
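For reference, a documented `add_one` along these lines might look like the following sketch (the comment wording and the example values are illustrative; the fenced block under the `# Examples` heading is what `cargo test` runs as a doc-test):

```rust
/// Adds one to the number given.
///
/// # Examples
///
/// ```
/// let five = 5;
///
/// assert_eq!(6, my_crate::add_one(five));
/// ```
pub fn add_one(x: i32) -> i32 {
    x + 1
}
```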
Navigate to the `add_one` function and you’ll see how the text in the documentation comments -gets rendered, shown here in Figure 14-3: +gets rendered, shown here in Figure 14-2: Rendered HTML documentation for the `add_one` function of `my_crate` -Figure 14-3: HTML documentation for the `add_one` +Figure 14-2: HTML documentation for the `add_one` function - - - #### Commonly Used Sections -We used the `# Examples` markdown heading in Listing 14-2 to create a section +We used the `# Examples` markdown heading in Listing 14-1 to create a section in the HTML with the title “Examples”. Some other sections that crate authors commonly use in their documentation include: -- Panics: The scenarios in which this function could `panic!`. Callers of this - function who don’t want their programs to panic should make sure that they - don’t call this function in these situations. -- Errors: If this function returns a `Result`, describing the kinds of errors - that might occur and what conditions might cause those errors to be returned - can be helpful to callers so that they can write code to handle the different - kinds of errors in different ways. -- Safety: If this function uses `unsafe` code (which we will discuss in Chapter - 19), there should be a section covering the invariants that this function - expects callers to uphold in order for the code in `unsafe` blocks to - function correctly. +* **Panics**: The scenarios in which this function could `panic!`. Callers of + this function who don’t want their programs to panic should make sure that + they don’t call this function in these situations. +* **Errors**: If this function returns a `Result`, describing the kinds of + errors that might occur and what conditions might cause those errors to be + returned can be helpful to callers so that they can write code to handle the + different kinds of errors in different ways. +* **Safety**: If this function is `unsafe` to call (we will discuss unsafety in + Chapter 19), there should be a section explaining why the function is unsafe + and covering the invariants that this function expects callers to uphold. Most documentation comment sections don’t need all of these sections, but this is a good list to check to remind you of the kinds of things that people @@ -107,7 +92,7 @@ running `cargo test` will run the code examples in your documentation as tests! Nothing is better than documentation with examples. Nothing is worse than examples that don’t actually work because the code has changed since the documentation has been written. Try running `cargo test` with the documentation -for the `add_one` function like in Listing 14-2; you should see a section in +for the `add_one` function like in Listing 14-1; you should see a section in the test results like this: ```text @@ -125,21 +110,16 @@ tests catch that the example and the code are out of sync from one another! #### Commenting Contained Items - - - There’s another style of doc comment, `//!`, that adds documentation to the item that contains the comments, rather than adding documentation to the items following the comments. These are typically used inside the crate root file -(*src/lib.rs*) or inside a module’s root (*mod.rs*) to document the crate or -the module as a whole. +(*src/lib.rs* by convention) or inside a module to document the crate or the +module as a whole. 
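Such crate-level comments sit at the very top of *src/lib.rs*, before any items; a minimal sketch (with illustrative wording for the description) might look like this:

```rust
//! # My Crate
//!
//! `my_crate` is a collection of utilities to make performing certain
//! calculations more convenient.

/// Adds one to the number given.
pub fn add_one(x: i32) -> i32 {
    x + 1
}
```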
For example, if we wanted to add documentation that described the purpose of the `my_crate` crate that contains the `add_one` function, we can add documentation comments that start with `//!` to the beginning of *src/lib.rs* -as shown in Listing 14-4: +as shown in Listing 14-3: Filename: src/lib.rs @@ -153,7 +133,7 @@ as shown in Listing 14-4: // ...snip... ``` -Listing 14-4: Documentation for the `my_crate` crate as a +Listing 14-3: Documentation for the `my_crate` crate as a whole Notice there isn’t any code after the last line that begins with `//!`. Because @@ -164,22 +144,18 @@ is the crate root. These comments describe the entire crate. If we run `cargo doc --open`, we’ll see these comments displayed on the front page of the documentation for `my_crate` above the list of public items in the -crate, as shown in Figure 14-5: +crate, as shown in Figure 14-4: Rendered HTML documentation with a comment for the crate as a whole -Figure 14-5: Rendered documentation for `my_crate` +Figure 14-4: Rendered documentation for `my_crate` including the comment describing the crate as a whole - - - Documentation comments within items are useful for describing crates and modules especially. Use them to talk about the purpose of the container overall to help users of your crate understand your organization. -### Exporting a Convenient Public API with `pub use` +#### Exporting a Convenient Public API with `pub use` In Chapter 7, we covered how to organize our code into modules with the `mod` keyword, how to make items public with the `pub` keyword, and how to bring @@ -192,11 +168,6 @@ also be annoyed at having to type `use my_crate::some_module::another_module::UsefulType;` rather than `use my_crate::UsefulType;`. - - - The structure of your public API is a major consideration when publishing a crate. People who use your crate are less familiar with the structure than you are, and might have trouble finding the pieces they want to use if the module @@ -209,13 +180,10 @@ to your private structure, using `pub use`. Re-exporting takes a public item in one location and makes it public in another location as if it was defined in the other location instead. - - - For example, say we made a library named `art` for modeling artistic concepts. Within this library is a `kinds` module containing two enums named `PrimaryColor` and `SecondaryColor` and a `utils` module containing a function -named `mix` as shown in Listing 14-6: +named `mix` as shown in Listing 14-5: Filename: src/lib.rs @@ -247,20 +215,19 @@ pub mod utils { /// a secondary color. pub fn mix(c1: PrimaryColor, c2: PrimaryColor) -> SecondaryColor { // ...snip... -# SecondaryColor::Green } } ``` -Listing 14-6: An `art` library with items organized into +Listing 14-5: An `art` library with items organized into `kinds` and `utils` modules The front page of the documentation for this crate generated by `cargo doc` -would look like Figure 14-7: +would look like Figure 14-6: Rendered documentation for the `art` crate that lists the `kinds` and `utils` modules -Figure 14-7: Front page of the documentation for `art` +Figure 14-6: Front page of the documentation for `art` that lists the `kinds` and `utils` modules Note that the `PrimaryColor` and `SecondaryColor` types aren’t listed on the @@ -269,7 +236,7 @@ in order to see them. Another crate depending on this library would need `use` statements that import the items from `art` including specifying the module structure that’s currently -defined. 
Listing 14-8 shows an example of a crate that uses the `PrimaryColor` +defined. Listing 14-7 shows an example of a crate that uses the `PrimaryColor` and `mix` items from the `art` crate: Filename: src/main.rs @@ -287,16 +254,10 @@ fn main() { } ``` -Listing 14-8: A crate using the `art` crate’s items with +Listing 14-7: A crate using the `art` crate’s items with its internal structure exported - - - -The author of the code in Listing 14-8 that uses the `art` crate had to figure +The author of the code in Listing 14-7 that uses the `art` crate had to figure out that `PrimaryColor` is in the `kinds` module and `mix` is in the `utils` module. The module structure of the `art` crate is more relevant to developers working on the `art` crate than developers using the `art` crate. The internal @@ -307,8 +268,8 @@ confusion in having to figure out where to look and inconvenience in having to specify the module names in the `use` statements. To remove the internal organization from the public API, we can take the `art` -crate code from Listing 14-6 and add `pub use` statements to re-export the -items at the top level, as shown in Listing 14-9: +crate code from Listing 14-5 and add `pub use` statements to re-export the +items at the top level, as shown in Listing 14-8: Filename: src/lib.rs @@ -330,23 +291,21 @@ pub mod utils { } ``` -Listing 14-9: Adding `pub use` statements to re-export +Listing 14-8: Adding `pub use` statements to re-export items - - The API documentation generated with `cargo doc` for this crate will now list -and link re-exports on the front page as shown in Figure 14-10, which makes +and link re-exports on the front page as shown in Figure 14-9, which makes these types easier to find. Rendered documentation for the `art` crate with the re-exports on the front page -Figure 14-10: Front page of the documentation for `art` +Figure 14-9: Front page of the documentation for `art` that lists the re-exports Users of the `art` crate can still see and choose to use the internal structure -as in Listing 14-8, or they can use the more convenient structure from Listing -14-9, as shown in Listing 14-11: +as in Listing 14-7, or they can use the more convenient structure from Listing +14-8, as shown in Listing 14-10: Filename: src/main.rs @@ -361,11 +320,9 @@ fn main() { } ``` -Listing 14-11: A program using the re-exported items from +Listing 14-10: A program using the re-exported items from the `art` crate - - In cases where there are many nested modules, re-exporting the types at the top level with `pub use` can make a big difference in the experience of people who use the crate. @@ -381,7 +338,7 @@ structure differs from their public API. Before you can publish any crates, you need to create an account on crates.io and get an API token. To do so, visit the home page at *https://crates.io* and -log in via a GitHub account---the GitHub account is a requirement for now, but +log in via a GitHub account—the GitHub account is a requirement for now, but the site may support other ways of creating an account in the future. Once you’re logged in, visit your account settings at *https://crates.io/me* and retrieve your API key. Then run the `cargo login` command with your API key, @@ -392,7 +349,7 @@ $ cargo login abcdefghijklmnopqrstuvwxyz012345 ``` This command will inform Cargo of your API token and store it locally in -*~/.cargo/credentials*. Note that this token is a **secret** and should not be +*~/.cargo/credentials*. 
Note that this token is a *secret* and should not be shared with anyone else. If it is shared with anyone for any reason, you should revoke it and generate a new token on Crates.io. @@ -402,9 +359,6 @@ Now you have an account, and let’s say you already have a crate you want to publish. Before publishing, you’ll need to add some metadata to your crate by adding it to the `[package]` section of the crate’s *Cargo.toml*. - - - Your crate will first need a unique name. While you’re working on a crate locally, you may name a crate whatever you’d like. However, crate names on Crates.io are allocated on a first-come-first-serve basis. Once a crate name is @@ -413,6 +367,8 @@ you’d like to use on the site to find out if it has been taken. If it hasn’t edit the name in *Cargo.toml* under `[package]` to have the name you want to use for publishing like so: +Filename: Cargo.toml + ```toml [package] name = "guessing_game" @@ -442,16 +398,14 @@ Package Data Exchange (SPDX) at *http://spdx.org/licenses/* lists the identifiers you can use for this value. For example, to specify that you’ve licensed your crate using the MIT License, add the `MIT` identifier: +Filename: Cargo.toml + ```toml [package] name = "guessing_game" license = "MIT" ``` - - - If you want to use a license that doesn’t appear in the SPDX, you need to place the text of that license in a file, include the file in your project, then use `license-file` to specify the name of that file instead of using the `license` @@ -459,7 +413,7 @@ key. Guidance on which license is right for your project is out of scope for this book. Many people in the Rust community choose to license their projects in the -same way as Rust itself, with a dual license of `MIT/Apache-2.0`---this +same way as Rust itself, with a dual license of `MIT/Apache-2.0`—this demonstrates that you can also specify multiple license identifiers separated by a slash. @@ -467,6 +421,8 @@ So, with a unique name, the version, and author details that `cargo new` added when you created the crate, your description, and the license you chose added, the *Cargo.toml* for a project that’s ready to publish might look like this: +Filename: Cargo.toml + ```toml [package] name = "guessing_game" @@ -478,7 +434,7 @@ license = "MIT/Apache-2.0" [dependencies] ``` -[Cargo’s documentation](http://doc.rust-lang.org/cargo/) describes other +[Cargo’s documentation](https://doc.rust-lang.org/cargo/) describes other metadata you can specify to ensure your crate can be discovered and used more easily! @@ -528,11 +484,11 @@ projects from adding them as a new dependency. This is useful when a version of a crate ends up being broken for one reason or another. For situations such as this, Cargo supports *yanking* a version of a crate. -Yanking a version prevents new projects from starting to depend on that -version while allowing all existing projects that depend on it to continue to -download and depend on that version. Essentially, a yank means that all -projects with a *Cargo.lock* will not break, while any future *Cargo.lock* -files generated will not use the yanked version. +Yanking a version prevents new projects from starting to depend on that version +while allowing all existing projects that depend on it to continue to download +and depend on that version. Essentially, a yank means that all projects with a +*Cargo.lock* will not break, while any future *Cargo.lock* files generated will +not use the yanked version. 
To yank a version of a crate, run `cargo yank` and specify which version you want to yank: diff --git a/src/doc/book/second-edition/src/ch14-03-cargo-workspaces.md b/src/doc/book/second-edition/src/ch14-03-cargo-workspaces.md index bf39793765..9fbe4283d6 100644 --- a/src/doc/book/second-edition/src/ch14-03-cargo-workspaces.md +++ b/src/doc/book/second-edition/src/ch14-03-cargo-workspaces.md @@ -25,6 +25,8 @@ We need to modify the binary package’s *Cargo.toml* and add a `[workspace]` section to tell Cargo the `adder` package is a workspace. Add this at the bottom of the file: +Filename: Cargo.toml + ```toml [workspace] ``` @@ -33,26 +35,20 @@ Like many Cargo features, workspaces support convention over configuration: we don’t need to add anything more than this to *Cargo.toml* to define our workspace as long as we follow the convention. - - - ### Specifying Workspace Dependencies -The workspace convention says any crates in any subdirectories that the -top-level crate depends on are part of the workspace. Any crate, whether in a -workspace or not, can specify that it has a dependency on a crate in a local -directory by using the `path` attribute on the dependency specification in -*Cargo.toml*. If a crate has the `[workspace]` key and we specify path -dependencies where the paths are subdirectories of the crate’s directory, those -dependent crates will be considered part of the workspace. Let’s specify in the -*Cargo.toml* for the top-level `adder` crate that it will have a dependency on -an `add-one` crate that will be in the `add-one` subdirectory, by changing -*Cargo.toml* to look like this: - - - +By default, Cargo will include all transitive path dependencies. A *path +dependency* is when any crate, whether in a workspace or not, specifies that it +has a dependency on a crate in a local directory by using the `path` attribute +on the dependency specification in *Cargo.toml*. If a crate has the +`[workspace]` key, or if the crate is itself part of a workspace, and we +specify path dependencies where the paths are subdirectories of the crate’s +directory, those dependent crates will be considered part of the workspace. +Let’s specify in the *Cargo.toml* for the top-level `adder` crate that it will +have a dependency on an `add-one` crate that will be in the `add-one` +subdirectory, by changing *Cargo.toml* to look like this: + +Filename: Cargo.toml ```toml [dependencies] @@ -65,10 +61,6 @@ and are assumed to come from Crates.io. ### Creating the Second Crate in the Workspace - - - Next, while in the `adder` directory, generate an `add-one` crate: ```text @@ -98,12 +90,11 @@ pub fn add_one(x: i32) -> i32 { } ``` - - - Open up *src/main.rs* for `adder` and add an `extern crate` line at the top of the file to bring the new `add-one` library crate into scope. Then change the -`main` function to call the `add_one` function, as in Listing 14-12: +`main` function to call the `add_one` function, as in Listing 14-11: + +Filename: src/main.rs ```rust,ignore extern crate add_one; @@ -114,7 +105,7 @@ fn main() { } ``` -Listing 14-12: Using the `add-one` library crate from the +Listing 14-11: Using the `add-one` library crate from the `adder` crate Let’s build the `adder` crate by running `cargo build` in the *adder* directory! @@ -151,14 +142,6 @@ its own *target* directory. By sharing one *target* directory, the crates in the workspace can avoid rebuilding the other crates in the workspace more than necessary. 
- - - #### Depending on an External Crate in a Workspace Also notice the workspace only has one *Cargo.lock*, rather than having a @@ -316,9 +299,6 @@ does not have an `--all` flag or a `-p` flag, so it is necessary to change to each crate’s directory and run `cargo publish` on each crate in the workspace in order to publish them. - - - Now try adding an `add-two` crate to this workspace in a similar way as the `add-one` crate for some more practice! diff --git a/src/doc/book/second-edition/src/ch14-04-installing-binaries.md b/src/doc/book/second-edition/src/ch14-04-installing-binaries.md index 04b7107c87..c6869f4537 100644 --- a/src/doc/book/second-edition/src/ch14-04-installing-binaries.md +++ b/src/doc/book/second-edition/src/ch14-04-installing-binaries.md @@ -10,13 +10,10 @@ target that isn’t runnable on its own but is suitable for including within other programs. Usually, crates have information in the *README* file about whether a crate is a library, has a binary target, or both. - - - All binaries from `cargo install` are put into the installation root’s *bin* folder. If you installed Rust using *rustup.rs* and don’t have any custom -configurations, this will be `$HOME/.cargo/bin`. Add that directory to your -`$PATH` to be able to run programs you’ve gotten through `cargo install`. +configurations, this will be `$HOME/.cargo/bin`. Ensure that directory is in +your `$PATH` to be able to run programs you’ve gotten through `cargo install`. For example, we mentioned in Chapter 12 that there’s a Rust implementation of the `grep` tool for searching files called `ripgrep`. If we want to install diff --git a/src/doc/book/second-edition/src/ch15-00-smart-pointers.md b/src/doc/book/second-edition/src/ch15-00-smart-pointers.md index 33e5a92d32..d1417a5f0a 100644 --- a/src/doc/book/second-edition/src/ch15-00-smart-pointers.md +++ b/src/doc/book/second-edition/src/ch15-00-smart-pointers.md @@ -1,41 +1,97 @@ # Smart Pointers -*Pointer* is a generic programming term for something that refers to a location -that stores some other data. We learned about Rust’s references in Chapter 4; -they’re a plain sort of pointer indicated by the `&` symbol and borrow the -value that they point to. *Smart pointers* are data structures that act like a -pointer, but also have additional metadata and capabilities, such as reference -counting. The smart pointer pattern originated in C++. In Rust, an additional -difference between plain references and smart pointers is that references are a -kind of pointer that only borrow data; by contrast, in many cases, smart -pointers *own* the data that they point to. - -We’ve actually already encountered a few smart pointers in this book, even -though we didn’t call them that by name at the time. For example, in a certain -sense, `String` and `Vec` from Chapter 8 are both smart pointers. They own -some memory and allow you to manipulate it, and have metadata (like their -capacity) and extra capabilities or guarantees (`String` data will always be -valid UTF-8). The characteristics that distinguish a smart pointer from an -ordinary struct are that smart pointers implement the `Deref` and `Drop` -traits, and in this chapter we’ll be discussing both of those traits and why -they’re important to smart pointers. +A *pointer* is a general concept for a variable that contains an address in +memory. This address refers to, or “points at”, some other data. The most +common kind of pointer in Rust is a *reference*, which we learned about in +Chapter 4. 
References are indicated by the `&` symbol and borrow the value that +they point to. They don’t have any special abilities other than referring to +data. They also don’t have any overhead, so they’re used the most often. + +*Smart pointers*, on the other hand, are data structures that act like a +pointer, but they also have additional metadata and capabilities. The concept +of smart pointers isn’t unique to Rust; it originated in C++ and exists in +other languages as well. The different smart pointers defined in Rust’s +standard library provide extra functionality beyond what references provide. +One example that we’ll explore in this chapter is the *reference counting* +smart pointer type, which enables you to have multiple owners of data. The +reference counting smart pointer keeps track of how many owners there are, and +when there aren’t any remaining, the smart pointer takes care of cleaning up +the data. + + + + + + + +In Rust, where we have the concept of ownership and borrowing, an additional +difference between references and smart pointers is that references are a kind +of pointer that only borrow data; by contrast, in many cases, smart pointers +*own* the data that they point to. + +We’ve actually already encountered a few smart pointers in this book, such as +`String` and `Vec` from Chapter 8, though we didn’t call them smart pointers +at the time. Both these types count as smart pointers because they own some +memory and allow you to manipulate it. They also have metadata (such as their +capacity) and extra capabilities or guarantees (such as `String` ensuring its +data will always be valid UTF-8). + + + + +Smart pointers are usually implemented using structs. The characteristics that +distinguish a smart pointer from an ordinary struct are that smart pointers +implement the `Deref` and `Drop` traits. The `Deref` trait allows an instance +of the smart pointer struct to behave like a reference so that we can write +code that works with either references or smart pointers. The `Drop` trait +allows us to customize the code that gets run when an instance of the smart +pointer goes out of scope. In this chapter, we’ll be discussing both of those +traits and demonstrating why they’re important to smart pointers. Given that the smart pointer pattern is a general design pattern used frequently in Rust, this chapter won’t cover every smart pointer that exists. -Many libraries have their own and you may write some yourself. The ones we -cover here are the most common ones from the standard library: +Many libraries have their own smart pointers and you can even write some +yourself. 
We’ll just cover the most common smart pointers from the standard +library: + + + -* `Box`, for allocating values on the heap -* `Rc`, a reference counted type so data can have multiple owners -* `RefCell`, which isn’t a smart pointer itself, but manages access to the - smart pointers `Ref` and `RefMut` to enforce the borrowing rules at runtime - instead of compile time +* `Box` for allocating values on the heap +* `Rc`, a reference counted type that enables multiple ownership +* `Ref` and `RefMut`, accessed through `RefCell`, a type that enforces + the borrowing rules at runtime instead of compile time -Along the way, we’ll also cover: + + -* The *interior mutability* pattern where an immutable type exposes an API for - mutating an interior value, and the borrowing rules apply at runtime instead - of compile time -* Reference cycles, how they can leak memory, and how to prevent them +Along the way, we’ll cover the *interior mutability* pattern where an immutable +type exposes an API for mutating an interior value. We’ll also discuss +*reference cycles*, how they can leak memory, and how to prevent them. Let’s dive in! diff --git a/src/doc/book/second-edition/src/ch15-01-box.md b/src/doc/book/second-edition/src/ch15-01-box.md index df2d2197e5..f49e1602a1 100644 --- a/src/doc/book/second-edition/src/ch15-01-box.md +++ b/src/doc/book/second-edition/src/ch15-01-box.md @@ -1,9 +1,42 @@ ## `Box` Points to Data on the Heap and Has a Known Size The most straightforward smart pointer is a *box*, whose type is written -`Box`. Boxes allow you to put a single value on the heap (we talked about -the stack vs. the heap in Chapter 4). Listing 15-1 shows how to use a box to -store an `i32` on the heap: +`Box`. Boxes allow you to store data on the heap rather than the stack. What +remains on the stack is the pointer to the heap data. Refer back to Chapter 4 +if you’d like to review the difference between the stack and the heap. + + + + +Boxes don’t have performance overhead other than their data being on the heap +instead of on the stack, but they don’t have a lot of extra abilities either. +They’re most often used in these situations: + +- When you have a type whose size can’t be known at compile time, and you want + to use a value of that type in a context that needs to know an exact size +- When you have a large amount of data and you want to transfer ownership but + ensure the data won’t be copied when you do so +- When you want to own a value and only care that it’s a type that implements a + particular trait rather than knowing the concrete type itself + +We’re going to demonstrate the first case in the rest of this section. To +elaborate on the other two situations a bit more: in the second case, +transferring ownership of a large amount of data can take a long time because +the data gets copied around on the stack. To improve performance in this +situation, we can store the large amount of data on the heap in a box. Then, +only the small amount of pointer data is copied around on the stack, and the +data stays in one place on the heap. The third case is known as a *trait +object*, and Chapter 17 has an entire section devoted just to that topic. So +know that what you learn here will be applied again in Chapter 17! + +### Using a `Box` to Store Data on the Heap + +Before we get into a use case for `Box`, let’s get familiar with the syntax +and how to interact with values stored within a `Box`. 
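The basic syntax is just a call to `Box::new`; a minimal sketch of that shape (essentially what the listing discussed next shows) is:

```rust
fn main() {
    // `b` holds a Box<i32> on the stack; the value 5 it points to lives on the heap.
    let b = Box::new(5);
    println!("b = {}", b);
}
```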
+ +Listing 15-1 shows how to use a box to store an `i32` on the heap: Filename: src/main.rs @@ -17,34 +50,63 @@ fn main() { Listing 15-1: Storing an `i32` value on the heap using a box -This will print `b = 5`. In this case, we can access the data in the box in a -similar way as we would if this data was on the stack. Just like any value that -has ownership of data, when a box goes out of scope like `b` does at the end of -`main`, it will be deallocated. The deallocation happens for both the box -(stored on the stack) and the data it points to (stored on the heap). +We define the variable `b` to have the value of a `Box` that points to the +value `5`, which is allocated on the heap. This program will print `b = 5`; in +this case, we can access the data in the box in a similar way as we would if +this data was on the stack. Just like any value that has ownership of data, +when a box goes out of scope like `b` does at the end of `main`, it will be +deallocated. The deallocation happens for both the box (stored on the stack) +and the data it points to (stored on the heap). Putting a single value on the heap isn’t very useful, so you won’t use boxes by -themselves in the way that Listing 15-1 does very often. A time when boxes are -useful is when you want to ensure that your type has a known size. For example, -consider Listing 15-2, which contains an enum definition for a *cons list*, a -type of data structure that comes from functional programming. Note that this -won’t compile quite yet: - -Filename: src/main.rs - -```rust,ignore -enum List { - Cons(i32, List), - Nil, -} -``` - -Listing 15-2: The first attempt of defining an enum to -represent a cons list data structure of `i32` values - -We’re implementing a cons list that holds only `i32` values. We -could have also chosen to implement a cons list independent of the -type of value by using generics as discussed in Chapter 10. +themselves in the way that Listing 15-1 does very often. Having values like a +single `i32` on the stack, where they’re stored by default is more appropriate +in the majority of cases. Let’s get into a case where boxes allow us to define +types that we wouldn’t be allowed to if we didn’t have boxes. + + + + +### Boxes Enable Recursive Types + + + + + + +Rust needs to know at compile time how much space a type takes up. One kind of +type whose size can’t be known at compile time is a *recursive type* where a +value can have as part of itself another value of the same type. This nesting +of values could theoretically continue infinitely, so Rust doesn’t know how +much space a value of a recursive type needs. Boxes have a known size, however, +so by inserting a box in a recursive type definition, we are allowed to have +recursive types. + +Let’s explore the *cons list*, a data type common in functional programming +languages, to illustrate this concept. The cons list type we’re going to define +is straightforward except for the recursion, so the concepts in this example +will be useful any time you get into more complex situations involving +recursive types. + + + + +A cons list is a list where each item in the list contains two things: the +value of the current item and the next item. The last item in the list contains +only a value called `Nil` without a next item. > #### More Information About the Cons List > @@ -63,14 +125,57 @@ type of value by using generics as discussed in Chapter 10. > announces the end of the list. 
Note that this is not the same as the “null” > or “nil” concept from Chapter 6, which is an invalid or absent value. -A cons list is a list where each element contains both a single value as well -as the remains of the list at that point. The remains of the list are defined -by nested cons lists. The end of the list is signified by the value `Nil`. Cons -lists aren’t used very often in Rust; `Vec` is usually a better choice. -Implementing this data structure is a good example of a situation where -`Box` is useful, though. Let’s find out why! +Note that while functional programming languages use cons lists frequently, +this isn’t a commonly used data structure in Rust. Most of the time when you +have a list of items in Rust, `Vec` is a better choice. Other, more complex +recursive data types *are* useful in various situations in Rust, but by +starting with the cons list, we can explore how boxes let us define a recursive +data type without much distraction. + + + + +Listing 15-2 contains an enum definition for a cons list. Note that this +won’t compile quite yet because this is type doesn’t have a known size, which +we’ll demonstrate: + + + -Using a cons list to store the list `1, 2, 3` would look like this: +Filename: src/main.rs + +```rust,ignore +enum List { + Cons(i32, List), + Nil, +} +``` + +Listing 15-2: The first attempt of defining an enum to +represent a cons list data structure of `i32` values + +> Note: We’re choosing to implement a cons list that only holds `i32` values +> for the purposes of this example. We could have implemented it using +> generics, as we discussed in Chapter 10, in order to define a cons list type +> that could store values of any type. + + + + +Using our cons list type to store the list `1, 2, 3` would look like the code +in Listing 15-3: + +Filename: src/main.rs ```rust,ignore use List::{Cons, Nil}; @@ -80,12 +185,15 @@ fn main() { } ``` +Listing 15-3: Using the `List` enum to store the list `1, +2, 3` + The first `Cons` value holds `1` and another `List` value. This `List` value is another `Cons` value that holds `2` and another `List` value. This is one more `Cons` value that holds `3` and a `List` value, which is finally `Nil`, the non-recursive variant that signals the end of the list. -If we try to compile the above code, we get the error shown in Listing 15-3: +If we try to compile the above code, we get the error shown in Listing 15-4: ```text error[E0072]: recursive type `List` has infinite size @@ -100,14 +208,26 @@ error[E0072]: recursive type `List` has infinite size make `List` representable ``` -Listing 15-3: The error we get when attempting to define +Listing 15-4: The error we get when attempting to define a recursive enum -The error says this type ‘has infinite size’. Why is that? It’s because we’ve -defined `List` to have a variant that is recursive: it holds another value of -itself. This means Rust can’t figure out how much space it needs in order to -store a `List` value. Let’s break this down a bit: first let’s look at how Rust -decides how much space it needs to store a value of a non-recursive type. + + + +The error says this type ‘has infinite size’. The reason is the way we’ve +defined `List` is with a variant that is recursive: it holds another value of +itself directly. This means Rust can’t figure out how much space it needs in +order to store a `List` value. Let’s break this down a bit: first let’s look at +how Rust decides how much space it needs to store a value of a non-recursive +type. 
+ +### Computing the Size of a Non-Recursive Type + Recall the `Message` enum we defined in Listing 6-2 when we discussed enum definitions in Chapter 6: @@ -120,29 +240,31 @@ enum Message { } ``` -When Rust needs to know how much space to allocate for a `Message` value, it -can go through each of the variants and see that `Message::Quit` does not need -any space, `Message::Move` needs enough space to store two `i32` values, and so -forth. Therefore, the most space a `Message` value will need is the space it -would take to store the largest of its variants. - -Contrast this to what happens when the Rust compiler looks at a recursive type -like `List` in Listing 15-2. The compiler tries to figure out how much memory -is needed to store a value of the `List` enum, and starts by looking at the `Cons` +To determine how much space to allocate for a `Message` value, Rust goes +through each of the variants to see which variant needs the most space. Rust +sees that `Message::Quit` doesn’t need any space, `Message::Move` needs enough +space to store two `i32` values, and so forth. Since only one variant will end +up being used, the most space a `Message` value will need is the space it would +take to store the largest of its variants. + +Contrast this to what happens when Rust tries to determine how much space a +recursive type like the `List` enum in Listing 15-2 needs. The compiler starts +by looking at the `Cons` variant, which holds a value of type `i32` and a value +of type `List`. Therefore, `Cons` needs an amount of space equal to the size of +an `i32` plus the size of a `List`. To figure out how much memory the `List` +type needs, the compiler looks at the variants, starting with the `Cons` variant. The `Cons` variant holds a value of type `i32` and a value of type -`List`, so `Cons` needs an amount of space equal to the size of an `i32` plus -the size of a `List`. To figure out how much memory a `List` needs, it looks at -its variants, starting with the `Cons` variant. The `Cons` variant holds a -value of type `i32` and a value of type `List`, and this continues infinitely, -as shown in Figure 15-4. +`List`, and this continues infinitely, as shown in Figure 15-5. An infinite Cons list -Figure 15-4: An infinite `List` consisting of infinite +Figure 15-5: An infinite `List` consisting of infinite `Cons` variants +### Using `Box` to Get a Recursive Type with a Known Size + Rust can’t figure out how much space to allocate for recursively defined types, -so the compiler gives the error in Listing 15-3. The error did include this +so the compiler gives the error in Listing 15-4. The error does include this helpful suggestion: ```text @@ -150,12 +272,23 @@ helpful suggestion: make `List` representable ``` -Because a `Box` is a pointer, we always know how much space it needs: a -pointer takes up a `usize` amount of space. The value of the `usize` will be -the address of the heap data. The heap data can be any size, but the address to -the start of that heap data will always fit in a `usize`. We can change our -definition from Listing 15-2 to look like the definition in Listing 15-5 by -changing `main` to use `Box::new` for the values inside the `Cons` variants: +In this suggestion, “indirection” means that instead of storing a value +directly, we’re going to store the value indirectly by storing a pointer to +the value instead. + +Because a `Box` is a pointer, Rust always knows how much space a `Box` +needs: a pointer’s size doesn’t change based on the amount of data it’s +pointing to. 
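Concretely, that fixed pointer size is what makes the boxed cons list compile; here is a sketch of the shape the next paragraphs build up to, reusing the `List`, `Cons`, and `Nil` names from above:

```rust
enum List {
    Cons(i32, Box<List>),
    Nil,
}

use List::{Cons, Nil};

fn main() {
    // Each `Cons` holds an i32 plus a fixed-size pointer to the rest of the
    // list on the heap, so the size of `List` is known at compile time.
    let _list = Cons(1, Box::new(Cons(2, Box::new(Cons(3, Box::new(Nil))))));
}
```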
+ +So we can put a `Box` inside the `Cons` variant instead of another `List` value +directly. The `Box` will point to the next `List` value that will be on the +heap, rather than inside the `Cons` variant. Conceptually, we still have a list +created by lists “holding” other lists, but the way this concept is implemented +is now more like the items being next to one another rather than inside one +another. + +We can change the definition of the `List` enum from Listing 15-2 and the usage +of the `List` from Listing 15-3 to the code in Listing 15-6, which will compile: Filename: src/main.rs @@ -175,30 +308,45 @@ fn main() { } ``` -Listing 15-5: Definition of `List` that uses `Box` in +Listing 15-6: Definition of `List` that uses `Box` in order to have a known size -The compiler will now be able to figure out the size it needs to store a `List` -value. Rust will look at `List`, and again start by looking at the `Cons` -variant. The `Cons` variant will need the size of `i32` plus the space to store -a `usize`, since a box always has the size of a `usize`, no matter what it’s -pointing to. Then Rust looks at the `Nil` variant, which does not store a -value, so `Nil` doesn’t need any space. We’ve broken the infinite, recursive -chain by adding in a box. Figure 15-6 shows what the `Cons` variant looks like -now: +The `Cons` variant will need the size of an `i32` plus the space to store the +box’s pointer data. The `Nil` variant stores no values, so it needs less space +than the `Cons` variant. We now know that any `List` value will take up the +size of an `i32` plus the size of a box’s pointer data. By using a box, we’ve +broken the infinite, recursive chain so the compiler is able to figure out the +size it needs to store a `List` value. Figure 15-7 shows what the `Cons` +variant looks like now: A finite Cons list -Figure 15-6: A `List` that is not infinitely sized since +Figure 15-7: A `List` that is not infinitely sized since `Cons` holds a `Box` -This is the main area where boxes are useful: breaking up an infinite data -structure so that the compiler can know what size it is. We’ll look at another -case where Rust has data of unknown size in Chapter 17 when we discuss trait -objects. - -Even though you won’t be using boxes very often, they are a good way to -understand the smart pointer pattern. Two of the aspects of `Box` that are -commonly used with smart pointers are its implementations of the `Deref` trait -and the `Drop` trait. Let’s investigate how these traits work and how smart -pointers use them. + + + +Boxes only provide the indirection and heap allocation; they don’t have any +other special abilities like those we’ll see with the other smart pointer +types. They also don’t have any performance overhead that these special +abilities incur, so they can be useful in cases like the cons list where the +indirection is the only feature we need. We’ll look at more use cases for boxes +in Chapter 17, too. + +The `Box` type is a smart pointer because it implements the `Deref` trait, +which allows `Box` values to be treated like references. When a `Box` +value goes out of scope, the heap data that the box is pointing to is cleaned +up as well because of the `Box` type’s `Drop` trait implementation. Let’s +explore these two types in more detail; these traits are going to be even more +important to the functionality provided by the other smart pointer types we’ll +be discussing in the rest of this chapter. 
+ + + diff --git a/src/doc/book/second-edition/src/ch15-02-deref.md b/src/doc/book/second-edition/src/ch15-02-deref.md index 9713ab58b4..818e385951 100644 --- a/src/doc/book/second-edition/src/ch15-02-deref.md +++ b/src/doc/book/second-edition/src/ch15-02-deref.md @@ -1,193 +1,444 @@ -## The `Deref` Trait Allows Access to the Data Through a Reference - -The first important smart pointer-related trait is `Deref`, which allows us to -override `*`, the dereference operator (as opposed to the multiplication -operator or the glob operator). Overriding `*` for smart pointers makes -accessing the data behind the smart pointer convenient, and we’ll talk about -what we mean by convenient when we get to deref coercions later in this section. - -We briefly mentioned the dereference operator in Chapter 8, in the hash map -section titled “Update a Value Based on the Old Value”. We had a mutable -reference, and we wanted to change the value that the reference was pointing -to. In order to do that, first we had to dereference the reference. Here’s -another example using references to `i32` values: +## Treating Smart Pointers like Regular References with the `Deref` Trait + +Implementing `Deref` trait allows us to customize the behavior of the +*dereference operator* `*`(as opposed to the multiplication or glob operator). +By implementing `Deref` in such a way that a smart pointer can be treated like +a regular reference, we can write code that operates on references and use that +code with smart pointers too. + + + + + + + +Let’s first take a look at how `*` works with regular references, then try and +define our own type like `Box` and see why `*` doesn’t work like a +reference. We’ll explore how implementing the `Deref` trait makes it possible +for smart pointers to work in a similar way as references. Finally, we’ll look +at the *deref coercion* feature of Rust and how that lets us work with either +references or smart pointers. + +### Following the Pointer to the Value with `*` + + + + + + + +A regular reference is a type of pointer, and one way to think of a pointer is +that it’s an arrow to a value stored somewhere else. In Listing 15-8, let’s +create a reference to an `i32` value then use the dereference operator to +follow the reference to the data: + + + + + + + +Filename: src/main.rs ```rust -let mut x = 5; -{ - let y = &mut x; +fn main() { + let x = 5; + let y = &x; - *y += 1 + assert_eq!(5, x); + assert_eq!(5, *y); } +``` -assert_eq!(6, x); +Listing 15-8: Using the dereference operator to follow a +reference to an `i32` value + +The variable `x` holds an `i32` value, `5`. We set `y` equal to a reference to +`x`. We can assert that `x` is equal to `5`. However, if we want to make an +assertion about the value in `y`, we have to use `*y` to follow the reference +to the value that the reference is pointing to (hence *de-reference*). Once we +de-reference `y`, we have access to the integer value `y` is pointing to that +we can compare with `5`. + +If we try to write `assert_eq!(5, y);` instead, we’ll get this compilation +error: + +```text +error[E0277]: the trait bound `{integer}: std::cmp::PartialEq<&{integer}>` is +not satisfied + --> :5:19 + | +5 | if ! ( * left_val == * right_val ) { + | ^^ can't compare `{integer}` with `&{integer}` + | + = help: the trait `std::cmp::PartialEq<&{integer}>` is not implemented for + `{integer}` ``` -We use `*y` to access the data that the mutable reference in `y` refers to, -rather than the mutable reference itself. 
We can then modify that data, in this -case by adding 1. - -With references that aren’t smart pointers, there’s only one value that the -reference is pointing to, so the dereference operation is straightforward. -Smart pointers can also store metadata about the pointer or the data. When -dereferencing a smart pointer, we only want the data, not the metadata, since -dereferencing a regular reference only gives us data and not metadata. We want -to be able to use smart pointers in the same places that we can use regular -references. To enable that, we can override the behavior of the `*` operator by -implementing the `Deref` trait. - -Listing 15-7 has an example of overriding `*` using `Deref` on a struct we’ve -defined to hold mp3 data and metadata. `Mp3` is, in a sense, a smart pointer: -it owns the `Vec` data containing the audio. In addition, it holds some -optional metadata, in this case the artist and title of the song in the audio -data. We want to be able to conveniently access the audio data, not the -metadata, so we implement the `Deref` trait to return the audio data. -Implementing the `Deref` trait requires implementing one method named `deref` -that borrows `self` and returns the inner data: +Comparing a reference to a number with a number isn’t allowed because they’re +different types. We have to use `*` to follow the reference to the value it’s +pointing to. -Filename: src/main.rs +### Using `Box` Like a Reference + +We can rewrite the code in Listing 15-8 to use a `Box` instead of a +reference, and the de-reference operator will work the same way as shown in +Listing 15-9: + +Filename: src/main.rs ```rust -use std::ops::Deref; +fn main() { + let x = 5; + let y = Box::new(x); -struct Mp3 { - audio: Vec, - artist: Option, - title: Option, + assert_eq!(5, x); + assert_eq!(5, *y); } +``` -impl Deref for Mp3 { - type Target = Vec; +Listing 15-9: Using the dereference operator on a +`Box` - fn deref(&self) -> &Vec { - &self.audio +The only part of Listing 15-8 that we changed was to set `y` to be an instance +of a box pointing to the value in `x` rather than a reference pointing to the +value of `x`. In the last assertion, we can use the dereference operator to +follow the box’s pointer in the same way that we did when `y` was a reference. +Let’s explore what is special about `Box` that enables us to do this by +defining our own box type. + +### Defining Our Own Smart Pointer + +Let’s build a smart pointer similar to the `Box` type that the standard +library has provided for us, in order to experience that smart pointers don’t +behave like references by default. Then we’ll learn about how to add the +ability to use the dereference operator. + +`Box` is ultimately defined as a tuple struct with one element, so Listing +15-10 defines a `MyBox` type in the same way. We’ll also define a `new` +function to match the `new` function defined on `Box`: + +Filename: src/main.rs + +```rust +struct MyBox(T); + +impl MyBox { + fn new(x: T) -> MyBox { + MyBox(x) } } +``` +Listing 15-10: Defining a `MyBox` type + +We define a struct named `MyBox` and declare a generic parameter `T`, since we +want our type to be able to hold values of any type. `MyBox` is a tuple struct +with one element of type `T`. The `MyBox::new` function takes one parameter of +type `T` and returns a `MyBox` instance that holds the value passed in. + +Let’s try adding the code from Listing 15-9 to the code in Listing 15-10 and +changing `main` to use the `MyBox` type we’ve defined instead of `Box`. 
+The code in Listing 15-11 won’t compile because Rust doesn’t know how to +dereference `MyBox`: + +Filename: src/main.rs + +```rust,ignore fn main() { - let my_favorite_song = Mp3 { - // we would read the actual audio data from an mp3 file - audio: vec![1, 2, 3], - artist: Some(String::from("Nirvana")), - title: Some(String::from("Smells Like Teen Spirit")), - }; - - assert_eq!(vec![1, 2, 3], *my_favorite_song); + let x = 5; + let y = MyBox::new(x); + + assert_eq!(5, x); + assert_eq!(5, *y); +} +``` + +Listing 15-11: Attempting to use `MyBox` in the same +way we were able to use references and `Box` + +The compilation error we get is: + +```text +error: type `MyBox<{integer}>` cannot be dereferenced + --> src/main.rs:14:19 + | +14 | assert_eq!(5, *y); + | ^^ +``` + +Our `MyBox` type can’t be dereferenced because we haven’t implemented that +ability on our type. To enable dereferencing with the `*` operator, we can +implement the `Deref` trait. + +### Implementing the `Deref` Trait Defines How To Treat a Type Like a Reference + +As we discussed in Chapter 10, in order to implement a trait, we need to +provide implementations for the trait’s required methods. The `Deref` trait, +provided by the standard library, requires implementing one method named +`deref` that borrows `self` and returns a reference to the inner data. Listing +15-12 contains an implementation of `Deref` to add to the definition of `MyBox`: + +Filename: src/main.rs + +```rust +use std::ops::Deref; + +# struct MyBox(T); +impl Deref for MyBox { + type Target = T; + + fn deref(&self) -> &T { + &self.0 + } } ``` -Listing 15-7: An implementation of the `Deref` trait on a -struct that holds mp3 file data and metadata - -Most of this should look familiar: a struct, a trait implementation, and a -main function that creates an instance of the struct. There is one part we -haven’t explained thoroughly yet: similarly to Chapter 13 when we looked at the -Iterator trait with the `type Item`, the `type Target = T;` syntax is defining -an associated type, which is covered in more detail in Chapter 19. Don’t worry -about that part of the example too much; it is a slightly different way of -declaring a generic parameter. - -In the `assert_eq!`, we’re verifying that `vec![1, 2, 3]` is the result we get -when dereferencing the `Mp3` instance with `*my_favorite_song`, which is what -happens since we implemented the `deref` method to return the audio data. If -we hadn’t implemented the `Deref` trait for `Mp3`, Rust wouldn’t compile the -code `*my_favorite_song`: we’d get an error saying type `Mp3` cannot be -dereferenced. - -Without the `Deref` trait, the compiler can only dereference `&` references, -which `my_favorite_song` is not (it is an `Mp3` struct). With the `Deref` -trait, the compiler knows that types implementing the `Deref` trait have a -`deref` method that returns a reference (in this case, `&self.audio` because of -our definition of `deref` in Listing 15-7). So in order to get a `&` reference -that `*` can dereference, the compiler expands `*my_favorite_song` to this: +Listing 15-12: Implementing `Deref` on `MyBox` + +The `type Target = T;` syntax defines an associated type for this trait to use. +Associated types are a slightly different way of declaring a generic parameter +that you don’t need to worry about too much for now; we’ll cover it in more +detail in Chapter 19. + + + + +We filled in the body of the `deref` method with `&self.0` so that `deref` +returns a reference to the value we want to access with the `*` operator. 
The +`main` function from Listing 15-11 that calls `*` on the `MyBox` value now +compiles and the assertions pass! + +Without the `Deref` trait, the compiler can only dereference `&` references. +The `Deref` trait’s `deref` method gives the compiler the ability to take a +value of any type that implements `Deref` and call the `deref` method in order +to get a `&` reference that it knows how to dereference. + +When we typed `*y` in Listing 15-11, what Rust actually ran behind the scenes +was this code: ```rust,ignore -*(my_favorite_song.deref()) +*(y.deref()) ``` -The result is the value in `self.audio`. The reason `deref` returns a reference -that we then have to dereference, rather than just returning a value directly, -is because of ownership: if the `deref` method directly returned the value -instead of a reference to it, the value would be moved out of `self`. We don’t -want to take ownership of `my_favorite_song.audio` in this case and most cases -where we use the dereference operator. + + + +Rust substitutes the `*` operator with a call to the `deref` method and then a +plain dereference so that we don’t have to think about when we have to call the +`deref` method or not. This feature of Rust lets us write code that functions +identically whether we have a regular reference or a type that implements +`Deref`. + +The reason the `deref` method returns a reference to a value, and why the plain +dereference outside the parentheses in `*(y.deref())` is still necessary, is +because of ownership. If the `deref` method returned the value directly instead +of a reference to the value, the value would be moved out of `self`. We don’t +want to take ownership of the inner value inside `MyBox` in this case and in +most cases where we use the dereference operator. Note that replacing `*` with a call to the `deref` method and then a call to -`*` happens once, each time the `*` is used. The substitution of `*` does not -recurse infinitely. That’s how we end up with data of type `Vec`, which -matches the `vec![1, 2, 3]` in the `assert_eq!` in Listing 15-7. +`*` happens once, each time we type a `*` in our code. The substitution of `*` +does not recurse infinitely. That’s how we end up with data of type `i32`, +which matches the `5` in the `assert_eq!` in Listing 15-11. ### Implicit Deref Coercions with Functions and Methods -Rust tends to favor explicitness over implicitness, but one case where this -does not hold true is *deref coercions* of arguments to functions and methods. -A deref coercion will automatically convert a reference to any pointer into a -reference to that pointer’s contents. A deref coercion happens when the -reference type of the argument passed into the function differs from the -reference type of the parameter defined in that function’s signature. Deref -coercion was added to Rust to make calling functions and methods not need as -many explicit references and dereferences with `&` and `*`. + + + +*Deref coercion* is a convenience that Rust performs on arguments to functions +and methods. Deref coercion converts a reference to a type that implements +`Deref` into a reference to a type that `Deref` can convert the original type +into. Deref coercion happens automatically when we pass a reference to a value +of a particular type as an argument to a function or method that doesn’t match +the type of the parameter in the function or method definition, and there’s a +sequence of calls to the `deref` method that will convert the type we provided +into the type that the parameter needs. 
+ +Deref coercion was added to Rust so that programmers writing function and +method calls don’t need to add as many explicit references and dereferences +with `&` and `*`. This feature also lets us write more code that can work for +either references or smart pointers. + +To illustrate deref coercion in action, let’s use the `MyBox` type we +defined in Listing 15-10 as well as the implementation of `Deref` that we added +in Listing 15-12. Listing 15-13 shows the definition of a function that has a +string slice parameter: -Using our `Mp3` struct from Listing 15-7, here’s the signature of a function to -compress mp3 audio data that takes a slice of `u8`: +Filename: src/main.rs -```rust,ignore -fn compress_mp3(audio: &[u8]) -> Vec { - // the actual implementation would go here +```rust +fn hello(name: &str) { + println!("Hello, {}!", name); } ``` -If Rust didn’t have deref coercion, in order to call this function with the -audio data in `my_favorite_song`, we’d have to write: +Listing 15-13: A `hello` function that has the parameter +`name` of type `&str` -```rust,ignore -compress_mp3(my_favorite_song.audio.as_slice()) +We can call the `hello` function with a string slice as an argument, like +`hello("Rust");` for example. Deref coercion makes it possible for us to call +`hello` with a reference to a value of type `MyBox`, as shown in +Listing 15-14: + +Filename: src/main.rs + +```rust +# use std::ops::Deref; +# +# struct MyBox(T); +# +# impl MyBox { +# fn new(x: T) -> MyBox { +# MyBox(x) +# } +# } +# +# impl Deref for MyBox { +# type Target = T; +# +# fn deref(&self) -> &T { +# &self.0 +# } +# } +# +# fn hello(name: &str) { +# println!("Hello, {}!", name); +# } +# +fn main() { + let m = MyBox::new(String::from("Rust")); + hello(&m); +} ``` -That is, we’d have to explicitly say that we want the data in the `audio` field -of `my_favorite_song` and that we want a slice referring to the whole -`Vec`. If there were a lot of places where we’d want to process the `audio` -data in a similar manner, `.audio.as_slice()` would be wordy and repetitive. +Listing 15-14: Calling `hello` with a reference to a +`MyBox`, which works because of deref coercion -However, because of deref coercion and our implementation of the `Deref` trait -on `Mp3`, we can call this function with the data in `my_favorite_song` by -using this code: +Here we’re calling the `hello` function with the argument `&m`, which is a +reference to a `MyBox` value. Because we implemented the `Deref` trait +on `MyBox` in Listing 15-12, Rust can turn `&MyBox` into `&String` +by calling `deref`. The standard library provides an implementation of `Deref` +on `String` that returns a string slice, which we can see in the API +documentation for `Deref`. Rust calls `deref` again to turn the `&String` into +`&str`, which matches the `hello` function’s definition. -```rust,ignore -let result = compress_mp3(&my_favorite_song); +If Rust didn’t implement deref coercion, in order to call `hello` with a value +of type `&MyBox`, we’d have to write the code in Listing 15-15 instead +of the code in Listing 15-14: + +Filename: src/main.rs + +```rust +# use std::ops::Deref; +# +# struct MyBox(T); +# +# impl MyBox { +# fn new(x: T) -> MyBox { +# MyBox(x) +# } +# } +# +# impl Deref for MyBox { +# type Target = T; +# +# fn deref(&self) -> &T { +# &self.0 +# } +# } +# +# fn hello(name: &str) { +# println!("Hello, {}!", name); +# } +# +fn main() { + let m = MyBox::new(String::from("Rust")); + hello(&(*m)[..]); +} ``` -Just an `&` and the instance, nice! 
We can treat our smart pointer as if it was -a regular reference. Deref coercion means that Rust can use its knowledge of -our `Deref` implementation, namely: Rust knows that `Mp3` implements the -`Deref` trait and returns `&Vec` from the `deref` method. Rust also knows -the standard library implements the `Deref` trait on `Vec` to return `&[T]` -from the `deref` method (and we can find that out too by looking at the API -documentation for `Vec`). So, at compile time, Rust will see that it can use -`Deref::deref` twice to turn `&Mp3` into `&Vec` and then into `&[T]` to -match the signature of `compress_mp3`. That means we get to do less typing! -Rust will analyze types through `Deref::deref` as many times as it needs to in -order to get a reference to match the parameter’s type, when the `Deref` trait -is defined for the types involved. This indirection is resolved at compile time, -so there is no run-time penalty for taking advantage of deref coercion! - -Similar to how we use the `Deref` trait to override `*` on `&T`s, there is also -a `DerefMut` trait for overriding `*` on `&mut T`. +Listing 15-15: The code we’d have to write if Rust didn’t +have deref coercion + +The `(*m)` is dereferencing the `MyBox` into a `String`. Then the `&` +and `[..]` are taking a string slice of the `String` that is equal to the whole +string to match the signature of `hello`. The code without deref coercions is +harder to read, write, and understand with all of these symbols involved. Deref +coercion makes it so that Rust takes care of these conversions for us +automatically. + +When the `Deref` trait is defined for the types involved, Rust will analyze the +types and use `Deref::deref` as many times as it needs in order to get a +reference to match the parameter’s type. This is resolved at compile time, so +there is no run-time penalty for taking advantage of deref coercion! + +### How Deref Coercion Interacts with Mutability + + + + +Similar to how we use the `Deref` trait to override `*` on immutable +references, Rust provides a `DerefMut` trait for overriding `*` on mutable +references. Rust does deref coercion when it finds types and trait implementations in three cases: + + + * From `&T` to `&U` when `T: Deref`. * From `&mut T` to `&mut U` when `T: DerefMut`. * From `&mut T` to `&U` when `T: Deref`. -The first two are the same, except for mutability: if you have a `&T`, and -`T` implements `Deref` to some type `U`, you can get a `&U` transparently. Same -for mutable references. The last one is more tricky: if you have a mutable -reference, it will also coerce to an immutable one. The other case is _not_ -possible though: immutable references will never coerce to mutable ones. - -The reason that the `Deref` trait is important to the smart pointer pattern is -that smart pointers can then be treated like regular references and used in -places that expect regular references. We don’t have to redefine methods and -functions to take smart pointers explicitly, for example. +The first two cases are the same except for mutability. The first case says +that if you have a `&T`, and `T` implements `Deref` to some type `U`, you can +get a `&U` transparently. The second case states that the same deref coercion +happens for mutable references. + +The last case is trickier: Rust will also coerce a mutable reference to an +immutable one. The reverse is *not* possible though: immutable references will +never coerce to mutable ones. 
Because of the borrowing rules, if you have a +mutable reference, that mutable reference must be the only reference to that +data (otherwise, the program wouldn’t compile). Converting one mutable +reference to one immutable reference will never break the borrowing rules. +Converting an immutable reference to a mutable reference would require that +there was only one immutable reference to that data, and the borrowing rules +don’t guarantee that. Therefore, Rust can’t make the assumption that converting +an immutable reference to a mutable reference is possible. + + + diff --git a/src/doc/book/second-edition/src/ch15-03-drop.md b/src/doc/book/second-edition/src/ch15-03-drop.md index d5c1d5d30a..6626e20ff8 100644 --- a/src/doc/book/second-edition/src/ch15-03-drop.md +++ b/src/doc/book/second-edition/src/ch15-03-drop.md @@ -1,32 +1,53 @@ ## The `Drop` Trait Runs Code on Cleanup -The other trait that’s important to the smart pointer pattern is the `Drop` -trait. `Drop` lets us run some code when a value is about to go out of scope. -Smart pointers perform important cleanup when being dropped, like deallocating -memory or decrementing a reference count. More generally, data types can manage -resources beyond memory, like files or network connections, and use `Drop` to -release those resources when our code is done with them. We’re discussing -`Drop` in the context of smart pointers, though, because the functionality of -the `Drop` trait is almost always used when implementing smart pointers. - -In some other languages, we have to remember to call code to free the memory or -resource every time we finish using an instance of a smart pointer. If we -forget, the system our code is running on might get overloaded and crash. In -Rust, we can specify that some code should be run when a value goes out of -scope, and the compiler will insert this code automatically. That means we don’t -need to remember to put this code everywhere we’re done with an instance of -these types, but we still won’t leak resources! - -The way we specify code should be run when a value goes out of scope is by -implementing the `Drop` trait. The `Drop` trait requires us to implement one -method named `drop` that takes a mutable reference to `self`. - -Listing 15-8 shows a `CustomSmartPointer` struct that doesn’t actually do -anything, but we’re printing out `CustomSmartPointer created.` right after we -create an instance of the struct and `Dropping CustomSmartPointer!` when the -instance goes out of scope so that we can see when each piece of code gets run. -Instead of a `println!` statement, you’d fill in `drop` with whatever cleanup -code your smart pointer needs to run: +The second trait important to the smart pointer pattern is `Drop`, which lets +us customize what happens when a value is about to go out of scope. We can +provide an implementation for the `Drop` trait on any type, and the code we +specify can be used to release resources like files or network connections. +We’re introducing `Drop` in the context of smart pointers because the +functionality of the `Drop` trait is almost always used when implementing a +smart pointer. For example, `Box` customizes `Drop` in order to deallocate +the space on the heap that the box points to. + +In some languages, the programmer must call code to free memory or resources +every time they finish using an instance of a smart pointer. If they forget, +the system might become overloaded and crash. 
In Rust, we can specify that a +particular bit of code should be run whenever a value goes out of scope, and +the compiler will insert this code automatically. + + + + +This means we don’t need to be careful about placing clean up code everywhere +in a program that an instance of a particular type is finished with, but we +still won’t leak resources! + +We specify the code to run when a value goes out of scope by implementing the +`Drop` trait. The `Drop` trait requires us to implement one method named `drop` +that takes a mutable reference to `self`. In order to be able to see when Rust +calls `drop`, let’s implement `drop` with `println!` statements for now. + + + + +Listing 15-8 shows a `CustomSmartPointer` struct whose only custom +functionality is that it will print out `Dropping CustomSmartPointer!` when the +instance goes out of scope. This will demonstrate when Rust runs the `drop` +function: + + + Filename: src/main.rs @@ -37,47 +58,76 @@ struct CustomSmartPointer { impl Drop for CustomSmartPointer { fn drop(&mut self) { - println!("Dropping CustomSmartPointer!"); + println!("Dropping CustomSmartPointer with data `{}`!", self.data); } } fn main() { - let c = CustomSmartPointer { data: String::from("some data") }; - println!("CustomSmartPointer created."); - println!("Wait for it..."); + let c = CustomSmartPointer { data: String::from("my stuff") }; + let d = CustomSmartPointer { data: String::from("other stuff") }; + println!("CustomSmartPointers created."); } ``` Listing 15-8: A `CustomSmartPointer` struct that -implements the `Drop` trait, where we could put code that would clean up after -the `CustomSmartPointer`. +implements the `Drop` trait, where we would put our clean up code. + +The `Drop` trait is included in the prelude, so we don’t need to import it. We +implement the `Drop` trait on `CustomSmartPointer`, and provide an +implementation for the `drop` method that calls `println!`. The body of the +`drop` function is where you’d put any logic that you wanted to run when an +instance of your type goes out of scope. We’re choosing to print out some text +here in order to demonstrate when Rust will call `drop`. + + + -The `Drop` trait is in the prelude, so we don’t need to import it. The `drop` -method implementation calls the `println!`; this is where you’d put the actual -code needed to close the socket. In `main`, we create a new instance of -`CustomSmartPointer` then print out `CustomSmartPointer created.` to be able to -see that our code got to that point at runtime. At the end of `main`, our -instance of `CustomSmartPointer` will go out of scope. Note that we didn’t call -the `drop` method explicitly. +In `main`, we create a new instance of `CustomSmartPointer` and then print out +`CustomSmartPointer created.`. At the end of `main`, our instance of +`CustomSmartPointer` will go out of scope, and Rust will call the code we put +in the `drop` method, printing our final message. Note that we didn’t need to +call the `drop` method explicitly. -When we run this program, we’ll see: +When we run this program, we’ll see the following output: ```text -CustomSmartPointer created. -Wait for it... -Dropping CustomSmartPointer! +CustomSmartPointers created. +Dropping CustomSmartPointer with data `other stuff`! +Dropping CustomSmartPointer with data `my stuff`! ``` -printed to the screen, which shows that Rust automatically called `drop` for us -when our instance went out of scope. - -We can use the `std::mem::drop` function to drop a value earlier than when it -goes out of scope. 
This isn’t usually necessary; the whole point of the `Drop` -trait is that it’s taken care of automatically for us. We’ll see an example of -a case when we’ll need to drop a value earlier than when it goes out of scope -in Chapter 16 when we’re talking about concurrency. For now, let’s just see -that it’s possible, and `std::mem::drop` is in the prelude so we can just call -`drop` as shown in Listing 15-9: +Rust automatically called `drop` for us when our instance went out of scope, +calling the code we specified. Variables are dropped in the reverse order of +the order in which they were created, so `d` was dropped before `c`. This is +just to give you a visual guide to how the drop method works, but usually you +would specify the cleanup code that your type needs to run rather than a print +message. + + + + +#### Dropping a Value Early with `std::mem::drop` + + + + +Rust inserts the call to `drop` automatically when a value goes out of scope, +and it’s not straightforward to disable this functionality. Disabling `drop` +isn’t usually necessary; the whole point of the `Drop` trait is that it’s taken +care of automatically for us. Occasionally you may find that you want to clean +up a value early. One example is when using smart pointers that manage locks; +you may want to force the `drop` method that releases the lock to run so that +other code in the same scope can acquire the lock. First, let’s see what +happens if we try to call the `Drop` trait’s `drop` method ourselves by +modifying the `main` function from Listing 15-8 as shown in Listing 15-9: + + + Filename: src/main.rs @@ -85,56 +135,101 @@ that it’s possible, and `std::mem::drop` is in the prelude so we can just call fn main() { let c = CustomSmartPointer { data: String::from("some data") }; println!("CustomSmartPointer created."); - drop(c); - println!("Wait for it..."); + c.drop(); + println!("CustomSmartPointer dropped before the end of main."); } ``` -Listing 15-9: Calling `std::mem::drop` to explicitly drop -a value before it goes out of scope +Listing 15-9: Attempting to call the `drop` method from +the `Drop` trait manually to clean up early -Running this code will print the following, showing that the destructor code is -called since `Dropping CustomSmartPointer!` is printed between -`CustomSmartPointer created.` and `Wait for it...`: +If we try to compile this, we’ll get this error: ```text -CustomSmartPointer created. -Dropping CustomSmartPointer! -Wait for it... +error[E0040]: explicit use of destructor method + --> src/main.rs:15:7 + | +15 | c.drop(); + | ^^^^ explicit destructor calls not allowed ``` -Note that we aren’t allowed to call the `drop` method that we defined directly: -if we replaced `drop(c)` in Listing 15-9 with `c.drop()`, we’ll get a compiler -error that says `explicit destructor calls not allowed`. We’re not allowed to -call `Drop::drop` directly because when Rust inserts its call to `Drop::drop` -automatically when the value goes out of scope, then the value would get -dropped twice. Dropping a value twice could cause an error or corrupt memory, -so Rust doesn’t let us. Instead, we can use `std::mem::drop`, whose definition -is: +This error message says we’re not allowed to explicitly call `drop`. The error +message uses the term *destructor*, which is the general programming term for a +function that cleans up an instance. A *destructor* is analogous to a +*constructor* that creates an instance. The `drop` function in Rust is one +particular destructor. 
+ +Rust doesn’t let us call `drop` explicitly because Rust would still +automatically call `drop` on the value at the end of `main`, and this would be +a *double free* error since Rust would be trying to clean up the same value +twice. + +Because we can’t disable the automatic insertion of `drop` when a value goes +out of scope, and we can’t call the `drop` method explicitly, if we need to +force a value to be cleaned up early, we can use the `std::mem::drop` function. + +The `std::mem::drop` function is different than the `drop` method in the `Drop` +trait. We call it by passing the value we want to force to be dropped early as +an argument. `std::mem::drop` is in the prelude, so we can modify `main` from +Listing 15-8 to call the `drop` function as shown in Listing 15-10: + +Filename: src/main.rs ```rust -pub mod std { - pub mod mem { - pub fn drop(x: T) { } - } +# struct CustomSmartPointer { +# data: String, +# } +# +# impl Drop for CustomSmartPointer { +# fn drop(&mut self) { +# println!("Dropping CustomSmartPointer!"); +# } +# } +# +fn main() { + let c = CustomSmartPointer { data: String::from("some data") }; + println!("CustomSmartPointer created."); + drop(c); + println!("CustomSmartPointer dropped before the end of main."); } ``` -This function is generic over any type `T`, so we can pass any value to it. The -function doesn’t actually have anything in its body, so it doesn’t use its -parameter. The reason this empty function is useful is that `drop` takes -ownership of its parameter, which means the value in `x` gets dropped at the -end of this function when `x` goes out of scope. +Listing 15-10: Calling `std::mem::drop` to explicitly +drop a value before it goes out of scope + +Running this code will print the following: -Code specified in a `Drop` trait implementation can be used for many reasons to +```text +CustomSmartPointer created. +Dropping CustomSmartPointer! +CustomSmartPointer dropped before the end of main. +``` + + + + +The `Dropping CustomSmartPointer!` is printed between `CustomSmartPointer +created.` and `CustomSmartPointer dropped before the end of main.`, showing +that the `drop` method code is called to drop `c` at that point. + + + + +Code specified in a `Drop` trait implementation can be used in many ways to make cleanup convenient and safe: we could use it to create our own memory -allocator, for instance! By using the `Drop` trait and Rust’s ownership system, -we don’t have to remember to clean up after ourselves since Rust takes care of -it automatically. We’ll get compiler errors if we write code that would clean -up a value that’s still in use, since the ownership system that makes sure +allocator, for instance! With the `Drop` trait and Rust’s ownership system, you +don’t have to remember to clean up after yourself, Rust takes care of it +automatically. + +We also don’t have to worry about accidentally cleaning up values still in use +because that would cause a compiler error: the ownership system that makes sure references are always valid will also make sure that `drop` only gets called -one time when the value is no longer being used. +once when the value is no longer being used. Now that we’ve gone over `Box` and some of the characteristics of smart pointers, let’s talk about a few other smart pointers defined in the standard -library that add different kinds of useful functionality. +library. 
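Putting the pieces of this section together, here is a consolidated sketch, assuming the same `CustomSmartPointer` type as the listings above: a `Drop` implementation that prints when cleanup runs, plus an early cleanup through the prelude's `drop` function.

```rust
struct CustomSmartPointer {
    data: String,
}

impl Drop for CustomSmartPointer {
    // Rust calls this automatically when a value goes out of scope, in the
    // reverse order of creation; it cannot be invoked directly as `c.drop()`.
    fn drop(&mut self) {
        println!("Dropping CustomSmartPointer with data `{}`!", self.data);
    }
}

fn main() {
    let c = CustomSmartPointer { data: String::from("my stuff") };
    let d = CustomSmartPointer { data: String::from("other stuff") };
    println!("CustomSmartPointers created.");

    // std::mem::drop is in the prelude; it takes ownership of `c`, so `c`
    // is cleaned up right here instead of at the end of main.
    drop(c);
    println!("c dropped before the end of main; d is dropped when main ends.");
}
```

Running this prints the creation message, then the drop message for `c`, then the final line, and only then the drop message for `d` as `main` ends.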
diff --git a/src/doc/book/second-edition/src/ch15-04-rc.md b/src/doc/book/second-edition/src/ch15-04-rc.md index fd32277b34..774cb05d46 100644 --- a/src/doc/book/second-edition/src/ch15-04-rc.md +++ b/src/doc/book/second-edition/src/ch15-04-rc.md @@ -1,50 +1,55 @@ ## `Rc<T>`, the Reference Counted Smart Pointer -In the majority of cases, ownership is very clear: you know exactly which -variable owns a given value. However, this isn’t always the case; sometimes, -you may actually need multiple owners. For this, Rust has a type called -`Rc<T>`. Its name is an abbreviation for *reference counting*. Reference -counting means keeping track of the number of references to a value in order to -know if a value is still in use or not. If there are zero references to a -value, we know we can clean up the value without any references becoming -invalid. - -To think about this in terms of a real-world scenario, it’s like a TV in a -family room. When one person comes in the room to watch TV, they turn it on. -Others can also come in the room and watch the TV. When the last person leaves -the room, they’ll turn the TV off since it’s no longer being used. If someone -turns off the TV while others are still watching it, though, the people -watching the TV would get mad! - -`Rc<T>` is for use when we want to allocate some data on the heap for multiple +In the majority of cases, ownership is clear: you know exactly which variable +owns a given value. However, there are cases when a single value may have +multiple owners. For example, in graph data structures, multiple edges may +point to the same node, and that node is conceptually owned by all of the edges +that point to it. A node shouldn’t be cleaned up unless it doesn’t have any +edges pointing to it. + + + + +In order to enable multiple ownership, Rust has a type called `Rc<T>`. Its name +is an abbreviation for reference counting. *Reference counting* means keeping +track of the number of references to a value in order to know if a value is +still in use or not. If there are zero references to a value, the value can be +cleaned up without any references becoming invalid. + +Imagine it like a TV in a family room. When one person enters to watch TV, they +turn it on. Others can come into the room and watch the TV. When the last +person leaves the room, they turn the TV off because it’s no longer being used. +If someone turns the TV off while others are still watching it, there’d be +uproar from the remaining TV watchers! + +`Rc<T>` is used when we want to allocate some data on the heap for multiple parts of our program to read, and we can’t determine at compile time which part -of our program using this data will finish using it last. If we knew which part -would finish last, we could make that part the owner of the data and the normal -ownership rules enforced at compile time would kick in. +will finish using the data last. If we did know which part would finish last, +we could just make that the owner of the data and the normal ownership rules +enforced at compile time would kick in. -Note that `Rc<T>` is only for use in single-threaded scenarios; the next -chapter on concurrency will cover how to do reference counting in -multithreaded programs. If you try to use `Rc<T>` with multiple threads, -you’ll get a compile-time error. +Note that `Rc<T>` is only for use in single-threaded scenarios; Chapter 16 on +concurrency will cover how to do reference counting in multithreaded programs. ### Using `Rc<T>` to Share Data -Let’s return to our cons list example from Listing 15-5.
In Listing 15-11, we’re -going to try to use `List` as we defined it using `Box`. First we’ll create -one list instance that contains 5 and then 10. Next, we want to create two more -lists: one that starts with 3 and continues on to our first list containing 5 -and 10, then another list that starts with 4 and *also* continues on to our -first list containing 5 and 10. In other words, we want two lists that both -share ownership of the third list, which conceptually will be something like -Figure 15-10: +Let’s return to our cons list example from Listing 15-6, as we defined it using +`Box`. This time, we want to create two lists that both share ownership of a +third list, which conceptually will look something like Figure 15-11: Two lists that share ownership of a third list -Figure 15-10: Two lists, `b` and `c`, sharing ownership +Figure 15-11: Two lists, `b` and `c`, sharing ownership of a third list, `a` +We’ll create list `a` that contains 5 and then 10, then make two more lists: +`b` that starts with 3 and `c` that starts with 4. Both `b` and `c` lists will +then continue on to the first `a` list containing 5 and 10. In other words, +both lists will try to share the first list containing 5 and 10. + Trying to implement this using our definition of `List` with `Box` won’t -work, as shown in Listing 15-11: +work, as shown in Listing 15-12: Filename: src/main.rs @@ -65,8 +70,8 @@ fn main() { } ``` -Listing 15-11: Having two lists using `Box` that try -to share ownership of a third list won’t work +Listing 15-12: Demonstrating we’re not allowed to have +two lists using `Box` that try to share ownership of a third list If we compile this, we get this error: @@ -83,17 +88,32 @@ error[E0382]: use of moved value: `a` implement the `Copy` trait ``` -The `Cons` variants own the data they hold, so when we create the `b` list it -moves `a` to be owned by `b`. Then when we try to use `a` again when creating -`c`, we’re not allowed to since `a` has been moved. +The `Cons` variants own the data they hold, so when we create the `b` list, `a` +is moved into `b` and `b` owns `a`. Then, when we try to use `a` again when +creating `c`, we’re not allowed to because `a` has been moved. We could change the definition of `Cons` to hold references instead, but then -we’d have to specify lifetime parameters and we’d have to construct elements of -a list such that every element lives at least as long as the list itself. -Otherwise, the borrow checker won’t even let us compile the code. - -Instead, we can change our definition of `List` to use `Rc` instead of -`Box` as shown here in Listing 15-12: +we’d have to specify lifetime parameters. By specifying lifetime parameters, +we’d be specifying that every element in the list will live at least as long as +the list itself. The borrow checker wouldn’t let us compile `let a = Cons(10, +&Nil);` for example, since the temporary `Nil` value would be dropped before +`a` could take a reference to it. + +Instead, we’ll change our definition of `List` to use `Rc` in place of +`Box` as shown here in Listing 15-13. Each `Cons` variant now holds a value +and an `Rc` pointing to a `List`. When we create `b`, instead of taking +ownership of `a`, we clone the `Rc` that `a` is holding, which increases the +number of references from 1 to 2 and lets `a` and `b` share ownership of the +data in that `Rc`. We also clone `a` when creating `c`, which increases the +number of references from 2 to 3. 
Every time we call `Rc::clone`, the reference +count to the data within the `Rc` is increased, and the data won’t be cleaned +up unless there are zero references to it: + + + Filename: src/main.rs @@ -108,29 +128,55 @@ use std::rc::Rc; fn main() { let a = Rc::new(Cons(5, Rc::new(Cons(10, Rc::new(Nil))))); - let b = Cons(3, a.clone()); - let c = Cons(4, a.clone()); + let b = Cons(3, Rc::clone(&a)); + let c = Cons(4, Rc::clone(&a)); } ``` -Listing 15-12: A definition of `List` that uses +Listing 15-13: A definition of `List` that uses `Rc` -Note that we need to add a `use` statement for `Rc` because it’s not in the -prelude. In `main`, we create the list holding 5 and 10 and store it in a new -`Rc` in `a`. Then when we create `b` and `c`, we call the `clone` method on `a`. +We need to add a `use` statement to bring `Rc` into scope because it’s not in +the prelude. In `main`, we create the list holding 5 and 10 and store it in a +new `Rc` in `a`. Then when we create `b` and `c`, we call the `Rc::clone` +function and pass a reference to the `Rc` in `a` as an argument. + +We could have called `a.clone()` rather than `Rc::clone(&a)`, but Rust +convention is to use `Rc::clone` in this case. The implementation of `Rc::clone` +doesn’t make a deep copy of all the data like most types’ implementations of +`clone` do. `Rc::clone` only increments the reference count, which doesn’t take +very much time. Deep copies of data can take a lot of time, so by using +`Rc::clone` for reference counting, we can visually distinguish between the +deep copy kinds of clones that might have a large impact on runtime performance +and memory usage and the types of clones that increase the reference count that +have a comparatively small impact on runtime performance and don’t allocate new +memory. ### Cloning an `Rc` Increases the Reference Count -We’ve seen the `clone` method previously, where we used it for making a -complete copy of some data. With `Rc`, though, it doesn’t make a full copy. -`Rc` holds a *reference count*, that is, a count of how many clones exist. -Let’s change `main` as shown in Listing 15-13 to have an inner scope around -where we create `c`, and to print out the results of the `Rc::strong_count` -associated function at various points. `Rc::strong_count` returns the reference -count of the `Rc` value we pass to it, and we’ll talk about why this function -is named `strong_count` in the section later in this chapter about preventing -reference cycles. +Let’s change our working example from Listing 15-13 so that we can see the +reference counts changing as we create and drop references to the `Rc` in `a`. + + + + +In Listing 15-14, we’ll change `main` so that it has an inner scope around list +`c`, so that we can see how the reference count changes when `c` goes out of +scope. At each point in the program where the reference count changes, we’ll +print out the reference count, which we can get by calling the +`Rc::strong_count` function. We’ll talk about why this function is named +`strong_count` rather than `count` in the section later in this chapter about +preventing reference cycles. + + + Filename: src/main.rs @@ -145,43 +191,50 @@ reference cycles. 
# fn main() { let a = Rc::new(Cons(5, Rc::new(Cons(10, Rc::new(Nil))))); - println!("rc = {}", Rc::strong_count(&a)); - let b = Cons(3, a.clone()); - println!("rc after creating b = {}", Rc::strong_count(&a)); + println!("count after creating a = {}", Rc::strong_count(&a)); + let b = Cons(3, Rc::clone(&a)); + println!("count after creating b = {}", Rc::strong_count(&a)); { - let c = Cons(4, a.clone()); - println!("rc after creating c = {}", Rc::strong_count(&a)); + let c = Cons(4, Rc::clone(&a)); + println!("count after creating c = {}", Rc::strong_count(&a)); } - println!("rc after c goes out of scope = {}", Rc::strong_count(&a)); + println!("count after c goes out of scope = {}", Rc::strong_count(&a)); } ``` -Listing 15-13: Printing out the reference count +Listing 15-14: Printing out the reference count This will print out: ```text -rc = 1 -rc after creating b = 2 -rc after creating c = 3 -rc after c goes out of scope = 2 +count after creating a = 1 +count after creating b = 2 +count after creating c = 3 +count after c goes out of scope = 2 ``` -We’re able to see that `a` has an initial reference count of one. Then each -time we call `clone`, the count goes up by one. When `c` goes out of scope, the -count is decreased by one, which happens in the implementation of the `Drop` -trait for `Rc`. What we can’t see in this example is that when `b` and then -`a` go out of scope at the end of `main`, the count of references to the list -containing 5 and 10 is then 0, and the list is dropped. This strategy lets us -have multiple owners, as the count will ensure that the value remains valid as -long as any of the owners still exist. - -In the beginning of this section, we said that `Rc` only allows you to share -data for multiple parts of your program to read through immutable references to -the `T` value the `Rc` contains. If `Rc` let us have a mutable reference, -we’d run into the problem that the borrowing rules disallow that we discussed -in Chapter 4: two mutable borrows to the same place can cause data races and -inconsistencies. But mutating data is very useful! In the next section, we’ll -discuss the interior mutability pattern and the `RefCell` type that we can -use in conjunction with an `Rc` to work with this restriction on -immutability. + + + +We’re able to see that the `Rc` in `a` has an initial reference count of one, +then each time we call `clone`, the count goes up by one. When `c` goes out of +scope, the count goes down by one. We don’t have to call a function to decrease +the reference count like we have to call `Rc::clone` to increase the reference +count; the implementation of the `Drop` trait decreases the reference count +automatically when an `Rc` value goes out of scope. + +What we can’t see from this example is that when `b` and then `a` go out of +scope at the end of `main`, the count is then 0, and the `Rc` is cleaned up +completely at that point. Using `Rc` allows a single value to have multiple +owners, and the count will ensure that the value remains valid as long as any +of the owners still exist. + +`Rc` allows us to share data between multiple parts of our program for +reading only, via immutable references. If `Rc` allowed us to have multiple +mutable references too, we’d be able to violate one of the the borrowing rules +that we discussed in Chapter 4: multiple mutable borrows to the same place can +cause data races and inconsistencies. But being able to mutate data is very +useful! 
In the next section, we’ll discuss the interior mutability pattern and +the `RefCell` type that we can use in conjunction with an `Rc` to work +with this restriction on immutability. diff --git a/src/doc/book/second-edition/src/ch15-05-interior-mutability.md b/src/doc/book/second-edition/src/ch15-05-interior-mutability.md index 4e2b6c2853..6758ae12a1 100644 --- a/src/doc/book/second-edition/src/ch15-05-interior-mutability.md +++ b/src/doc/book/second-edition/src/ch15-05-interior-mutability.md @@ -1,22 +1,36 @@ ## `RefCell` and the Interior Mutability Pattern + + + + + + *Interior mutability* is a design pattern in Rust for allowing you to mutate -data even though there are immutable references to that data, which would -normally be disallowed by the borrowing rules. The interior mutability pattern -involves using `unsafe` code inside a data structure to bend Rust’s usual rules -around mutation and borrowing. We haven’t yet covered unsafe code; we will in -Chapter 19. The interior mutability pattern is used when you can ensure that -the borrowing rules will be followed at runtime, even though the compiler can’t +data even when there are immutable references to that data, normally disallowed +by the borrowing rules. To do so, the pattern uses `unsafe` code inside a data +structure to bend Rust’s usual rules around mutation and borrowing. We haven’t +yet covered unsafe code; we will in Chapter 19. We can choose to use types that +make use of the interior mutability pattern when we can ensure that the +borrowing rules will be followed at runtime, even though the compiler can’t ensure that. The `unsafe` code involved is then wrapped in a safe API, and the outer type is still immutable. Let’s explore this by looking at the `RefCell` type that follows the interior mutability pattern. -### `RefCell` has Interior Mutability +### Enforcing Borrowing Rules at Runtime with `RefCell` Unlike `Rc`, the `RefCell` type represents single ownership over the data -that it holds. So, what makes `RefCell` different than a type like `Box`? +it holds. So, what makes `RefCell` different than a type like `Box`? Let’s recall the borrowing rules we learned in Chapter 4: 1. At any given time, you can have *either* but not both of: @@ -29,163 +43,405 @@ compile time. With `RefCell`, these invariants are enforced *at runtime*. With references, if you break these rules, you’ll get a compiler error. With `RefCell`, if you break these rules, you’ll get a `panic!`. -Static analysis, like the Rust compiler performs, is inherently conservative. -There are properties of code that are impossible to detect by analyzing the -code: the most famous is the Halting Problem, which is out of scope of this -book but an interesting topic to research if you’re interested. - -Because some analysis is impossible, the Rust compiler does not try to even -guess if it can’t be sure, so it’s conservative and sometimes rejects correct -programs that would not actually violate Rust’s guarantees. Put another way, if -Rust accepts an incorrect program, people would not be able to trust in the -guarantees Rust makes. If Rust rejects a correct program, the programmer will -be inconvenienced, but nothing catastrophic can occur. `RefCell` is useful -when you know that the borrowing rules are respected, but the compiler can’t -understand that that’s true. - -Similarly to `Rc`, `RefCell` is only for use in single-threaded -scenarios. We’ll talk about how to get the functionality of `RefCell` in a -multithreaded program in the next chapter on concurrency. 
For now, all you -need to know is that if you try to use `RefCell` in a multithreaded -context, you’ll get a compile time error. - -With references, we use the `&` and `&mut` syntax to create references and -mutable references, respectively. But with `RefCell`, we use the `borrow` -and `borrow_mut` methods, which are part of the safe API that `RefCell` has. -`borrow` returns the smart pointer type `Ref`, and `borrow_mut` returns the -smart pointer type `RefMut`. These two types implement `Deref` so that we can -treat them as if they’re regular references. `Ref` and `RefMut` track the -borrows dynamically, and their implementation of `Drop` releases the borrow -dynamically. - -Listing 15-14 shows what it looks like to use `RefCell` with functions that -borrow their parameters immutably and mutably. Note that the `data` variable is -declared as immutable with `let data` rather than `let mut data`, yet -`a_fn_that_mutably_borrows` is allowed to borrow the data mutably and make -changes to the data! + + + +The advantages to checking the borrowing rules at compile time are that errors +will be caught sooner in the development process and there is no impact on +runtime performance since all the analysis is completed beforehand. For those +reasons, checking the borrowing rules at compile time is the best choice for +the majority of cases, which is why this is Rust’s default. + +The advantage to checking the borrowing rules at runtime instead is that +certain memory safe scenarios are then allowed, whereas they are disallowed by +the compile time checks. Static analysis, like the Rust compiler, is inherently +conservative. Some properties of code are impossible to detect by analyzing the +code: the most famous example is the Halting Problem, which is out of scope of +this book but an interesting topic to research if you’re interested. + + + + +Because some analysis is impossible, if the Rust compiler can’t be sure the +code complies with the ownership rules, it may reject a correct program; in +this way, it is conservative. If Rust were to accept an incorrect program, +users would not be able to trust in the guarantees Rust makes. However, if Rust +rejects a correct program, the programmer will be inconvenienced, but nothing +catastrophic can occur. `RefCell` is useful when you yourself are sure that +your code follows the borrowing rules, but the compiler is not able to +understand and guarantee that. + +Similarly to `Rc`, `RefCell` is only for use in single-threaded scenarios +and will give you a compile time error if you try in a multithreaded context. +We’ll talk about how to get the functionality of `RefCell` in a +multithreaded program in Chapter 16. + + + + +To recap the reasons to choose `Box`, `Rc`, or `RefCell`: + +- `Rc` enables multiple owners of the same data; `Box` and `RefCell` + have single owners. +- `Box` allows immutable or mutable borrows checked at compile time; `Rc` + only allows immutable borrows checked at compile time; `RefCell` allows + immutable or mutable borrows checked at runtime. +- Because `RefCell` allows mutable borrows checked at runtime, we can mutate + the value inside the `RefCell` even when the `RefCell` is itself + immutable. + +The last reason is the *interior mutability* pattern. Let’s look at a case when +interior mutability is useful and discuss how this is possible. + +### Interior Mutability: A Mutable Borrow to an Immutable Value + +A consequence of the borrowing rules is that when we have an immutable value, +we can’t borrow it mutably. 
For example, this code won’t compile: -Filename: src/main.rs +```rust,ignore +fn main() { + let x = 5; + let y = &mut x; +} +``` -```rust -use std::cell::RefCell; +If we try to compile this, we’ll get this error: -fn a_fn_that_immutably_borrows(a: &i32) { - println!("a is {}", a); -} +```text +error[E0596]: cannot borrow immutable local variable `x` as mutable + --> src/main.rs:3:18 + | +2 | let x = 5; + | - consider changing this to `mut x` +3 | let y = &mut x; + | ^ cannot borrow mutably +``` + +However, there are situations where it would be useful for a value to be able +to mutate itself in its methods, but to other code, the value would appear to +be immutable. Code outside the value’s methods would not be able to mutate the +value. `RefCell` is one way to get the ability to have interior mutability. +`RefCell` isn’t getting around the borrowing rules completely, but the +borrow checker in the compiler allows this interior mutability and the +borrowing rules are checked at runtime instead. If we violate the rules, we’ll +get a `panic!` instead of a compiler error. + +Let’s work through a practical example where we can use `RefCell` to make it +possible to mutate an immutable value and see why that’s useful. + +#### A Use Case for Interior Mutability: Mock Objects + +A *test double* is the general programming concept for a type that stands in +the place of another type during testing. *Mock objects* are specific types of +test doubles that record what happens during a test so that we can assert that +the correct actions took place. + +While Rust doesn’t have objects in the exact same sense that other languages +have objects, and Rust doesn’t have mock object functionality built into the +standard library like some other languages do, we can definitely create a +struct that will serve the same purposes as a mock object. + +Here’s the scenario we’d like to test: we’re creating a library that tracks a +value against a maximum value, and sends messages based on how close to the +maximum value the current value is. This could be used for keeping track of a +user’s quota for the number of API calls they’re allowed to make, for example. + +Our library is only going to provide the functionality of tracking how close to +the maximum a value is, and what the messages should be at what times. +Applications that use our library will be expected to provide the actual +mechanism for sending the messages: the application could choose to put a +message in the application, send an email, send a text message, or something +else. Our library doesn’t need to know about that detail; all it needs is +something that implements a trait we’ll provide called `Messenger`. 
Listing +15-15 shows our library code: + +Filename: src/lib.rs -fn a_fn_that_mutably_borrows(b: &mut i32) { - *b += 1; +```rust +pub trait Messenger { + fn send(&self, msg: &str); } -fn demo(r: &RefCell) { - a_fn_that_immutably_borrows(&r.borrow()); - a_fn_that_mutably_borrows(&mut r.borrow_mut()); - a_fn_that_immutably_borrows(&r.borrow()); +pub struct LimitTracker<'a, T: 'a + Messenger> { + messenger: &'a T, + value: usize, + max: usize, } -fn main() { - let data = RefCell::new(5); - demo(&data); +impl<'a, T> LimitTracker<'a, T> + where T: Messenger { + pub fn new(messenger: &T, max: usize) -> LimitTracker { + LimitTracker { + messenger, + value: 0, + max, + } + } + + pub fn set_value(&mut self, value: usize) { + self.value = value; + + let percentage_of_max = self.value as f64 / self.max as f64; + + if percentage_of_max >= 0.75 && percentage_of_max < 0.9 { + self.messenger.send("Warning: You've used up over 75% of your quota!"); + } else if percentage_of_max >= 0.9 && percentage_of_max < 1.0 { + self.messenger.send("Urgent warning: You've used up over 90% of your quota!"); + } else if percentage_of_max >= 1.0 { + self.messenger.send("Error: You are over your quota!"); + } + } } ``` -Listing 15-14: Using `RefCell`, `borrow`, and -`borrow_mut` +Listing 15-15: A library to keep track of how close to a +maximum value a value is, and warn when the value is at certain levels + +One important part of this code is that the `Messenger` trait has one method, +`send`, that takes an immutable reference to `self` and text of the message. +This is the interface our mock object will need to have. The other important +part is that we want to test the behavior of the `set_value` method on the +`LimitTracker`. We can change what we pass in for the `value` parameter, but +`set_value` doesn’t return anything for us to make assertions on. What we want +to be able to say is that if we create a `LimitTracker` with something that +implements the `Messenger` trait and a particular value for `max`, when we pass +different numbers for `value`, the messenger gets told to send the appropriate +messages. + +What we need is a mock object that, instead of actually sending an email or +text message when we call `send`, will only keep track of the messages it’s +told to send. We can create a new instance of the mock object, create a +`LimitTracker` that uses the mock object, call the `set_value` method on +`LimitTracker`, then check that the mock object has the messages we expect. +Listing 15-16 shows an attempt of implementing a mock object to do just that, +but that the borrow checker won’t allow: + +Filename: src/lib.rs + +```rust +#[cfg(test)] +mod tests { + use super::*; + + struct MockMessenger { + sent_messages: Vec, + } + + impl MockMessenger { + fn new() -> MockMessenger { + MockMessenger { sent_messages: vec![] } + } + } + + impl Messenger for MockMessenger { + fn send(&self, message: &str) { + self.sent_messages.push(String::from(message)); + } + } + + #[test] + fn it_sends_an_over_75_percent_warning_message() { + let mock_messenger = MockMessenger::new(); + let mut limit_tracker = LimitTracker::new(&mock_messenger, 100); + + limit_tracker.set_value(80); + + assert_eq!(mock_messenger.sent_messages.len(), 1); + } +} +``` -This example prints: +Listing 15-16: An attempt to implement a `MockMessenger` +that isn’t allowed by the borrow checker + +This test code defines a `MockMessenger` struct that has a `sent_messages` +field with a `Vec` of `String` values to keep track of the messages it’s told +to send. 
We also defined an associated function `new` to make it convenient to +create new `MockMessenger` values that start with an empty list of messages. We +then implement the `Messenger` trait for `MockMessenger` so that we can give a +`MockMessenger` to a `LimitTracker`. In the definition of the `send` method, we +take the message passed in as a parameter and store it in the `MockMessenger` +list of `sent_messages`. + +In the test, we’re testing what happens when the `LimitTracker` is told to set +`value` to something that’s over 75% of the `max` value. First, we create a new +`MockMessenger`, which will start with an empty list of messages. Then we +create a new `LimitTracker` and give it a reference to the new `MockMessenger` +and a `max` value of 100. We call the `set_value` method on the `LimitTracker` +with a value of 80, which is more than 75% of 100. Then we assert that the list +of messages that the `MockMessenger` is keeping track of should now have one +message in it. + +There’s one problem with this test, however: ```text -a is 5 -a is 6 +error[E0596]: cannot borrow immutable field `self.sent_messages` as mutable + --> src/lib.rs:46:13 + | +45 | fn send(&self, message: &str) { + | ----- use `&mut self` here to make mutable +46 | self.sent_messages.push(String::from(message)); + | ^^^^^^^^^^^^^^^^^^ cannot mutably borrow immutable field ``` -In `main`, we’ve created a new `RefCell` containing the value 5, and stored -in the variable `data`, declared without the `mut` keyword. We then call the -`demo` function with an immutable reference to `data`: as far as `main` is -concerned, `data` is immutable! +We can’t modify the `MockMessenger` to keep track of the messages because the +`send` method takes an immutable reference to `self`. We also can’t take the +suggestion from the error text to use `&mut self` instead because then the +signature of `send` wouldn’t match the signature in the `Messenger` trait +definition (feel free to try and see what error message you get). -In the `demo` function, we get an immutable reference to the value inside the -`RefCell` by calling the `borrow` method, and we call -`a_fn_that_immutably_borrows` with that immutable reference. More -interestingly, we can get a *mutable* reference to the value inside the -`RefCell` with the `borrow_mut` method, and the function -`a_fn_that_mutably_borrows` is allowed to change the value. We can see that the -next time we call `a_fn_that_immutably_borrows` that prints out the value, it’s -6 instead of 5. +This is where interior mutability can help! We’re going to store the +`sent_messages` within a `RefCell`, and then the `send` message will be able to +modify `sent_messages` to store the messages we’ve seen. Listing 15-17 shows +what that looks like: -### Borrowing Rules are Checked at Runtime on `RefCell` +Filename: src/lib.rs -Recall from Chapter 4 that because of the borrowing rules, this code using -regular references that tries to create two mutable borrows in the same scope -won’t compile: +```rust +#[cfg(test)] +mod tests { + use super::*; + use std::cell::RefCell; + + struct MockMessenger { + sent_messages: RefCell>, + } + + impl MockMessenger { + fn new() -> MockMessenger { + MockMessenger { sent_messages: RefCell::new(vec![]) } + } + } + + impl Messenger for MockMessenger { + fn send(&self, message: &str) { + self.sent_messages.borrow_mut().push(String::from(message)); + } + } + + #[test] + fn it_sends_an_over_75_percent_warning_message() { + // ...snip... 
+# let mock_messenger = MockMessenger::new(); +# let mut limit_tracker = LimitTracker::new(&mock_messenger, 100); +# limit_tracker.set_value(75); + + assert_eq!(mock_messenger.sent_messages.borrow().len(), 1); + } +} +``` -```rust,ignore -let mut s = String::from("hello"); +Listing 15-17: Using `RefCell` to be able to mutate an +inner value while the outer value is considered immutable -let r1 = &mut s; -let r2 = &mut s; -``` +The `sent_messages` field is now of type `RefCell>` instead of +`Vec`. In the `new` function, we create a new `RefCell` instance around +the empty vector. -We’ll get this compiler error: +For the implementation of the `send` method, the first parameter is still an +immutable borrow of `self`, which matches the trait definition. We call +`borrow_mut` on the `RefCell` in `self.sent_messages` to get a mutable +reference to the value inside the `RefCell`, which is the vector. Then we can +call `push` on the mutable reference to the vector in order to keep track of +the messages seen during the test. -```text -error[E0499]: cannot borrow `s` as mutable more than once at a time - --> - | -5 | let r1 = &mut s; - | - first mutable borrow occurs here -6 | let r2 = &mut s; - | ^ second mutable borrow occurs here -7 | } - | - first borrow ends here -``` +The last change we have to make is in the assertion: in order to see how many +items are in the inner vector, we call `borrow` on the `RefCell` to get an +immutable reference to the vector. -In contrast, using `RefCell` and calling `borrow_mut` twice in the same -scope *will* compile, but it’ll panic at runtime instead. This code: +Now that we’ve seen how to use `RefCell`, let’s dig into how it works! -```rust,should_panic -use std::cell::RefCell; +#### `RefCell` Keeps Track of Borrows at Runtime -fn main() { - let s = RefCell::new(String::from("hello")); +When creating immutable and mutable references we use the `&` and `&mut` +syntax, respectively. With `RefCell`, we use the `borrow` and `borrow_mut` +methods, which are part of the safe API that belongs to `RefCell`. The +`borrow` method returns the smart pointer type `Ref`, and `borrow_mut` returns +the smart pointer type `RefMut`. Both types implement `Deref` so we can treat +them like regular references. + + + + +The `RefCell` keeps track of how many `Ref` and `RefMut` smart pointers are +currently active. Every time we call `borrow`, the `RefCell` increases its +count of how many immutable borrows are active. When a `Ref` value goes out of +scope, the count of immutable borrows goes down by one. Just like the compile +time borrowing rules, `RefCell` lets us have many immutable borrows or one +mutable borrow at any point in time. + +If we try to violate these rules, rather than getting a compiler error like we +would with references, the implementation of `RefCell` will `panic!` at +runtime. 
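As a quick standalone illustration of these runtime rules (our own sketch, not one of the chapter's listings), the following code compiles and runs; the comment in the second block marks the point where taking another borrow would panic:

```rust
use std::cell::RefCell;

fn main() {
    let cell = RefCell::new(5);

    {
        // Any number of immutable borrows may be active at the same time;
        // each `Ref` that `borrow` returns bumps the immutable-borrow count.
        let first = cell.borrow();
        let second = cell.borrow();
        println!("both borrows see {} and {}", *first, *second);
    } // both `Ref` values go out of scope here, so the count drops back to 0

    {
        // Only one mutable borrow may be active at any point in time.
        let mut exclusive = cell.borrow_mut();
        *exclusive += 1;
        // Calling `cell.borrow()` or `cell.borrow_mut()` while `exclusive`
        // is still alive would panic at runtime.
    }

    println!("value is now {}", cell.into_inner());
}
```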
Listing 15-18 shows a modification to the implementation of `send` +from Listing 15-17 where we’re deliberately trying to create two mutable +borrows active for the same scope in order to illustrate that `RefCell` +prevents us from doing this at runtime: + +Filename: src/lib.rs - let r1 = s.borrow_mut(); - let r2 = s.borrow_mut(); +```rust,ignore +impl Messenger for MockMessenger { + fn send(&self, message: &str) { + let mut one_borrow = self.sent_messages.borrow_mut(); + let mut two_borrow = self.sent_messages.borrow_mut(); + + one_borrow.push(String::from(message)); + two_borrow.push(String::from(message)); + } } ``` -compiles but panics with the following error when we `cargo run`: +Listing 15-18: Creating two mutable references in the +same scope to see that `RefCell` will panic + +We create a variable `one_borrow` for the `RefMut` smart pointer returned from +`borrow_mut`. Then we create another mutable borrow in the same way in the +variable `two_borrow`. This makes two mutable references in the same scope, +which isn’t allowed. If we run the tests for our library, this code will +compile without any errors, but the test will fail: ```text - Finished dev [unoptimized + debuginfo] target(s) in 0.83 secs - Running `target/debug/refcell` -thread 'main' panicked at 'already borrowed: BorrowMutError', -/stable-dist-rustc/build/src/libcore/result.rs:868 +---- tests::it_sends_an_over_75_percent_warning_message stdout ---- + thread 'tests::it_sends_an_over_75_percent_warning_message' panicked at + 'already borrowed: BorrowMutError', src/libcore/result.rs:906:4 note: Run with `RUST_BACKTRACE=1` for a backtrace. ``` -This runtime `BorrowMutError` is similar to the compiler error: it says we’ve -already borrowed `s` mutably once, so we’re not allowed to borrow it again. We -aren’t getting around the borrowing rules, we’re just choosing to have Rust -enforce them at runtime instead of compile time. You could choose to use -`RefCell` everywhere all the time, but in addition to having to type -`RefCell` a lot, you’d find out about possible problems later (possibly in -production rather than during development). Also, checking the borrowing rules -while your program is running has a performance penalty. - -### Multiple Owners of Mutable Data by Combining `Rc` and `RefCell` - -So why would we choose to make the tradeoffs that using `RefCell` involves? -Well, remember when we said that `Rc` only lets you have an immutable -reference to `T`? Given that `RefCell` is immutable, but has interior -mutability, we can combine `Rc` and `RefCell` to get a type that’s both -reference counted and mutable. Listing 15-15 shows an example of how to do -that, again going back to our cons list from Listing 15-5. In this example, -instead of storing `i32` values in the cons list, we’ll be storing -`Rc>` values. We want to store that type so that we can have an -owner of the value that’s not part of the list (the multiple owners -functionality that `Rc` provides), and so we can mutate the inner `i32` -value (the interior mutability functionality that `RefCell` provides): +We can see that the code panicked with the message `already borrowed: +BorrowMutError`. This is how `RefCell` handles violations of the borrowing +rules at runtime. + +Catching borrowing errors at runtime rather than compile time means that we’d +find out that we made a mistake in our code later in the development process-- +and possibly not even until our code was deployed to production. 
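As an aside (our sketch, not part of the listing), the `BorrowMutError` in that panic message is the same error type returned by `RefCell`'s non-panicking `try_borrow_mut` method, so code that wants to detect a conflicting borrow rather than crash can check for it explicitly:

```rust
use std::cell::RefCell;

fn main() {
    let cell = RefCell::new(vec![String::from("first")]);

    let held = cell.borrow_mut();
    // A second mutable borrow is still against the rules, but `try_borrow_mut`
    // reports the conflict as an `Err` instead of panicking.
    assert!(cell.try_borrow_mut().is_err());
    drop(held);

    // Once the first `RefMut` has been dropped, mutable borrowing works again.
    cell.borrow_mut().push(String::from("second"));
    assert_eq!(cell.borrow().len(), 2);
}
```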
There’s also a +small runtime performance penalty our code will incur as a result of keeping +track of the borrows at runtime rather than compile time. However, using +`RefCell` made it possible for us to write a mock object that can modify itself +to keep track of the messages it has seen while we’re using it in a context +where only immutable values are allowed. We can choose to use `RefCell` +despite its tradeoffs to get more abilities than regular references give us. + +### Having Multiple Owners of Mutable Data by Combining `Rc` and `RefCell` + +A common way to use `RefCell` is in combination with `Rc`. Recall that +`Rc` lets us have multiple owners of some data, but it only gives us +immutable access to that data. If we have an `Rc` that holds a `RefCell`, +then we can get a value that can have multiple owners *and* that we can mutate! + + + + +For example, recall the cons list example from Listing 15-13 where we used +`Rc` to let us have multiple lists share ownership of another list. Because +`Rc` only holds immutable values, we aren’t able to change any of the values +in the list once we’ve created them. Let’s add in `RefCell` to get the +ability to change the values in the lists. Listing 15-19 shows that by using a +`RefCell` in the `Cons` definition, we’re allowed to modify the value stored +in all the lists: Filename: src/main.rs @@ -203,54 +459,62 @@ use std::cell::RefCell; fn main() { let value = Rc::new(RefCell::new(5)); - let a = Cons(value.clone(), Rc::new(Nil)); - let shared_list = Rc::new(a); + let a = Rc::new(Cons(Rc::clone(&value), Rc::new(Nil))); - let b = Cons(Rc::new(RefCell::new(6)), shared_list.clone()); - let c = Cons(Rc::new(RefCell::new(10)), shared_list.clone()); + let b = Cons(Rc::new(RefCell::new(6)), Rc::clone(&a)); + let c = Cons(Rc::new(RefCell::new(10)), Rc::clone(&a)); *value.borrow_mut() += 10; - println!("shared_list after = {:?}", shared_list); + println!("a after = {:?}", a); println!("b after = {:?}", b); println!("c after = {:?}", c); } ``` -Listing 15-15: Using `Rc>` to create a +Listing 15-19: Using `Rc>` to create a `List` that we can mutate -We’re creating a value, which is an instance of `Rc>`. We’re -storing it in a variable named `value` because we want to be able to access it -directly later. Then we create a `List` in `a` that has a `Cons` variant that -holds `value`, and `value` needs to be cloned since we want `value` to also -have ownership in addition to `a`. Then we wrap `a` in an `Rc` so that we -can create lists `b` and `c` that start differently but both refer to `a`, -similarly to what we did in Listing 15-12. +We create a value that’s an instance of `Rc` and store it in a +variable named `value` so we can access it directly later. Then we create a +`List` in `a` with a `Cons` variant that holds `value`. We need to clone +`value` so that both `a` and `value` have ownership of the inner `5` value, +rather than transferring ownership from `value` to `a` or having `a` borrow +from `value`. + + + + +We wrap the list `a` in an `Rc` so that when we create lists `b` and +`c`, they can both refer to `a`, the same as we did in Listing 15-13. -Once we have the lists in `shared_list`, `b`, and `c` created, then we add 10 -to the 5 in `value` by dereferencing the `Rc` and calling `borrow_mut` on -the `RefCell`. +Once we have the lists in `a`, `b`, and `c` created, we add 10 to the value in +`value`. 
We do this by calling `borrow_mut` on `value`, which uses the +automatic dereferencing feature we discussed in Chapter 5 (“Where’s the `->` +Operator?”) to dereference the `Rc` to the inner `RefCell` value. The +`borrow_mut` method returns a `RefMut` smart pointer, and we use the +dereference operator on it and change the inner value. -When we print out `shared_list`, `b`, and `c`, we can see that they all have -the modified value of 15: +When we print out `a`, `b`, and `c`, we can see that they all have the modified +value of 15 rather than 5: ```text -shared_list after = Cons(RefCell { value: 15 }, Nil) +a after = Cons(RefCell { value: 15 }, Nil) b after = Cons(RefCell { value: 6 }, Cons(RefCell { value: 15 }, Nil)) c after = Cons(RefCell { value: 10 }, Cons(RefCell { value: 15 }, Nil)) ``` -This is pretty neat! By using `RefCell`, we can have an outwardly immutable +This is pretty neat! By using `RefCell`, we have an outwardly immutable `List`, but we can use the methods on `RefCell` that provide access to its -interior mutability to be able to modify our data when we need to. The runtime -checks of the borrowing rules that `RefCell` does protect us from data -races, and we’ve decided that we want to trade a bit of speed for the -flexibility in our data structures. - -`RefCell` is not the only standard library type that provides interior -mutability. `Cell` is similar but instead of giving references to the inner -value like `RefCell` does, the value is copied in and out of the `Cell`. -`Mutex` offers interior mutability that is safe to use across threads, and -we’ll be discussing its use in the next chapter on concurrency. Check out the -standard library docs for more details on the differences between these types. +interior mutability so we can modify our data when we need to. The runtime +checks of the borrowing rules protect us from data races, and it’s sometimes +worth trading a bit of speed for this flexibility in our data structures. + +The standard library has other types that provide interior mutability, too, +like `Cell`, which is similar except that instead of giving references to +the inner value, the value is copied in and out of the `Cell`. There’s also +`Mutex`, which offers interior mutability that’s safe to use across threads, +and we’ll be discussing its use in the next chapter on concurrency. Check out +the standard library docs for more details on the differences between these +types. diff --git a/src/doc/book/second-edition/src/ch15-06-reference-cycles.md b/src/doc/book/second-edition/src/ch15-06-reference-cycles.md index df52d48c94..de183aef0d 100644 --- a/src/doc/book/second-edition/src/ch15-06-reference-cycles.md +++ b/src/doc/book/second-edition/src/ch15-06-reference-cycles.md @@ -1,29 +1,27 @@ -## Creating Reference Cycles and Leaking Memory is Safe - -Rust makes a number of guarantees that we’ve talked about, for example that -we’ll never have a null value, and data races will be disallowed at compile -time. Rust’s memory safety guarantees make it more difficult to create memory -that never gets cleaned up, which is known as a *memory leak*. Rust does not -make memory leaks *impossible*, however: preventing memory leaks is *not* one -of Rust’s guarantees. In other words, memory leaks are memory safe. - -By using `Rc` and `RefCell`, it is possible to create cycles of -references where items refer to each other in a cycle. This is bad because the -reference count of each item in the cycle will never reach 0, and the values -will never be dropped. 
Let’s take a look at how that might happen and how to -prevent it. - -In Listing 15-16, we’re going to use another variation of the `List` definition -from Listing 15-5. We’re going back to storing an `i32` value as the first -element in the `Cons` variant. The second element in the `Cons` variant is now -`RefCell>`: instead of being able to modify the `i32` value this time, -we want to be able to modify which `List` a `Cons` variant is pointing to. -We’ve also added a `tail` method to make it convenient for us to access the -second item, if we have a `Cons` variant: +## Reference Cycles Can Leak Memory + +Rust’s memory safety guarantees make it *difficult* to accidentally create +memory that’s never cleaned up, known as a *memory leak*, but not impossible. +Entirely preventing memory leaks is not one of Rust’s guarantees in the same +way that disallowing data races at compile time is, meaning memory leaks are +memory safe in Rust. We can see this with `Rc` and `RefCell`: it’s +possible to create references where items refer to each other in a cycle. This +creates memory leaks because the reference count of each item in the cycle will +never reach 0, and the values will never be dropped. + +### Creating a Reference Cycle + +Let’s take a look at how a reference cycle might happen and how to prevent it, +starting with the definition of the `List` enum and a `tail` method in Listing +15-20: Filename: src/main.rs ```rust,ignore +use std::rc::Rc; +use std::cell::RefCell; +use List::{Cons, Nil}; + #[derive(Debug)] enum List { Cons(i32, RefCell>), @@ -40,18 +38,42 @@ impl List { } ``` -Listing 15-16: A cons list definition that holds a +Listing 15-20: A cons list definition that holds a `RefCell` so that we can modify what a `Cons` variant is referring to -Next, in Listing 15-17, we’re going to create a `List` value in the variable -`a` that initially is a list of `5, Nil`. Then we’ll create a `List` value in -the variable `b` that is a list of the value 10 and then points to the list in -`a`. Finally, we’ll modify `a` so that it points to `b` instead of `Nil`, which -will then create a cycle: +We’re using another variation of the `List` definition from Listing 15-6. The +second element in the `Cons` variant is now `RefCell>`, meaning that +instead of having the ability to modify the `i32` value like we did in Listing +15-19, we want to be able to modify which `List` a `Cons` variant is pointing +to. We’ve also added a `tail` method to make it convenient for us to access the +second item, if we have a `Cons` variant. + + + + +In listing 15-21, we’re adding a `main` function that uses the definitions from +Listing 15-20. This code creates a list in `a`, a list in `b` that points to +the list in `a`, and then modifies the list in `a` to point to `b`, which +creates a reference cycle. There are `println!` statements along the way to +show what the reference counts are at various points in this process. 
+ + + Filename: src/main.rs ```rust +# use List::{Cons, Nil}; +# use std::rc::Rc; +# use std::cell::RefCell; # #[derive(Debug)] # enum List { # Cons(i32, RefCell>), @@ -67,25 +89,20 @@ will then create a cycle: # } # } # -use List::{Cons, Nil}; -use std::rc::Rc; -use std::cell::RefCell; - fn main() { - let a = Rc::new(Cons(5, RefCell::new(Rc::new(Nil)))); println!("a initial rc count = {}", Rc::strong_count(&a)); println!("a next item = {:?}", a.tail()); - let b = Rc::new(Cons(10, RefCell::new(a.clone()))); + let b = Rc::new(Cons(10, RefCell::new(Rc::clone(&a)))); println!("a rc count after b creation = {}", Rc::strong_count(&a)); println!("b initial rc count = {}", Rc::strong_count(&b)); println!("b next item = {:?}", b.tail()); if let Some(ref link) = a.tail() { - *link.borrow_mut() = b.clone(); + *link.borrow_mut() = Rc::clone(&b); } println!("b rc count after changing a = {}", Rc::strong_count(&b)); @@ -97,73 +114,154 @@ fn main() { } ``` -Listing 15-17: Creating a reference cycle of two `List` +Listing 15-21: Creating a reference cycle of two `List` values pointing to each other -We use the `tail` method to get a reference to the `RefCell` in `a`, which we -put in the variable `link`. Then we use the `borrow_mut` method on the -`RefCell` to change the value inside from an `Rc` that holds a `Nil` value to -the `Rc` in `b`. We’ve created a reference cycle that looks like Figure 15-18: +We create an `Rc` instance holding a `List` value in the variable `a` with an +initial list of `5, Nil`. We then create an `Rc` instance holding another +`List` value in the variable `b` that contains the value 10, then points to the +list in `a`. -Reference cycle of lists +Finally, we modify `a` so that it points to `b` instead of `Nil`, which creates +a cycle. We do that by using the `tail` method to get a reference to the +`RefCell` in `a`, which we put in the variable `link`. Then we use the +`borrow_mut` method on the `RefCell` to change the value inside from an `Rc` +that holds a `Nil` value to the `Rc` in `b`. -Figure 15-18: A reference cycle of lists `a` and `b` +If we run this code, keeping the last `println!` commented out for the moment, +we’ll get this output: + +```text +a initial rc count = 1 +a next item = Some(RefCell { value: Nil }) +a rc count after b creation = 2 +b initial rc count = 1 +b next item = Some(RefCell { value: Cons(5, RefCell { value: Nil }) }) +b rc count after changing a = 2 +a rc count after changing a = 2 +``` + +We can see that the reference count of the `Rc` instances in both `a` and `b` +are 2 after we change the list in `a` to point to `b`. At the end of `main`, +Rust will try and drop `b` first, which will decrease the count in each of the +`Rc` instances in `a` and `b` by one. + + + + + + + +However, because `a` is still referencing the `Rc` that was in `b`, that `Rc` +has a count of 1 rather than 0, so the memory the `Rc` has on the heap won’t be +dropped. The memory will just sit there with a count of one, forever. + +To visualize this, we’ve created a reference cycle that looks like Figure 15-22: + +Reference cycle of lists + +Figure 15-22: A reference cycle of lists `a` and `b` pointing to each other -If you uncomment the last `println!`, Rust will try and print this cycle out -with `a` pointing to `b` pointing to `a` and so forth until it overflows the -stack. - -Looking at the results of the `println!` calls before the last one, we’ll see -that the reference count of both `a` and `b` are 2 after we change `a` to point -to `b`. 
At the end of `main`, Rust will try and drop `b` first, which will -decrease the count of the `Rc` by one. However, because `a` is still -referencing that `Rc`, its count is 1 rather than 0, so the memory the `Rc` has -on the heap won’t be dropped. It’ll just sit there with a count of one, -forever. In this specific case, the program ends right away, so it’s not a -problem, but in a more complex program that allocates lots of memory in a cycle -and holds onto it for a long time, this would be a problem. The program would -be using more memory than it needs to be, and might overwhelm the system and -cause it to run out of memory available to use. - -Now, as you can see, creating reference cycles is difficult and inconvenient in -Rust. But it’s not impossible: preventing memory leaks in the form of reference -cycles is not one of the guarantees Rust makes. If you have `RefCell` values -that contain `Rc` values or similar nested combinations of types with -interior mutability and reference counting, be aware that you’ll have to ensure -that you don’t create cycles. In the example in Listing 15-14, the solution -would probably be to not write code that could create cycles like this, since -we do want `Cons` variants to own the list they point to. - -With data structures like graphs, it’s sometimes necessary to have references -that create cycles in order to have parent nodes point to their children and -children nodes point back in the opposite direction to their parents, for -example. If one of the directions is expressing ownership and the other isn’t, -one way of being able to model the relationship of the data without creating -reference cycles and memory leaks is using `Weak`. Let’s explore that next! - -### Prevent Reference Cycles: Turn an `Rc` into a `Weak` - -The Rust standard library provides `Weak`, a smart pointer type for use in -situations that have cycles of references but only one direction expresses -ownership. We’ve been showing how cloning an `Rc` increases the -`strong_count` of references; `Weak` is a way to reference an `Rc` that -does not increment the `strong_count`: instead it increments the `weak_count` -of references to an `Rc`. When an `Rc` goes out of scope, the inner value will -get dropped if the `strong_count` is 0, even if the `weak_count` is not 0. To -be able to get the value from a `Weak`, we first have to upgrade it to an -`Option>` by using the `upgrade` method. The result of upgrading a -`Weak` will be `Some` if the `Rc` value has not been dropped yet, and `None` -if the `Rc` value has been dropped. Because `upgrade` returns an `Option`, we -know Rust will make sure we handle both the `Some` case and the `None` case and -we won’t be trying to use an invalid pointer. - -Instead of the list in Listing 15-17 where each item knows only about the -next item, let’s say we want a tree where the items know about their children -items *and* their parent items. - -Let’s start just with a struct named `Node` that holds its own `i32` value as -well as references to its children `Node` values: +If you uncomment the last `println!` and run the program, Rust will try and +print this cycle out with `a` pointing to `b` pointing to `a` and so forth +until it overflows the stack. + + + + +In this specific case, right after we create the reference cycle, the program +ends. The consequences of this cycle aren’t so dire. 
If a more complex program
+allocates lots of memory in a cycle and holds onto it for a long time, the
+program would be using more memory than it needs, and might overwhelm the
+system and cause it to run out of available memory.
+
+Creating reference cycles is not easily done, but it’s not impossible either.
+If you have `RefCell<T>` values that contain `Rc<T>` values or similar nested
+combinations of types with interior mutability and reference counting, be aware
+that you have to ensure you don’t create cycles yourself; you can’t rely on
+Rust to catch them. Creating a reference cycle would be a logic bug in your
+program that you should use automated tests, code reviews, and other software
+development practices to minimize.
+
+Another solution is reorganizing your data structures so that some references
+express ownership and some references don’t. In this way, we can have cycles
+made up of some ownership relationships and some non-ownership relationships,
+and only the ownership relationships affect whether a value may be dropped or
+not. In Listing 15-20, we always want `Cons` variants to own their list, so
+reorganizing the data structure isn’t possible. Let’s look at an example using
+graphs made up of parent nodes and child nodes to see when non-ownership
+relationships are an appropriate way to prevent reference cycles.
+
+### Preventing Reference Cycles: Turn an `Rc<T>` into a `Weak<T>`
+
+So far, we’ve shown how calling `Rc::clone` increases the `strong_count` of an
+`Rc<T>` instance, and that an `Rc<T>` instance is only cleaned up if its
+`strong_count` is 0. We can also create a *weak reference* to the value within
+an `Rc<T>` instance by calling `Rc::downgrade` and passing a reference to the
+`Rc<T>`. When we call `Rc::downgrade`, we get a smart pointer of type
+`Weak<T>`. Instead of increasing the `strong_count` in the `Rc<T>` instance by
+one, calling `Rc::downgrade` increases the `weak_count` by one. The `Rc<T>`
+type uses `weak_count` to keep track of how many `Weak<T>` references exist,
+similarly to `strong_count`. The difference is the `weak_count` does not need
+to be 0 in order for the `Rc<T>` instance to be cleaned up.
+
+Strong references are how we can share ownership of an `Rc<T>` instance. Weak
+references don’t express an ownership relationship. They won’t cause a
+reference cycle since any cycle involving some weak references will be broken
+once the strong reference count of values involved is 0.
+
+Because the value that `Weak<T>` references might have been dropped, in order
+to do anything with the value that a `Weak<T>` is pointing to, we have to check
+to make sure the value is still around. We do this by calling the `upgrade`
+method on a `Weak<T>` instance, which will return an `Option<Rc<T>>`. We’ll get
+a result of `Some` if the `Rc<T>` value has not been dropped yet, and `None` if
+the `Rc<T>` value has been dropped. Because `upgrade` returns an `Option`, we
+can be sure that Rust will handle both the `Some` case and the `None` case, and
+there won’t be an invalid pointer.
+
+As an example, rather than using a list whose items know only about the next
+item, we’ll create a tree whose items know about their children items *and*
+their parent items.
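Before we build the tree, here is a minimal standalone sketch (again ours, not one of the book's listings) of the `Rc::downgrade` and `upgrade` behavior just described:

```rust
use std::rc::{Rc, Weak};

fn main() {
    let strong = Rc::new(5);

    // Downgrading creates a `Weak<i32>` and increases `weak_count`,
    // leaving `strong_count` untouched.
    let weak: Weak<i32> = Rc::downgrade(&strong);
    println!("strong = {}, weak = {}",
             Rc::strong_count(&strong), Rc::weak_count(&strong)); // 1 and 1

    // While a strong reference is alive, `upgrade` returns `Some`.
    assert_eq!(weak.upgrade().map(|rc| *rc), Some(5));

    // After the last strong reference is dropped, the value is cleaned up
    // even though `weak_count` is still 1, and `upgrade` returns `None`.
    drop(strong);
    assert_eq!(weak.upgrade(), None);
}
```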
+ +#### Creating a Tree Data Structure: a `Node` with Child Nodes + +To start building this tree, we’ll create a struct named `Node` that holds its +own `i32` value as well as references to its children `Node` values: Filename: src/main.rs @@ -178,17 +276,28 @@ struct Node { } ``` -We want to be able to have a `Node` own its children, and we also want to be -able to have variables own each node so we can access them directly. That’s why -the items in the `Vec` are `Rc` values. We want to be able to modify what -nodes are another node’s children, so that’s why we have a `RefCell` in -`children` around the `Vec`. In Listing 15-19, let’s create one instance of -`Node` named `leaf` with the value 3 and no children, and another instance -named `branch` with the value 5 and `leaf` as one of its children: +We want a `Node` to own its children, and we want to be able to share that +ownership with variables so we can access each `Node` in the tree directly. To +do this, we define the `Vec` items to be values of type `Rc`. We also +want to be able to modify which nodes are children of another node, so we have +a `RefCell` in `children` around the `Vec`. + +Next, let’s use our struct definition and create one `Node` instance named +`leaf` with the value 3 and no children, and another instance named `branch` +with the value 5 and `leaf` as one of its children, as shown in Listing 15-23: Filename: src/main.rs -```rust,ignore +```rust +# use std::rc::Rc; +# use std::cell::RefCell; +# +# #[derive(Debug)] +# struct Node { +# value: i32, +# children: RefCell>>, +# } +# fn main() { let leaf = Rc::new(Node { value: 3, @@ -197,30 +306,41 @@ fn main() { let branch = Rc::new(Node { value: 5, - children: RefCell::new(vec![leaf.clone()]), + children: RefCell::new(vec![Rc::clone(&leaf)]), }); } ``` -Listing 15-19: Creating a `leaf` node and a `branch` node -where `branch` has `leaf` as one of its children but `leaf` has no reference to -`branch` +Listing 15-23: Creating a `leaf` node with no children +and a `branch` node with `leaf` as one of its children + +We clone the `Rc` in `leaf` and store that in `branch`, meaning the `Node` in +`leaf` now has two owners: `leaf` and `branch`. We can get from `branch` to +`leaf` through `branch.children`, but there’s no way to get from `leaf` to +`branch`. `leaf` has no reference to `branch` and doesn’t know they are +related. We’d like `leaf` to know that `branch` is its parent. -The `Node` in `leaf` now has two owners: `leaf` and `branch`, since we clone -the `Rc` in `leaf` and store that in `branch`. The `Node` in `branch` knows -it’s related to `leaf` since `branch` has a reference to `leaf` in -`branch.children`. However, `leaf` doesn’t know that it’s related to `branch`, -and we’d like `leaf` to know that `branch` is its parent. +#### Adding a Reference from a Child to its Parent -To do that, we’re going to add a `parent` field to our `Node` struct -definition, but what should the type of `parent` be? We know it can’t contain -an `Rc`, since `leaf.parent` would point to `branch` and `branch.children` -contains a pointer to `leaf`, which makes a reference cycle. Neither `leaf` nor -`branch` would get dropped since they would always refer to each other and -their reference counts would never be zero. +To make the child node aware of its parent, we need to add a `parent` field to +our `Node` struct definition. The trouble is in deciding what the type of +`parent` should be. 
We know it can’t contain an `Rc` because that would +create a reference cycle, with `leaf.parent` pointing to `branch` and +`branch.children` pointing to `leaf`, which would cause their `strong_count` +values to never be zero. -So instead of `Rc`, we’re going to make the type of `parent` use `Weak`, -specifically a `RefCell>`: +Thinking about the relationships another way, a parent node should own its +children: if a parent node is dropped, its child nodes should be dropped as +well. However, a child should not own its parent: if we drop a child node, the +parent should still exist. This is a case for weak references! + +So instead of `Rc`, we’ll make the type of `parent` use `Weak`, specifically +a `RefCell>`. Now our `Node` struct definition looks like this: + + + Filename: src/main.rs @@ -236,14 +356,35 @@ struct Node { } ``` -This way, a node will be able to refer to its parent node if it has one, -but it does not own its parent. A parent node will be dropped even if -it has child nodes referring to it, as long as it doesn’t have a parent -node as well. Now let’s update `main` to look like Listing 15-20: + + + +This way, a node will be able to refer to its parent node, but does not own its +parent. In Listing 15-24, let’s update `main` to use this new definition so +that the `leaf` node will have a way to refer to its parent, `branch`: + + + Filename: src/main.rs -```rust,ignore +```rust +# use std::rc::{Rc, Weak}; +# use std::cell::RefCell; +# +# #[derive(Debug)] +# struct Node { +# value: i32, +# parent: RefCell>, +# children: RefCell>>, +# } +# fn main() { let leaf = Rc::new(Node { value: 3, @@ -256,7 +397,7 @@ fn main() { let branch = Rc::new(Node { value: 5, parent: RefCell::new(Weak::new()), - children: RefCell::new(vec![leaf.clone()]), + children: RefCell::new(vec![Rc::clone(&leaf)]), }); *leaf.parent.borrow_mut() = Rc::downgrade(&branch); @@ -265,30 +406,45 @@ fn main() { } ``` -Listing 15-20: A `leaf` node and a `branch` node where -`leaf` has a `Weak` reference to its parent, `branch` +Listing 15-24: A `leaf` node with a `Weak` reference to +its parent node, `branch` + + + +Creating the `leaf` node looks similar to how creating the `leaf` node looked +in Listing 15-23, with the exception of the `parent` field: `leaf` starts out +without a parent, so we create a new, empty `Weak` reference instance. -Creating the `leaf` node looks similar; since it starts out without a parent, -we create a new `Weak` reference instance. When we try to get a reference to -the parent of `leaf` by using the `upgrade` method, we’ll get a `None` value, -as shown by the first `println!` that outputs: +At this point, when we try to get a reference to the parent of `leaf` by using +the `upgrade` method, we get a `None` value. We see this in the output from the +first `println!`: ```text leaf parent = None ``` -Similarly, `branch` will also have a new `Weak` reference, since `branch` does -not have a parent node. We still make `leaf` be one of the children of -`branch`. Once we have a new `Node` instance in `branch`, we can modify `leaf` -to have a `Weak` reference to `branch` for its parent. We use the `borrow_mut` -method on the `RefCell` in the `parent` field of `leaf`, then we use the -`Rc::downgrade` function to create a `Weak` reference to `branch` from the `Rc` -in `branch.` + + + +When we create the `branch` node, it will also have a new `Weak` reference, +since `branch` does not have a parent node. We still have `leaf` as one of the +children of `branch`. 
Once we have the `Node` instance in `branch`, we can +modify `leaf` to give it a `Weak` reference to its parent. We use the +`borrow_mut` method on the `RefCell` in the `parent` field of `leaf`, then we +use the `Rc::downgrade` function to create a `Weak` reference to `branch` from +the `Rc` in `branch.` + + + When we print out the parent of `leaf` again, this time we’ll get a `Some` -variant holding `branch`. Also notice we don’t get a cycle printed out that -eventually ends in a stack overflow like we did in Listing 15-14: the `Weak` -references are just printed as `(Weak)`: +variant holding `branch`: `leaf` can now access its parent! When we print out +`leaf`, we also avoid the cycle that eventually ended in a stack overflow like +we had in Listing 15-21: the `Weak` references are printed as `(Weak)`: ```text leaf parent = Some(Node { value: 5, parent: RefCell { value: (Weak) }, @@ -296,12 +452,17 @@ children: RefCell { value: [Node { value: 3, parent: RefCell { value: (Weak) }, children: RefCell { value: [] } }] } }) ``` -The fact that we don’t get infinite output (or at least until the stack -overflows) is one way we can see that we don’t have a reference cycle in this -case. Another way we can tell is by looking at the values we get from calling -`Rc::strong_count` and `Rc::weak_count`. In Listing 15-21, let’s create a new -inner scope and move the creation of `branch` in there, so that we can see what -happens when `branch` is created and then dropped when it goes out of scope: +The lack of infinite output indicates that this code didn’t create a reference +cycle. We can also tell this by looking at the values we get from calling +`Rc::strong_count` and `Rc::weak_count`. + +#### Visualizing Changes to `strong_count` and `weak_count` + +Let’s look at how the `strong_count` and `weak_count` values of the `Rc` +instances change by creating a new inner scope and moving the creation of +`branch` into that scope. This will let us see what happens when `branch` is +created and then dropped when it goes out of scope. The modifications are shown +in Listing 15-25: Filename: src/main.rs @@ -323,7 +484,7 @@ fn main() { let branch = Rc::new(Node { value: 5, parent: RefCell::new(Weak::new()), - children: RefCell::new(vec![leaf.clone()]), + children: RefCell::new(vec![Rc::clone(&leaf)]), }); *leaf.parent.borrow_mut() = Rc::downgrade(&branch); @@ -349,53 +510,61 @@ fn main() { } ``` -Listing 15-21: Creating `branch` in an inner scope and -examining strong and weak reference counts of `leaf` and `branch` +Listing 15-25: Creating `branch` in an inner scope and +examining strong and weak reference counts -Right after creating `leaf`, its strong count is 1 (for `leaf` itself) and its -weak count is 0. In the inner scope, after we create `branch` and associate -`leaf` and `branch`, `branch` will have a strong count of 1 (for `branch` -itself) and a weak count of 1 (for `leaf.parent` pointing to `branch` with a -`Weak`). `leaf` will have a strong count of 2, since `branch` now has a -clone the `Rc` of `leaf` stored in `branch.children`. `leaf` still has a weak -count of 0. +Once `leaf` is created, its `Rc` has a strong count of 1 and a weak count of 0. +In the inner scope we create `branch` and associate it with `leaf`, at which +point the `Rc` in `branch` will have a strong count of 1 and a weak count of 1 +(for `leaf.parent` pointing to `branch` with a `Weak`). 
Here `leaf` will +have a strong count of 2, because `branch` now has a clone of the `Rc` of +`leaf` stored in `branch.children`, but will still have a weak count of 0. -When the inner scope ends, `branch` goes out of scope, and its strong count -decreases to 0, so its `Node` gets dropped. The weak count of 1 from -`leaf.parent` has no bearing on whether `Node` gets dropped or not, so we don’t -have a memory leak! +When the inner scope ends, `branch` goes out of scope and the strong count of +the `Rc` decreases to 0, so its `Node` gets dropped. The weak count of 1 from +`leaf.parent` has no bearing on whether `Node` is dropped or not, so we don’t +get any memory leaks! If we try to access the parent of `leaf` after the end of the scope, we’ll get -`None` again like we did before `leaf` had a parent. At the end of the program, -`leaf` has a strong count of 1 and a weak count of 0, since `leaf` is now the -only thing pointing to it again. - -All of the logic managing the counts and whether a value should be dropped or -not was managed by `Rc` and `Weak` and their implementations of the `Drop` -trait. By specifying that the relationship from a child to its parent should be -a `Weak` reference in the definition of `Node`, we’re able to have parent -nodes point to child nodes and vice versa without creating a reference cycle -and memory leaks. +`None` again. At the end of the program, the `Rc` in `leaf` has a strong count +of 1 and a weak count of 0, because the variable `leaf` is now the only +reference to the `Rc` again. + + + + +All of the logic that manages the counts and value dropping is built in to +`Rc` and `Weak` and their implementations of the `Drop` trait. By specifying +that the relationship from a child to its parent should be a `Weak` +reference in the definition of `Node`, we’re able to have parent nodes point to +child nodes and vice versa without creating a reference cycle and memory leaks. + + + ## Summary -We’ve now covered how you can use different kinds of smart pointers to choose -different guarantees and tradeoffs than those Rust makes with regular +This chapter covered how you can use smart pointers to make different +guarantees and tradeoffs than those Rust makes by default with regular references. `Box` has a known size and points to data allocated on the heap. `Rc` keeps track of the number of references to data on the heap so that data can have multiple owners. `RefCell` with its interior mutability gives -us a type that can be used where we need an immutable type, and enforces the -borrowing rules at runtime instead of at compile time. +us a type that can be used when we need an immutable type but need the ability +to change an inner value of that type, and enforces the borrowing rules at +runtime instead of at compile time. -We’ve also discussed the `Deref` and `Drop` traits that enable a lot of smart -pointers’ functionality. We explored how it’s possible to create a reference -cycle that would cause a memory leak, and how to prevent reference cycles by -using `Weak`. +We also discussed the `Deref` and `Drop` traits that enable a lot of the +functionality of smart pointers. We explored reference cycles that can cause +memory leaks, and how to prevent them using `Weak`. -If this chapter has piqued your interest and you now want to implement your own -smart pointers, check out [The Nomicon] for even more useful information. 
+If this chapter has piqued your interest and you want to implement your own +smart pointers, check out [“The Nomicon”] for even more useful information. -[The Nomicon]: https://doc.rust-lang.org/stable/nomicon/ +[“The Nomicon”]: https://doc.rust-lang.org/stable/nomicon/ Next, let’s talk about concurrency in Rust. We’ll even learn about a few new -smart pointers that can help us with it. +smart pointers. diff --git a/src/doc/book/second-edition/src/ch16-00-concurrency.md b/src/doc/book/second-edition/src/ch16-00-concurrency.md index 921672b1a7..e06e6bff1e 100644 --- a/src/doc/book/second-edition/src/ch16-00-concurrency.md +++ b/src/doc/book/second-edition/src/ch16-00-concurrency.md @@ -1,38 +1,55 @@ # Fearless Concurrency -Ensuring memory safety isn’t Rust’s only goal: being a language that is better -equipped to handle concurrent and parallel programming has always been another -major goal of Rust. *Concurrent programming*, where different parts of a -program execute independently, and *parallel programming*, where different -parts of a program are executing at the same time, are becoming more important -as more computers have multiple processors for our programs to take advantage -of. Historically, programming in these contexts has been difficult and error -prone: Rust hopes to change that. - -Originally, we thought that memory safety and preventing concurrency problems -were two separate challenges to be solved with different methods. However, over -time, we discovered that ownership and the type system are a powerful set of -tools that help in dealing with both memory safety *and* concurrency problems! -By leveraging ownership and type checking, many concurrency errors are *compile -time* errors in Rust, rather than runtime errors. We’ve nicknamed this aspect -of Rust *fearless concurrency*. Fearless concurrency means Rust not only allows -you to have confidence that your code is free of subtle bugs, but also lets you -refactor this kind of code easily without worrying about introducing new bugs. - -> Note: given that Rust’s slogan is *fearless concurrency*, we’ll be referring -> to many of the problems here as *concurrent* rather than being more precise -> by saying *concurrent and/or parallel*, for simplicity’s sake. If this were a -> book specifically about concurrency and/or parallelism, we’d be sure to be -> more specific. For this chapter, please mentally substitute -> *concurrent and/or parallel* whenever we say *concurrent*. - -Many languages are strongly opinionated about the solutions they offer you to -deal with concurrent problems. That’s a very reasonable strategy, especially -for higher-level languages, but lower-level languages don’t have that luxury. -Lower-level languages are expected to enable whichever solution would provide -the best performance in a given situation, and they have fewer abstractions -over the hardware. Rust, therefore, gives us a variety of tools for modeling -our problems in whatever way is appropriate for our situation and requirements. +Handling concurrent programming safely and efficiently is another of Rust’s +major goals. *Concurrent programming*, where different parts of a program +execute independently, and *parallel programming*, where different parts of a +program are executing at the same time, are becoming increasingly important as +more computers have multiple processors to take advantage of. Historically, +programming in these contexts has been difficult and error prone: Rust hopes to +change that. 
+ +Initially, the Rust team thought that ensuring memory safety and preventing +concurrency problems were two separate challenges to be solved with different +methods. Over time, they discovered that the ownership and type systems are a +powerful set of tools to help in dealing with both memory safety *and* +concurrency problems! By leveraging ownership and type checking, many +concurrency errors are *compile time* errors in Rust, rather than runtime +errors. Rather than spending lots of time trying to reproduce the exact +circumstances under which a runtime concurrency bug occurs, incorrect code will +refuse to compile with an error explaining the problem. This lets you fix your +code while you’re working on it, rather than potentially after it’s been +shipped to production. We’ve nicknamed this aspect of Rust *fearless +concurrency*. Fearless concurrency allows you to write code that’s free of +subtle bugs and is easy to refactor without introducing new bugs. + + + + +> Note: we’ll be referring to many of the problems here as *concurrent* rather +> than being more precise by saying *concurrent and/or parallel*, for +> simplicity’s sake. If this were a book specifically about concurrency and/or +> parallelism, we’d be sure to be more specific. For this chapter, please +> mentally substitute *concurrent and/or parallel* whenever we say *concurrent*. + + + + +Many languages are strongly opinionated about the solutions they offer for +dealing with concurrent problems. For example, Erlang has elegant functionality +for message passing concurrency, but only obscure ways to share state between +threads. Only supporting a subset of possible solutions is a reasonable +strategy for higher-level languages to take, because a higher-level language +promises benefits from giving up some control in order to gain abstractions. +However, lower-level languages are expected to provide the solution with the +best performance in any given situation, and have fewer abstractions over the +hardware. Rust, therefore, gives us a variety of tools for modeling your +problems in whatever way is appropriate for your situation and requirements. Here’s what we’ll cover in this chapter: @@ -41,6 +58,5 @@ Here’s what we’ll cover in this chapter: between threads. * *Shared state* concurrency, where multiple threads have access to some piece of data. -* The `Sync` and `Send` traits, which allow Rust’s concurrency guarantees to be - extended to user-defined types as well as types provided by the standard - library. +* The `Sync` and `Send` traits, which extend Rust’s concurrency guarantees to + user-defined types as well as types provided by the standard library. diff --git a/src/doc/book/second-edition/src/ch16-01-threads.md b/src/doc/book/second-edition/src/ch16-01-threads.md index 551bb3290e..31a2c71e5a 100644 --- a/src/doc/book/second-edition/src/ch16-01-threads.md +++ b/src/doc/book/second-edition/src/ch16-01-threads.md @@ -1,69 +1,85 @@ ## Using Threads to Run Code Simultaneously -In most operating systems in use today, when your program executes, the context -in which the operating system runs your code is called a *process*. The -operating system runs many processes, and the operating system managing these -processes is what lets multiple programs execute at the same time on your -computer. - -We can take the idea of processes each running a program down one level of -abstraction: your program can also have independent parts that run -simultaneously within the context of your program. 
The feature that enables -this functionality is called *threads*. - -Splitting up the computation your program needs to do into multiple threads can -improve performance, since the program will be doing multiple things at the -same time. Programming with threads can add complexity, however. Since threads -run simultaneously, there’s no inherent guarantee about the order in which the -parts of your code on different threads will run. This can lead to race -conditions where threads are accessing data or resources in an inconsistent -order, deadlocks where two threads both prevent each other from continuing, or -bugs that only happen in certain situations that are hard to reproduce -reliably. Rust lessens the effect of these and other downsides of using -threads, but programming in a multithreaded context still takes thought and -code structured differently than for programs only expected to run in a single -thread. - -There are a few different ways that programming languages implement threads. -Many operating systems provide an API for creating new threads. In addition, -many programming languages provide their own special implementation of threads. -Programming language provided threads are sometimes called *lightweight* or -*green* threads. These languages take a number of green threads and execute -them in the context of a different number of operating system threads. For this -reason, the model where a language calls the operating system APIs to create -threads is sometimes called *1:1*, one OS thread per one language thread. The -green threaded model is called the *M:N* model, `M` green threads per `N` OS -threads, where `M` and `N` are not necessarily the same number. - -Each model has its own advantages and tradeoffs. The tradeoff that’s most -important to Rust is runtime support. *Runtime* is a confusing term; it can -have different meaning in different contexts. Here, we mean some code included -by the language in every binary. For some languages, this code is large, and -for others, this code is small. Colloquially, “no runtime” is often what people -will say when they mean “small runtime”, since every non-assembly language has -some amount of runtime. Smaller runtimes have fewer features but have the -advantage of resulting in smaller binaries. Smaller binaries make it easier to -combine the language with other languages in more contexts. While many -languages are okay with increasing the runtime in exchange for more features, -Rust needs to have nearly no runtime, and cannot compromise on being able to -call into C in order to maintain performance. - -The green threading model is a feature that requires a larger language runtime -in order to manage the threads. As such, the Rust standard library only -provides an implementation of 1:1 threading. Because Rust is such a low-level -language, there are crates that implement M:N threading if you would rather -trade overhead for aspects such as more control over which threads run when and -lower costs of context switching, for example. - -Now that we’ve defined what threads are in Rust, let’s explore how to use the -thread-related API that the standard library provides for us. +In most operating systems today, an executed program’s code is run in a +*process*, and the operating system manages multiple process at once. Within +your program, you can also have independent parts that run simultaneously. The +feature that runs these independent parts is called *threads*. 
+ + + + +Splitting the computation in your program up into multiple threads can improve +performance, since the program will be doing multiple things at the same time, +but it also adds complexity. Because threads may run simultaneously, there’s no +inherent guarantee about the order in which parts of your code on different +threads will run. This can lead to problems such as: + +- Race conditions, where threads are accessing data or resources in an + inconsistent order +- Deadlocks, where two threads are waiting for each other to finish using a + resource the other thread has, which prevents both threads from continuing +- Bugs that only happen in certain situations and are hard to reproduce and + fix reliably + + + + +Rust attempts to mitigate negative effects of using threads. Programming in a +multithreaded context still takes careful thought and requires a code structure +that’s different from programs that run in a single thread. + +Programming languages implement threads in a few different ways. Many operating +systems provide an API for creating new threads. This model where a language +calls the operating system APIs to create threads is sometimes called *1:1*, +one OS thread per one language thread. + +Many programming languages provide their own special implementation of threads. +Programming language-provided threads are known as *green* threads, and +languages that use these green threads will execute them in the context of a +different number of operating system threads. For this reason, the green +threaded model is called the *M:N* model, `M` green threads per `N` OS threads, +where `M` and `N` are not necessarily the same number. + +Each model has its own advantages and tradeoffs, and the tradeoff most +important to Rust is runtime support. *Runtime* is a confusing term and can +have different meanings in different contexts. + + + + +In this context, by runtime we mean code that’s included by the language in +every binary. This code can be large or small depending on the language, but +every non-assembly language will have some amount of runtime code. For that +reason, colloquially when people say a language has “no runtime” they often +mean “small runtime.” Smaller runtimes have fewer features but have the +advantage of resulting in smaller binaries, which make it easier to combine the +language with other languages in more contexts. While many languages are okay +with increasing the runtime size in exchange for more features, Rust needs to +have nearly no runtime, and cannot compromise on being able to call into C in +order to maintain performance. + +The green threading M:N model requires a larger language runtime to manage +threads. As such, the Rust standard library only provides an implementation of +1:1 threading. Because Rust is such a low-level language, there are crates that +implement M:N threading if you would rather trade overhead for aspects such as +more control over which threads run when and lower costs of context switching, +for example. + +Now that we’ve defined threads in Rust, let’s explore how to use the +thread-related API provided by the standard library. ### Creating a New Thread with `spawn` -To create a new thread, we call the `thread::spawn` function and pass it a -closure (we talked about closures in Chapter 13), containing the code we want -to run in the new thread. 
The example in Listing 16-1 prints some text from a -new thread and other text from the main thread: +To create a new thread, we call the `thread::spawn` function, and pass it a +closure (we talked about closures in Chapter 13) containing the code we want to +run in the new thread. The example in Listing 16-1 prints some text from a main +thread and other text from a new thread: Filename: src/main.rs @@ -84,11 +100,12 @@ fn main() { ``` Listing 16-1: Creating a new thread to print one thing -while the main thread is printing something else +while the main thread prints something else -Note that the way this function is written, when the main thread ends, it will -stop the new thread too. The output from this program might be a little -different every time, but it will look similar to this: +Note that with this function, the new thread will be stopped when the main +thread ends, whether it has finished running or not. The output from this +program might be a little different every time, but it will look similar to +this: ```text hi number 1 from the main thread! @@ -102,21 +119,42 @@ hi number 4 from the spawned thread! hi number 5 from the spawned thread! ``` -The threads will probably take turns, but that’s not guaranteed. In this run, -the main thread printed first, even though the print statement from the spawned -thread appears first in the code we wrote. And even though we told the spawned -thread to print until `i` is 9, it only got to 5 before the main thread shut -down. If you always only see one thread, or if you don’t see any overlap, try + + + +The threads will probably take turns, but that’s not guaranteed: it depends on +how your operating system schedules the threads. In this run, the main thread +printed first, even though the print statement from the spawned thread appears +first in the code. And even though we told the spawned thread to print until +`i` is 9, it only got to 5 before the main thread shut down. + +If you run this code and only see one thread, or don’t see any overlap, try increasing the numbers in the ranges to create more opportunities for a thread to take a break and give the other thread a turn. #### Waiting for All Threads to Finish Using `join` Handles -Not only does the code in Listing 16-1 not allow the spawned thread to finish -most of the time since the main thread ends before the spawned thread is done, -there’s actually no guarantee that the spawned thread will get to run at all! We -can fix this by saving the return value of `thread::spawn`, which is a -`JoinHandle`. That looks like Listing 16-2: +The code in Listing 16-1 not only stops the spawned thread prematurely most of +the time, because the main thread ends before the spawned thread is done, +there’s actually no guarantee that the spawned thread will get to run at all, +because there’s no guarantee on the order in which threads run! + + + + +We can fix this by saving the return value of `thread::spawn` in a variable. +The return type of `thread::spawn` is `JoinHandle`. A `JoinHandle` is an owned +value that, when we call the `join` method on it, will wait for its thread to +finish. 
Listing 16-2 shows how to use the `JoinHandle` of the thread we created +in Listing 16-1 and call `join` in order to make sure the spawned thread +finishes before the `main` exits: + + + Filename: src/main.rs @@ -141,11 +179,15 @@ fn main() { Listing 16-2: Saving a `JoinHandle` from `thread::spawn` to guarantee the thread is run to completion -A `JoinHandle` is an owned value that can wait for a thread to finish, which is -what the `join` method does. By calling `join` on the handle, the current -thread will block until the thread that the handle represents terminates. Since -we’ve put the call to `join` after the main thread’s `for` loop, running this -example should produce output that looks something like this: +Calling `join` on the handle blocks the thread currently running until the +thread represented by the handle terminates. *Blocking* a thread means that +thread is prevented from performing work or exiting. Because we’ve put the call +to `join` after the main thread’s `for` loop, running this example should +produce output that looks something like this: + + ```text hi number 1 from the main thread! @@ -188,9 +230,8 @@ fn main() { } ``` -The main thread will wait for the spawned thread to finish before the main -thread starts running its `for` loop, so the output won’t be interleaved -anymore: +The main thread will wait for the spawned thread to finish and then run its +`for` loop, so the output won’t be interleaved anymore: ```text hi number 1 from the spawned thread! @@ -213,21 +254,24 @@ your threads are actually running at the same time or not. ### Using `move` Closures with Threads -There’s a feature of closures that we didn’t cover in Chapter 13 that’s often -useful with `thread::spawn`: `move` closures. We said this in Chapter 13: +The `move` closure, which we didn’t cover in Chapter 13, is often used +alongside `thread::spawn`, as it allows us to use data from one thread in +another thread. -> Creating closures that capture values from their environment is mostly used -> in the context of starting new threads. +In Chapter 13, we said that “Creating closures that capture values from their +environment is mostly used in the context of starting new threads.” + + Now we’re creating new threads, so let’s talk about capturing values in closures! -Notice the closure that we pass to `thread::spawn` in Listing 16-1 takes no +Notice in Listing 16-1 that the closure we pass to `thread::spawn` takes no arguments: we’re not using any data from the main thread in the spawned -thread’s code. In order to use data in the spawned thread that comes from the -main thread, we need the spawned thread’s closure to capture the values it -needs. Listing 16-3 shows an attempt to create a vector in the main thread and -use it in the spawned thread, which won’t work the way this example is written: +thread’s code. In order to do so, the spawned thread’s closure must capture the +values it needs. Listing 16-3 shows an attempt to create a vector in the main +thread and use it in the spawned thread. However, this won’t yet work, as +you’ll see in a moment: Filename: src/main.rs @@ -246,11 +290,11 @@ fn main() { ``` Listing 16-3: Attempting to use a vector created by the -main thread from another thread +main thread in another thread -The closure uses `v`, so the closure will capture `v` and make `v` part of the -closure’s environment. Because `thread::spawn` runs this closure in a new -thread, we can access `v` inside that new thread. 
+The closure uses `v`, so will capture `v` and make it part of the closure’s +environment. Because `thread::spawn` runs this closure in a new thread, we +should be able to access `v` inside that new thread. When we compile this example, however, we’ll get the following error: @@ -269,14 +313,13 @@ variables), use the `move` keyword, as shown: | let handle = thread::spawn(move || { ``` -When we capture something in a closure’s environment, Rust will try to infer -how to capture it. `println!` only needs a reference to `v`, so the closure -tries to borrow `v`. There’s a problem, though: we don’t know how long the -spawned thread will run, so we don’t know if the reference to `v` will always -be valid. +Rust *infers* how to capture `v`, and since `println!` only needs a reference +to `v`, the closure tries to borrow `v`. There’s a problem, though: Rust can’t +tell how long the spawned thread will run, so doesn’t know if the reference to +`v` will always be valid. -Consider the code in Listing 16-4 that shows a scenario where it’s more likely -that the reference to `v` won’t be valid: +Let’s look at a scenario that’s more likely to have a reference to `v` that +won’t be valid, shown Listing 16-4: Filename: src/main.rs @@ -299,14 +342,15 @@ fn main() { Listing 16-4: A thread with a closure that attempts to capture a reference to `v` from a main thread that drops `v` -This code could be run, and the spawned thread could immediately get put in the -background without getting a chance to run at all. The spawned thread has a -reference to `v` inside, but the main thread is still running: it immediately -drops `v`, using the `drop` function that we discussed in Chapter 15 that -explicitly drops its argument. Then, the spawned thread starts to execute. `v` -is now invalid, so a reference to it is also invalid. Oh no! +If we run this code, there’s a possibility the spawned thread will be +immediately put in the background without getting a chance to run at all. The +spawned thread has a reference to `v` inside, but the main thread immediately +drops `v`, using the `drop` function we discussed in Chapter 15. Then, when the +spawned thread starts to execute, `v` is no longer valid, so a reference to it +is also invalid. Oh no! -To fix this problem, we can listen to the advice of the error message: +To fix the problem in Listing 16-3, we can listen to the advice of the error +message: ```text help: to force the closure to take ownership of `v` (and any other referenced @@ -315,9 +359,9 @@ variables), use the `move` keyword, as shown: ``` By adding the `move` keyword before the closure, we force the closure to take -ownership of the values it’s using, rather than inferring borrowing. This -modification to the code from Listing 16-3 shown in Listing 16-5 will compile -and run as we intend: +ownership of the values it’s using, rather than allowing Rust to infer that it +should borrow. The modification to Listing 16-3 shown in Listing 16-5 will +compile and run as we intend: Filename: src/main.rs @@ -338,9 +382,15 @@ fn main() { Listing 16-5: Using the `move` keyword to force a closure to take ownership of the values it uses -What about the code in Listing 16-4 where the main thread called `drop`? If we -add `move` to the closure, we’ve moved `v` into the closure’s environment, and -we can no longer call `drop` on it. We get this compiler error instead: + + + +What would happen to the code in Listing 16-4 where the main thread called +`drop` if we use a `move` closure? Would `move` fix that case? 
Nope, we get a +different error, because what Listing 16-4 is trying to do isn’t allowed for a +different reason! If we add `move` to the closure, we’d move `v` into the +closure’s environment, and we could no longer call `drop` on it in the main +thread. We would get this compiler error instead: ```text error[E0382]: use of moved value: `v` @@ -356,7 +406,20 @@ error[E0382]: use of moved value: `v` not implement the `Copy` trait ``` -Rust’s ownership rules have saved us again! +Rust’s ownership rules have saved us again! We got an error from the code in +Listing 16-3 because Rust was being conservative and only borrowing `v` for the +thread, which meant the main thread could theoretically invalidate the spawned +thread’s reference. By telling Rust to move ownership of `v` to the spawned +thread, we’re guaranteeing to Rust that the main thread won’t use `v` anymore. +If we change Listing 16-4 in the same way, we’re then violating the ownership +rules when we try to use `v` in the main thread. The `move` keyword overrides +Rust’s conservative default of borrowing; it doesn’t let us violate the +ownership rules. + + + Now that we have a basic understanding of threads and the thread API, let’s talk about what we can actually *do* with threads. diff --git a/src/doc/book/second-edition/src/ch16-02-message-passing.md b/src/doc/book/second-edition/src/ch16-02-message-passing.md index a3401735bc..b1212ff65c 100644 --- a/src/doc/book/second-edition/src/ch16-02-message-passing.md +++ b/src/doc/book/second-edition/src/ch16-02-message-passing.md @@ -1,25 +1,39 @@ ## Message Passing to Transfer Data Between Threads -One approach to concurrency that’s seen a rise in popularity as of late is -*message passing*, where threads or actors communicate by sending each other -messages containing data. Here’s the idea in slogan form: +One increasingly popular approach to ensuring safe concurrency is *message +passing*, where threads or actors communicate by sending each other messages +containing data. Here’s the idea in slogan form from the Go language +documentation: > Do not communicate by sharing memory; instead, share memory by > communicating. > > --[Effective Go](http://golang.org/doc/effective_go.html) -A major tool to accomplish this goal is the *channel*. A channel has two -halves, a transmitter and a receiver. One part of our code can call methods on -the transmitter with the data we want to send, and another part can check the -receiving end for arriving messages. + + -We’re going to work up to an example where we have one thread that will -generate values and send them down a channel. The main thread will receive the -values and print them out. +One major tool Rust has for accomplishing message sending concurrency is the +*channel*, a programming concept that Rust’s standard library provides an +implementation of. You can imagine a channel in programming like a channel of +water, such as a stream or a river. If you put something like a rubber duck or +a boat into a stream, it will travel downstream to the end of the river. -First, though, let’s start by creating a channel but not doing anything with it -in Listing 16-6: +A channel in programming has two halves: a transmitter and a receiver. The +transmitter half is like the upstream location where we put rubber ducks into +the river, and the receiver half is the downstream place where the rubber duck +ends up. 
One part of our code calls methods on the transmitter with the data we +want to send, and another part checks the receiving end for arriving messages. + +Here we’ll work up to a program that has one thread to generate values and send +them down a channel, and another thread that will receive the values and print +them out. We’re going to be sending simple values between threads using a +channel for the purposes of illustration. Once you’re familiar with the +technique, you could use channels to implement a chat system, or a system where +many threads perform parts of a calculation and send the parts to one thread +that aggregates the results. + +First, we’ll create a channel but not do anything with it in Listing 16-6: Filename: src/main.rs @@ -35,21 +49,38 @@ fn main() { Listing 16-6: Creating a channel and assigning the two halves to `tx` and `rx` -The `mpsc::channel` function creates a new channel. `mpsc` stands for *multiple -producer, single consumer*. In short, we can have multiple *sending* ends of a -channel that produce values, but only one *receiving* end that consumes those -values. We’re going to start with a single producer for now, but we’ll add +We create a new channel using the `mpsc::channel` function; `mpsc` stands for +*multiple producer, single consumer*. In short, the way Rust’s standard library +has implemented channels is such that a channel can have multiple *sending* +ends that produce values, but only one *receiving* end that consumes those +values. Imagine multiple rivers and streams flowing together into one big +river: everything sent down any of the streams will end up in one river at the +end. We’re going to start with a single producer for now, but we’ll add multiple producers once we get this example working. -`mpsc::channel` returns a tuple: the first element is the sending end, and the -second element is the receiving end. For historical reasons, many people use -`tx` and `rx` to abbreviate *transmitter* and *receiver*, so those are the -names we’re using for the variables bound to each end. We’re using a `let` -statement with a pattern that destructures the tuples; we’ll be discussing the -use of patterns in `let` statements and destructuring in Chapter 18. +The `mpsc::channel` function returns a tuple, the first element of which is the +sending end and the second element the receiving end. The abbreviations `tx` +and `rx` are traditionally used in many fields for *transmitter* and *receiver* +respectively, so we give our variables those names to indicate each end. We’re +using a `let` statement with a pattern that destructures the tuples; we’ll be +discussing the use of patterns in `let` statements and destructuring in Chapter +18. Using a `let` statement in this way is a convenient way to extract the +pieces of the tuple returned by `mpsc::channel`. + + + Let’s move the transmitting end into a spawned thread and have it send one -string, shown in Listing 16-7: +string so that the spawned thread is communicating with the main thread, shown +in Listing 16-7. This is like putting a rubber duck in the river upstream or +sending a chat message from one thread to another: + + + Filename: src/main.rs @@ -70,20 +101,21 @@ fn main() { Listing 16-7: Moving `tx` to a spawned thread and sending “hi” -We’re using `thread::spawn` to create a new thread, just as we did in the -previous section. We use a `move` closure to make `tx` move into the closure so -that the thread owns it. 
+We’re again using `thread::spawn` to create a new thread, and then use `move` +to move `tx` into the closure so the spawned thread owns `tx`. The spawned +thread needs to own the transmitting end of the channel in order to be able to +send messages through the channel. -The transmitting end of a channel has the `send` method that takes the value we -want to send down the channel. The `send` method returns a `Result` type, -because if the receiving end has already been dropped, there’s nowhere to send -a value to, so the send operation would error. In this example, we’re simply -calling `unwrap` to ignore this error, but for a real application, we’d want to -handle it properly. Chapter 9 is where you’d go to review strategies for proper -error handling. +The transmitting end has a `send` method that takes the value we want to send. +The `send` method returns a `Result` type, so that if the receiving end +has already been dropped and there’s nowhere to send a value, the send +operation will error. In this example, we’re simply calling `unwrap` to panic +in case of error, but for a real application, we’d handle it properly--return +to Chapter 9 to review strategies for proper error handling. -In Listing 16-8, let’s get the value from the receiving end of the channel in -the main thread: +In Listing 16-8, we’ll get the value from the receiving end of the channel in +the main thread. This is like retrieving the rubber duck from the water at the +end of the river, or like getting a chat message: Filename: src/main.rs @@ -108,11 +140,32 @@ fn main() { and printing it out The receiving end of a channel has two useful methods: `recv` and `try_recv`. -Here, we’re using `recv`, which is short for *receive*. This method will block -execution until a value is sent down the channel. Once a value is sent, `recv` -will return it in a `Result`. When the sending end of the channel closes, -`recv` will return an error. The `try_recv` method will not block; it instead -returns a `Result` immediately. +We’re using `recv`, short for *receive*, which will block the main thread’s +execution and wait until a value is sent down the channel. Once a value is +sent, `recv` will return it in a `Result`. When the sending end of the +channel closes, `recv` will return an error to signal that no more values will +be coming. + + + + +The `try_recv` method doesn’t block, but will instead return a `Result` +immediately: an `Ok` value holding a message if one is available, and an `Err` +value if there aren’t any messages this time. Using `try_recv` is useful if +this thread has other work to do while waiting for messages: we could write a +loop that calls `try_recv` every so often, handles a message if one is +available, and otherwise does other work for a little while until checking +again. + +We’ve chosen to use `recv` in this example for simplicity; we don’t have any +other work for the main thread to do other than wait for messages, so blocking +the main thread is appropriate. + + + If we run the code in Listing 16-8, we’ll see the value printed out from the main thread: @@ -121,11 +174,29 @@ main thread: Got: hi ``` -### How Channels Interact with Ownership - -Let’s do an experiment at this point to see how channels and ownership work -together: we’ll try to use `val` in the spawned thread after we’ve sent it down -the channel. Try compiling the code in Listing 16-9: +Perfect! 
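Since we just described what a `try_recv` polling loop might look like, here is a short sketch of that pattern (our own illustration rather than one of the chapter’s listings; the 200 and 50 millisecond delays are arbitrary stand-ins for “real work”): the main thread checks for a message, does a little other work if nothing has arrived yet, and reacts if the sender disconnects.

```rust
use std::sync::mpsc::{self, TryRecvError};
use std::thread;
use std::time::Duration;

fn main() {
    let (tx, rx) = mpsc::channel();

    thread::spawn(move || {
        // Pretend the spawned thread is busy before it sends anything.
        thread::sleep(Duration::from_millis(200));
        tx.send(String::from("hi")).unwrap();
    });

    // Poll with `try_recv` so the main thread can keep doing other work
    // between checks instead of blocking in `recv`.
    loop {
        match rx.try_recv() {
            Ok(msg) => {
                println!("Got: {}", msg);
                break;
            }
            Err(TryRecvError::Empty) => {
                // No message yet: do a bit of other work, then check again.
                thread::sleep(Duration::from_millis(50));
            }
            Err(TryRecvError::Disconnected) => {
                println!("the sender hung up without sending anything");
                break;
            }
        }
    }
}
```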
+
+### Channels and Ownership Transference
+
+
+
+
+The ownership rules play a vital role in message sending because they help us
+write safe, concurrent code. Preventing errors in concurrent programming is the
+advantage we get by making the tradeoff of having to think about ownership
+throughout our Rust programs. Let’s do an experiment to show how channels and
+ownership work together to prevent problems: we’ll try to use a `val` value in
+the spawned thread *after* we’ve sent it down the channel. Try compiling the
+code in Listing 16-9:

Filename: src/main.rs

@@ -151,12 +222,12 @@ fn main() {
it down the channel

Here, we try to print out `val` after we’ve sent it down the channel via
-`tx.send`. This is a bad idea: once we’ve sent the value to another thread,
-that thread could modify it or drop it before we try to use the value again.
-This could cause errors or unexpected results due to inconsistent or
-nonexistent data.
+`tx.send`. Allowing this would be a bad idea: once the value has been sent to
+another thread, that thread could modify or drop it before we try to use the
+value again, which would potentially cause errors or unexpected results due to
+inconsistent or nonexistent data.

-If we try to compile this code, Rust will error:
+However, Rust gives us an error if we try to compile this code:

```text
error[E0382]: use of moved value: `val`
@@ -171,24 +242,18 @@ error[E0382]: use of moved value: `val`
not implement the `Copy` trait
```

-Our concurrency mistake has caused a compile-time error! `send` takes ownership
-of its parameter and moves the value so that the value is owned by the
-receiver. This means we can’t accidentally use the value again after sending
-it; the ownership system checks that everything is okay.
-
-In this regard, message passing is very similar to single ownership in Rust.
-Message passing enthusiasts enjoy message passing for similar reasons that
-Rustaceans enjoy Rust’s ownership: single ownership means certain classes of
-problems go away. If only one thread at a time can use some memory, there’s no
-chance of a data race.
+Our concurrency mistake has caused a compile-time error! The `send` function
+takes ownership of its parameter, and when the value is moved, the receiver
+takes ownership of it. This stops us from accidentally using the value again
+after sending it; the ownership system checks that everything is okay.

### Sending Multiple Values and Seeing the Receiver Waiting

-The code in Listing 16-8 compiled and ran, but it wasn’t very interesting: it’s
-hard to see that we have two separate threads talking to each other over a
-channel. Listing 16-10 has some modifications that will prove to us that this
-code is running concurrently: the spawned thread will now send multiple
-messages and pause for a second between each message.
+The code in Listing 16-8 compiled and ran, but doesn’t show us very clearly
+that two separate threads are talking to each other over the channel. In
+Listing 16-10 we’ve made some modifications that will prove this code is
+running concurrently: the spawned thread will now send multiple messages and
+pause for a second between each message.

Filename: src/main.rs

@@ -223,17 +288,17 @@ fn main() {
Listing 16-10: Sending multiple messages and pausing
between each one

-This time, we have a vector of strings in the spawned thread that we want to
-send to the main thread.
We iterate over them, sending each individually and -then pausing by calling the `thread::sleep` function with a `Duration` value of +This time, the spawned thread has a vector of strings that we want to send to +the main thread. We iterate over them, sending each individually, and pause +between each by calling the `thread::sleep` function with a `Duration` value of one second. In the main thread, we’re not calling the `recv` function explicitly anymore: instead we’re treating `rx` as an iterator. For each value received, we’re printing it out. When the channel is closed, iteration will end. -When running the code in Listing 16-10, we’ll see this output, with a one second -pause in between each line: +When running the code in Listing 16-10, you should see the following output, +with a one second pause in between each line: ```text Got: hi @@ -242,16 +307,26 @@ Got: the Got: thread ``` -We don’t have any pausing or code that would take a while in the `for` loop in -the main thread, so we can tell that the main thread is waiting to receive -values from the spawned thread. +Because we don’t have any code that pauses or delays in the `for` loop in the +main thread, we can tell that the main thread is waiting to receive values from +the spawned thread. + + + -### Create Multiple Producers by Cloning the Transmitter +### Creating Multiple Producers by Cloning the Transmitter Near the start of this section, we mentioned that `mpsc` stood for *multiple -producer, single consumer*. We can expand the code from Listing 16-10 to create -multiple threads that all send values to the same receiver. We do that by -cloning the transmitting half of the channel, as shown in Listing 16-11: +producer, single consumer*. Let’s put that ability to use and expand the code +from Listing 16-10 to create multiple threads that all send values to the same +receiver. We can do that by cloning the transmitting half of the channel, as +shown in Listing 16-11: Filename: src/main.rs @@ -264,7 +339,7 @@ cloning the transmitting half of the channel, as shown in Listing 16-11: // ...snip... let (tx, rx) = mpsc::channel(); -let tx1 = tx.clone(); +let tx1 = mpsc::Sender::clone(&tx); thread::spawn(move || { let vals = vec![ String::from("hi"), @@ -304,9 +379,9 @@ thread::spawn(move || { between each one This time, before we create the first spawned thread, we call `clone` on the -sending end of the channel. This will give us a new sending handle that we can -pass to the first spawned thread. We’ll pass the original sending end of the -channel to a second spawned thread, and each thread is sending different +sending end of the channel. This will give us a new sending handle we can pass +to the first spawned thread. We pass the original sending end of the channel to +a second spawned thread. This gives us two threads, each sending different messages to the receiving end of the channel. If you run this, you’ll *probably* see output like this: @@ -322,10 +397,10 @@ Got: thread Got: you ``` -You might see the values in a different order, though. It depends on your -system! This is what makes concurrency interesting as well as difficult. If you -play around with `thread::sleep`, giving it different values in the different -threads, you can make the runs more non-deterministic and create different -output each time. +You might see the values in a different order, it depends on your system! This +is what makes concurrency interesting as well as difficult. 
If you play around +with `thread::sleep`, giving it different values in the different threads, each +run will be more non-deterministic and create different output each time. -Now that we’ve seen how channels work, let’s look at shared-memory concurrency. +Now that we’ve seen how channels work, let’s look at a different method of +concurrency. diff --git a/src/doc/book/second-edition/src/ch16-03-shared-state.md b/src/doc/book/second-edition/src/ch16-03-shared-state.md index d5aa110cd1..8c2f28da18 100644 --- a/src/doc/book/second-edition/src/ch16-03-shared-state.md +++ b/src/doc/book/second-edition/src/ch16-03-shared-state.md @@ -1,55 +1,70 @@ ## Shared State Concurrency -While message passing is a fine way of dealing with concurrency, it’s not the +Message passing is a fine way of dealing with concurrency, but it’s not the only one. Consider this slogan again: > Do not communicate by sharing memory; instead, share memory by > communicating. What would “communicate by sharing memory” look like? And moreover, why would -message passing enthusiasts dislike it, and dislike it enough to invert it -entirely? - -Remember how channels are sort of like single ownership? Shared memory -concurrency is sort of like multiple ownership: multiple threads can access the -same memory location at the same time. As we saw with multiple ownership made -possible by smart pointers in Chapter 15, multiple ownership can add additional -complexity, since we need to manage these different owners somehow. - -Rust’s type system and ownership can help a lot here in getting this management +message passing enthusiasts choose not to use it and do the opposite instead? + + + + +In a way, channels in any programming language are sort of like single +ownership, because once you transfer a value down a channel, you shouldn’t use +that value any longer. Shared memory concurrency is sort of like multiple +ownership: multiple threads can access the same memory location at the same +time. As we saw in Chapter 15 where multiple ownership was made possible by +smart pointers, multiple ownership can add additional complexity because these +different owners need managing. + +Rust’s type system and ownership rules assist a lot in getting this management correct, though. For an example, let’s look at one of the more common concurrency primitives for shared memory: mutexes. ### Mutexes Allow Access to Data from One Thread at a Time A *mutex* is a concurrency primitive for sharing memory. It’s short for “mutual -exclusion”, that is, it only allows one thread to access some data at any given -time. Mutexes have a reputation for being hard to use, since there’s a lot you -have to remember: - -1. You have to remember to attempt to acquire the lock before using the data. -2. Once you’re done with the data that’s being guarded by the mutex, you have - to remember to unlock the data so that other threads can acquire the lock. - -For a real-world example of a mutex, imagine a panel discussion at a conference -where there is only one microphone. Before a panelist may speak, they have to +exclusion”, as in, it only allows one thread to access some data at any given +time. In order to access the data in a mutex, a thread must first signal that +it wants access by asking to acquire the mutex’s *lock*. The lock is a data +structure that is part of the mutex that keeps track of who currently has +exclusive access to the data. We therefore describe the mutex as *guarding* the +data it holds via the locking system. 
+ +Mutexes have a reputation for being hard to use because there are some +rules you have to remember: + + + + +1. You must attempt to acquire the lock before using the data. +2. Once you’re done with the data that’s guarded by the mutex, you must unlock + the data so other threads can acquire the lock. + +For a real-world metaphor of a mutex, imagine a panel discussion at a +conference with only one microphone. Before a panelist may speak, they have to ask or signal that they would like to use the microphone. Once they get the microphone, they may talk for as long as they would like, then hand the -microphone to the next panelist who would like to speak. It would be rude for a -panelist to start shouting without having the microphone or to steal the -microphone before another panelist was finished. No one else would be able to -speak if a panelist forgot to hand the microphone to the next person when they -finished using it. If the management of the shared microphone went wrong in any -of these ways, the panel would not work as planned! +microphone to the next panelist who requests to speak. If a panelist forgets to +hand the microphone off when they’re finished with it, no one else is able to +speak. If management of the shared microphone goes wrong, the panel would not +work as planned! Management of mutexes can be incredibly tricky to get right, and that’s why so -many people are enthusiastic about channels. However, in Rust, we can’t get -locking and unlocking wrong, thanks to the type system and ownership. +many people are enthusiastic about channels. However, thanks to Rust’s type +system and ownership rules, we can’t get locking and unlocking wrong. #### The API of `Mutex` -Let’s look at an example of using a mutex in Listing 16-12, without involving -multiple threads for the moment: +Let’s start simply with an example of using a mutex in a single-threaded +context, shown in Listing 16-12: Filename: src/main.rs @@ -71,29 +86,39 @@ fn main() { Listing 16-12: Exploring the API of `Mutex` in a single threaded context for simplicity -Like many types, we create a `Mutex` through an associated function named -`new`. To access the data inside the mutex, we use the `lock` method to acquire -the lock. This call will block until it’s our turn to have the lock. This call -can fail if another thread was holding the lock and then that thread panicked. -In a similar way as we did in Listing 16-6 in the last section, we’re using -`unwrap()` for now, rather than better error handling. See Chapter 9 for better -tools. - -Once we have acquired the lock, we can treat the return value, named `num` in -this case, as a mutable reference to the data inside. The type system is how -Rust ensures that we acquire a lock before using this value: `Mutex` is -not an `i32`, so we *must* acquire the lock in order to be able to use the -`i32` value. We can’t forget; the type system won’t let us do otherwise. - -As you may have suspected, `Mutex` is a smart pointer. Well, more -accurately, the call to `lock` returns a smart pointer called `MutexGuard`. -This smart pointer implements `Deref` to point at our inner data, similar to -the other smart pointers we saw in Chapter 15. In addition, `MutexGuard` has a -`Drop` implementation that releases the lock. This way, we can’t forget to -release the lock. It happens for us automatically when the `MutexGuard` goes -out of scope, which it does at the end of the inner scope in Listing 16-12. 
We -can print out the mutex value and see that we were able to change the inner -`i32` to 6. +As with many types, we create a `Mutex` using the associated function `new`. +To access the data inside the mutex, we use the `lock` method to acquire the +lock. This call will block the current thread so that it can’t do any work +until it’s our turn to have the lock. + + + + +The call to `lock` would fail if another thread holding the lock panicked. In +that case, no one would ever be able to get the lock, so we’ve chosen to +`unwrap` and have this thread panic if we’re in that situation. + + + + +Once we’ve acquired the lock, we can treat the return value, named `num` in +this case, as a mutable reference to the data inside. The type system ensures +that we acquire a lock before using this value: `Mutex` is not an `i32`, +so we *must* acquire the lock in order to be able to use the `i32` value. We +can’t forget; the type system won’t let us do it otherwise. + +As you may suspect, `Mutex` is a smart pointer. More accurately, the call to +`lock` *returns* a smart pointer called `MutexGuard`. This smart pointer +implements `Deref` to point at our inner data, and also has a `Drop` +implementation that releases the lock automatically when `MutexGuard` goes out +of scope, which happens at the end of the inner scope in Listing 16-12. This +way, we don’t risk forgetting to release the lock and blocking it from use by +other threads, because it happens automatically. + +After dropping the lock, we can print out the mutex value and see that we were +able to change the inner `i32` to 6. #### Sharing a `Mutex` Between Multiple Threads @@ -115,7 +140,7 @@ fn main() { let mut handles = vec![]; for _ in 0..10 { - let handle = thread::spawn(|| { + let handle = thread::spawn(move || { let mut num = counter.lock().unwrap(); *num += 1; @@ -131,50 +156,28 @@ fn main() { } ``` -Listing 16-13: The start of a program having 10 threads -each increment a counter guarded by a `Mutex` +Listing 16-13: Ten threads each increment a counter +guarded by a `Mutex` We’re creating a `counter` variable to hold an `i32` inside a `Mutex`, like we did in Listing 16-12. Next, we’re creating 10 threads by mapping over a range of numbers. We use `thread::spawn` and give all the threads the same -closure: they’re each going to acquire a lock on the `Mutex` by calling the -`lock` method and then add 1 to the value in the mutex. When a thread finishes -running its closure, `num` will go out of scope and release the lock so that -another thread can acquire it. +closure, one that moves the counter into the thread, acquires a lock on the +`Mutex` by calling the `lock` method, and then adds 1 to the value in the +mutex. When a thread finishes running its closure, `num` will go out of scope +and release the lock so another thread can acquire it. -In the main thread, we’re collecting all the join handles like we did in -Listing 16-2, and then calling `join` on each of them to make sure all the -threads finish. At that point, the main thread will acquire the lock and print -out the result of this program. +In the main thread, we collect all the join handles like we did in Listing +16-2, and then call `join` on each to make sure all the threads finish. At that +point, the main thread will acquire the lock and print out the result of this +program. -We hinted that this example won’t compile, let’s find out why! +We hinted that this example won’t compile, now let’s find out why! 
-```text -error[E0373]: closure may outlive the current function, but it borrows -`counter`, which is owned by the current function - --> - | -9 | let handle = thread::spawn(|| { - | ^^ may outlive borrowed value `counter` -10 | let mut num = counter.lock().unwrap(); - | ------- `counter` is borrowed here - | -help: to force the closure to take ownership of `counter` (and any other -referenced variables), use the `move` keyword, as shown: - | let handle = thread::spawn(move || { -``` - -This is similar to the problem we solved in Listing 16-5. Given that we spin up -multiple threads, Rust can’t know how long the threads will run and whether -`counter` will still be valid when each thread tries to borrow it. The help -message has a reminder for how to solve this: we can use `move` to give -ownership to each thread. Let’s try it by making this change to the closure: - -```rust,ignore -thread::spawn(move || { -``` - -And trying to compile again. We’ll get different errors this time! + + ```text error[E0382]: capture of moved value: `counter` @@ -203,14 +206,14 @@ error[E0382]: use of moved value: `counter` error: aborting due to 2 previous errors ``` -`move` didn’t fix this program like it fixed Listing 16-5. Why not? This error -message is a little confusing to read, because it’s saying that the `counter` -value is moved into the closure, then is captured when we call `lock`. That -sounds like what we wanted, but it’s not allowed. +The error message is saying that the `counter` value is moved into the closure, +then is captured when we call `lock`. That sounds like what we wanted, but it’s +not allowed! -Let’s reason this out. Instead of making 10 threads in a `for` loop, let’s just -make two threads without a loop and see what happens then. Replace the first -`for` loop in Listing 16-13 with this code instead: +Let’s reason this out by simplifying the program. Instead of making 10 threads +in a `for` loop, let’s just make two threads without a loop and see what +happens then. Replace the first `for` loop in Listing 16-13 with this code +instead: ```rust,ignore let handle = thread::spawn(move || { @@ -228,10 +231,8 @@ let handle2 = thread::spawn(move || { handles.push(handle2); ``` -Here we’re making 2 threads, and we changed the variable names used with the -second thread to `handle2` and `num2`. We’re simplifying our example for the -moment to see if we can understand the error message we’re getting. This time, -compiling gives us: +We make two threads and change the variable names used with the second thread +to `handle2` and `num2`. When we run this time, compiling gives us: ```text error[E0382]: capture of moved value: `counter` @@ -261,23 +262,23 @@ error[E0382]: use of moved value: `counter` error: aborting due to 2 previous errors ``` -Aha! In the first error message, Rust is showing us that `counter` is moved -into the closure for the thread that goes with `handle`. That move is -preventing us from capturing `counter` when we try to call `lock` on it and -store the result in `num2`, which is in the second thread! So Rust is telling -us that we can’t move ownership of `counter` into multiple threads. This was -hard to see before since we were creating multiple threads in a loop, and Rust -can’t point to different threads in different iterations of the loop. +Aha! The first error message tells us that `counter` is moved into the closure +for the thread associated with `handle`. 
That move is preventing us from +capturing `counter` when we try to call `lock` on it and store the result in +`num2` in the second thread! So Rust is telling us that we can’t move ownership +of `counter` into multiple threads. This was hard to see before because our +threads were in a loop, and Rust can’t point to different threads in different +iterations of the loop. Let’s try to fix this with a multiple-ownership method +we saw in Chapter 15. #### Multiple Ownership with Multiple Threads -In Chapter 15, we were able to have multiple ownership of a value by using the -smart pointer `Rc` to create a reference-counted value. We mentioned in -Chapter 15 that `Rc` was only for single-threaded contexts, but let’s try -using `Rc` in this case anyway and see what happens. We’ll wrap the -`Mutex` in `Rc` in Listing 16-14, and clone the `Rc` before moving -ownership to the thread. We’ll switch back to the `for` loop for creating the -threads, and keep the `move` keyword with the closure: +In Chapter 15, we were able to give a value multiple owners by using the smart +pointer `Rc` to create a reference-counted value. Let’s try to do the same +here and see what happens. We’ll wrap the `Mutex` in `Rc` in Listing +16-14, and clone the `Rc` before moving ownership to the thread. Now we’ve +seen the errors, we’ll also switch back to using the `for` loop, and we’ll keep +the `move` keyword with the closure: Filename: src/main.rs @@ -291,7 +292,7 @@ fn main() { let mut handles = vec![]; for _ in 0..10 { - let counter = counter.clone(); + let counter = Rc::clone(&counter); let handle = thread::spawn(move || { let mut num = counter.lock().unwrap(); @@ -331,42 +332,47 @@ std::marker::Send` is not satisfied = note: required by `std::thread::spawn` ``` -Wow, that’s quite wordy! Some important parts to pick out: the first note says -`Rc> cannot be sent between threads safely`. The reason for this is -in the error message, which, once distilled, says `the trait bound Send is not -satisfied`. We’re going to talk about `Send` in the next section; it’s one of -the traits that ensures the types we use with threads are meant for use in -concurrent situations. +Wow, that’s quite wordy! Here are some important parts to pick out: the first +note says `Rc> cannot be sent between threads safely`. The reason +for this is in the error message, which, once distilled, says `the trait bound +Send is not satisfied`. We’re going to talk about `Send` in the next section; +it’s one of the traits that ensures the types we use with threads are meant for +use in concurrent situations. + + + Unfortunately, `Rc` is not safe to share across threads. When `Rc` -manages the reference count, it has to add to the count for each call to -`clone` and subtract from the count when each clone is dropped. `Rc` doesn’t -use any concurrency primitives to make sure that changes to the count happen in -an operation that couldn’t be interrupted by another thread. This could lead to -subtle bugs where the counts are wrong, which could lead to memory leaks or -dropping a value before we’re done with it. So what if we had a type that was -exactly like `Rc`, but made changes to the reference count in a thread-safe -way? +manages the reference count, it adds to the count for each call to `clone` and +subtracts from the count when each clone is dropped, but it doesn’t use any +concurrency primitives to make sure that changes to the count can’t be +interrupted by another thread. 
This could lead to wrong counts: subtle bugs +that could in turn lead to memory leaks or a value being dropped before we’re +done with it. What we need is a type exactly like `Rc`, but that makes +changes to the reference count in a thread-safe way. #### Atomic Reference Counting with `Arc` -If you thought that question sounded like a leading one, you’d be right. There -is a type like `Rc` that’s safe to use in concurrent situations: `Arc`. -The ‘a’ stands for *atomic*, so it’s an *atomically reference counted* type. -Atomics are an additional kind of concurrency primitive that we won’t cover -here; see the standard library documentation for `std::sync::atomic` for more -details. The gist of it is this: atomics work like primitive types, but are -safe to share across threads. - -Why aren’t all primitive types atomic, and why aren’t all standard library -types implemented to use `Arc` by default? Thread safety comes with a -performance penalty that we only want to pay when we need it. If we’re only -doing operations on values within a single thread, our code can run faster -since it doesn’t need the guarantees that atomics give us. - -Back to our example: `Arc` and `Rc` are identical except for the atomic -internals of `Arc`. Their API is the same, so we can change the `use` line -and the call to `new`. The code in Listing 16-15 will finally compile and run: +Luckily for us, there *is* a type like `Rc` that’s safe to use in concurrent +situations: `Arc`. The ‘a’ stands for *atomic*, meaning it’s an *atomically +reference counted* type. Atomics are an additional kind of concurrency +primitive that we won’t cover in detail here; see the standard library +documentation for `std::sync::atomic` for more details. What you need to know +here is that atomics work like primitive types, but are safe to share across +threads. + +You might then wonder why all primitive types aren’t atomic, and why standard +library types aren’t implemented to use `Arc` by default. The reason is that +thread safety comes with a performance penalty that you only want to pay when +you really need to. If you’re only doing operations on values within a single +thread, your code can run faster if it doesn’t have to enforce the guarantees +atomics provide. + +Back to our example: `Arc` and `Rc` have the same API, so we fix our +program by changing the `use` line and the call to `new`. The code in Listing +16-15 will finally compile and run: Filename: src/main.rs @@ -379,7 +385,7 @@ fn main() { let mut handles = vec![]; for _ in 0..10 { - let counter = counter.clone(); + let counter = Arc::clone(&counter); let handle = thread::spawn(move || { let mut num = counter.lock().unwrap(); @@ -405,36 +411,43 @@ This will print: Result: 10 ``` -We did it! We counted from 0 to 10, which may not seem very impressive, but we -learned a lot about `Mutex` and thread safety along the way! The structure -that we’ve built in this example could be used to do more complicated -operations than just incrementing a counter. Calculations that can be divided -up into independent parts could be split across threads in this way, and we can -use a `Mutex` to allow each thread to update the final result with its part. - -You may have noticed that, since `counter` is immutable but we could get a -mutable reference to the value inside it, this means `Mutex` provides -interior mutability, like the `Cell` family does. 
In the same way that we used
-`RefCell` in Chapter 15 to be able to mutate contents inside an `Rc`, we
-use `Mutex` to be able to mutate contents inside of an `Arc`.
-
-Recall that `Rc` did not prevent every possible problem: we also talked
-about the possibility of creating reference cycles where two `Rc` values
-refer to each other, which would cause a memory leak. We have a similar problem
-with `Mutex` that Rust also doesn’t prevent: deadlocks. A *deadlock* is a
-situation in which an operation needs to lock two resources, and two threads
-have each acquired one of the locks and will now wait for each other forever.
-If you’re interested in this topic, try creating a Rust program that has a
-deadlock, then research deadlock mitigation strategies that apply to the use of
-mutexes in any language and try implementing them in Rust. The standard library
-API documentation for `Mutex` and `MutexGuard` will have useful information.
-
-Rust’s type system and ownership has made sure that our threads have exclusive
-access to the shared value when they’re updating it, so the threads won’t
-overwrite each other’s answers in unpredictable ways. It took us a while to
-work with the compiler to get everything right, but we’ve saved future time
+We did it! We counted from 0 to 10, which may not seem very impressive, but it
+did teach us a lot about `Mutex<T>` and thread safety! This structure could
+also be used to do more complicated operations than just incrementing a
+counter: these methods allow us to divide calculations up into independent
+parts, which we could split across threads, and then we can use a `Mutex<T>` to
+have each thread update the final result with its part.
+
+### Similarities between `RefCell<T>`/`Rc<T>` and `Mutex<T>`/`Arc<T>`
+
+You may have noticed that `counter` is immutable but we could get a mutable
+reference to the value inside it; this means `Mutex<T>` provides interior
+mutability, like the `Cell` family does. In the same way we used `RefCell<T>`
+in Chapter 15 to allow us to mutate contents inside an `Rc<T>`, we use
+`Mutex<T>` to mutate contents inside of an `Arc<T>`.
+
+Another thing to note is that Rust can’t protect us from all kinds of logic
+errors when using `Mutex<T>`. Recall from Chapter 15 that using `Rc<T>` came
+with the risk of creating reference cycles, where two `Rc<T>` values refer to
+each other, causing memory leaks. Similarly, `Mutex<T>` comes with the risk of
+*deadlocks*. These occur when an operation needs to lock two resources and two
+threads have each acquired one of the locks, causing them to wait for each
+other forever. If you’re interested in this topic, try creating a Rust program
+that has a deadlock, then research deadlock mitigation strategies for mutexes
+in any language, and have a go at implementing them in Rust. The standard
+library API documentation for `Mutex<T>` and `MutexGuard` will have useful
+information.
+
+
+
+
Let’s round out this chapter by talking about the `Send` and `Sync` traits and
how we could use them with custom types.
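Before we move on, for readers who want a head start on the deadlock exercise suggested above, here is one minimal way to provoke a deadlock (a sketch of our own, not one of the chapter’s listings; the 100 millisecond sleeps are only there to make the bad interleaving reliable): two threads each take one lock and then wait forever for the lock the other thread is holding.

```rust
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;

fn main() {
    let a = Arc::new(Mutex::new(0));
    let b = Arc::new(Mutex::new(0));

    let (a1, b1) = (Arc::clone(&a), Arc::clone(&b));
    let t1 = thread::spawn(move || {
        let _guard_a = a1.lock().unwrap();         // take lock A first...
        thread::sleep(Duration::from_millis(100)); // ...give thread 2 time to take B...
        let _guard_b = b1.lock().unwrap();         // ...then wait forever for B
    });

    let (a2, b2) = (Arc::clone(&a), Arc::clone(&b));
    let t2 = thread::spawn(move || {
        let _guard_b = b2.lock().unwrap();         // take lock B first...
        thread::sleep(Duration::from_millis(100)); // ...give thread 1 time to take A...
        let _guard_a = a2.lock().unwrap();         // ...then wait forever for A
    });

    t1.join().unwrap();
    t2.join().unwrap();
    println!("never reached: the two threads are deadlocked");
}
```

Each thread acquires its first lock, sleeps long enough for the other to do the same, and then blocks on its second `lock` call, so neither `join` ever returns. The mitigation strategies you research, such as always acquiring locks in the same order, are about ruling out exactly this interleaving.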
diff --git a/src/doc/book/second-edition/src/ch16-04-extensible-concurrency-sync-and-send.md b/src/doc/book/second-edition/src/ch16-04-extensible-concurrency-sync-and-send.md index 7d4028a0e3..4603a668b4 100644 --- a/src/doc/book/second-edition/src/ch16-04-extensible-concurrency-sync-and-send.md +++ b/src/doc/book/second-edition/src/ch16-04-extensible-concurrency-sync-and-send.md @@ -1,87 +1,84 @@ ## Extensible Concurrency with the `Sync` and `Send` Traits -One interesting aspect of Rust’s concurrency model is that the language knows -*very* little about concurrency. Almost everything we’ve been talking about so -far has been part of the standard library, not the language itself. Because we -don’t need the language to provide everything we need to program in a -concurrent context, we’re not limited to the concurrency options that the -standard library or language provide: we can write our own or use ones others -have written. - -We said *almost* everything wasn’t in the language, so what is? There are two -traits, both in `std::marker`: `Sync` and `Send`. - -### `Send` for Indicating Ownership May Be Transferred to Another Thread - -The `Send` marker trait indicates that ownership of that type may be -transferred between threads. Almost every Rust type is `Send`, but there are -some exceptions. One type provided by the standard library that is not `Send` -is `Rc`: if we clone an `Rc` value and try to transfer ownership of the -clone to another thread, both threads might update the reference count at the -same time. As we mentioned in the previous section, `Rc` is implemented for -use in single-threaded situations where you don’t want to pay the performance -penalty of having a threadsafe reference count. - -Because `Rc` is not marked `Send`, Rust’s type system and trait bounds -ensure that we can never forget and accidentally send an `Rc` value across -threads unsafely. We tried to do this in Listing 16-14, and we got an error -that said `the trait Send is not implemented for Rc>`. When we -switched to `Arc`, which is `Send`, the code compiled. - -Any type that is composed entirely of `Send` types is automatically marked as -`Send` as well. Almost all primitive types are `Send`, aside from raw pointers, -which we’ll discuss in Chapter 19. Most standard library types are `Send`, -aside from `Rc`. - -### `Sync` for Indicating Access from Multiple Threads is Safe - -The `Sync` marker trait indicates that a type is safe to have references to a -value from multiple threads. Another way to say this is for any type `T`, `T` -is `Sync` if `&T` (a reference to `T`) is `Send` so that the reference can be -sent safely to another thread. In a similar manner as `Send`, primitive types -are `Sync` and types composed entirely of types that are `Sync` are also `Sync`. +Interestingly, the Rust language itself knows *very* little about concurrency. +Almost everything we’ve talked about so far in this chapter has been part of +the standard library, not the language. Our concurrency options are not limited +to the language or the standard library, meaning we can write our own +concurrency options or use ones others have written. + +There *are* two concurrency concepts embedded in the language, however: the +`std::marker` traits `Sync` and `Send`. + +### Allowing Transference of Ownership Between Threads with `Send` + +The `Send` marker trait indicates that ownership of the type implementing +`Send` may be transferred between threads. 
Almost every Rust type is `Send`, +but there are some exceptions, including `Rc`: this cannot be `Send` because +if we cloned an `Rc` value and tried to transfer ownership of the clone to +another thread, both threads might update the reference count at the same time. +For this reason, `Rc` is implemented for use in single-threaded situations +where you don’t want to pay the threadsafe performance penalty. + +In this way Rust’s type system and trait bounds ensure we can never +accidentally send an `Rc` value across threads unsafely. When we tried to do +this in Listing 16-14, we got an error that said `the trait Send is not +implemented for Rc>`. When we switched to `Arc`, which is `Send`, +the code compiled. + +Any type composed entirely of `Send` types is automatically marked as `Send` as +well. Almost all primitive types are `Send`, aside from raw pointers, which +we’ll discuss in Chapter 19. + +### Allowing Access from Multiple Threads with `Sync` + +The `Sync` marker trait indicates that it is safe for the type implementing +`Sync` to be referenced from multiple threads. Another way to say this is that +any type `T` is `Sync` if `&T` (a reference to `T`) is `Send`, meaning the +reference can be sent safely to another thread. In a similar manner as `Send`, +primitive types are `Sync` and types composed entirely of types that are `Sync` +are also `Sync`. `Rc` is also not `Sync`, for the same reasons that it’s not `Send`. `RefCell` (which we talked about in Chapter 15) and the family of related -`Cell` types are not `Sync`. The implementation of the borrow checking at -runtime that `RefCell` does is not threadsafe. `Mutex` is `Sync`, and can +`Cell` types are not `Sync`. The implementation of borrow checking that +`RefCell` does at runtime is not threadsafe. `Mutex` is `Sync`, and can be used to share access with multiple threads as we saw in the previous section. ### Implementing `Send` and `Sync` Manually is Unsafe -Usually, we don’t need to implement the `Send` and `Sync` traits, since types -that are made up of `Send` and `Sync` traits are automatically also `Send` and -`Sync`. Because they’re marker traits, they don’t even have any methods to -implement. They’re just useful for enforcing concurrency-related invariants. +Because types that are made up of `Send` and `Sync` traits are automatically +also `Send` and `Sync`, we don’t have to implement those traits ourselves. As +marker traits, they don’t even have any methods to implement. They’re just +useful for enforcing concurrency-related invariants. -Implementing the guarantees that these traits are markers for involves -implementing unsafe Rust code. We’re going to be talking about using unsafe -Rust code in Chapter 19; for now, the important information is that building -new concurrent types that aren’t made up of `Send` and `Sync` parts requires -careful thought to make sure the safety guarantees are upheld. [The Nomicon] -has more information about these guarantees and how to uphold them. +Manually implementing these traits involves implementing unsafe Rust code. +We’re going to be talking about using unsafe Rust code in Chapter 19; for now, +the important information is that building new concurrent types not made up of +`Send` and `Sync` parts requires careful thought, in order to uphold the safety +guarantees. [The Nomicon] has more information about these guarantees and how +to uphold them. 
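One way to see the “automatically marked” behaviour for yourself is a small compile-time check like the following sketch (the `Stats` type and the `assert_send`/`assert_sync` helpers are invented here for illustration; they are not part of the standard library): a type whose fields are all `Send` and `Sync` gets both marker traits for free, while the same claim about `Rc<T>` is rejected by the compiler.

```rust
// A type made only of parts that are `Send` and `Sync`; the compiler marks
// it `Send` and `Sync` automatically, with no manual (unsafe) impl needed.
struct Stats {
    count: u64,
    label: String,
}

// Generic helpers that only compile if `T` has the given marker trait.
fn assert_send<T: Send>() {}
fn assert_sync<T: Sync>() {}

fn main() {
    assert_send::<Stats>(); // fine: u64 and String are both Send
    assert_sync::<Stats>(); // fine: u64 and String are both Sync

    // Uncommenting either line fails to compile, because `Rc` is neither
    // `Send` nor `Sync`:
    // assert_send::<std::rc::Rc<u64>>();
    // assert_sync::<std::rc::Rc<u64>>();

    let s = Stats { count: 1, label: String::from("ok") };
    println!("{} {}", s.count, s.label);
}
```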
[The Nomicon]: https://doc.rust-lang.org/stable/nomicon/ ## Summary -This isn’t the last time we’ll see concurrency in this book; the project in +This isn’t the last we’ll see of concurrency in this book; the project in Chapter 20 will use these concepts in a more realistic situation than the -smaller examples we discussed in this chapter. +smaller examples discussed here. -As we mentioned, since very little of how Rust deals with concurrency has to be -part of the language, there are many concurrency solutions implemented as -crates. These evolve more quickly than the standard library; search online for -the current state-of-the-art crates for use in multithreaded situations. +As we mentioned, since very little of how Rust deals with concurrency is part +of the language, many concurrency solutions are implemented as crates. These +evolve more quickly than the standard library; search online for the current +state-of-the-art crates to use in multithreaded situations. Rust provides channels for message passing and smart pointer types like `Mutex` and `Arc` that are safe to use in concurrent contexts. The type -system and the borrow checker will make sure the code we write using these -solutions won’t have data races or invalid references. Once we get our code -compiling, we can rest assured that our code will happily run on multiple -threads without the kinds of hard-to-track-down bugs common in other -programming languages. Concurrent programming is no longer something to be -afraid of: go forth and make your programs concurrent, fearlessly! +system and the borrow checker will make sure the code using these solutions +won’t end up with data races or invalid references. Once we get our code +compiling, we can rest assured that it will happily run on multiple threads +without the kinds of hard-to-track-down bugs common in other languages. +Concurrent programming is no longer something to be afraid of: go forth and +make your programs concurrent, fearlessly! Next, let’s talk about idiomatic ways to model problems and structure solutions as your Rust programs get bigger, and how Rust’s idioms relate to those you diff --git a/src/doc/book/second-edition/src/ch18-01-all-the-places-for-patterns.md b/src/doc/book/second-edition/src/ch18-01-all-the-places-for-patterns.md index ada68dddba..352d2ed17e 100644 --- a/src/doc/book/second-edition/src/ch18-01-all-the-places-for-patterns.md +++ b/src/doc/book/second-edition/src/ch18-01-all-the-places-for-patterns.md @@ -86,7 +86,7 @@ Note that `if let` can also introduce shadowed variables like `match` arms can: the value inside the `Ok` variant. This also means the `if age > 30` condition needs to go within the block; we aren’t able to combine these two conditions into `if let Ok(age) = age && age > 30` since the shadowed `age` that we want -to compare to 30 isn’t valid until the new scope starts with the curly brace. +to compare to 30 isn’t valid until the new scope starts with the curly bracket. Also note that conditionals with many cases like these are not as powerful as `match` expression since exhaustiveness is not checked by the compiler. If we diff --git a/src/doc/book/second-edition/src/ch18-02-refutability.md b/src/doc/book/second-edition/src/ch18-02-refutability.md index baa1a68574..525b8ca29f 100644 --- a/src/doc/book/second-edition/src/ch18-02-refutability.md +++ b/src/doc/book/second-edition/src/ch18-02-refutability.md @@ -49,8 +49,8 @@ We didn’t cover (and couldn’t cover!) 
every valid value with the pattern `Some(x)`, so Rust will rightfully complain. If we have a refutable pattern, instead of using `let`, we can use `if let`. -That way, if the pattern doesn’t match, the code inside the curly braces won’t -execute. That code will only make sense and run if the value matches the +That way, if the pattern doesn’t match, the code inside the curly brackets +won’t execute. That code will only make sense and run if the value matches the pattern. Listing 18-8 shows how to fix the code in Listing 18-7 with `Some(x)` matching `some_option_value`. Using the refutable pattern `Some(x)` is allowed, since this example uses `if let`: diff --git a/src/doc/book/second-edition/src/ch19-02-advanced-lifetimes.md b/src/doc/book/second-edition/src/ch19-02-advanced-lifetimes.md index d79bf97f63..d5192a16ee 100644 --- a/src/doc/book/second-edition/src/ch19-02-advanced-lifetimes.md +++ b/src/doc/book/second-edition/src/ch19-02-advanced-lifetimes.md @@ -398,7 +398,7 @@ having to do with lifetimes and trait objects: When we must be explicit, we can add a lifetime bound on a trait object like `Box` with the syntax `Box` or `Box`, depending on what’s needed. Just as with the other bounds, this means that any -implementer of the `Foo` trait that has any references inside must have the +implementor of the `Foo` trait that has any references inside must have the lifetime specified in the trait object bounds as those references. Next, let’s take a look at some other advanced features dealing with traits! diff --git a/src/doc/book/second-edition/src/ch19-03-advanced-traits.md b/src/doc/book/second-edition/src/ch19-03-advanced-traits.md index 24602f639a..9f00cbcc16 100644 --- a/src/doc/book/second-edition/src/ch19-03-advanced-traits.md +++ b/src/doc/book/second-edition/src/ch19-03-advanced-traits.md @@ -7,7 +7,7 @@ details. Now that we know more Rust, we can get into the nitty-gritty. *Associated types* are a way of associating a type placeholder with a trait such that the trait method definitions can use these placeholder types in their -signatures. The implementer of a trait will specify the concrete type to be +signatures. The implementor of a trait will specify the concrete type to be used in this type’s place for the particular implementation. We’ve described most of the things in this chapter as being very rare. @@ -31,9 +31,9 @@ that has an associated type `Item` This says that the `Iterator` trait has an associated type named `Item`. `Item` is a placeholder type, and the return value of the `next` method will return -values of type `Option`. Implementers of this trait will specify +values of type `Option`. Implementors of this trait will specify the concrete type for `Item`, and the `next` method will return an `Option` -containing a value of whatever type the implementer has specified. +containing a value of whatever type the implementor has specified. #### Associated Types Versus Generics @@ -318,121 +318,298 @@ Rust cannot prevent a trait from having a method with the same name as another trait’s method, nor can it prevent us from implementing both of these traits on one type. We can also have a method implemented directly on the type with the same name as well! In order to be able to call each of the methods with the -same name, then, we need to tell Rust which one we want to use. 
Consider the -code in Listing 19-27 where traits `Foo` and `Bar` both have method `f` and we -implement both traits on struct `Baz`, which also has a method named `f`: +same name, then, we need to tell Rust which one we want to use. + +Consider the code in Listing 19-27 where we've defined two traits, `Pilot` and +`Wizard`, that both have a method called `fly`. We then implement both traits +on a type `Human` that itself already has a method named `fly` implemented on +it. Each `fly` method does something different: Filename: src/main.rs ```rust -trait Foo { - fn f(&self); +trait Pilot { + fn fly(&self); } -trait Bar { - fn f(&self); +trait Wizard { + fn fly(&self); } -struct Baz; +struct Human; -impl Foo for Baz { - fn f(&self) { println!("Baz’s impl of Foo"); } +impl Pilot for Human { + fn fly(&self) { + println!("This is your captain speaking."); + } } -impl Bar for Baz { - fn f(&self) { println!("Baz’s impl of Bar"); } +impl Wizard for Human { + fn fly(&self) { + println!("Up!"); + } } -impl Baz { - fn f(&self) { println!("Baz's impl"); } +impl Human { + fn fly(&self) { + println!("*waving arms furiously*"); + } } +``` + +Listing 19-27: Two traits defined to have a `fly` method, +and implementations of those traits on the `Human` type in addition to a `fly` +method on `Human` directly +When we call `fly` on an instance of `Human`, the compiler defaults to calling +the method that is directly implemented on the type, as shown in Listing 19-28: + +Filename: src/main.rs + +```rust +# trait Pilot { +# fn fly(&self); +# } +# +# trait Wizard { +# fn fly(&self); +# } +# +# struct Human; +# +# impl Pilot for Human { +# fn fly(&self) { +# println!("This is your captain speaking."); +# } +# } +# +# impl Wizard for Human { +# fn fly(&self) { +# println!("Up!"); +# } +# } +# +# impl Human { +# fn fly(&self) { +# println!("*waving arms furiously*"); +# } +# } +# fn main() { - let b = Baz; - b.f(); + let person = Human; + person.fly(); } ``` -Listing 19-27: Implementing two traits that both have a -method with the same name as a method defined on the struct directly +Listing 19-28: Calling `fly` on an instance of +`Human` -For the implementation of the `f` method for the `Foo` trait on `Baz`, we’re -printing out `Baz's impl of Foo`. For the implementation of the `f` method for -the `Bar` trait on `Baz`, we’re printing out `Baz's impl of Bar`. The -implementation of `f` directly on `Baz` prints out `Baz's impl`. What should -happen when we call `b.f()`? In this case, Rust will always use the -implementation on `Baz` directly and will print out `Baz's impl`. +Running this will print out `*waving arms furiously*`, which shows that Rust +called the `fly` method implemented on `Human` directly. -In order to be able to call the `f` method from `Foo` and the `f` method from -`Baz` rather than the implementation of `f` directly on `Baz`, we need to use -the *fully qualified syntax* for calling methods. It works like this: for any -method call like: +In order to call the `fly` methods from either the `Pilot` trait or the +`Wizard` trait, we need to use more explicit syntax in order to specify which +`fly` method we mean. 
This syntax is demonstrated in Listing 19-29: -```rust,ignore -receiver.method(args); +Filename: src/main.rs + +```rust +# trait Pilot { +# fn fly(&self); +# } +# +# trait Wizard { +# fn fly(&self); +# } +# +# struct Human; +# +# impl Pilot for Human { +# fn fly(&self) { +# println!("This is your captain speaking."); +# } +# } +# +# impl Wizard for Human { +# fn fly(&self) { +# println!("Up!"); +# } +# } +# +# impl Human { +# fn fly(&self) { +# println!("*waving arms furiously*"); +# } +# } +# +fn main() { + let person = Human; + Pilot::fly(&person); + Wizard::fly(&person); + person.fly(); +} +``` + +Listing 19-29: Specifying which trait's `fly` method we +want to call + +Specifying the trait name before the method name clarifies to Rust which +implementation of `fly` we want to call. We could also choose to write +`Human::fly(&person)`, which is equivalent to `person.fly()` that we had in +Listing 19-28, but is a bit longer to write if we don't need to disambiguate. + +Running this code will print: + +```text +This is your captain speaking. +Up! +*waving arms furiously* ``` -We can fully qualify the method call like this: +Because the `fly` method takes a `self` parameter, if we had two *types* that +both implement one *trait*, Rust can figure out which implementation of a trait +to use based on the type of `self`. + +However, associated functions that are part of traits don't have a `self` +parameter. When two types in the same scope implement that trait, Rust can't +figure out which type we mean unless we use *fully qualified syntax*. For +example, take the `Animal` trait in Listing 19-30 that has the associated +function `baby_name`, the implementation of `Animal` for the struct `Dog`, and +the associated function `baby_name` defined on `Dog` directly: + +Filename: src/main.rs + +```rust +trait Animal { + fn baby_name() -> String; +} + +struct Dog; + +impl Dog { + fn baby_name() -> String { + String::from("Spot") + } +} + +impl Animal for Dog { + fn baby_name() -> String { + String::from("puppy") + } +} + +fn main() { + println!("A baby dog is called a {}", Dog::baby_name()); +} +``` + +Listing 19-30: A trait with an associated function and a +type that has an associated function with the same name that also implements +the trait + +This code is for an animal shelter where they want to give all puppies the name +Spot, which is implemented in the `baby_name` associated function that is +defined on `Dog`. The `Dog` type also implements the trait `Animal`, which +describes characteristics that all animals have. Baby dogs are called puppies, +and that is expressed in the implementation of the `Animal` trait on `Dog` in +the `baby_name` function associated with the `Animal` trait. + +In `main`, we're calling the `Dog::baby_name` function, which calls the +associated function defined on `Dog` directly. This code prints: + +```text +A baby dog is called a Spot +``` + +This isn't really what we wanted, in this case we want to call the `baby_name` +function that's part of the `Animal` trait that we implemented on `Dog`, so +that we can print `A baby dog is called a puppy`. 
The technique we used in +Listing 19-29 doesn't help here; if we change `main` to be the code in Listing +19-31: + +Filename: src/main.rs ```rust,ignore -::method(receiver, args); +fn main() { + println!("A baby dog is called a {}", Animal::baby_name()); +} ``` -So in order to disambiguate and be able to call all the `f` methods defined in -Listing 19-27, we specify that we want to treat the type `Baz` as each trait -within angle brackets, then use two colons, then call the `f` method and pass -the instance of `Baz` as the first argument. Listing 19-28 shows how to call -`f` from `Foo` and then `f` from `Bar` on `b`: +Listing 19-31: Attempting to call the `baby_name` +function from the `Animal` trait, but Rust doesn't know which implementation to +use + +Because `Animal::baby_name` is an associated function rather than a method, and +thus doesn't have a `self` parameter, Rust has no way to figure out which +implementation of `Animal::baby_name` we want. We'll get this compiler error: + +```text +error[E0283]: type annotations required: cannot resolve `_: Animal` + --> src/main.rs + | +20 | println!("A baby dog is called a {}", Animal::baby_name()); + | ^^^^^^^^^^^^^^^^^ + | + = note: required by `Animal::baby_name` +``` + +In order to tell Rust that we want to use the implementation of `Animal` for +`Dog`, we need to use *fully qualified syntax*, which is the most specific we +can be when calling a function. Listing 19-32 demonstrates how to use fully +qualified syntax in this case: Filename: src/main.rs ```rust -# trait Foo { -# fn f(&self); -# } -# trait Bar { -# fn f(&self); +# trait Animal { +# fn baby_name() -> String; # } -# struct Baz; -# impl Foo for Baz { -# fn f(&self) { println!("Baz’s impl of Foo"); } -# } -# impl Bar for Baz { -# fn f(&self) { println!("Baz’s impl of Bar"); } +# +# struct Dog; +# +# impl Dog { +# fn baby_name() -> String { +# String::from("Spot") +# } # } -# impl Baz { -# fn f(&self) { println!("Baz's impl"); } +# +# impl Animal for Dog { +# fn baby_name() -> String { +# String::from("puppy") +# } # } # fn main() { - let b = Baz; - b.f(); - ::f(&b); - ::f(&b); + println!("A baby dog is called a {}", ::baby_name()); } ``` -Listing 19-28: Using fully qualified syntax to call the -`f` methods defined as part of the `Foo` and `Bar` traits +Listing 19-32: Using fully qualified syntax to specify +that we want to call the `baby_name` function from the `Animal` trait as +implemented on `Dog` -This will print: +We're providing Rust with a type annotation within the angle brackets, and +we're specifying that we want to call the `baby_name` method from the `Animal` +trait as implemented on `Dog` by saying that we want to treat the `Dog` type as +an `Animal` for this function call. This code will now print what we want: ```text -Baz's impl -Baz’s impl of Foo -Baz’s impl of Bar +A baby dog is called a puppy ``` -We only need the `Type as` part if it’s ambiguous, and we only need the `<>` -part if we need the `Type as` part. So if we only had the `f` method directly -on `Baz` and the `Foo` trait implemented on `Baz` in scope, we could call the -`f` method in `Foo` by using `Foo::f(&b)` since we wouldn’t have to -disambiguate from the `Bar` trait. 
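The angle-bracketed qualifier appears to have been lost in this hunk's rendering (the call shows up as `::baby_name()`); as the surrounding prose describes, it is written by treating `Dog` as an `Animal` inside angle brackets. A minimal standalone version of that call:

```rust
trait Animal {
    fn baby_name() -> String;
}

struct Dog;

impl Dog {
    fn baby_name() -> String {
        String::from("Spot")
    }
}

impl Animal for Dog {
    fn baby_name() -> String {
        String::from("puppy")
    }
}

fn main() {
    // Fully qualified syntax: use the Animal implementation on Dog.
    println!("A baby dog is called a {}", <Dog as Animal>::baby_name());
}
```

The general form, likewise, reads `<Type as Trait>::function(receiver_if_method, next_arg, ...)`, which is what the snippet just below is describing.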
+In general, fully qualified syntax is defined as: + +```rust,ignore +::function(receiver_if_method, next_arg, ...); +``` -We could also have called the `f` defined directly on `Baz` by using -`Baz::f(&b)`, but since that definition of `f` is the one that gets used by -default when we call `b.f()`, it’s not required to fully specify that -implementation if that’s what we want to call. +For associated functions, there would not be a `receiver`, there would only be +the list of other arguments. We could choose to use fully qualified syntax +everywhere that we call functions or methods. However, we're allowed to leave +out any part of this syntax that Rust is able to figure out from other +information in the program. We only need to use this more verbose syntax in +cases where there are multiple implementations that use the same name and Rust +needs help in order to know which implementation we want to call. ### Supertraits to Use One Trait’s Functionality Within Another Trait @@ -460,7 +637,7 @@ In the implementation of `outline_print`, since we want to be able to use the `OutlinePrint` trait will only work for types that also implement `Display` and provide the functionality that `OutlinePrint` needs. We can do that in the trait definition by specifying `OutlinePrint: Display`. It’s like adding a -trait bound to the trait. Listing 19-29 shows an implementation of the +trait bound to the trait. Listing 19-33 shows an implementation of the `OutlinePrint` trait: ```rust @@ -479,7 +656,7 @@ trait OutlinePrint: fmt::Display { } ``` -Listing 19-29: Implementing the `OutlinePrint` trait that +Listing 19-33: Implementing the `OutlinePrint` trait that requires the functionality from `Display` Because we’ve specified that `OutlinePrint` requires the `Display` trait, we @@ -554,7 +731,7 @@ is elided at compile time. For example, if we wanted to implement `Display` on `Vec`, we can make a `Wrapper` struct that holds an instance of `Vec`. Then we can implement -`Display` on `Wrapper` and use the `Vec` value as shown in Listing 19-30: +`Display` on `Wrapper` and use the `Vec` value as shown in Listing 19-34: Filename: src/main.rs @@ -575,7 +752,7 @@ fn main() { } ``` -Listing 19-30: Creating a `Wrapper` type around +Listing 19-34: Creating a `Wrapper` type around `Vec` to be able to implement `Display` The implementation of `Display` uses `self.0` to access the inner `Vec`, and diff --git a/src/doc/book/second-edition/src/ch19-04-advanced-types.md b/src/doc/book/second-edition/src/ch19-04-advanced-types.md index 0ec1d3e90e..8520a63806 100644 --- a/src/doc/book/second-edition/src/ch19-04-advanced-types.md +++ b/src/doc/book/second-edition/src/ch19-04-advanced-types.md @@ -70,7 +70,7 @@ Box Writing this out in function signatures and as type annotations all over the place can be tiresome and error-prone. Imagine having a project full of code -like that in Listing 19-31: +like that in Listing 19-35: ```rust let f: Box = Box::new(|| println!("hi")); @@ -85,12 +85,12 @@ fn returns_long_type() -> Box { } ``` -Listing 19-31: Using a long type in many places +Listing 19-35: Using a long type in many places A type alias makes this code more manageable by reducing the amount of repetition this project has. 
Here, we’ve introduced an alias named `Thunk` for the verbose type, and we can replace all uses of the type with the shorter -`Thunk` as shown in Listing 19-32: +`Thunk` as shown in Listing 19-36: ```rust type Thunk = Box; @@ -107,7 +107,7 @@ fn returns_long_type() -> Thunk { } ``` -Listing 19-32: Introducing a type alias `Thunk` to reduce +Listing 19-36: Introducing a type alias `Thunk` to reduce repetition Much easier to read and write! Choosing a good name for a type alias can help @@ -178,7 +178,7 @@ This is read as “the function `bar` returns never,” and functions that retur never are called *diverging functions*. We can’t create values of the type `!`, so `bar` can never possibly return. What use is a type you can never create values for? If you think all the way back to Chapter 2, we had some code that -looked like this, reproduced here in Listing 19-33: +looked like this, reproduced here in Listing 19-37: ```rust # let guess = "3"; @@ -191,7 +191,7 @@ let guess: u32 = match guess.trim().parse() { # } ``` -Listing 19-33: A `match` with an arm that ends in +Listing 19-37: A `match` with an arm that ends in `continue` At the time, we skipped over some details in this code. In Chapter 6, we @@ -207,7 +207,7 @@ let guess = match guess.trim().parse() { What would the type of `guess` be here? It’d have to be both an integer and a string, and Rust requires that `guess` can only have one type. So what does `continue` return? Why are we allowed to return a `u32` from one arm in Listing -19-33 and have another arm that ends with `continue`? +19-37 and have another arm that ends with `continue`? As you may have guessed, `continue` has a value of `!`. That is, when Rust goes to compute the type of `guess`, it looks at both of the match arms. The former diff --git a/src/doc/book/second-edition/src/ch19-05-advanced-functions-and-closures.md b/src/doc/book/second-edition/src/ch19-05-advanced-functions-and-closures.md index cda8628025..38be2a2c85 100644 --- a/src/doc/book/second-edition/src/ch19-05-advanced-functions-and-closures.md +++ b/src/doc/book/second-edition/src/ch19-05-advanced-functions-and-closures.md @@ -9,7 +9,7 @@ We’ve talked about how to pass closures to functions, but you can pass regular functions to functions too! Functions coerce to the type `fn`, with a lower case ‘f’ not to be confused with the `Fn` closure trait. `fn` is called a *function pointer*. The syntax for specifying that a parameter is a function -pointer is similar to that of closures, as shown in Listing 19-34: +pointer is similar to that of closures, as shown in Listing 19-38: Filename: src/main.rs @@ -29,7 +29,7 @@ fn main() { } ``` -Listing 19-34: Using the `fn` type to accept a function +Listing 19-38: Using the `fn` type to accept a function pointer as an argument This prints `The answer is: 12`. We specify that the parameter `f` in diff --git a/src/doc/book/second-edition/src/ch20-01-single-threaded.md b/src/doc/book/second-edition/src/ch20-01-single-threaded.md index 07dfcc727a..df89351d3d 100644 --- a/src/doc/book/second-edition/src/ch20-01-single-threaded.md +++ b/src/doc/book/second-edition/src/ch20-01-single-threaded.md @@ -549,7 +549,7 @@ fn handle_connection(mut stream: TcpStream) { # let get = b"GET / HTTP/1.1\r\n"; // ...snip... 
- let (status_line, filename) = if buffer.starts_with(get) { + let (status_line, filename) = if buffer.starts_with(get) { ("HTTP/1.1 200 OK\r\n\r\n", "hello.html") } else { ("HTTP/1.1 404 NOT FOUND\r\n\r\n", "404.html") diff --git a/src/doc/book/second-edition/src/ch20-05-sending-requests-via-channels.md b/src/doc/book/second-edition/src/ch20-05-sending-requests-via-channels.md index 10fb20bf75..798a48e351 100644 --- a/src/doc/book/second-edition/src/ch20-05-sending-requests-via-channels.md +++ b/src/doc/book/second-edition/src/ch20-05-sending-requests-via-channels.md @@ -203,7 +203,7 @@ impl ThreadPool { let mut workers = Vec::with_capacity(size); for id in 0..size { - workers.push(Worker::new(id, receiver.clone())); + workers.push(Worker::new(id, Arc::clone(&receiver))); } ThreadPool { diff --git a/src/doc/book/second-edition/src/ch20-06-graceful-shutdown-and-cleanup.md b/src/doc/book/second-edition/src/ch20-06-graceful-shutdown-and-cleanup.md index c05a316b3c..9097dec1c4 100644 --- a/src/doc/book/second-edition/src/ch20-06-graceful-shutdown-and-cleanup.md +++ b/src/doc/book/second-edition/src/ch20-06-graceful-shutdown-and-cleanup.md @@ -307,22 +307,15 @@ fn main() { let listener = TcpListener::bind("127.0.0.1:8080").unwrap(); let pool = ThreadPool::new(4); - let mut counter = 0; - - for stream in listener.incoming() { - if counter == 2 { - println!("Shutting down."); - break; - } - - counter += 1; - + for stream in listener.incoming().take(2) { let stream = stream.unwrap(); pool.execute(|| { handle_connection(stream); }); } + + println!("Shutting down."); } ``` @@ -333,10 +326,10 @@ Only serving two requests isn’t behavior you’d like a production web server have, but this will let us see the graceful shutdown and cleanup working since we won’t be stopping the server with ctrl-C. -We’ve added a `counter` variable that we’ll increment every time we receive an -incoming TCP stream. If that counter reaches 2, we’ll stop serving requests and -instead break out of the `for` loop. The `ThreadPool` will go out of scope at -the end of `main`, and we’ll see the `drop` implementation run. +The `.take(2)` we added to `listener.incoming()` artificially limits the +iteration to the first 2 items at most. This combinator works for any +implementation of the `Iterator` trait. The `ThreadPool` will go out of scope +at the end of `main`, and we’ll see the `drop` implementation run. Start the server with `cargo run`, and make three requests. 
The third request should error, and in your terminal you should see output that looks like: @@ -400,22 +393,15 @@ fn main() { let listener = TcpListener::bind("127.0.0.1:8080").unwrap(); let pool = ThreadPool::new(4); - let mut counter = 0; - - for stream in listener.incoming() { - if counter == 2 { - println!("Shutting down."); - break; - } - - counter += 1; - + for stream in listener.incoming().take(2) { let stream = stream.unwrap(); pool.execute(|| { handle_connection(stream); }); } + + println!("Shutting down."); } fn handle_connection(mut stream: TcpStream) { @@ -494,7 +480,7 @@ impl ThreadPool { let mut workers = Vec::with_capacity(size); for id in 0..size { - workers.push(Worker::new(id, receiver.clone())); + workers.push(Worker::new(id, Arc::clone(&receiver))); } ThreadPool { diff --git a/src/doc/book/second-edition/theme/index.hbs b/src/doc/book/second-edition/theme/index.hbs index 925d5c161f..bc6f789024 100644 --- a/src/doc/book/second-edition/theme/index.hbs +++ b/src/doc/book/second-edition/theme/index.hbs @@ -10,7 +10,8 @@ - + + @@ -19,6 +20,13 @@ + + + + {{#each additional_css}} + + {{/each}} + + {{#if mathjax_support}} - + + {{/if}} + + + + @@ -95,18 +113,27 @@ document.write(unescape("%3Cscript src='jquery.js'%3E%3C/script%3E")); } + + + + + + {{#each additional_js}} + + {{/each}} + @@ -117,7 +144,7 @@
-
+

You are reading a draft of the next edition of TRPL. For more, go here.

-
{{{ content }}}
{{#previous}} - {{/previous}} {{#next}} - {{/next}} @@ -177,8 +205,35 @@ {{{livereload}}} + {{#if google_analytics}} + + {{/if}} + + {{#if playpens_editable}} + + + + + + {{/if}} + + {{#if is_print}} + + {{/if}} + - diff --git a/src/doc/book/second-edition/tools/docx-to-md.xsl b/src/doc/book/second-edition/tools/docx-to-md.xsl index d3f6c0668e..47ebc7dea0 100644 --- a/src/doc/book/second-edition/tools/docx-to-md.xsl +++ b/src/doc/book/second-edition/tools/docx-to-md.xsl @@ -14,6 +14,7 @@ + @@ -85,7 +86,7 @@ - + ``` @@ -94,7 +95,7 @@ - + @@ -102,7 +103,7 @@ - + diff --git a/man/rustc.1 b/src/doc/man/rustc.1 similarity index 100% rename from man/rustc.1 rename to src/doc/man/rustc.1 diff --git a/man/rustdoc.1 b/src/doc/man/rustdoc.1 similarity index 100% rename from man/rustdoc.1 rename to src/doc/man/rustdoc.1 diff --git a/src/doc/nomicon/src/other-reprs.md b/src/doc/nomicon/src/other-reprs.md index ebd33cafa9..a77ebf0d1f 100644 --- a/src/doc/nomicon/src/other-reprs.md +++ b/src/doc/nomicon/src/other-reprs.md @@ -36,8 +36,6 @@ still consumes a byte of space. * Tuple structs are like structs with regards to `repr(C)`, as the only difference from a struct is that the fields aren’t named. -* **If the type would have any [drop flags], they will still be added** - * This is equivalent to one of `repr(u*)` (see the next section) for enums. The chosen size is the default enum size for the target platform's C ABI. Note that enum representation in C is implementation defined, so this is really a "best diff --git a/src/doc/reference/src/SUMMARY.md b/src/doc/reference/src/SUMMARY.md index 289f59e1f2..c8dfe8630f 100644 --- a/src/doc/reference/src/SUMMARY.md +++ b/src/doc/reference/src/SUMMARY.md @@ -23,15 +23,47 @@ - [Items and attributes](items-and-attributes.md) - [Items](items.md) + - [Modules](items/modules.md) + - [Extern crates](items/extern-crates.md) + - [Use declarations](items/use-declarations.md) + - [Functions](items/functions.md) + - [Type aliases](items/type-aliases.md) + - [Structs](items/structs.md) + - [Enumerations](items/enumerations.md) + - [Unions](items/unions.md) + - [Constant items](items/constant-items.md) + - [Static items](items/static-items.md) + - [Traits](items/traits.md) + - [Implementations](items/implementations.md) + - [External blocks](items/external-blocks.md) - [Visibility and Privacy](visibility-and-privacy.md) - [Attributes](attributes.md) - [Statements and expressions](statements-and-expressions.md) - [Statements](statements.md) - [Expressions](expressions.md) + - [Literal expressions](expressions/literal-expr.md) + - [Path expressions](expressions/path-expr.md) + - [Block expressions](expressions/block-expr.md) + - [Operator expressions](expressions/operator-expr.md) + - [Array and index expressions](expressions/array-expr.md) + - [Tuple and index expressions](expressions/tuple-expr.md) + - [Struct expressions](expressions/struct-expr.md) + - [Enum variant expressions](expressions/enum-variant-expr.md) + - [Call expressions](expressions/call-expr.md) + - [Method call expressions](expressions/method-call-expr.md) + - [Field access expressions](expressions/field-expr.md) + - [Closure expressions](expressions/closure-expr.md) + - [Loop expressions](expressions/loop-expr.md) + - [Range expressions](expressions/range-expr.md) + - [If and if let expressions](expressions/if-expr.md) + - [Match expressions](expressions/match-expr.md) + - [Return expressions](expressions/return-expr.md) - [Type system](type-system.md) - [Types](types.md) + - [Dynamically Sized 
Types](dynamically-sized-types.md) + - [Interior mutability](interior-mutability.md) - [Subtyping](subtyping.md) - [Type coercions](type-coercions.md) diff --git a/src/doc/reference/src/attributes.md b/src/doc/reference/src/attributes.md index 7d7620bdf0..c921d47390 100644 --- a/src/doc/reference/src/attributes.md +++ b/src/doc/reference/src/attributes.md @@ -137,8 +137,8 @@ On an `extern` block, the following attributes are interpreted: - `link` - indicate that a native library should be linked to for the declarations in this block to be linked correctly. `link` supports an optional `kind` key with three possible values: `dylib`, `static`, and `framework`. See - [external blocks](items.html#external-blocks) for more about external blocks. Two - examples: `#[link(name = "readline")]` and + [external blocks](items/external-blocks.html) for more about external blocks. + Two examples: `#[link(name = "readline")]` and `#[link(name = "CoreFoundation", kind = "framework")]`. - `linked_from` - indicates what native library this block of FFI items is coming from. This attribute is of the form `#[linked_from = "foo"]` where @@ -408,20 +408,7 @@ pub mod m3 { Some primitive Rust operations are defined in Rust code, rather than being implemented directly in C or assembly language. The definitions of these operations have to be easy for the compiler to find. The `lang` attribute -makes it possible to declare these operations. For example, the `str` module -in the Rust standard library defines the string equality function: - -```rust,ignore -#[lang = "str_eq"] -pub fn eq_slice(a: &str, b: &str) -> bool { - // details elided -} -``` - -The name `str_eq` has a special meaning to the Rust compiler, and the presence -of this definition means that it will use this definition when generating calls -to the string equality function. - +makes it possible to declare these operations. The set of language items is currently considered unstable. A complete list of the built-in language items will be added in the future. diff --git a/src/doc/reference/src/behavior-considered-undefined.md b/src/doc/reference/src/behavior-considered-undefined.md index 9fb3d74148..1a6a8a6591 100644 --- a/src/doc/reference/src/behavior-considered-undefined.md +++ b/src/doc/reference/src/behavior-considered-undefined.md @@ -1,35 +1,42 @@ ## Behavior considered undefined -The following is a list of behavior which is forbidden in all Rust code, -including within `unsafe` blocks and `unsafe` functions. Type checking provides -the guarantee that these issues are never caused by safe code. +Rust code, including within `unsafe` blocks and `unsafe` functions is incorrect +if it exhibits any of the behaviors in the following list. It is the +programmer's responsibility when writing `unsafe` code that it is not possible +to let `safe` code exhibit these behaviors. -* Data races -* Dereferencing a null/dangling raw pointer -* Reads of [undef](http://llvm.org/docs/LangRef.html#undefined-values) - (uninitialized) memory -* Breaking the [pointer aliasing - rules](http://llvm.org/docs/LangRef.html#pointer-aliasing-rules) - on accesses through raw pointers (a subset of the rules used by C) +* Data races. +* Dereferencing a null or dangling raw pointer. +* Unaligned pointer reading and writing outside of [`read_unaligned`] + and [`write_unaligned`]. +* Reads of [undef] \(uninitialized) memory. +* Breaking the [pointer aliasing rules] on accesses through raw pointers; + a subset of the rules used by C. 
* `&mut T` and `&T` follow LLVM’s scoped [noalias] model, except if the `&T` - contains an `UnsafeCell`. Unsafe code must not violate these aliasing - guarantees. -* Mutating non-mutable data (that is, data reached through a shared reference or - data owned by a `let` binding), unless that data is contained within an `UnsafeCell`. + contains an [`UnsafeCell`]. +* Mutating non-mutable data — that is, data reached through a shared + reference or data owned by a `let` binding), unless that data is contained + within an [`UnsafeCell`]. * Invoking undefined behavior via compiler intrinsics: - * Indexing outside of the bounds of an object with `std::ptr::offset` - (`offset` intrinsic), with - the exception of one byte past the end which is permitted. - * Using `std::ptr::copy_nonoverlapping_memory` (`memcpy32`/`memcpy64` - intrinsics) on overlapping buffers -* Invalid values in primitive types, even in private fields/locals: - * Dangling/null references or boxes - * A value other than `false` (0) or `true` (1) in a `bool` - * A discriminant in an `enum` not included in the type definition - * A value in a `char` which is a surrogate or above `char::MAX` - * Non-UTF-8 byte sequences in a `str` + * Indexing outside of the bounds of an object with [`offset`] with + the exception of one byte past the end of the object. + * Using [`std::ptr::copy_nonoverlapping_memory`], a.k.a. the `memcpy32`and + `memcpy64` intrinsics, on overlapping buffers. +* Invalid values in primitive types, even in private fields and locals: + * Dangling or null references and boxes. + * A value other than `false` (`0`) or `true` (`1`) in a `bool`. + * A discriminant in an `enum` not included in the type definition. + * A value in a `char` which is a surrogate or above `char::MAX`. + * Non-UTF-8 byte sequences in a `str`. * Unwinding into Rust from foreign code or unwinding from Rust into foreign - code. Rust's failure system is not compatible with exception handling in + code. Rust's panic system is not compatible with exception handling in other languages. Unwinding must be caught and handled at FFI boundaries. [noalias]: http://llvm.org/docs/LangRef.html#noalias +[pointer aliasing rules]: http://llvm.org/docs/LangRef.html#pointer-aliasing-rules +[undef]: http://llvm.org/docs/LangRef.html#undefined-values +[`offset`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.offset +[`std::ptr::copy_nonoverlapping_memory`]: https://doc.rust-lang.org/std/ptr/fn.copy_nonoverlapping.html +[`UnsafeCell`]: https://doc.rust-lang.org/std/cell/struct.UnsafeCell.html +[`read_unaligned`]: https://doc.rust-lang.org/std/ptr/fn.read_unaligned.html +[`write_unaligned`]: https://doc.rust-lang.org/std/ptr/fn.write_unaligned.html diff --git a/src/doc/reference/src/comments.md b/src/doc/reference/src/comments.md index b855f6f0e3..f1e4bb37fe 100644 --- a/src/doc/reference/src/comments.md +++ b/src/doc/reference/src/comments.md @@ -11,16 +11,16 @@ >    | `/**/` >    | `/***/` > -> OUTER_LINE_DOC : +> INNER_LINE_DOC : >    `//!` ~[`\n` _IsolatedCR_]\* > -> OUTER_BLOCK_DOC : +> INNER_BLOCK_DOC : >    `/*!` ( _BlockCommentOrDoc_ | ~[`*/` _IsolatedCR_] )\* `*/` > -> INNER_LINE_DOC : +> OUTER_LINE_DOC : >    `///` (~`/` ~[`\n` _IsolatedCR_]\*)? 
> -> INNER_BLOCK_DOC : +> OUTER_BLOCK_DOC : >    `/**` (~`*` | _BlockCommentOrDoc_ ) > (_BlockCommentOrDoc_ | ~[`*/` _IsolatedCR_])\* `*/` > diff --git a/src/doc/reference/src/crates-and-source-files.md b/src/doc/reference/src/crates-and-source-files.md index 62f33343bf..a1cfd0c1b4 100644 --- a/src/doc/reference/src/crates-and-source-files.md +++ b/src/doc/reference/src/crates-and-source-files.md @@ -96,7 +96,7 @@ fn main() { ECMA-335 CLI model, a *library* in the SML/NJ Compilation Manager, a *unit* in the Owens and Flatt module system, or a *configuration* in Mesa. -[module]: items.html#modules +[module]: items/modules.html [module path]: paths.html [attributes]: items-and-attributes.html [unit]: types.html#tuple-types diff --git a/src/doc/reference/src/dynamically-sized-types.md b/src/doc/reference/src/dynamically-sized-types.md new file mode 100644 index 0000000000..e2fa8bcba1 --- /dev/null +++ b/src/doc/reference/src/dynamically-sized-types.md @@ -0,0 +1,32 @@ +# Dynamically Sized Types + +Most types have a fixed size that is known at compile time and implement the +trait [`Sized`][sized]. A type with a size that is known only at run-time is +called a _dynamically sized type_ (_DST_) or (informally) an unsized type. +[Slices] and [trait objects] are two examples of DSTs. Such types can only be used in certain cases: + +* [Pointer types] to DSTs are + sized but have twice the size of pointers to sized types + * Pointers to slices also store the number of elements of the slice. + * Pointers to trait objects also store a pointer to a vtable. +* DSTs can be provided as + type arguments when a bound of `?Sized`. By default any type parameter + has a `?Sized` bound. +* Traits may be implemented for DSTs. Unlike type parameters`Self: ?Sized` by default in trait + definitions. +* Structs may contain a DST as the + last field, this makes the struct itself a + DST. + +Notably: [variables], function parameters, [const] and [static] items must be +`Sized`. + +[sized]: the-sized-trait.html +[Slices]: types.html#array-and-slice-types +[trait objects]: types.html#trait-objects +[Pointer types]: types.html#pointer-types +[variables]: variables.html +[const]: items/constant-items.html +[static]: items/static-items.html diff --git a/src/doc/reference/src/expressions.md b/src/doc/reference/src/expressions.md index 0c84fbe94b..b2b4e03bd5 100644 --- a/src/doc/reference/src/expressions.md +++ b/src/doc/reference/src/expressions.md @@ -23,20 +23,17 @@ or _rvalue context_. The evaluation of an expression depends both on its own category and the context it occurs within. An lvalue is an expression that represents a memory location. These expressions -are [paths](#path-expressions) which refer to local variables, function and -method arguments, or static variables, -[dereferences](#the-dereference-operator) (`*expr`), [indexing -expressions](#index-expressions) (`expr[expr]`), [field -references](#field-expressions) (`expr.f`) and parenthesized lvalue +are paths which refer to local variables, function and method arguments, or +static variables, [dereferences] (`*expr`), [array indexing] expressions +(`expr[expr]`), [field] references (`expr.f`) and parenthesized lvalue expressions. All other expressions are rvalues. -The left operand of an [assignment](#assignment-expressions) or -[compound-assignment](#compound-assignment-expressions) expression is an lvalue -context, as is the single operand of a unary [borrow](#borrow-operators), and -the operand of any [implicit borrow](#implicit-borrows). 
The discriminant or -subject of a [match expression](#match-expressions) and right side of a `let` -binding may be an lvalue context, if ref bindings are made, but is otherwise an -rvalue context. All other expression contexts are rvalue contexts. +The left operand of an [assign]ment or [compound assignment] expression is an +lvalue context, as is the single operand of a unary [borrow], and the operand +of any [implicit borrow](#implicit-borrows). The discriminant or subject of a +[match] expression and right side of a `let` binding may be an lvalue context, +if ref bindings are made, but is otherwise an rvalue context. All other +expression contexts are rvalue contexts. ### Moved and copied types @@ -48,10 +45,10 @@ the following lvalues may be moved out of: * [Variables](variables.html) which are not currently borrowed. * [Temporary values](#temporary-lifetimes). -* [Fields](#field-expressions) of an lvalue which can be moved out of and +* [Field]s of an lvalue which can be moved out of and doesn't implement [`Drop`](the-drop-trait.html). -* The result of [dereferencing](#the-dereference-operator) an expression with - type `Box` and that can also be moved out of. +* The result of [dereferencing] an expression with type `Box` and that can + also be moved out of. Moving out of an lvalue deinitializes that location (if it comes from a local variable), so that it can't be read from again. In all other cases, trying to @@ -59,8 +56,8 @@ use an lvalue in an rvalue context is an error. ### Mutability -For an lvalue to be [assigned](#assignment-expressions) to, [mutably -borrowed](#borrow-operators), [implicitly mutably borrowed](#implicit-borrows) +For an lvalue to be [assign]ed to, mutably [borrow]ed, +[implicitly mutably borrowed](#implicit-borrows) or bound to a pattern containing `ref mut` it must be _mutable_, we call these contexts _mutable_ lvalue contexts, other lvalue contexts are called _immutable_. @@ -68,16 +65,15 @@ _immutable_. The following expressions can create mutable lvalues: * Mutable [variables](variables.html), which are not currently borrowed. -* [Mutable `static` items](items.html#mutable-statics). +* [Mutable `static` items](items/static-items.html#mutable-statics). * [Temporary values](#temporary-lifetimes). -* [Fields](#field-expressions), this evaluates the subexpression in a mutable - lvalue context. -* [Dereferences](#the-dereference-operator) of a `*mut T` pointer. +* [Field]s, this evaluates the subexpression in a mutable lvalue context. +* [Dereferences] of a `*mut T` pointer. * Dereference of a variable, or field of a variable, with type `&mut T`. Note: this is an exception to the requirement for the next rule. * Dereferences of a type that implements `DerefMut`, this then requires that the value being dereferenced is evaluated is a mutable lvalue context. -* [Indexing](#index-expressions) of a type that implements `DerefMut`, this +* [Array indexing] of a type that implements `DerefMut`, this then evaluates the value being indexed (but not the index) in mutable lvalue context. @@ -146,9 +142,8 @@ Here are some examples: ### Implicit Borrows Certain expressions will treat an expression as an lvalue by implicitly -borrowing it. For example, it is possible to compare two unsized -[slices](types.html#array-and-slice-types) for equality directly, because the -`==` operator implicitly borrows it's operands: +borrowing it. 
For example, it is possible to compare two unsized [slices] for +equality directly, because the `==` operator implicitly borrows it's operands: ```rust # let c = [1, 2, 3]; @@ -165,59 +160,51 @@ let b: &[i32]; Implicit borrows may be taken in the following expressions: -* Left operand in [method-call expressions](#method-call-expressions). -* Left operand in [field expressions](#field-expressions). -* Left operand in [call expressions](#call-expressions). -* Left operand in [index expressions](#index-expressions). -* Operand of the [dereference](#the-dereference-operator) (`*`) operator. -* Operands of [comparison operators](#comparison-operators). -* Left operands of the [compound assignment](#compound-assignment-expressions). +* Left operand in [method-call] expressions. +* Left operand in [field] expressions. +* Left operand in [call expressions]. +* Left operand in [array indexing] expressions. +* Operand of the [dereference] operator (`*`). +* Operands of [comparison]. +* Left operands of the [compound assignment]. ## Constant expressions Certain types of expressions can be evaluated at compile time. These are called _constant expressions_. Certain places, such as in -[constants](items.html#constant-items) and [statics](items.html#static-items), +[constants](items/constant-items.html) and [statics](items/static-items.html), require a constant expression, and are always evaluated at compile time. In other places, such as in [`let` statements](statements.html#let-statements), constant expressions may be evaluated at compile time. If errors, such as out -of bounds [array access](#index-expressions) or [overflow](#overflow) occurs, +of bounds [array indexing] or [overflow] occurs, then it is a compiler error if the value must be evaluated at compile time, otherwise it is just a warning, but the code will most likely panic when run. The following expressions are constant expressions, so long as any operands are also constant expressions: -* [Literals](#literal-expressions). -* [Paths](#path-expressions) to [functions](items.html#functions) and constants. +* [Literals]. +* [Paths] to [functions](items/functions.html) and constants. Recursively defining constants is not allowed. * Paths to statics, so long as only their address, not their value, is used. This includes using their value indirectly through a complicated expression. \* -* [Tuple expressions](#tuple-expressions). -* [Array expressions](#array-expressions). -* [Struct expressions](#struct-expressions), where the type does not implement - [`Drop`](the-drop-trait.html). -* [Variant expressions](#enumeration-variant-expressions), where the - enumeration type does not implement `Drop`. -* [Block expressions](#block-expressions) (and `unsafe` blocks) which contain - only items and possibly a (constant) tail expression. -* [Field expressions](#field-expressions). -* [Index expressions](#index-expressions), indexing a [array or - slice](types.html#array-and-slice-types) with a `usize`. -* [Range expressions](#range-expressions). -* [Closure expressions](#closure-expressions) which don't capture variables - from the environment. -* Built in [negation](#negation-operators), [arithmetic, - logical](#arithmetic-and-logical-binary-operators), - [comparison](#comparison-operators) or [lazy - boolean](#lazy-boolean-operators) operators used on integer and floating - point types, `bool` and `char`. -* Shared [borrow expressions](#borrow-operators). 
-* The [dereference operator](#the-dereference-operator), but not to circumvent the - rule on statics. -* [Grouped expressions](#grouped-expressions). -* [Cast expressions](#type-cast-expressions), except pointer to address and +* [Tuple expressions]. +* [Array expressions]. +* [Struct] expressions, where the type does not implement [`Drop`](the-drop-trait.html). +* [Enum variant] expressions, where the enumeration type does not implement `Drop`. +* [Block expressions] (and `unsafe` blocks) which contain only items and + possibly a (constant) tail expression. +* [Field] expressions. +* Index expressions, [array indexing] or [slice] with a `usize`. +* [Range expressions]. +* [Closure expressions] which don't capture variables from the environment. +* Built in [negation], [arithmetic, logical], [comparison] or [lazy boolean] + operators used on integer and floating point types, `bool` and `char`. +* Shared [borrow]. +* The [dereference operator], but not to circumvent the rule on statics. +* [Grouped] expressions. +* [Cast] expressions, except pointer to address and function pointer to address casts. \* Only in static items. @@ -228,1246 +215,34 @@ Many of the following operators and expressions can also be overloaded for other types using traits in `std::ops` or `std::cmp`, these traits here also exist in `core::ops` and `core::cmp` with the same names. -## Literal expressions - -A _literal expression_ consists of one of the [literal](tokens.html#literals) -forms described earlier. It directly describes a number, character, string, -boolean value, or the unit value. - -```rust -(); // unit type -"hello"; // string type -'5'; // character type -5; // integer type -``` - -## Path expressions - -A [path](paths.html) used as an expression context denotes either a local -variable or an item. Path expressions that resolve to local or static variables -are [lvalues](expressions.html#lvalues-and-rvalues), other paths -are rvalues. Using a `static mut` variable requires an [`unsafe` -block](#unsafe-blocks). - -```rust -# mod globals { -# pub static STATIC_VAR: i32 = 5; -# pub static mut STATIC_MUT_VAR: i32 = 7; -# } -# let local_var = 3; -local_var; -globals::STATIC_VAR; -unsafe { globals::STATIC_MUT_VAR }; -let some_constructor = Some::; -let push_integer = Vec::::push; -let slice_reverse = <[i32]>::reverse; -``` - -## Tuple expressions - -Tuples are written by enclosing zero or more comma-separated expressions in -parentheses. They are used to create [tuple-typed](types.html#tuple-types) -values. - -```rust -(0.0, 4.5); -("a", 4usize, true); -``` - -You can disambiguate a single-element tuple from a value in parentheses with a -comma: - -```rust -(0,); // single-element tuple -(0); // zero in parentheses -``` - -## Struct expressions - -There are several forms of struct expressions. A _struct expression_ consists -of the [path](paths.html) of a [struct item](items.html#structs), followed by a -brace-enclosed list of zero or more comma-separated name-value pairs, providing -the field values of a new instance of the struct. A field name can be any -[identifier](identifiers.html), and is separated from its value expression by a -colon. In the case of a tuple struct the field names are numbers corresponding -to the position of the field. The numbers must be written in decimal, -containing no underscores and with no leading zeros or integer suffix. A value -of a [union](items.html#unions) type can also be created using this syntax, -except that it must specify exactly one field. 
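A small sketch of the union sentence just above (illustrative only, with invented names): a union value is written with struct-expression syntax but must name exactly one field, and reading a field back requires `unsafe`.

```rust
// Unions use struct-expression syntax, but exactly one field may be given.
union IntOrFloat {
    i: u32,
    f: f32,
}

fn main() {
    let u = IntOrFloat { i: 123 };
    // The compiler cannot track which field is currently meaningful,
    // so reading a union field is an unsafe operation.
    let value = unsafe { u.i };
    println!("{}", value);
}
```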
- -Struct expressions can't be used directly in the head of a [loop](#loops) or an -[`if`](#if-expressions), [`if let`](#if-let-expressions) or -[`match`](#match-expressions) expression. But struct expressions can still be -in used inside parentheses, for example. - -A _tuple struct expression_ consists of the path of a struct item, followed by -a parenthesized list of one or more comma-separated expressions (in other -words, the path of a struct item followed by a tuple expression). The struct -item must be a tuple struct item. - -A _unit-like struct expression_ consists only of the path of a struct item. - -The following are examples of struct expressions: - -```rust -# struct Point { x: f64, y: f64 } -# struct NothingInMe { } -# struct TuplePoint(f64, f64); -# mod game { pub struct User<'a> { pub name: &'a str, pub age: u32, pub score: usize } } -# struct Cookie; fn some_fn(t: T) {} -Point {x: 10.0, y: 20.0}; -NothingInMe {}; -TuplePoint(10.0, 20.0); -TuplePoint { 0: 10.0, 1: 20.0 }; // Results in the same value as the above line -let u = game::User {name: "Joe", age: 35, score: 100_000}; -some_fn::(Cookie); -``` - -A struct expression forms a new value of the named struct type. Note that for a -given *unit-like* struct type, this will always be the same value. - -A struct expression can terminate with the syntax `..` followed by an -expression to denote a functional update. The expression following `..` (the -base) must have the same struct type as the new struct type being formed. The -entire expression denotes the result of constructing a new struct (with the -same type as the base expression) with the given values for the fields that -were explicitly specified and the values in the base expression for all other -fields. Just as with all struct expressions, all of the fields of the struct -must be [visible](visibility-and-privacy.html), even those not explicitly -named. - -```rust -# struct Point3d { x: i32, y: i32, z: i32 } -let base = Point3d {x: 1, y: 2, z: 3}; -Point3d {y: 0, z: 10, .. base}; -``` - -#### Struct field init shorthand - -When initializing a data structure (struct, enum, union) with named (but not -numbered) fields, it is allowed to write `fieldname` as a shorthand for -`fieldname: fieldname`. This allows a compact syntax with less duplication. - -Example: - -```rust -# struct Point3d { x: i32, y: i32, z: i32 } -# let x = 0; -# let y_value = 0; -# let z = 0; -Point3d { x: x, y: y_value, z: z }; -Point3d { x, y: y_value, z }; -``` - -### Enumeration Variant expressions - -Enumeration variants can be constructed similarly to structs, using a path to -an enum variant instead of to a struct: - -```rust -# enum Message { -# Quit, -# WriteString(String), -# Move { x: i32, y: i32 }, -# } -let q = Message::Quit; -let w = Message::WriteString("Some string".to_string()); -let m = Message::Move { x: 50, y: 200 }; -``` - -## Block expressions - -A _block expression_ is similar to a module in terms of the declarations that -are possible, but can also contain [statements](statements.html) and end with -an expression. Each block conceptually introduces a new namespace scope. Use -items can bring new names into scopes and declared items are in scope for only -the block itself. - -A block will execute each statement sequentially, and then execute the -expression (if given). 
If the block doesn't end in an expression, its value is -`()`: - -```rust -let x: () = { println!("Hello."); }; -``` - -If it ends in an expression, its value and type are that of the expression: - -```rust -let x: i32 = { println!("Hello."); 5 }; - -assert_eq!(5, x); -``` - -Blocks are always [rvalues](expressions.html#lvalues-and-rvalues) and evaluate the last -expression in rvalue context. This can be used to force moving a value -if really needed. - -### `unsafe` blocks - -_See [`unsafe` block](unsafe-blocks.html) for more information on when to use `unsafe`_ - -A block of code can be prefixed with the `unsafe` keyword, to permit calling -`unsafe` functions or dereferencing raw pointers within a safe function. - -## Method-call expressions - -A _method call_ consists of an expression followed by a single dot, an -[identifier](identifiers.html), and a parenthesized expression-list. Method -calls are resolved to methods on specific traits, either statically dispatching -to a method if the exact `self`-type of the left-hand-side is known, or -dynamically dispatching if the left-hand-side expression is an indirect [trait -object](types.html#trait-objects). Method call expressions will automatically -take a shared or mutable borrow of the receiver if needed. - -```rust -let pi: Result = "3.14".parse(); -let log_pi = pi.unwrap_or(1.0).log(2.72); -# assert!(1.14 < log_pi && log_pi < 1.15) -``` - -When resolving method calls on an expression of type `A`, Rust will use the -following order: - -1. Inherent methods, with receiver of type `A`, `&A`, `&mut A`. -1. Trait methods with receiver of type `A`. -1. Trait methods with receiver of type `&A`. -1. Trait methods with receiver of type `&mut A`. -1. If it's possible, Rust will then repeat steps 1-5 with - `::Target`, and insert a dereference operator. -1. If `A` is now an [array](types.html#array-and-slice-types) type, then - repeat steps 1-4 with the corresponding slice type. - -Note: that in steps 1-4 the receiver is used, not the type of `Self` nor the -type of `A`. For example - -```rust,ignore -// `Self` is `&A`, receiver is `&A`. -impl<'a> Trait for &'a A { - fn method(self) {} -} -// If `A` is `&B`, then `Self` is `B` and the receiver is `A`. -impl B { - fn method(&self) {} -} -``` - -Another note: this process does not use the mutability or lifetime of the -receiver, or whether `unsafe` methods can currently be called to resolve -methods. These constraints instead lead to compiler errors. - -If a step is reached where there is more than one possible method (where -generic methods or traits are considered the same), then it is a compiler -error. These cases require a [more specific -syntax.](#disambiguating-function-calls) for method and function invocation. - -## Field expressions - -A _field expression_ consists of an expression followed by a single dot and an -[identifier](identifiers.html), when not immediately followed by a -parenthesized expression-list (the latter is always a [method call -expression](#method-call-expressions)). A field expression denotes a field of a -[struct](types.html#struct-types) or [union](items.html#unions). To call a -function stored in a struct parentheses are needed around the field expression - -```rust,ignore -mystruct.myfield; -foo().x; -(Struct {a: 10, b: 20}).a; -mystruct.method(); // Method expression -(mystruct.function_field)() // Call expression containing a field expression -``` - -A field access is an [lvalue](expressions.html#lvalues-and-rvalues) referring -to the location of that field. 
When the subexpression is -[mutable](#mutability), the field expression is also mutable. - -Also, if the type of the expression to the left of the dot is a pointer, it is -automatically dereferenced as many times as necessary to make the field access -possible. In cases of ambiguity, we prefer fewer autoderefs to more. - -Finally the fields of a struct, a reference to a struct are treated as separate -entities when borrowing. If the struct does not implement -[`Drop`](the-drop-trait.html) this also applies to moving out of each of its fields -where possible. This also does not apply if automatic dereferencing is done -though user defined types. - -```rust -# struct A { f1: String, f2: String, f3: String } -# let mut x = A { -# f1: "f1".to_string(), -# f2: "f2".to_string(), -# f3: "f3".to_string() -# }; -let a: &mut String = &mut x.f1; // x.f1 borrowed mutably -let b: &String = &x.f2; // x.f2 borrowed immutably -let c: &String = &x.f2; // Can borrow again -let d: String = x.f3; // Move out of x.f3 -``` - -### Tuple indexing expressions - -[Tuples](types.html#tuple-types) and [struct tuples](items.html#structs) can be -indexed using the number corresponding to the position of the field. The index -must be written as a [decimal literal](tokens.html#integer-literals) with no -underscores or suffix. Tuple indexing expressions also differ from field -expressions in that they can unambiguously be called as a function. In all -other aspects they have the same behavior. - -```rust -# struct Point(f32, f32); -let pair = (1, 2); -assert_eq!(pair.1, 2); -let unit_x = Point(1.0, 0.0); -assert_eq!(unit_x.0, 1.0); -``` - -## Call expressions - -A _call expression_ consists of an expression followed by a parenthesized -expression-list. It invokes a function, providing zero or more input variables. -If the function eventually returns, then the expression completes. For -[non-function types](types.html#function-item-types), the expression f(...) uses the -method on one of the `std::ops::Fn`, `std::ops::FnMut` or `std::ops::FnOnce` -traits, which differ in whether they take the type by reference, mutable -reference, or take ownership respectively. An automatic borrow will be taken if -needed. Rust will also automatically dereference `f` as required. Some examples -of call expressions: - -```rust -# fn add(x: i32, y: i32) -> i32 { 0 } -let three: i32 = add(1i32, 2i32); -let name: &'static str = (|| "Rust")(); -``` - -### Disambiguating Function Calls - -Rust treats all function calls as sugar for a more explicit, fully-qualified -syntax. Upon compilation, Rust will desugar all function calls into the explicit -form. Rust may sometimes require you to qualify function calls with trait, -depending on the ambiguity of a call in light of in-scope items. - -> **Note**: In the past, the Rust community used the terms "Unambiguous -> Function Call Syntax", "Universal Function Call Syntax", or "UFCS", in -> documentation, issues, RFCs, and other community writings. However, the term -> lacks descriptive power and potentially confuses the issue at hand. We mention -> it here for searchability's sake. - -Several situations often occur which result in ambiguities about the receiver or -referent of method or associated function calls. 
These situations may include: - -* Multiple in-scope traits define methods with the same name for the same types -* Auto-`deref` is undesirable; for example, distinguishing between methods on a - smart pointer itself and the pointer's referent -* Methods which take no arguments, like `default()`, and return properties of a - type, like `size_of()` - -To resolve the ambiguity, the programmer may refer to their desired method or -function using more specific paths, types, or traits. - -For example, - -```rust -trait Pretty { - fn print(&self); -} - -trait Ugly { - fn print(&self); -} - -struct Foo; -impl Pretty for Foo { - fn print(&self) {} -} - -struct Bar; -impl Pretty for Bar { - fn print(&self) {} -} -impl Ugly for Bar{ - fn print(&self) {} -} - -fn main() { - let f = Foo; - let b = Bar; - - // we can do this because we only have one item called `print` for `Foo`s - f.print(); - // more explicit, and, in the case of `Foo`, not necessary - Foo::print(&f); - // if you're not into the whole brevity thing - ::print(&f); - - // b.print(); // Error: multiple 'print' found - // Bar::print(&b); // Still an error: multiple `print` found - - // necessary because of in-scope items defining `print` - ::print(&b); -} -``` - -Refer to [RFC 132] for further details and motivations. - -[RFC 132]: https://github.com/rust-lang/rfcs/blob/master/text/0132-ufcs.md - -## Closure expressions - -A _closure expression_ defines a closure and denotes it as a value, in a single -expression. A closure expression is a pipe-symbol-delimited (`|`) list of -patterns followed by an expression. Type annotations may optionally be added -for the type of the parameters or for the return type. If there is a return -type, the expression used for the body of the closure must be a normal -[block](#block-expressions). A closure expression also may begin with the -`move` keyword before the initial `|`. - -A closure expression denotes a function that maps a list of parameters -(`ident_list`) onto the expression that follows the `ident_list`. The patterns -in the `ident_list` are the parameters to the closure. If a parameter's types -is not specified, then the compiler infers it from context. Each closure -expression has a unique anonymous type. - -Closure expressions are most useful when passing functions as arguments to other -functions, as an abbreviation for defining and capturing a separate function. - -Significantly, closure expressions _capture their environment_, which regular -[function definitions](items.html#functions) do not. Without the `move` -keyword, the closure expression infers how it captures each variable from its -environment, preferring to capture by shared reference, effectively borrowing -all outer variables mentioned inside the closure's body. If needed the compiler -will infer that instead mutable references should be taken, or that the values -should be moved or copied (depending on their type) from the environment. A -closure can be forced to capture its environment by copying or moving values by -prefixing it with the `move` keyword. This is often used to ensure that the -closure's type is `'static`. - -The compiler will determine which of the [closure -traits](types.html#closure-types) the closure's type will implement by how it -acts on its captured variables. The closure will also implement -[`Send`](the-send-trait.html) and/or [`Sync`](the-sync-trait.html) if all of -its captured types do. These traits allow functions to accept closures using -generics, even though the exact types can't be named. 
- -In this example, we define a function `ten_times` that takes a higher-order -function argument, and we then call it with a closure expression as an argument, -followed by a closure expression that moves values from its environment. - -```rust -fn ten_times(f: F) where F: Fn(i32) { - for index in 0..10 { - f(index); - } -} - -ten_times(|j| println!("hello, {}", j)); -// With type annotations -ten_times(|j: i32| -> () { println!("hello, {}", j) }); - -let word = "konnichiwa".to_owned(); -ten_times(move |j| println!("{}, {}", word, j)); -``` - -## Array expressions - -An _[array](types.html#array-and-slice-types) expression_ can be written by -enclosing zero or more comma-separated expressions of uniform type in square -brackets. This produces and array containing each of these values in the -order they are written. - -Alternatively there can be exactly two expressions inside the brackets, -separated by a semi-colon. The expression after the `;` must be a have type -`usize` and be a [constant expression](#constant-expressions), such as a -[literal](tokens.html#literals) or a [constant -item](items.html#constant-items). `[a; b]` creates an array containing `b` -copies of the value of `a`. If the expression after the semi-colon has a value -greater than 1 then this requires that the type of `a` is -[`Copy`](the-copy-trait.html). - -```rust -[1, 2, 3, 4]; -["a", "b", "c", "d"]; -[0; 128]; // array with 128 zeros -[0u8, 0u8, 0u8, 0u8]; -``` - -## Index expressions - -[Array and slice](types.html#array-and-slice-types)-typed expressions can be -indexed by writing a square-bracket-enclosed expression (the index) after them. -When the array is mutable, the resulting -[lvalue](expressions.html#lvalues-and-rvalues) can be assigned to. -For other types an index expression `a[b]` is equivalent to -`*std::ops::Index::index(&a, b)`, or `*std::opsIndexMut::index_mut(&mut a, b)` -in a mutable lvalue context. Just as with methods, Rust will also insert -dereference operations on `a` repeatedly to find an implementation. - -Indices are zero-based, and are of type `usize` for arrays and slices. Array -access is a [constant expression](#constant-expressions), so bounds can be -checked at compile-time for constant arrays with a constant index value. -Otherwise a check will be performed at run-time that will put the thread in a -_panicked state_ if it fails. - -```rust,should_panic -([1, 2, 3, 4])[2]; // Evaluates to 3 - -let x = (["a", "b"])[10]; // warning: const index-expr is out of bounds - -let n = 10; -let y = (["a", "b"])[n]; // panics - -let arr = ["a", "b"]; -arr[10]; // panics -``` - -## Range expressions - -The `..` operator will construct an object of one of the `std::ops::Range` (or -`core::ops::Range`) variants. - -```rust -1..2; // std::ops::Range -3..; // std::ops::RangeFrom -..4; // std::ops::RangeTo -..; // std::ops::RangeFull -``` - -The following expressions are equivalent. - -```rust -let x = std::ops::Range {start: 0, end: 10}; -let y = 0..10; - -assert_eq!(x, y); -``` - -## Operator expressions - -Operators are defined for built in types by the Rust language. Many of the -following operators can also be overloaded using traits in `std::ops` or -`std::cmp`. - -### Overflow - -Integer operators will panic when they overflow when compiled in debug mode. -The `-C debug-assertions` and `-C overflow-checks` compiler flags can be used -to control this more directly. 
The following things are considered to be -overflow: - -* When `+`, `*` or `-` create a value greater than the maximum value, or less - than the minimum value that can be stored. This includes unary `-` on the - smallest value of any signed integer type. -* Using `/` or `%`, where the left-hand argument is the smallest integer of a - signed integer type and the right-hand argument is `-1`. -* Using `<<` or `>>` where the right-hand argument is greater than or equal to - the number of bits in the type of the left-hand argument, or is negative. - -### Borrow operators - -The `&` (shared borrow) and `&mut` (mutable borrow) operators are unary prefix -operators. When applied to an lvalue produce a reference (pointer) to the -location that the value refers to. The lvalue is also placed into a borrowed -state for the duration of the reference. For a shared borrow (`&`), this -implies that the lvalue may not be mutated, but it may be read or shared again. -For a mutable borrow (`&mut`), the lvalue may not be accessed in any way until -the borrow expires. `&mut` evaluates its operand in a mutable lvalue context. -If the `&` or `&mut` operators are applied to an rvalue, a temporary value is -created; the lifetime of this temporary value is defined by [syntactic -rules](#temporary-lifetimes). These operators cannot be overloaded. - -```rust -{ - // a temporary with value 7 is created that lasts for this scope. - let shared_reference = &7; -} -let mut array = [-2, 3, 9]; -{ - // Mutably borrows `array` for this scope. - // `array` may only be used through `mutable_reference`. - let mutable_reference = &mut array; -} -``` - -### The dereference operator - -The `*` (dereference) operator is also a unary prefix operator. When applied to -a [pointer](types.html#pointer-types) it denotes the pointed-to location. If -the expression is of type `&mut T` and `*mut T`, and is either a local -variable, a (nested) field of a local variance or is a mutable lvalue, then the -resulting [lvalue](expressions.html#lvalues-and-rvalues) can be -assigned to. Dereferencing a raw pointer requires `unsafe`. - -On non-pointer types `*x` is equivalent to `*std::ops::Deref::deref(&x)` in an -[immutable lvalue context](#mutability) and `*std::ops::Deref::deref_mut(&mut -x)` in a mutable lvalue context. - -```rust -let x = &7; -assert_eq!(*x, 7); -let y = &mut 9; -*y = 11; -assert_eq!(*y, 11); -``` - -### The `?` operator. - -The `?` ("question mark") operator can be applied to values of the `Result` type to propagate errors. If applied to `Err(e)` it will return -`Err(From::from(e))` from the enclosing function or closure. If applied to -`Ok(x)` it will unwrap the value to return `x`. Unlike other unary operators -`?` is written in postfix notation. `?` cannot be overloaded. - -```rust -# use std::num::ParseIntError; -fn try_to_parse() -> Result { - let x: i32 = "123".parse()?; // x = 123 - let y: i32 = "24a".parse()?; // returns an Err() immediately - Ok(x + y) // Doesn't run. -} - -let res = try_to_parse(); -println!("{:?}", res); -# assert!(res.is_err()) -``` - -### Negation operators - -These are the last two unary operators. This table summarizes the behavior of -them on primitive types and which traits are used to overload these operators -for other types. Remember that signed integers are always represented using -two's complement. The operands of all of these operators are evaluated in -rvalue context so are moved or copied. 
- -| Symbol | Integer | `bool` | Floating Point | Overloading Trait | -|--------|-------------|-------------|----------------|--------------------| -| `-` | Negation* | | Negation | `std::ops::Neg` | -| `!` | Bitwise NOT | Logical NOT | | `std::ops::Not` | - -\* Only for signed integer types. - -Here are some example of these operators - -```rust -let x = 6; -assert_eq!(-x, -6); -assert_eq!(!x, -7); -assert_eq!(true, !false); -``` - -### Arithmetic and Logical Binary Operators - -Binary operators expressions are all written with infix notation. This table -summarizes the behavior of arithmetic and logical binary operators on -primitive types and which traits are used to overload these operators for other -types. Remember that signed integers are always represented using two's -complement. The operands of all of these operators are evaluated in rvalue -context so are moved or copied. - -| Symbol | Integer | `bool` | Floating Point | Overloading Trait | -|--------|-------------------------|-------------|----------------|--------------------| -| `+` | Addition | | Addition | `std::ops::Add` | -| `-` | Subtraction | | Subtraction | `std::ops::Sub` | -| `*` | Multiplication | | Multiplication | `std::ops::Mul` | -| `/` | Division | | Division | `std::ops::Div` | -| `%` | Remainder | | Remainder | `std::ops::Rem` | -| `&` | Bitwise AND | Logical AND | | `std::ops::BitAnd` | -| | | Bitwise OR | Logical OR | | `std::ops::BitOr` | -| `^` | Bitwise XOR | Logical XOR | | `std::ops::BitXor` | -| `<<` | Left Shift | | | `std::ops::Shl` | -| `>>` | Right Shift* | | | `std::ops::Shr` | - -\* Arithmetic right shift on signed integer types, logical right shift on -unsigned integer types. - -Here are examples of these operators being used. - -```rust -assert_eq!(3 + 6, 9); -assert_eq!(5.5 - 1.25, 4.25); -assert_eq!(-5 * 14, -70); -assert_eq!(14 / 3, 4); -assert_eq!(100 % 7, 2); -assert_eq!(0b1010 & 0b1100, 0b1000); -assert_eq!(0b1010 | 0b1100, 0b1110); -assert_eq!(0b1010 ^ 0b1100, 0b110); -assert_eq!(13 << 3, 104); -assert_eq!(-10 >> 2, -3); -``` - -### Comparison Operators - -Comparison operators are also defined both for primitive types and many type in -the standard library. Parentheses are required when chaining comparison -operators. For example, the expression `a == b == c` is invalid and may be -written as `(a == b) == c`. - -Unlike arithmetic and logical operators, the traits for -overloading the operators the traits for these operators are used more -generally to show how a type may be compared and will likely be assumed to -define actual comparisons by functions that use these traits as bounds. Many -functions and macros in the standard library can then use that assumption -(although not to ensure safety). Unlike the arithmetic and logical operators -above, these operators implicitly take shared borrows of their operands, -evaluating them in lvalue context: - -```rust,ignore -a == b; -// is equivalent to -::std::cmp::PartialEq::eq(&a, &b); -``` - -This means that the operands don't have to be moved out of. 
- -| Symbol | Meaning | Overloading method | -|--------|--------------------------|----------------------------| -| `==` | Equal | `std::cmp::PartialEq::eq` | -| `!=` | Not equal | `std::cmp::PartialEq::ne` | -| `>` | Greater than | `std::cmp::PartialOrd::gt` | -| `<` | Less than | `std::cmp::PartialOrd::lt` | -| `>=` | Greater than or equal to | `std::cmp::PartialOrd::ge` | -| `<=` | Less than or equal to | `std::cmp::PartialOrd::le` | - -Here are examples of the comparison operators being used. - -```rust -assert!(123 == 123); -assert!(23 != -12); -assert!(12.5 > 12.2); -assert!([1, 2, 3] < [1, 3, 4]); -assert!('A' <= 'B'); -assert!("World" >= "Hello"); -``` - -### Lazy boolean operators - -The operators `||` and `&&` may be applied to operands of boolean type. The -`||` operator denotes logical 'or', and the `&&` operator denotes logical -'and'. They differ from `|` and `&` in that the right-hand operand is only -evaluated when the left-hand operand does not already determine the result of -the expression. That is, `||` only evaluates its right-hand operand when the -left-hand operand evaluates to `false`, and `&&` only when it evaluates to -`true`. - -```rust -let x = false || true; // true -let y = false && panic!(); // false, doesn't evaluate `panic!()` -``` - -### Type cast expressions - -A type cast expression is denoted with the binary operator `as`. - -Executing an `as` expression casts the value on the left-hand side to the type -on the right-hand side. - -An example of an `as` expression: - -```rust -# fn sum(values: &[f64]) -> f64 { 0.0 } -# fn len(values: &[f64]) -> i32 { 0 } -fn average(values: &[f64]) -> f64 { - let sum: f64 = sum(values); - let size: f64 = len(values) as f64; - sum / size -} -``` - -`as` can be used to explicitly perform [coercions](type-coercions.html), as -well as the following additional casts. Here `*T` means either `*const T` or -`*mut T`. - -| Type of `e` | `U` | Cast performed by `e as U` | -|-----------------------|-----------------------|----------------------------------| -| Integer or Float type | Integer or Float type | Numeric cast | -| C-like enum | Integer type | Enum cast | -| `bool` or `char` | Integer type | Primitive to integer cast | -| `u8` | `char` | `u8` to `char` cast | -| `*T` | `*V` where `V: Sized` \* | Pointer to pointer cast | -| `*T` where `T: Sized` | Numeric type | Pointer to address cast | -| Integer type | `*V` where `V: Sized` | Address to pointer cast | -| `&[T; n]` | `*const T` | Array to pointer cast | -| [Function pointer](types.html#function-pointer-types) | `*V` where `V: Sized` | Function pointer to pointer cast | -| Function pointer | Integer | Function pointer to address cast | - -\* or `T` and `V` are compatible unsized types, e.g., both slices, both the -same trait object. - -#### Semantics - -* Numeric cast - * Casting between two integers of the same size (e.g. i32 -> u32) is a no-op - * Casting from a larger integer to a smaller integer (e.g. u32 -> u8) will - truncate - * Casting from a smaller integer to a larger integer (e.g. u8 -> u32) will - * zero-extend if the source is unsigned - * sign-extend if the source is signed - * Casting from a float to an integer will round the float towards zero - * **[NOTE: currently this will cause Undefined Behavior if the rounded - value cannot be represented by the target integer type][float-int]**. - This includes Inf and NaN. This is a bug and will be fixed. 
- * Casting from an integer to float will produce the floating point - representation of the integer, rounded if necessary (rounding strategy - unspecified) - * Casting from an f32 to an f64 is perfect and lossless - * Casting from an f64 to an f32 will produce the closest possible value - (rounding strategy unspecified) - * **[NOTE: currently this will cause Undefined Behavior if the value - is finite but larger or smaller than the largest or smallest finite - value representable by f32][float-float]**. This is a bug and will - be fixed. -* Enum cast - * Casts an enum to its discriminant, then uses a numeric cast if needed. -* Primitive to integer cast - * `false` casts to `0`, `true` casts to `1` - * `char` casts to the value of the code point, then uses a numeric cast if needed. -* `u8` to `char` cast - * Casts to the `char` with the corresponding code point. - -[float-int]: https://github.com/rust-lang/rust/issues/10184 -[float-float]: https://github.com/rust-lang/rust/issues/15536 - -### Assignment expressions - -An _assignment expression_ consists of an -[lvalue](expressions.html#lvalues-and-rvalues) expression followed -by an equals sign (`=`) and an -[rvalue](expressions.html#lvalues-and-rvalues) expression. - -Evaluating an assignment expression [either copies or -moves](#moved-and-copied-types) its right-hand operand to its left-hand -operand. The left-hand operand must be an lvalue: using an rvalue results in a -compiler error, rather than promoting it to a temporary. - -```rust -# let mut x = 0; -# let y = 0; -x = y; -``` - -### Compound assignment expressions - -The `+`, `-`, `*`, `/`, `%`, `&`, `|`, `^`, `<<`, and `>>` operators may be -composed with the `=` operator. The expression `lval OP= val` is equivalent to -`lval = lval OP val`. For example, `x = x + 1` may be written as `x += 1`. -Any such expression always has the [`unit`](types.html#tuple-types) type. -These operators can all be overloaded using the trait with the same name as for -the normal operation followed by 'Assign', for example, `std::ops::AddAssign` -is used to overload `+=`. As with `=`, `lval` must be an lvalue. - -```rust -let mut x = 10; -x += 4; -assert_eq!(x, 14); -``` - -### Operator precedence - -The precedence of Rust operators is ordered as follows, going from strong to -weak. Binary Operators at the same precedence level are evaluated in the order -given by their associativity. - - -| Operator | Associativity | -|-----------------------------|---------------------| -| `?` | | -| Unary `-` `*` `!` `&` `&mut` | | -| `as` `:` | left to right | -| `*` `/` `%` | left to right | -| `+` `-` | left to right | -| `<<` `>>` | left to right | -| `&` | left to right | -| `^` | left to right | -| | | left to right | -| `==` `!=` `<` `>` `<=` `>=` | Require parentheses | -| `&&` | left to right | -| || | left to right | -| `..` `...` | Require parentheses | -| `<-` | right to left | -| `=` `+=` `-=` `*=` `/=` `%=`
`&=` |= `^=` `<<=` `>>=` | right to left | - -## Grouped expressions - -An expression enclosed in parentheses evaluates to the result of the enclosed -expression. Parentheses can be used to explicitly specify evaluation order -within an expression. - -An example of a parenthesized expression: - -```rust -let x: i32 = 2 + 3 * 4; -let y: i32 = (2 + 3) * 4; -assert_eq!(x, 14); -assert_eq!(y, 20); -``` - -## Loops - -Rust supports three loop expressions: - -* A [`loop` expression](#infinite-loops) denotes an infinite loop. -* A [`while` expression](#predicate-loops) loops until a predicate is false. -* A [`for` expression](#iterator-loops) extracts values from an iterator, - looping until the iterator is empty. - -All three types of loop support [`break` expressions](#break-expressions), -[`continue` expressions](#continue-expressions), and [labels](#loop-labels). -Only `loop` supports [evaluation to non-trivial values](#break-and-loop-values). - -### Infinite loops - -A `loop` expression repeats execution of its body continuously: -`loop { println!("I live."); }`. - -A `loop` expression without an associated `break` expression is -[diverging](items.html#diverging-functions), and doesn't -return anything. A `loop` expression containing associated -[`break` expression(s)](#break-expressions) -may terminate, and must have type compatible with the value of the `break` -expression(s). - -### Predicate loops - -A `while` loop begins by evaluating the boolean loop conditional expression. If -the loop conditional expression evaluates to `true`, the loop body block -executes, then control returns to the loop conditional expression. If the loop -conditional expression evaluates to `false`, the `while` expression completes. - -An example: - -```rust -let mut i = 0; - -while i < 10 { - println!("hello"); - i = i + 1; -} -``` - -### Iterator loops - -A `for` expression is a syntactic construct for looping over elements provided -by an implementation of `std::iter::IntoIterator`. If the iterator yields a -value, that value is given the specified name and the body of the loop is -executed, then control returns to the head of the `for` loop. If the iterator -is empty, the `for` expression completes. - -An example of a `for` loop over the contents of an array: - -```rust -let v = &["apples", "cake", "coffee"]; - -for text in v { - println!("I like {}.", text); -} -``` - -An example of a for loop over a series of integers: - -```rust -let mut sum = 0; -for n in 1..11 { - sum += n; -} -assert_eq!(sum, 55); -``` - -### Loop labels - -A loop expression may optionally have a _label_. The label is written as -a lifetime preceding the loop expression, as in `'foo: loop { break 'foo; }`, -`'bar: while false {}`, `'humbug: for _ in 0..0 {}`. -If a label is present, then labeled `break` and `continue` expressions nested -within this loop may exit out of this loop or return control to its head. -See [break expressions](#break-expressions) and [continue -expressions](#continue-expressions). - -### `break` expressions - -When `break` is encountered, execution of the associated loop body is -immediately terminated, for example: - -```rust -let mut last = 0; -for x in 1..100 { - if x > 12 { - break; - } - last = x; -} -assert_eq!(last, 12); -``` - -A `break` expression is normally associated with the innermost `loop`, `for` or -`while` loop enclosing the `break` expression, but a [label](#loop-labels) can -be used to specify which enclosing loop is affected. 
Example: - -```rust -'outer: loop { - while true { - break 'outer; - } -} -``` - -A `break` expression is only permitted in the body of a loop, and has one of -the forms `break`, `break 'label` or ([see below](#break-and-loop-values)) -`break EXPR` or `break 'label EXPR`. - -### `continue` expressions - -When `continue` is encountered, the current iteration of the associated loop -body is immediately terminated, returning control to the loop *head*. In -the case of a `while` loop, the head is the conditional expression controlling -the loop. In the case of a `for` loop, the head is the call-expression -controlling the loop. - -Like `break`, `continue` is normally associated with the innermost enclosing -loop, but `continue 'label` may be used to specify the loop affected. -A `continue` expression is only permitted in the body of a loop. - -### `break` and loop values - -When associated with a `loop`, a break expression may be used to return a value -from that loop, via one of the forms `break EXPR` or `break 'label EXPR`, where -`EXPR` is an expression whose result is returned from the `loop`. For example: - -```rust -let (mut a, mut b) = (1, 1); -let result = loop { - if b > 10 { - break b; - } - let c = a + b; - a = b; - b = c; -}; -// first number in Fibonacci sequence over 10: -assert_eq!(result, 13); -``` - -In the case a `loop` has an associated `break`, it is not considered diverging, -and the `loop` must have a type compatible with each `break` expression. -`break` without an expression is considered identical to `break` with -expression `()`. - -## `if` expressions - -An `if` expression is a conditional branch in program control. The form of an -`if` expression is a condition expression, followed by a consequent block, any -number of `else if` conditions and blocks, and an optional trailing `else` -block. The condition expressions must have type `bool`. If a condition -expression evaluates to `true`, the consequent block is executed and any -subsequent `else if` or `else` block is skipped. If a condition expression -evaluates to `false`, the consequent block is skipped and any subsequent `else -if` condition is evaluated. If all `if` and `else if` conditions evaluate to -`false` then any `else` block is executed. An if expression evaluates to the -same value as the executed block, or `()` if no block is evaluated. An `if` -expression must have the same type in all situations. - -```rust -# let x = 3; -if x == 4 { - println!("x is four"); -} else if x == 3 { - println!("x is three"); -} else { - println!("x is something else"); -} - -let y = if 12 * 15 > 150 { - "Bigger" -} else { - "Smaller" -}; -assert_eq!(y, "Bigger"); -``` - -## `match` expressions - -A `match` expression branches on a *pattern*. The exact form of matching that -occurs depends on the pattern. Patterns consist of some combination of -literals, destructured arrays or enum constructors, structs and tuples, -variable binding specifications, wildcards (`..`), and placeholders (`_`). A -`match` expression has a *head expression*, which is the value to compare to -the patterns. The type of the patterns must equal the type of the head -expression. - -A `match` behaves differently depending on whether or not the head expression -is an [lvalue or an rvalue](expressions.html#lvalues-and-rvalues). -If the head expression is an rvalue, it is first evaluated into a temporary -location, and the resulting value is sequentially compared to the patterns in -the arms until a match is found. 
The first arm with a matching pattern is -chosen as the branch target of the `match`, any variables bound by the pattern -are assigned to local variables in the arm's block, and control enters the -block. - -When the head expression is an lvalue, the match does not allocate a temporary -location (however, a by-value binding may copy or move from the lvalue). When -possible, it is preferable to match on lvalues, as the lifetime of these -matches inherits the lifetime of the lvalue, rather than being restricted to -the inside of the match. - -An example of a `match` expression: - -```rust -let x = 1; - -match x { - 1 => println!("one"), - 2 => println!("two"), - 3 => println!("three"), - 4 => println!("four"), - 5 => println!("five"), - _ => println!("something else"), -} -``` - -Patterns that bind variables default to binding to a copy or move of the -matched value (depending on the matched value's type). This can be changed to -bind to a reference by using the `ref` keyword, or to a mutable reference using -`ref mut`. - -Patterns can be used to *destructure* structs, enums, and tuples. Destructuring -breaks a value up into its component pieces. The syntax used is the same as -when creating such values. When destructing a data structure with named (but -not numbered) fields, it is allowed to write `fieldname` as a shorthand for -`fieldname: fieldname`. In a pattern whose head expression has a `struct`, -`enum` or `tupl` type, a placeholder (`_`) stands for a *single* data field, -whereas a wildcard `..` stands for *all* the fields of a particular variant. - -```rust -# enum Message { -# Quit, -# WriteString(String), -# Move { x: i32, y: i32 }, -# ChangeColor(u8, u8, u8), -# } -# let message = Message::Quit; -match message { - Message::Quit => println!("Quit"), - Message::WriteString(write) => println!("{}", &write), - Message::Move{ x, y: 0 } => println!("move {} horizontally", x), - Message::Move{ .. } => println!("other move"), - Message::ChangeColor { 0: red, 1: green, 2: _ } => { - println!("color change, red: {}, green: {}", red, green); - } -}; -``` - -Patterns can also dereference pointers by using the `&`, `&mut` and `box` -symbols, as appropriate. For example, these two matches on `x: &i32` are -equivalent: - -```rust -# let x = &3; -let y = match *x { 0 => "zero", _ => "some" }; -let z = match x { &0 => "zero", _ => "some" }; - -assert_eq!(y, z); -``` - -Subpatterns can also be bound to variables by the use of the syntax `variable @ -subpattern`. For example: - -```rust -let x = 1; - -match x { - e @ 1 ... 5 => println!("got a range element {}", e), - _ => println!("anything"), -} -``` - -Multiple match patterns may be joined with the `|` operator. A range of values -may be specified with `...`. For example: - -```rust -# let x = 2; -let message = match x { - 0 | 1 => "not many", - 2 ... 9 => "a few", - _ => "lots" -}; -``` - -Range patterns only work on scalar types (like integers and characters; not -like arrays and structs, which have sub-components). A range pattern may not be -a sub-range of another range pattern inside the same `match`. - -Finally, match patterns can accept *pattern guards* to further refine the -criteria for matching a case. Pattern guards appear after the pattern and -consist of a bool-typed expression following the `if` keyword. A pattern guard -may refer to the variables bound within the pattern they follow. 
- -```rust -# let maybe_digit = Some(0); -# fn process_digit(i: i32) { } -# fn process_other(i: i32) { } -let message = match maybe_digit { - Some(x) if x < 10 => process_digit(x), - Some(x) => process_other(x), - None => panic!(), -}; -``` - -## `if let` expressions - -An `if let` expression is semantically similar to an `if` expression but in -place of a condition expression it expects the keyword `let` followed by a -refutable pattern, an `=` and an expression. If the value of the expression on -the right hand side of the `=` matches the pattern, the corresponding block -will execute, otherwise flow proceeds to the following `else` block if it -exists. Like `if` expressions, `if let` expressions have a value determined by -the block that is evaluated. - -```rust -let dish = ("Ham", "Eggs"); - -// this body will be skipped because the pattern is refuted -if let ("Bacon", b) = dish { - println!("Bacon is served with {}", b); -} else { - // This block is evaluated instead. - println!("No bacon will be served"); -} - -// this body will execute -if let ("Ham", b) = dish { - println!("Ham is served with {}", b); -} -``` - -## `while let` loops - -A `while let` loop is semantically similar to a `while` loop but in place of a -condition expression it expects the keyword `let` followed by a refutable -pattern, an `=` and an expression. If the value of the expression on the right -hand side of the `=` matches the pattern, the loop body block executes then -control returns to the pattern matching statement. Otherwise, the while -expression completes. - -```rust -let mut x = vec![1, 2, 3]; - -while let Some(y) = x.pop() { - println!("y = {}", y); -} -``` - -## `return` expressions - -Return expressions are denoted with the keyword `return`. Evaluating a `return` -expression moves its argument into the designated output location for the -current function call, destroys the current function activation frame, and -transfers control to the caller frame. 
-
-An example of a `return` expression:
-
-```rust
-fn max(a: i32, b: i32) -> i32 {
-    if a > b {
-        return a;
-    }
-    return b;
-}
-```
+[block expressions]: expressions/block-expr.html
+[call expressions]: expressions/call-expr.html
+[closure expressions]: expressions/closure-expr.html
+[enum variant]: expressions/enum-variant-expr.html
+[field]: expressions/field-expr.html
+[literals]: expressions/literal-expr.html
+[match]: expressions/match-expr.html
+[method-call]: expressions/method-call-expr.html
+[paths]: expressions/path-expr.html
+[range expressions]: expressions/range-expr.html
+[struct]: expressions/struct-expr.html
+[tuple expressions]: expressions/tuple-expr.html
+
+[array expressions]: expressions/array-expr.html
+[array indexing]: expressions/array-expr.html#array-and-slice-indexing-expressions
+
+[arithmetic, logical]: expressions/operator-expr.html#arithmetic-and-logical-binary-operators
+[assign]: expressions/operator-expr.html#assignment-expressions
+[borrow]: expressions/operator-expr.html#borrow-operators
+[cast]: expressions/operator-expr.html#type-cast-expressions
+[comparison]: expressions/operator-expr.html#comparison-operators
+[compound assignment]: expressions/operator-expr.html#compound-assignment-expressions
+[dereferences]: expressions/operator-expr.html#the-dereference-operator
+[dereferencing]: expressions/operator-expr.html#the-dereference-operator
+[dereference operator]: expressions/operator-expr.html#the-dereference-operator
+[grouped]: expressions/operator-expr.html#grouped-expressions
+[lazy boolean]: expressions/operator-expr.html#lazy-boolean-operators
+[negation]: expressions/operator-expr.html#negation-operators
+[overflow]: expressions/operator-expr.html#overflow
+
+[slice]: types.html#array-and-slice-types
diff --git a/src/doc/reference/src/expressions/array-expr.md b/src/doc/reference/src/expressions/array-expr.md
new file mode 100644
index 0000000000..00e81356da
--- /dev/null
+++ b/src/doc/reference/src/expressions/array-expr.md
@@ -0,0 +1,53 @@
+# Array and array index expressions
+
+## Array expressions
+
+An _[array](types.html#array-and-slice-types) expression_ can be written by
+enclosing zero or more comma-separated expressions of uniform type in square
+brackets. This produces an array containing each of these values in the
+order they are written.
+
+Alternatively there can be exactly two expressions inside the brackets,
+separated by a semi-colon. The expression after the `;` must have the type
+`usize` and be a [constant expression](expressions.html#constant-expressions),
+such as a [literal](tokens.html#literals) or a [constant
+item](items/constant-items.html). `[a; b]` creates an array containing `b`
+copies of the value of `a`. If the expression after the semi-colon has a value
+greater than 1 then this requires that the type of `a` is
+[`Copy`](the-copy-trait.html).
+
+```rust
+[1, 2, 3, 4];
+["a", "b", "c", "d"];
+[0; 128];              // array with 128 zeros
+[0u8, 0u8, 0u8, 0u8];
+```
+
+## Array and slice indexing expressions
+
+[Array and slice](types.html#array-and-slice-types)-typed expressions can be
+indexed by writing a square-bracket-enclosed expression (the index) after them.
+When the array is mutable, the resulting
+[lvalue](expressions.html#lvalues-and-rvalues) can be assigned to.
+For other types an index expression `a[b]` is equivalent to
+`*std::ops::Index::index(&a, b)`, or `*std::ops::IndexMut::index_mut(&mut a, b)`
+in a mutable lvalue context.
Just as with methods, Rust will also insert +dereference operations on `a` repeatedly to find an implementation. + +Indices are zero-based, and are of type `usize` for arrays and slices. Array +access is a [constant expression](expressions.html#constant-expressions), so bounds can be +checked at compile-time for constant arrays with a constant index value. +Otherwise a check will be performed at run-time that will put the thread in a +_panicked state_ if it fails. + +```rust,should_panic +([1, 2, 3, 4])[2]; // Evaluates to 3 + +let x = (["a", "b"])[10]; // warning: const index-expr is out of bounds + +let n = 10; +let y = (["a", "b"])[n]; // panics + +let arr = ["a", "b"]; +arr[10]; // panics +``` diff --git a/src/doc/reference/src/expressions/block-expr.md b/src/doc/reference/src/expressions/block-expr.md new file mode 100644 index 0000000000..ee0d8c14d1 --- /dev/null +++ b/src/doc/reference/src/expressions/block-expr.md @@ -0,0 +1,34 @@ +# Block expressions + +A _block expression_ is similar to a module in terms of the declarations that +are possible, but can also contain [statements](statements.html) and end with +an expression. Each block conceptually introduces a new namespace scope. Use +items can bring new names into scopes and declared items are in scope for only +the block itself. + +A block will execute each statement sequentially, and then execute the +expression (if given). If the block doesn't end in an expression, its value is +`()`: + +```rust +let x: () = { println!("Hello."); }; +``` + +If it ends in an expression, its value and type are that of the expression: + +```rust +let x: i32 = { println!("Hello."); 5 }; + +assert_eq!(5, x); +``` + +Blocks are always [rvalues](expressions.html#lvalues-and-rvalues) and evaluate the last +expression in rvalue context. This can be used to force moving a value +if really needed. + +## `unsafe` blocks + +_See [`unsafe` block](unsafe-blocks.html) for more information on when to use `unsafe`_ + +A block of code can be prefixed with the `unsafe` keyword, to permit calling +`unsafe` functions or dereferencing raw pointers within a safe function. diff --git a/src/doc/reference/src/expressions/call-expr.md b/src/doc/reference/src/expressions/call-expr.md new file mode 100644 index 0000000000..1e2ad8518f --- /dev/null +++ b/src/doc/reference/src/expressions/call-expr.md @@ -0,0 +1,89 @@ +# Call expressions + +A _call expression_ consists of an expression followed by a parenthesized +expression-list. It invokes a function, providing zero or more input variables. +If the function eventually returns, then the expression completes. For +[non-function types](types.html#function-item-types), the expression f(...) uses the +method on one of the `std::ops::Fn`, `std::ops::FnMut` or `std::ops::FnOnce` +traits, which differ in whether they take the type by reference, mutable +reference, or take ownership respectively. An automatic borrow will be taken if +needed. Rust will also automatically dereference `f` as required. Some examples +of call expressions: + +```rust +# fn add(x: i32, y: i32) -> i32 { 0 } +let three: i32 = add(1i32, 2i32); +let name: &'static str = (|| "Rust")(); +``` + +## Disambiguating Function Calls + +Rust treats all function calls as sugar for a more explicit, fully-qualified +syntax. Upon compilation, Rust will desugar all function calls into the explicit +form. Rust may sometimes require you to qualify function calls with trait, +depending on the ambiguity of a call in light of in-scope items. 
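+
+For example, a minimal sketch of this equivalence using the standard `Clone`
+trait: a plain method call and its fully qualified spellings produce the same
+result.
+
+```rust
+let s = String::from("hi");
+// The method call below is sugar for the two qualified forms that follow.
+let a = s.clone();
+let b = String::clone(&s);
+let c = <String as Clone>::clone(&s);
+assert_eq!(a, b);
+assert_eq!(b, c);
+```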
+ +> **Note**: In the past, the Rust community used the terms "Unambiguous +> Function Call Syntax", "Universal Function Call Syntax", or "UFCS", in +> documentation, issues, RFCs, and other community writings. However, the term +> lacks descriptive power and potentially confuses the issue at hand. We mention +> it here for searchability's sake. + +Several situations often occur which result in ambiguities about the receiver or +referent of method or associated function calls. These situations may include: + +* Multiple in-scope traits define methods with the same name for the same types +* Auto-`deref` is undesirable; for example, distinguishing between methods on a + smart pointer itself and the pointer's referent +* Methods which take no arguments, like `default()`, and return properties of a + type, like `size_of()` + +To resolve the ambiguity, the programmer may refer to their desired method or +function using more specific paths, types, or traits. + +For example, + +```rust +trait Pretty { + fn print(&self); +} + +trait Ugly { + fn print(&self); +} + +struct Foo; +impl Pretty for Foo { + fn print(&self) {} +} + +struct Bar; +impl Pretty for Bar { + fn print(&self) {} +} +impl Ugly for Bar{ + fn print(&self) {} +} + +fn main() { + let f = Foo; + let b = Bar; + + // we can do this because we only have one item called `print` for `Foo`s + f.print(); + // more explicit, and, in the case of `Foo`, not necessary + Foo::print(&f); + // if you're not into the whole brevity thing + ::print(&f); + + // b.print(); // Error: multiple 'print' found + // Bar::print(&b); // Still an error: multiple `print` found + + // necessary because of in-scope items defining `print` + ::print(&b); +} +``` + +Refer to [RFC 132] for further details and motivations. + +[RFC 132]: https://github.com/rust-lang/rfcs/blob/master/text/0132-ufcs.md diff --git a/src/doc/reference/src/expressions/closure-expr.md b/src/doc/reference/src/expressions/closure-expr.md new file mode 100644 index 0000000000..339763ae86 --- /dev/null +++ b/src/doc/reference/src/expressions/closure-expr.md @@ -0,0 +1,58 @@ +# Closure expressions + +A _closure expression_ defines a closure and denotes it as a value, in a single +expression. A closure expression is a pipe-symbol-delimited (`|`) list of +patterns followed by an expression. Type annotations may optionally be added +for the type of the parameters or for the return type. If there is a return +type, the expression used for the body of the closure must be a normal +[block]. A closure expression also may begin with the +`move` keyword before the initial `|`. + +A closure expression denotes a function that maps a list of parameters +(`ident_list`) onto the expression that follows the `ident_list`. The patterns +in the `ident_list` are the parameters to the closure. If a parameter's types +is not specified, then the compiler infers it from context. Each closure +expression has a unique anonymous type. + +Closure expressions are most useful when passing functions as arguments to other +functions, as an abbreviation for defining and capturing a separate function. + +Significantly, closure expressions _capture their environment_, which regular +[function definitions] do not. Without the `move` +keyword, the closure expression infers how it captures each variable from its +environment, preferring to capture by shared reference, effectively borrowing +all outer variables mentioned inside the closure's body. 
If needed, the compiler
+will instead infer that mutable references should be taken, or that the values
+should be moved or copied (depending on their type) from the environment. A
+closure can be forced to capture its environment by copying or moving values by
+prefixing it with the `move` keyword. This is often used to ensure that the
+closure's type is `'static`.
+
+The compiler will determine which of the [closure
+traits](types.html#closure-types) the closure's type will implement by how it
+acts on its captured variables. The closure will also implement
+[`Send`](the-send-trait.html) and/or [`Sync`](the-sync-trait.html) if all of
+its captured types do. These traits allow functions to accept closures using
+generics, even though the exact types can't be named.
+
+In this example, we define a function `ten_times` that takes a higher-order
+function argument, and we then call it with a closure expression as an argument,
+followed by a closure expression that moves values from its environment.
+
+```rust
+fn ten_times<F>(f: F) where F: Fn(i32) {
+    for index in 0..10 {
+        f(index);
+    }
+}
+
+ten_times(|j| println!("hello, {}", j));
+// With type annotations
+ten_times(|j: i32| -> () { println!("hello, {}", j) });
+
+let word = "konnichiwa".to_owned();
+ten_times(move |j| println!("{}, {}", word, j));
+```
+
+[block]: expressions/block-expr.html
+[function definitions]: items/functions.html
diff --git a/src/doc/reference/src/expressions/enum-variant-expr.md b/src/doc/reference/src/expressions/enum-variant-expr.md
new file mode 100644
index 0000000000..333b909f82
--- /dev/null
+++ b/src/doc/reference/src/expressions/enum-variant-expr.md
@@ -0,0 +1,15 @@
+# Enumeration Variant expressions
+
+Enumeration variants can be constructed similarly to structs, using a path to
+an enum variant instead of to a struct:
+
+```rust
+# enum Message {
+#     Quit,
+#     WriteString(String),
+#     Move { x: i32, y: i32 },
+# }
+let q = Message::Quit;
+let w = Message::WriteString("Some string".to_string());
+let m = Message::Move { x: 50, y: 200 };
+```
diff --git a/src/doc/reference/src/expressions/field-expr.md b/src/doc/reference/src/expressions/field-expr.md
new file mode 100644
index 0000000000..86203adf1c
--- /dev/null
+++ b/src/doc/reference/src/expressions/field-expr.md
@@ -0,0 +1,43 @@
+# Field access expressions
+
+A _field expression_ consists of an expression followed by a single dot and an
+[identifier](identifiers.html), when not immediately followed by a
+parenthesized expression-list (the latter is always a [method call
+expression](expressions/method-call-expr.html)). A field expression denotes a field of a
+[struct](types.html#struct-types) or [union](items/unions.html). To call a
+function stored in a struct, parentheses are needed around the field expression:
+
+```rust,ignore
+mystruct.myfield;
+foo().x;
+(Struct {a: 10, b: 20}).a;
+mystruct.method();          // Method expression
+(mystruct.function_field)() // Call expression containing a field expression
+```
+
+A field access is an [lvalue](expressions.html#lvalues-and-rvalues) referring
+to the location of that field. When the subexpression is
+[mutable](expressions.html#mutability), the field expression is also mutable.
+
+Also, if the type of the expression to the left of the dot is a pointer, it is
+automatically dereferenced as many times as necessary to make the field access
+possible. In cases of ambiguity, we prefer fewer autoderefs to more.
+
+Finally, the fields of a struct or a reference to a struct are treated as
+separate entities when borrowing. If the struct does not implement
+[`Drop`](the-drop-trait.html) this also applies to moving out of each of its fields
+where possible. This also does not apply if automatic dereferencing is done
+through user-defined types.
+
+```rust
+# struct A { f1: String, f2: String, f3: String }
+# let mut x = A {
+#     f1: "f1".to_string(),
+#     f2: "f2".to_string(),
+#     f3: "f3".to_string()
+# };
+let a: &mut String = &mut x.f1; // x.f1 borrowed mutably
+let b: &String = &x.f2;         // x.f2 borrowed immutably
+let c: &String = &x.f2;         // Can borrow again
+let d: String = x.f3;           // Move out of x.f3
+```
diff --git a/src/doc/reference/src/expressions/if-expr.md b/src/doc/reference/src/expressions/if-expr.md
new file mode 100644
index 0000000000..585f316c4a
--- /dev/null
+++ b/src/doc/reference/src/expressions/if-expr.md
@@ -0,0 +1,59 @@
+# `if` and `if let` expressions
+
+## `if` expressions
+
+An `if` expression is a conditional branch in program control. The form of an
+`if` expression is a condition expression, followed by a consequent block, any
+number of `else if` conditions and blocks, and an optional trailing `else`
+block. The condition expressions must have type `bool`. If a condition
+expression evaluates to `true`, the consequent block is executed and any
+subsequent `else if` or `else` block is skipped. If a condition expression
+evaluates to `false`, the consequent block is skipped and any subsequent `else
+if` condition is evaluated. If all `if` and `else if` conditions evaluate to
+`false` then any `else` block is executed. An `if` expression evaluates to the
+same value as the executed block, or `()` if no block is evaluated. An `if`
+expression must have the same type in all situations.
+
+```rust
+# let x = 3;
+if x == 4 {
+    println!("x is four");
+} else if x == 3 {
+    println!("x is three");
+} else {
+    println!("x is something else");
+}
+
+let y = if 12 * 15 > 150 {
+    "Bigger"
+} else {
+    "Smaller"
+};
+assert_eq!(y, "Bigger");
+```
+## `if let` expressions
+
+An `if let` expression is semantically similar to an `if` expression but in
+place of a condition expression it expects the keyword `let` followed by a
+refutable pattern, an `=` and an expression. If the value of the expression on
+the right hand side of the `=` matches the pattern, the corresponding block
+will execute, otherwise flow proceeds to the following `else` block if it
+exists. Like `if` expressions, `if let` expressions have a value determined by
+the block that is evaluated.
+
+```rust
+let dish = ("Ham", "Eggs");
+
+// this body will be skipped because the pattern is refuted
+if let ("Bacon", b) = dish {
+    println!("Bacon is served with {}", b);
+} else {
+    // This block is evaluated instead.
+    println!("No bacon will be served");
+}
+
+// this body will execute
+if let ("Ham", b) = dish {
+    println!("Ham is served with {}", b);
+}
+```
diff --git a/src/doc/reference/src/expressions/literal-expr.md b/src/doc/reference/src/expressions/literal-expr.md
new file mode 100644
index 0000000000..5b1ef77872
--- /dev/null
+++ b/src/doc/reference/src/expressions/literal-expr.md
@@ -0,0 +1,11 @@
+# Literal expressions
+
+A _literal expression_ consists of one of the [literal](tokens.html#literals)
+forms described earlier. It directly describes a number, character, string,
+or boolean value.
+ +```rust +"hello"; // string type +'5'; // character type +5; // integer type +``` diff --git a/src/doc/reference/src/expressions/loop-expr.md b/src/doc/reference/src/expressions/loop-expr.md new file mode 100644 index 0000000000..37ba2f21b8 --- /dev/null +++ b/src/doc/reference/src/expressions/loop-expr.md @@ -0,0 +1,167 @@ +# Loops + +Rust supports four loop expressions: + +* A [`loop` expression](#infinite-loops) denotes an infinite loop. +* A [`while` expression](#predicate-loops) loops until a predicate is false. +* A [`while let` expression](#while-let-loops) tests a refutable pattern. +* A [`for` expression](#iterator-loops) extracts values from an iterator, + looping until the iterator is empty. + +All four types of loop support [`break` expressions](#break-expressions), +[`continue` expressions](#continue-expressions), and [labels](#loop-labels). +Only `loop` supports [evaluation to non-trivial values](#break-and-loop-values). + +## Infinite loops + +A `loop` expression repeats execution of its body continuously: +`loop { println!("I live."); }`. + +A `loop` expression without an associated `break` expression is +[diverging](items/functions.html#diverging-functions), and doesn't +return anything. A `loop` expression containing associated +[`break` expression(s)](#break-expressions) +may terminate, and must have type compatible with the value of the `break` +expression(s). + +## Predicate loops + +A `while` loop begins by evaluating the boolean loop conditional expression. If +the loop conditional expression evaluates to `true`, the loop body block +executes, then control returns to the loop conditional expression. If the loop +conditional expression evaluates to `false`, the `while` expression completes. + +An example: + +```rust +let mut i = 0; + +while i < 10 { + println!("hello"); + i = i + 1; +} +``` + +## `while let` loops + +A `while let` loop is semantically similar to a `while` loop but in place of a +condition expression it expects the keyword `let` followed by a refutable +pattern, an `=` and an expression. If the value of the expression on the right +hand side of the `=` matches the pattern, the loop body block executes then +control returns to the pattern matching statement. Otherwise, the while +expression completes. + +```rust +let mut x = vec![1, 2, 3]; + +while let Some(y) = x.pop() { + println!("y = {}", y); +} +``` + +## Iterator loops + +A `for` expression is a syntactic construct for looping over elements provided +by an implementation of `std::iter::IntoIterator`. If the iterator yields a +value, that value is given the specified name and the body of the loop is +executed, then control returns to the head of the `for` loop. If the iterator +is empty, the `for` expression completes. + +An example of a `for` loop over the contents of an array: + +```rust +let v = &["apples", "cake", "coffee"]; + +for text in v { + println!("I like {}.", text); +} +``` + +An example of a for loop over a series of integers: + +```rust +let mut sum = 0; +for n in 1..11 { + sum += n; +} +assert_eq!(sum, 55); +``` + +## Loop labels + +A loop expression may optionally have a _label_. The label is written as +a lifetime preceding the loop expression, as in `'foo: loop { break 'foo; }`, +`'bar: while false {}`, `'humbug: for _ in 0..0 {}`. +If a label is present, then labeled `break` and `continue` expressions nested +within this loop may exit out of this loop or return control to its head. 
+See [break expressions](#break-expressions) and [continue +expressions](#continue-expressions). + +## `break` expressions + +When `break` is encountered, execution of the associated loop body is +immediately terminated, for example: + +```rust +let mut last = 0; +for x in 1..100 { + if x > 12 { + break; + } + last = x; +} +assert_eq!(last, 12); +``` + +A `break` expression is normally associated with the innermost `loop`, `for` or +`while` loop enclosing the `break` expression, but a [label](#loop-labels) can +be used to specify which enclosing loop is affected. Example: + +```rust +'outer: loop { + while true { + break 'outer; + } +} +``` + +A `break` expression is only permitted in the body of a loop, and has one of +the forms `break`, `break 'label` or ([see below](#break-and-loop-values)) +`break EXPR` or `break 'label EXPR`. + +## `continue` expressions + +When `continue` is encountered, the current iteration of the associated loop +body is immediately terminated, returning control to the loop *head*. In +the case of a `while` loop, the head is the conditional expression controlling +the loop. In the case of a `for` loop, the head is the call-expression +controlling the loop. + +Like `break`, `continue` is normally associated with the innermost enclosing +loop, but `continue 'label` may be used to specify the loop affected. +A `continue` expression is only permitted in the body of a loop. + +## `break` and loop values + +When associated with a `loop`, a break expression may be used to return a value +from that loop, via one of the forms `break EXPR` or `break 'label EXPR`, where +`EXPR` is an expression whose result is returned from the `loop`. For example: + +```rust +let (mut a, mut b) = (1, 1); +let result = loop { + if b > 10 { + break b; + } + let c = a + b; + a = b; + b = c; +}; +// first number in Fibonacci sequence over 10: +assert_eq!(result, 13); +``` + +In the case a `loop` has an associated `break`, it is not considered diverging, +and the `loop` must have a type compatible with each `break` expression. +`break` without an expression is considered identical to `break` with +expression `()`. diff --git a/src/doc/reference/src/expressions/match-expr.md b/src/doc/reference/src/expressions/match-expr.md new file mode 100644 index 0000000000..1a4de06a63 --- /dev/null +++ b/src/doc/reference/src/expressions/match-expr.md @@ -0,0 +1,128 @@ +# `match` expressions + +A `match` expression branches on a *pattern*. The exact form of matching that +occurs depends on the pattern. Patterns consist of some combination of +literals, destructured arrays or enum constructors, structs and tuples, +variable binding specifications, wildcards (`..`), and placeholders (`_`). A +`match` expression has a *head expression*, which is the value to compare to +the patterns. The type of the patterns must equal the type of the head +expression. + +A `match` behaves differently depending on whether or not the head expression +is an [lvalue or an rvalue](expressions.html#lvalues-and-rvalues). +If the head expression is an rvalue, it is first evaluated into a temporary +location, and the resulting value is sequentially compared to the patterns in +the arms until a match is found. The first arm with a matching pattern is +chosen as the branch target of the `match`, any variables bound by the pattern +are assigned to local variables in the arm's block, and control enters the +block. 
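+
+For instance, a minimal sketch (using a stand-in function `roll`) of matching
+on an rvalue head expression, whose result is first evaluated into a temporary
+and then compared to the arms:
+
+```rust
+fn roll() -> u8 { 4 }
+
+// `roll() % 2` is an rvalue: its value is stored in a temporary location and
+// then compared sequentially against each pattern.
+let parity = match roll() % 2 {
+    0 => "even",
+    _ => "odd",
+};
+assert_eq!(parity, "even");
+```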
+ +When the head expression is an lvalue, the match does not allocate a temporary +location (however, a by-value binding may copy or move from the lvalue). When +possible, it is preferable to match on lvalues, as the lifetime of these +matches inherits the lifetime of the lvalue, rather than being restricted to +the inside of the match. + +An example of a `match` expression: + +```rust +let x = 1; + +match x { + 1 => println!("one"), + 2 => println!("two"), + 3 => println!("three"), + 4 => println!("four"), + 5 => println!("five"), + _ => println!("something else"), +} +``` + +Patterns that bind variables default to binding to a copy or move of the +matched value (depending on the matched value's type). This can be changed to +bind to a reference by using the `ref` keyword, or to a mutable reference using +`ref mut`. + +Patterns can be used to *destructure* structs, enums, and tuples. Destructuring +breaks a value up into its component pieces. The syntax used is the same as +when creating such values. When destructing a data structure with named (but +not numbered) fields, it is allowed to write `fieldname` as a shorthand for +`fieldname: fieldname`. In a pattern whose head expression has a `struct`, +`enum` or `tupl` type, a placeholder (`_`) stands for a *single* data field, +whereas a wildcard `..` stands for *all* the fields of a particular variant. + +```rust +# enum Message { +# Quit, +# WriteString(String), +# Move { x: i32, y: i32 }, +# ChangeColor(u8, u8, u8), +# } +# let message = Message::Quit; +match message { + Message::Quit => println!("Quit"), + Message::WriteString(write) => println!("{}", &write), + Message::Move{ x, y: 0 } => println!("move {} horizontally", x), + Message::Move{ .. } => println!("other move"), + Message::ChangeColor { 0: red, 1: green, 2: _ } => { + println!("color change, red: {}, green: {}", red, green); + } +}; +``` + +Patterns can also dereference pointers by using the `&`, `&mut` and `box` +symbols, as appropriate. For example, these two matches on `x: &i32` are +equivalent: + +```rust +# let x = &3; +let y = match *x { 0 => "zero", _ => "some" }; +let z = match x { &0 => "zero", _ => "some" }; + +assert_eq!(y, z); +``` + +Subpatterns can also be bound to variables by the use of the syntax `variable @ +subpattern`. For example: + +```rust +let x = 1; + +match x { + e @ 1 ... 5 => println!("got a range element {}", e), + _ => println!("anything"), +} +``` + +Multiple match patterns may be joined with the `|` operator. A range of values +may be specified with `...`. For example: + +```rust +# let x = 2; +let message = match x { + 0 | 1 => "not many", + 2 ... 9 => "a few", + _ => "lots" +}; +``` + +Range patterns only work on scalar types (like integers and characters; not +like arrays and structs, which have sub-components). A range pattern may not be +a sub-range of another range pattern inside the same `match`. + +Finally, match patterns can accept *pattern guards* to further refine the +criteria for matching a case. Pattern guards appear after the pattern and +consist of a bool-typed expression following the `if` keyword. A pattern guard +may refer to the variables bound within the pattern they follow. 
+ +```rust +# let maybe_digit = Some(0); +# fn process_digit(i: i32) { } +# fn process_other(i: i32) { } +let message = match maybe_digit { + Some(x) if x < 10 => process_digit(x), + Some(x) => process_other(x), + None => panic!(), +}; +``` + diff --git a/src/doc/reference/src/expressions/method-call-expr.md b/src/doc/reference/src/expressions/method-call-expr.md new file mode 100644 index 0000000000..0db205a31f --- /dev/null +++ b/src/doc/reference/src/expressions/method-call-expr.md @@ -0,0 +1,51 @@ +# Method-call expressions + +A _method call_ consists of an expression followed by a single dot, an +[identifier](identifiers.html), and a parenthesized expression-list. Method +calls are resolved to methods on specific traits, either statically dispatching +to a method if the exact `self`-type of the left-hand-side is known, or +dynamically dispatching if the left-hand-side expression is an indirect [trait +object](types.html#trait-objects). Method call expressions will automatically +take a shared or mutable borrow of the receiver if needed. + +```rust +let pi: Result = "3.14".parse(); +let log_pi = pi.unwrap_or(1.0).log(2.72); +# assert!(1.14 < log_pi && log_pi < 1.15) +``` + +When resolving method calls on an expression of type `A`, Rust will use the +following order: + +1. Inherent methods, with receiver of type `A`, `&A`, `&mut A`. +1. Trait methods with receiver of type `A`. +1. Trait methods with receiver of type `&A`. +1. Trait methods with receiver of type `&mut A`. +1. If it's possible, Rust will then repeat steps 1-5 with + `
::Target`, and insert a dereference operator. +1. If `A` is now an [array](types.html#array-and-slice-types) type, then + repeat steps 1-4 with the corresponding slice type. + +Note: that in steps 1-4 the receiver is used, not the type of `Self` nor the +type of `A`. For example + +```rust,ignore +// `Self` is `&A`, receiver is `&A`. +impl<'a> Trait for &'a A { + fn method(self) {} +} +// If `A` is `&B`, then `Self` is `B` and the receiver is `A`. +impl B { + fn method(&self) {} +} +``` + +Another note: this process does not use the mutability or lifetime of the +receiver, or whether `unsafe` methods can currently be called to resolve +methods. These constraints instead lead to compiler errors. + +If a step is reached where there is more than one possible method (where +generic methods or traits are considered the same), then it is a compiler +error. These cases require a [more specific +syntax.](expressions/call-expr.html#disambiguating-function-calls) for method +and function invocation. diff --git a/src/doc/reference/src/expressions/operator-expr.md b/src/doc/reference/src/expressions/operator-expr.md new file mode 100644 index 0000000000..c85d0438d9 --- /dev/null +++ b/src/doc/reference/src/expressions/operator-expr.md @@ -0,0 +1,359 @@ +# Operator expressions + +Operators are defined for built in types by the Rust language. Many of the +following operators can also be overloaded using traits in `std::ops` or +`std::cmp`. + +## Overflow + +Integer operators will panic when they overflow when compiled in debug mode. +The `-C debug-assertions` and `-C overflow-checks` compiler flags can be used +to control this more directly. The following things are considered to be +overflow: + +* When `+`, `*` or `-` create a value greater than the maximum value, or less + than the minimum value that can be stored. This includes unary `-` on the + smallest value of any signed integer type. +* Using `/` or `%`, where the left-hand argument is the smallest integer of a + signed integer type and the right-hand argument is `-1`. +* Using `<<` or `>>` where the right-hand argument is greater than or equal to + the number of bits in the type of the left-hand argument, or is negative. + +## Grouped expressions + +An expression enclosed in parentheses evaluates to the result of the enclosed +expression. Parentheses can be used to explicitly specify evaluation order +within an expression. + +An example of a parenthesized expression: + +```rust +let x: i32 = 2 + 3 * 4; +let y: i32 = (2 + 3) * 4; +assert_eq!(x, 14); +assert_eq!(y, 20); +``` + +## Borrow operators + +The `&` (shared borrow) and `&mut` (mutable borrow) operators are unary prefix +operators. When applied to an lvalue produce a reference (pointer) to the +location that the value refers to. The lvalue is also placed into a borrowed +state for the duration of the reference. For a shared borrow (`&`), this +implies that the lvalue may not be mutated, but it may be read or shared again. +For a mutable borrow (`&mut`), the lvalue may not be accessed in any way until +the borrow expires. `&mut` evaluates its operand in a mutable lvalue context. +If the `&` or `&mut` operators are applied to an rvalue, a temporary value is +created; the lifetime of this temporary value is defined by [syntactic +rules](expressions.html#temporary-lifetimes). These operators cannot be overloaded. + +```rust +{ + // a temporary with value 7 is created that lasts for this scope. 
+ let shared_reference = &7; +} +let mut array = [-2, 3, 9]; +{ + // Mutably borrows `array` for this scope. + // `array` may only be used through `mutable_reference`. + let mutable_reference = &mut array; +} +``` + +## The dereference operator + +The `*` (dereference) operator is also a unary prefix operator. When applied to +a [pointer](types.html#pointer-types) it denotes the pointed-to location. If +the expression is of type `&mut T` and `*mut T`, and is either a local +variable, a (nested) field of a local variance or is a mutable lvalue, then the +resulting [lvalue](expressions.html#lvalues-and-rvalues) can be +assigned to. Dereferencing a raw pointer requires `unsafe`. + +On non-pointer types `*x` is equivalent to `*std::ops::Deref::deref(&x)` in an +[immutable lvalue context](expressions.html#mutability) and +`*std::ops::Deref::deref_mut(&mut x)` in a mutable lvalue context. + +```rust +let x = &7; +assert_eq!(*x, 7); +let y = &mut 9; +*y = 11; +assert_eq!(*y, 11); +``` + +## The `?` operator. + +The `?` ("question mark") operator can be applied to values of the `Result` type to propagate errors. If applied to `Err(e)` it will return +`Err(From::from(e))` from the enclosing function or closure. If applied to +`Ok(x)` it will unwrap the value to return `x`. Unlike other unary operators +`?` is written in postfix notation. `?` cannot be overloaded. + +```rust +# use std::num::ParseIntError; +fn try_to_parse() -> Result { + let x: i32 = "123".parse()?; // x = 123 + let y: i32 = "24a".parse()?; // returns an Err() immediately + Ok(x + y) // Doesn't run. +} + +let res = try_to_parse(); +println!("{:?}", res); +# assert!(res.is_err()) +``` + +## Negation operators + +These are the last two unary operators. This table summarizes the behavior of +them on primitive types and which traits are used to overload these operators +for other types. Remember that signed integers are always represented using +two's complement. The operands of all of these operators are evaluated in +rvalue context so are moved or copied. + +| Symbol | Integer | `bool` | Floating Point | Overloading Trait | +|--------|-------------|-------------|----------------|--------------------| +| `-` | Negation* | | Negation | `std::ops::Neg` | +| `!` | Bitwise NOT | Logical NOT | | `std::ops::Not` | + +\* Only for signed integer types. + +Here are some example of these operators + +```rust +let x = 6; +assert_eq!(-x, -6); +assert_eq!(!x, -7); +assert_eq!(true, !false); +``` + +## Arithmetic and Logical Binary Operators + +Binary operators expressions are all written with infix notation. This table +summarizes the behavior of arithmetic and logical binary operators on +primitive types and which traits are used to overload these operators for other +types. Remember that signed integers are always represented using two's +complement. The operands of all of these operators are evaluated in rvalue +context so are moved or copied. 
+ +| Symbol | Integer | `bool` | Floating Point | Overloading Trait | +|--------|-------------------------|-------------|----------------|--------------------| +| `+` | Addition | | Addition | `std::ops::Add` | +| `-` | Subtraction | | Subtraction | `std::ops::Sub` | +| `*` | Multiplication | | Multiplication | `std::ops::Mul` | +| `/` | Division | | Division | `std::ops::Div` | +| `%` | Remainder | | Remainder | `std::ops::Rem` | +| `&` | Bitwise AND | Logical AND | | `std::ops::BitAnd` | +| | | Bitwise OR | Logical OR | | `std::ops::BitOr` | +| `^` | Bitwise XOR | Logical XOR | | `std::ops::BitXor` | +| `<<` | Left Shift | | | `std::ops::Shl` | +| `>>` | Right Shift* | | | `std::ops::Shr` | + +\* Arithmetic right shift on signed integer types, logical right shift on +unsigned integer types. + +Here are examples of these operators being used. + +```rust +assert_eq!(3 + 6, 9); +assert_eq!(5.5 - 1.25, 4.25); +assert_eq!(-5 * 14, -70); +assert_eq!(14 / 3, 4); +assert_eq!(100 % 7, 2); +assert_eq!(0b1010 & 0b1100, 0b1000); +assert_eq!(0b1010 | 0b1100, 0b1110); +assert_eq!(0b1010 ^ 0b1100, 0b110); +assert_eq!(13 << 3, 104); +assert_eq!(-10 >> 2, -3); +``` + +## Comparison Operators + +Comparison operators are also defined both for primitive types and many type in +the standard library. Parentheses are required when chaining comparison +operators. For example, the expression `a == b == c` is invalid and may be +written as `(a == b) == c`. + +Unlike arithmetic and logical operators, the traits for +overloading the operators the traits for these operators are used more +generally to show how a type may be compared and will likely be assumed to +define actual comparisons by functions that use these traits as bounds. Many +functions and macros in the standard library can then use that assumption +(although not to ensure safety). Unlike the arithmetic and logical operators +above, these operators implicitly take shared borrows of their operands, +evaluating them in lvalue context: + +```rust,ignore +a == b; +// is equivalent to +::std::cmp::PartialEq::eq(&a, &b); +``` + +This means that the operands don't have to be moved out of. + +| Symbol | Meaning | Overloading method | +|--------|--------------------------|----------------------------| +| `==` | Equal | `std::cmp::PartialEq::eq` | +| `!=` | Not equal | `std::cmp::PartialEq::ne` | +| `>` | Greater than | `std::cmp::PartialOrd::gt` | +| `<` | Less than | `std::cmp::PartialOrd::lt` | +| `>=` | Greater than or equal to | `std::cmp::PartialOrd::ge` | +| `<=` | Less than or equal to | `std::cmp::PartialOrd::le` | + +Here are examples of the comparison operators being used. + +```rust +assert!(123 == 123); +assert!(23 != -12); +assert!(12.5 > 12.2); +assert!([1, 2, 3] < [1, 3, 4]); +assert!('A' <= 'B'); +assert!("World" >= "Hello"); +``` + +## Lazy boolean operators + +The operators `||` and `&&` may be applied to operands of boolean type. The +`||` operator denotes logical 'or', and the `&&` operator denotes logical +'and'. They differ from `|` and `&` in that the right-hand operand is only +evaluated when the left-hand operand does not already determine the result of +the expression. That is, `||` only evaluates its right-hand operand when the +left-hand operand evaluates to `false`, and `&&` only when it evaluates to +`true`. + +```rust +let x = false || true; // true +let y = false && panic!(); // false, doesn't evaluate `panic!()` +``` + +## Type cast expressions + +A type cast expression is denoted with the binary operator `as`. 
+ +Executing an `as` expression casts the value on the left-hand side to the type +on the right-hand side. + +An example of an `as` expression: + +```rust +# fn sum(values: &[f64]) -> f64 { 0.0 } +# fn len(values: &[f64]) -> i32 { 0 } +fn average(values: &[f64]) -> f64 { + let sum: f64 = sum(values); + let size: f64 = len(values) as f64; + sum / size +} +``` + +`as` can be used to explicitly perform [coercions](type-coercions.html), as +well as the following additional casts. Here `*T` means either `*const T` or +`*mut T`. + +| Type of `e` | `U` | Cast performed by `e as U` | +|-----------------------|-----------------------|----------------------------------| +| Integer or Float type | Integer or Float type | Numeric cast | +| C-like enum | Integer type | Enum cast | +| `bool` or `char` | Integer type | Primitive to integer cast | +| `u8` | `char` | `u8` to `char` cast | +| `*T` | `*V` where `V: Sized` \* | Pointer to pointer cast | +| `*T` where `T: Sized` | Numeric type | Pointer to address cast | +| Integer type | `*V` where `V: Sized` | Address to pointer cast | +| `&[T; n]` | `*const T` | Array to pointer cast | +| [Function pointer](types.html#function-pointer-types) | `*V` where `V: Sized` | Function pointer to pointer cast | +| Function pointer | Integer | Function pointer to address cast | + +\* or `T` and `V` are compatible unsized types, e.g., both slices, both the +same trait object. + +### Semantics + +* Numeric cast + * Casting between two integers of the same size (e.g. i32 -> u32) is a no-op + * Casting from a larger integer to a smaller integer (e.g. u32 -> u8) will + truncate + * Casting from a smaller integer to a larger integer (e.g. u8 -> u32) will + * zero-extend if the source is unsigned + * sign-extend if the source is signed + * Casting from a float to an integer will round the float towards zero + * **[NOTE: currently this will cause Undefined Behavior if the rounded + value cannot be represented by the target integer type][float-int]**. + This includes Inf and NaN. This is a bug and will be fixed. + * Casting from an integer to float will produce the floating point + representation of the integer, rounded if necessary (rounding strategy + unspecified) + * Casting from an f32 to an f64 is perfect and lossless + * Casting from an f64 to an f32 will produce the closest possible value + (rounding strategy unspecified) + * **[NOTE: currently this will cause Undefined Behavior if the value + is finite but larger or smaller than the largest or smallest finite + value representable by f32][float-float]**. This is a bug and will + be fixed. +* Enum cast + * Casts an enum to its discriminant, then uses a numeric cast if needed. +* Primitive to integer cast + * `false` casts to `0`, `true` casts to `1` + * `char` casts to the value of the code point, then uses a numeric cast if needed. +* `u8` to `char` cast + * Casts to the `char` with the corresponding code point. + +[float-int]: https://github.com/rust-lang/rust/issues/10184 +[float-float]: https://github.com/rust-lang/rust/issues/15536 + +## Assignment expressions + +An _assignment expression_ consists of an +[lvalue](expressions.html#lvalues-and-rvalues) expression followed +by an equals sign (`=`) and an +[rvalue](expressions.html#lvalues-and-rvalues) expression. + +Evaluating an assignment expression [either copies or +moves](expressions.html#moved-and-copied-types) its right-hand operand to its left-hand +operand. 
The left-hand operand must be an lvalue: using an rvalue results in a +compiler error, rather than promoting it to a temporary. + +```rust +# let mut x = 0; +# let y = 0; +x = y; +``` + +## Compound assignment expressions + +The `+`, `-`, `*`, `/`, `%`, `&`, `|`, `^`, `<<`, and `>>` operators may be +composed with the `=` operator. The expression `lval OP= val` is equivalent to +`lval = lval OP val`. For example, `x = x + 1` may be written as `x += 1`. +Any such expression always has the [`unit`](types.html#tuple-types) type. +These operators can all be overloaded using the trait with the same name as for +the normal operation followed by 'Assign', for example, `std::ops::AddAssign` +is used to overload `+=`. As with `=`, `lval` must be an lvalue. + +```rust +let mut x = 10; +x += 4; +assert_eq!(x, 14); +``` + +## Operator precedence + +The precedence of Rust operators is ordered as follows, going from strong to +weak. Binary Operators at the same precedence level are evaluated in the order +given by their associativity. + + +| Operator | Associativity | +|-----------------------------|---------------------| +| `?` | | +| Unary `-` `*` `!` `&` `&mut` | | +| `as` `:` | left to right | +| `*` `/` `%` | left to right | +| `+` `-` | left to right | +| `<<` `>>` | left to right | +| `&` | left to right | +| `^` | left to right | +| | | left to right | +| `==` `!=` `<` `>` `<=` `>=` | Require parentheses | +| `&&` | left to right | +| || | left to right | +| `..` `...` | Require parentheses | +| `<-` | right to left | +| `=` `+=` `-=` `*=` `/=` `%=`
`&=` |= `^=` `<<=` `>>=` | right to left | + diff --git a/src/doc/reference/src/expressions/path-expr.md b/src/doc/reference/src/expressions/path-expr.md new file mode 100644 index 0000000000..66bd3e7f80 --- /dev/null +++ b/src/doc/reference/src/expressions/path-expr.md @@ -0,0 +1,21 @@ +# Path expressions + +A [path](paths.html) used as an expression context denotes either a local +variable or an item. Path expressions that resolve to local or static variables +are [lvalues](expressions.html#lvalues-and-rvalues), other paths +are rvalues. Using a `static mut` variable requires an [`unsafe` +block](expressions/block-expr.html#unsafe-blocks). + +```rust +# mod globals { +# pub static STATIC_VAR: i32 = 5; +# pub static mut STATIC_MUT_VAR: i32 = 7; +# } +# let local_var = 3; +local_var; +globals::STATIC_VAR; +unsafe { globals::STATIC_MUT_VAR }; +let some_constructor = Some::; +let push_integer = Vec::::push; +let slice_reverse = <[i32]>::reverse; +``` diff --git a/src/doc/reference/src/expressions/range-expr.md b/src/doc/reference/src/expressions/range-expr.md new file mode 100644 index 0000000000..282f3adae7 --- /dev/null +++ b/src/doc/reference/src/expressions/range-expr.md @@ -0,0 +1,20 @@ +# Range expressions + +The `..` operator will construct an object of one of the `std::ops::Range` (or +`core::ops::Range`) variants. + +```rust +1..2; // std::ops::Range +3..; // std::ops::RangeFrom +..4; // std::ops::RangeTo +..; // std::ops::RangeFull +``` + +The following expressions are equivalent. + +```rust +let x = std::ops::Range {start: 0, end: 10}; +let y = 0..10; + +assert_eq!(x, y); +``` diff --git a/src/doc/reference/src/expressions/return-expr.md b/src/doc/reference/src/expressions/return-expr.md new file mode 100644 index 0000000000..b76a34a989 --- /dev/null +++ b/src/doc/reference/src/expressions/return-expr.md @@ -0,0 +1,17 @@ +# `return` expressions + +Return expressions are denoted with the keyword `return`. Evaluating a `return` +expression moves its argument into the designated output location for the +current function call, destroys the current function activation frame, and +transfers control to the caller frame. + +An example of a `return` expression: + +```rust +fn max(a: i32, b: i32) -> i32 { + if a > b { + return a; + } + return b; +} +``` diff --git a/src/doc/reference/src/expressions/struct-expr.md b/src/doc/reference/src/expressions/struct-expr.md new file mode 100644 index 0000000000..b656dc97c4 --- /dev/null +++ b/src/doc/reference/src/expressions/struct-expr.md @@ -0,0 +1,80 @@ +# Struct expressions + +There are several forms of struct expressions. A _struct expression_ consists +of the [path](paths.html) of a [struct item](items/structs.html), followed by a +brace-enclosed list of zero or more comma-separated name-value pairs, providing +the field values of a new instance of the struct. A field name can be any +[identifier](identifiers.html), and is separated from its value expression by a +colon. In the case of a tuple struct the field names are numbers corresponding +to the position of the field. The numbers must be written in decimal, +containing no underscores and with no leading zeros or integer suffix. A value +of a [union](items/unions.html) type can also be created using this syntax, +except that it must specify exactly one field. + +Struct expressions can't be used directly in the head of a [loop] +or an [if], [if let] or [match] expression. But struct expressions can still be +in used inside parentheses, for example. 
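+
+The struct and field names below are illustrative; the comparison is rejected
+when the struct expression appears directly in the `if` head, but accepted
+once it is wrapped in parentheses:
+
+```rust
+#[derive(PartialEq)]
+struct Temperature { degrees: i32 }
+
+let t = Temperature { degrees: 20 };
+// `if t == Temperature { degrees: 20 } { ... }` would be rejected here,
+// because the struct expression appears directly in the head of the `if`.
+if t == (Temperature { degrees: 20 }) {
+    println!("same temperature");
+}
+```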
+ +[loop]: expressions/loop-expr.html +[if]: expressions/if-expr.html#if-expressions +[if let]: expressions/if-expr.html#if-let-expressions +[match]: expressions/match-expr.html + +A _tuple struct expression_ consists of the path of a struct item, followed by +a parenthesized list of one or more comma-separated expressions (in other +words, the path of a struct item followed by a tuple expression). The struct +item must be a tuple struct item. + +A _unit-like struct expression_ consists only of the path of a struct item. + +The following are examples of struct expressions: + +```rust +# struct Point { x: f64, y: f64 } +# struct NothingInMe { } +# struct TuplePoint(f64, f64); +# mod game { pub struct User<'a> { pub name: &'a str, pub age: u32, pub score: usize } } +# struct Cookie; fn some_fn(t: T) {} +Point {x: 10.0, y: 20.0}; +NothingInMe {}; +TuplePoint(10.0, 20.0); +TuplePoint { 0: 10.0, 1: 20.0 }; // Results in the same value as the above line +let u = game::User {name: "Joe", age: 35, score: 100_000}; +some_fn::(Cookie); +``` + +A struct expression forms a new value of the named struct type. Note that for a +given *unit-like* struct type, this will always be the same value. + +A struct expression can terminate with the syntax `..` followed by an +expression to denote a functional update. The expression following `..` (the +base) must have the same struct type as the new struct type being formed. The +entire expression denotes the result of constructing a new struct (with the +same type as the base expression) with the given values for the fields that +were explicitly specified and the values in the base expression for all other +fields. Just as with all struct expressions, all of the fields of the struct +must be [visible](visibility-and-privacy.html), even those not explicitly +named. + +```rust +# struct Point3d { x: i32, y: i32, z: i32 } +let base = Point3d {x: 1, y: 2, z: 3}; +Point3d {y: 0, z: 10, .. base}; +``` + +## Struct field init shorthand + +When initializing a data structure (struct, enum, union) with named (but not +numbered) fields, it is allowed to write `fieldname` as a shorthand for +`fieldname: fieldname`. This allows a compact syntax with less duplication. + +Example: + +```rust +# struct Point3d { x: i32, y: i32, z: i32 } +# let x = 0; +# let y_value = 0; +# let z = 0; +Point3d { x: x, y: y_value, z: z }; +Point3d { x, y: y_value, z }; +``` diff --git a/src/doc/reference/src/expressions/tuple-expr.md b/src/doc/reference/src/expressions/tuple-expr.md new file mode 100644 index 0000000000..5519861428 --- /dev/null +++ b/src/doc/reference/src/expressions/tuple-expr.md @@ -0,0 +1,38 @@ +# Tuple and tuple indexing expressions + +## Tuple expressions + +Tuples are written by enclosing zero or more comma-separated expressions in +parentheses. They are used to create [tuple-typed](types.html#tuple-types) +values. + +```rust +(0.0, 4.5); +("a", 4usize, true); +(); +``` + +You can disambiguate a single-element tuple from a value in parentheses with a +comma: + +```rust +(0,); // single-element tuple +(0); // zero in parentheses +``` + +## Tuple indexing expressions + +[Tuples](types.html#tuple-types) and [struct tuples](items/structs.html) can be +indexed using the number corresponding to the position of the field. The index +must be written as a [decimal literal](tokens.html#integer-literals) with no +underscores or suffix. Tuple indexing expressions also differ from field +expressions in that they can unambiguously be called as a function. 
In all +other aspects they have the same behavior. + +```rust +# struct Point(f32, f32); +let pair = (1, 2); +assert_eq!(pair.1, 2); +let unit_x = Point(1.0, 0.0); +assert_eq!(unit_x.0, 1.0); +``` diff --git a/src/doc/reference/src/interior-mutability.md b/src/doc/reference/src/interior-mutability.md new file mode 100644 index 0000000000..6a697a7539 --- /dev/null +++ b/src/doc/reference/src/interior-mutability.md @@ -0,0 +1,30 @@ +# Interior Mutability + +Sometimes a type needs be mutated while having multiple aliases, in Rust this +is achieved using a pattern called _interior mutability_. A type has interior +mutability if its internal state can be changed through a [shared reference] to +it. This goes against the usual [requirement][ub] that the value pointed to by a +shared reference is not mutated. + +[`std::cell::UnsafeCell`] type is the only legal way in Rust to disable this +requirement. When `UnsafeCell` is immutably aliased, it is still safe to +mutate, or obtain a mutable reference to, the `T` it contains. As with all +other types, it is undefined behavior to have multiple `&mut UnsafeCell` +aliases. + +Other types with interior mutability can be created by using `UnsafeCell` as +a field. The standard library provides a variety of types that provide safe +interior mutability APIs. For example, [`std::cell::RefCell`] uses run-time +borrow checks to ensure the usual rules around multiple references. The +[`std::sync::atomic`] module contains types that wrap a value that is only +accessed with atomic operations, allowing the value to be shared and mutated +across threads. + +[shared reference]: types.html#shared-references- +[ub]: behavior-considered-undefined.html +[`std::mem::transmute`]: ../std/mem/fn.transmute.html +[`std::cell::UnsafeCell`]: ../std/cell/struct.UnsafeCell.html +[`std::cell::RefCell`]: ../std/cell/struct.RefCell.html +[`std::sync::atomic`]: ../std/sync/atomic/index.html + + diff --git a/src/doc/reference/src/introduction.md b/src/doc/reference/src/introduction.md index 38486c467c..a5dd6a5b96 100644 --- a/src/doc/reference/src/introduction.md +++ b/src/doc/reference/src/introduction.md @@ -26,6 +26,9 @@ is what we have for now. You may also be interested in the [grammar]. +You can contribute to this document by opening an issue or sending a pull +request to [the Rust Reference repository]. + N. B. This document may be incomplete. Documenting everything might take a while. We have a [big issue] to track documentation for every Rust feature, so check that out if you can't find something here. @@ -33,4 +36,5 @@ so check that out if you can't find something here. [book]: ../book/index.html [standard]: ../std/index.html [grammar]: ../grammar.html +[the Rust Reference repository]: https://github.com/rust-lang-nursery/reference/ [big issue]: https://github.com/rust-lang-nursery/reference/issues/9 diff --git a/src/doc/reference/src/items.md b/src/doc/reference/src/items.md index 4decc358f1..75da8be93b 100644 --- a/src/doc/reference/src/items.md +++ b/src/doc/reference/src/items.md @@ -5,27 +5,27 @@ nested set of [modules]. Every crate has a single "outermost" anonymous module; all further items within the crate have [paths] within the module tree of the crate. -[modules]: #modules +[modules]: items/modules.html [paths]: paths.html Items are entirely determined at compile-time, generally remain fixed during execution, and may reside in read-only memory. 
-There are several kinds of item: - -* [modules](#modules) -* [`extern crate` declarations](#extern-crate-declarations) -* [`use` declarations](#use-declarations) -* [function definitions](#functions) -* [type definitions](#type-aliases) -* [struct definitions](#structs) -* [enumeration definitions](#enumerations) -* [union definitions](#unions) -* [constant items](#constant-items) -* [static items](#static-items) -* [trait definitions](#traits) -* [implementations](#implementations) -* [`extern` blocks](#external-blocks) +There are several kinds of items: + +* [modules](items/modules.html) +* [`extern crate` declarations](items/extern-crates.html) +* [`use` declarations](items/use-declarations.html) +* [function definitions](items/functions.html) +* [type definitions](items/type-aliases.html) +* [struct definitions](items/structs.html) +* [enumeration definitions](items/enumerations.html) +* [union definitions](items/unions.html) +* [constant items](items/constant-items.html) +* [static items](items/static-items.html) +* [trait definitions](items/traits.html) +* [implementations](items/implementations.html) +* [`extern` blocks](items/external-blocks.html) Some items form an implicit scope for the declaration of sub-items. In other words, within a function or module, declarations of items can (in many cases) @@ -55,1291 +55,3 @@ of type abstraction: there are no higher-ranked (or "forall") types abstracted over other types, though higher-ranked types do exist for lifetimes. [path]: paths.html - -## Modules - -A module is a container for zero or more [items]. - -[items]: items.html - -A _module item_ is a module, surrounded in braces, named, and prefixed with the -keyword `mod`. A module item introduces a new, named module into the tree of -modules making up a crate. Modules can nest arbitrarily. - -An example of a module: - -```rust -mod math { - type Complex = (f64, f64); - fn sin(f: f64) -> f64 { - /* ... */ -# panic!(); - } - fn cos(f: f64) -> f64 { - /* ... */ -# panic!(); - } - fn tan(f: f64) -> f64 { - /* ... */ -# panic!(); - } -} -``` - -Modules and types share the same namespace. Declaring a named type with the -same name as a module in scope is forbidden: that is, a type definition, trait, -struct, enumeration, union, type parameter or crate can't shadow the name of a -module in scope, or vice versa. Items brought into scope with `use` also have -this restriction. - -A module without a body is loaded from an external file, by default with the -same name as the module, plus the `.rs` extension. When a nested submodule is -loaded from an external file, it is loaded from a subdirectory path that -mirrors the module hierarchy. - -```rust,ignore -// Load the `vec` module from `vec.rs` -mod vec; - -mod thread { - // Load the `local_data` module from `thread/local_data.rs` - // or `thread/local_data/mod.rs`. - mod local_data; -} -``` - -The directories and files used for loading external file modules can be -influenced with the `path` attribute. - -```rust,ignore -#[path = "thread_files"] -mod thread { - // Load the `local_data` module from `thread_files/tls.rs` - #[path = "tls.rs"] - mod local_data; -} -``` - -### Extern crate declarations - -An _`extern crate` declaration_ specifies a dependency on an external crate. -The external crate is then bound into the declaring scope as the `ident` -provided in the `extern_crate_decl`. 
- -The external crate is resolved to a specific `soname` at compile time, and a -runtime linkage requirement to that `soname` is passed to the linker for -loading at runtime. The `soname` is resolved at compile time by scanning the -compiler's library path and matching the optional `crateid` provided against -the `crateid` attributes that were declared on the external crate when it was -compiled. If no `crateid` is provided, a default `name` attribute is assumed, -equal to the `ident` given in the `extern_crate_decl`. - -Three examples of `extern crate` declarations: - -```rust,ignore -extern crate pcre; - -extern crate std; // equivalent to: extern crate std as std; - -extern crate std as ruststd; // linking to 'std' under another name -``` - -When naming Rust crates, hyphens are disallowed. However, Cargo packages may -make use of them. In such case, when `Cargo.toml` doesn't specify a crate name, -Cargo will transparently replace `-` with `_` (Refer to [RFC 940] for more -details). - -Here is an example: - -```rust,ignore -// Importing the Cargo package hello-world -extern crate hello_world; // hyphen replaced with an underscore -``` - -[RFC 940]: https://github.com/rust-lang/rfcs/blob/master/text/0940-hyphens-considered-harmful.md - -### Use declarations - -A _use declaration_ creates one or more local name bindings synonymous with -some other [path]. Usually a `use` declaration is used to shorten the path -required to refer to a module item. These declarations may appear in [modules] -and [blocks], usually at the top. - -[path]: paths.html -[modules]: #modules -[blocks]: expressions.html#block-expressions - -> **Note**: Unlike in many languages, `use` declarations in Rust do *not* -> declare linkage dependency with external crates. Rather, [`extern crate` -> declarations](#extern-crate-declarations) declare linkage dependencies. - -Use declarations support a number of convenient shortcuts: - -* Simultaneously binding a list of paths differing only in their final element, - using the glob-like brace syntax `use a::b::{c,d,e,f};` -* Simultaneously binding a list of paths differing only in their final element - and their immediate parent module, using the `self` keyword, such as `use - a::b::{self, c, d};` -* Rebinding the target name as a new local name, using the syntax `use p::q::r - as x;`. This can also be used with the last two features: `use a::b::{self as - ab, c as abc}`. -* Binding all paths matching a given prefix, using the asterisk wildcard syntax - `use a::b::*;` - -An example of `use` declarations: - -```rust -use std::option::Option::{Some, None}; -use std::collections::hash_map::{self, HashMap}; - -fn foo(_: T){} -fn bar(map1: HashMap, map2: hash_map::HashMap){} - -fn main() { - // Equivalent to 'foo(vec![std::option::Option::Some(1.0f64), - // std::option::Option::None]);' - foo(vec![Some(1.0f64), None]); - - // Both `hash_map` and `HashMap` are in scope. - let map1 = HashMap::new(); - let map2 = hash_map::HashMap::new(); - bar(map1, map2); -} -``` - -Like items, `use` declarations are private to the containing module, by -default. Also like items, a `use` declaration can be public, if qualified by -the `pub` keyword. Such a `use` declaration serves to _re-export_ a name. A -public `use` declaration can therefore _redirect_ some public name to a -different target definition: even a definition with a private canonical path, -inside a different module. 
If a sequence of such redirections form a cycle or -cannot be resolved unambiguously, they represent a compile-time error. - -An example of re-exporting: - -```rust -# fn main() { } -mod quux { - pub use quux::foo::{bar, baz}; - - pub mod foo { - pub fn bar() { } - pub fn baz() { } - } -} -``` - -In this example, the module `quux` re-exports two public names defined in -`foo`. - -Also note that the paths contained in `use` items are relative to the crate -root. So, in the previous example, the `use` refers to `quux::foo::{bar, baz}`, -and not simply to `foo::{bar, baz}`. This also means that top-level module -declarations should be at the crate root if direct usage of the declared -modules within `use` items is desired. It is also possible to use `self` and -`super` at the beginning of a `use` item to refer to the current and direct -parent modules respectively. All rules regarding accessing declared modules in -`use` declarations apply to both module declarations and `extern crate` -declarations. - -An example of what will and will not work for `use` items: - -```rust -# #![allow(unused_imports)] -use foo::baz::foobaz; // good: foo is at the root of the crate - -mod foo { - - mod example { - pub mod iter {} - } - - use foo::example::iter; // good: foo is at crate root -// use example::iter; // bad: example is not at the crate root - use self::baz::foobaz; // good: self refers to module 'foo' - use foo::bar::foobar; // good: foo is at crate root - - pub mod bar { - pub fn foobar() { } - } - - pub mod baz { - use super::bar::foobar; // good: super refers to module 'foo' - pub fn foobaz() { } - } -} - -fn main() {} -``` - -## Functions - -A _function_ consists of a [block], along with a name and a set of parameters. -Other than a name, all these are optional. Functions are declared with the -keyword `fn`. Functions may declare a set of *input* [*variables*][variables] -as parameters, through which the caller passes arguments into the function, and -the *output* [*type*][type] of the value the function will return to its caller -on completion. - -[block]: expressions.html#block-expressions -[variables]: variables.html -[type]: types.html - -When referred to, a _function_ yields a first-class *value* of the -corresponding zero-sized [*function item type*][function item type], which -when called evaluates to a direct call to the function. - -[function item type]: types.html#function-item-types - -For example, this is a simple function: -```rust -fn answer_to_life_the_universe_and_everything() -> i32 { - return 42; -} -``` - -As with `let` bindings, function arguments are irrefutable patterns, so any -pattern that is valid in a let binding is also valid as an argument: - -```rust -fn first((value, _): (i32, i32)) -> i32 { value } -``` - -The block of a function is conceptually wrapped in a block that binds the -argument patterns and then `return`s the value of the function's block. This -means that the tail expression of the block, if evaluated, ends up being -returned to the caller. As usual, an explicit return expression within -the body of the function will short-cut that implicit return, if reached. - -For example, the function above behaves as if it was written as: - -```rust,ignore -// argument_0 is the actual first argument passed from the caller -let (value, _) = argument_0; -return { - value -}; -``` - -### Generic functions - -A _generic function_ allows one or more _parameterized types_ to appear in its -signature. 
Each type parameter must be explicitly declared in an -angle-bracket-enclosed and comma-separated list, following the function name. - -```rust -// foo is generic over A and B - -fn foo(x: A, y: B) { -# } -``` - -Inside the function signature and body, the name of the type parameter can be -used as a type name. [Trait](#traits) bounds can be specified for type -parameters to allow methods with that trait to be called on values of that -type. This is specified using the `where` syntax: - -```rust -# use std::fmt::Debug; -fn foo(x: T) where T: Debug { -# } -``` - -When a generic function is referenced, its type is instantiated based on the -context of the reference. For example, calling the `foo` function here: - -```rust -use std::fmt::Debug; - -fn foo(x: &[T]) where T: Debug { - // details elided -} - -foo(&[1, 2]); -``` - -will instantiate type parameter `T` with `i32`. - -The type parameters can also be explicitly supplied in a trailing [path] -component after the function name. This might be necessary if there is not -sufficient context to determine the type parameters. For example, -`mem::size_of::() == 4`. - -[path]: paths.html - -### Diverging functions - -A special kind of function can be declared with a `!` character where the -output type would normally be. For example: - -```rust -fn my_err(s: &str) -> ! { - println!("{}", s); - panic!(); -} -``` - -We call such functions "diverging" because they never return a value to the -caller. Every control path in a diverging function must end with a `panic!()`, -a loop expression without an associated break expression, or a call to another -diverging function on every control path. The `!` annotation does *not* denote -a type. - -It might be necessary to declare a diverging function because as mentioned -previously, the typechecker checks that every control path in a function ends -with a [`return`] or diverging expression. So, if `my_err` were declared -without the `!` annotation, the following code would not typecheck: - -[`return`]: expressions.html#return-expressions - -```rust -# fn my_err(s: &str) -> ! { panic!() } - -fn f(i: i32) -> i32 { - if i == 42 { - return 42; - } - else { - my_err("Bad number!"); - } -} -``` - -This will not compile without the `!` annotation on `my_err`, since the `else` -branch of the conditional in `f` does not return an `i32`, as required by the -signature of `f`. Adding the `!` annotation to `my_err` informs the typechecker -that, should control ever enter `my_err`, no further type judgments about `f` -need to hold, since control will never resume in any context that relies on -those judgments. Thus the return type on `f` only needs to reflect the `if` -branch of the conditional. - -### Extern functions - -Extern functions are part of Rust's foreign function interface, providing the -opposite functionality to [external blocks](#external-blocks). Whereas external -blocks allow Rust code to call foreign code, extern functions with bodies -defined in Rust code _can be called by foreign code_. They are defined in the -same way as any other Rust function, except that they have the `extern` -modifier. - -```rust -// Declares an extern fn, the ABI defaults to "C" -extern fn new_i32() -> i32 { 0 } - -// Declares an extern fn with "stdcall" ABI -# #[cfg(target_arch = "x86_64")] -extern "stdcall" fn new_i32_stdcall() -> i32 { 0 } -``` - -Unlike normal functions, extern fns have type `extern "ABI" fn()`. This is the -same type as the functions declared in an extern block. 
- -```rust -# extern fn new_i32() -> i32 { 0 } -let fptr: extern "C" fn() -> i32 = new_i32; -``` - -## Type aliases - -A _type alias_ defines a new name for an existing [type]. Type aliases are -declared with the keyword `type`. Every value has a single, specific type, but -may implement several different traits, or be compatible with several different -type constraints. - -[type]: types.html - -For example, the following defines the type `Point` as a synonym for the type -`(u8, u8)`, the type of pairs of unsigned 8 bit integers: - -```rust -type Point = (u8, u8); -let p: Point = (41, 68); -``` - -A type alias to an enum type cannot be used to qualify the constructors: - -```rust -enum E { A } -type F = E; -let _: F = E::A; // OK -// let _: F = F::A; // Doesn't work -``` - -## Structs - -A _struct_ is a nominal [struct type] defined with the keyword `struct`. - -An example of a `struct` item and its use: - -```rust -struct Point {x: i32, y: i32} -let p = Point {x: 10, y: 11}; -let px: i32 = p.x; -``` - -A _tuple struct_ is a nominal [tuple type], also defined with the keyword -`struct`. For example: - -[struct type]: types.html#struct-types -[tuple type]: types.html#tuple-types - -```rust -struct Point(i32, i32); -let p = Point(10, 11); -let px: i32 = match p { Point(x, _) => x }; -``` - -A _unit-like struct_ is a struct without any fields, defined by leaving off the -list of fields entirely. Such a struct implicitly defines a constant of its -type with the same name. For example: - -```rust -struct Cookie; -let c = [Cookie, Cookie {}, Cookie, Cookie {}]; -``` - -is equivalent to - -```rust -struct Cookie {} -const Cookie: Cookie = Cookie {}; -let c = [Cookie, Cookie {}, Cookie, Cookie {}]; -``` - -The precise memory layout of a struct is not specified. One can specify a -particular layout using the [`repr` attribute]. - -[`repr` attribute]: attributes.html#ffi-attributes - -## Enumerations - -An _enumeration_ is a simultaneous definition of a nominal [enumerated type] as -well as a set of *constructors*, that can be used to create or pattern-match -values of the corresponding enumerated type. - -[enumerated type]: types.html#enumerated-types - -Enumerations are declared with the keyword `enum`. - -An example of an `enum` item and its use: - -```rust -enum Animal { - Dog, - Cat, -} - -let mut a: Animal = Animal::Dog; -a = Animal::Cat; -``` - -Enumeration constructors can have either named or unnamed fields: - -```rust -enum Animal { - Dog (String, f64), - Cat { name: String, weight: f64 }, -} - -let mut a: Animal = Animal::Dog("Cocoa".to_string(), 37.2); -a = Animal::Cat { name: "Spotty".to_string(), weight: 2.7 }; -``` - -In this example, `Cat` is a _struct-like enum variant_, whereas `Dog` is simply -called an enum variant. Each enum instance has a _discriminant_ which is an -integer associated to it that is used to determine which variant it holds. - -### C-like Enumerations - -If there is no data attached to *any* of the variants of an enumeration it is -called a *c-like enumeration*. If a discriminant isn't specified, they start at -zero, and add one for each variant, in order. Each enum value is just its -discriminant which you can specify explicitly: - -```rust -enum Foo { - Bar, // 0 - Baz = 123, - Quux, // 124 -} -``` - -The right hand side of the specification is interpreted as an `isize` value, -but the compiler is allowed to use a smaller type in the actual memory layout. 
-The [`repr` attribute] can be added in order to change the type of the right -hand side and specify the memory layout. - -[`repr` attribute]: attributes.html#ffi-attributes - -You can also cast a c-like enum to get its discriminant: - -```rust -# enum Foo { Baz = 123 } -let x = Foo::Baz as u32; // x is now 123u32 -``` - -This only works as long as none of the variants have data attached. If it were -`Baz(i32)`, this is disallowed. - -## Unions - -A union declaration uses the same syntax as a struct declaration, except with -`union` in place of `struct`. - -```rust -#[repr(C)] -union MyUnion { - f1: u32, - f2: f32, -} -``` - -The key property of unions is that all fields of a union share common storage. -As a result writes to one field of a union can overwrite its other fields, and -size of a union is determined by the size of its largest field. - -A value of a union type can be created using the same syntax that is used for -struct types, except that it must specify exactly one field: - -```rust -# union MyUnion { f1: u32, f2: f32 } -# -let u = MyUnion { f1: 1 }; -``` - -The expression above creates a value of type `MyUnion` with active field `f1`. -Active field of a union can be accessed using the same syntax as struct fields: - -```rust,ignore -let f = u.f1; -``` - -Inactive fields can be accessed as well (using the same syntax) if they are -sufficiently layout compatible with the current value kept by the union. -Reading incompatible fields results in undefined behavior. However, the active -field is not generally known statically, so all reads of union fields have to -be placed in `unsafe` blocks. - -```rust -# union MyUnion { f1: u32, f2: f32 } -# let u = MyUnion { f1: 1 }; -# -unsafe { - let f = u.f1; -} -``` - -Writes to `Copy` union fields do not require reads for running destructors, so -these writes don't have to be placed in `unsafe` blocks - -```rust -# union MyUnion { f1: u32, f2: f32 } -# let mut u = MyUnion { f1: 1 }; -# -u.f1 = 2; -``` - -Commonly, code using unions will provide safe wrappers around unsafe union -field accesses. - -Another way to access union fields is to use pattern matching. Pattern matching -on union fields uses the same syntax as struct patterns, except that the -pattern must specify exactly one field. Since pattern matching accesses -potentially inactive fields it has to be placed in `unsafe` blocks as well. - -```rust -# union MyUnion { f1: u32, f2: f32 } -# -fn f(u: MyUnion) { - unsafe { - match u { - MyUnion { f1: 10 } => { println!("ten"); } - MyUnion { f2 } => { println!("{}", f2); } - } - } -} -``` - -Pattern matching may match a union as a field of a larger structure. In -particular, when using a Rust union to implement a C tagged union via FFI, this -allows matching on the tag and the corresponding field simultaneously: - -```rust -#[repr(u32)] -enum Tag { I, F } - -#[repr(C)] -union U { - i: i32, - f: f32, -} - -#[repr(C)] -struct Value { - tag: Tag, - u: U, -} - -fn is_zero(v: Value) -> bool { - unsafe { - match v { - Value { tag: I, u: U { i: 0 } } => true, - Value { tag: F, u: U { f: 0.0 } } => true, - _ => false, - } - } -} -``` - -Since union fields share common storage, gaining write access to one field of a -union can give write access to all its remaining fields. Borrow checking rules -have to be adjusted to account for this fact. As a result, if one field of a -union is borrowed, all its remaining fields are borrowed as well for the same -lifetime. 
- -```rust,ignore -// ERROR: cannot borrow `u` (via `u.f2`) as mutable more than once at a time -fn test() { - let mut u = MyUnion { f1: 1 }; - unsafe { - let b1 = &mut u.f1; - ---- first mutable borrow occurs here (via `u.f1`) - let b2 = &mut u.f2; - ^^^^ second mutable borrow occurs here (via `u.f2`) - *b1 = 5; - } - - first borrow ends here - assert_eq!(unsafe { u.f1 }, 5); -} -``` - -As you could see, in many aspects (except for layouts, safety and ownership) -unions behave exactly like structs, largely as a consequence of inheriting -their syntactic shape from structs. This is also true for many unmentioned -aspects of Rust language (such as privacy, name resolution, type inference, -generics, trait implementations, inherent implementations, coherence, pattern -checking, etc etc etc). - -More detailed specification for unions, including unstable bits, can be found -in [RFC 1897 "Unions v1.2"](https://github.com/rust-lang/rfcs/pull/1897). - -## Constant items - -A *constant item* is a named _[constant value]_ which is not associated with a -specific memory location in the program. Constants are essentially inlined -wherever they are used, meaning that they are copied directly into the relevant -context when used. References to the same constant are not necessarily -guaranteed to refer to the same memory address. - -[constant value]: expressions.html#constant-expressions - -Constant values must not have destructors, and otherwise permit most forms of -data. Constants may refer to the address of other constants, in which case the -address will have elided lifetimes where applicable, otherwise – in most cases -– defaulting to the `static` lifetime. (See below on [static lifetime -elision].) The compiler is, however, still at liberty to translate the constant -many times, so the address referred to may not be stable. - -[static lifetime elision]: #static-lifetime-elision - -Constants must be explicitly typed. The type may be any type that doesn't -implement [`Drop`] and has a `'static` lifetime: any references it contains -must have `'static` lifetimes. - -[`Drop`]: the-drop-trait.html - -```rust -const BIT1: u32 = 1 << 0; -const BIT2: u32 = 1 << 1; - -const BITS: [u32; 2] = [BIT1, BIT2]; -const STRING: &'static str = "bitstring"; - -struct BitsNStrings<'a> { - mybits: [u32; 2], - mystring: &'a str, -} - -const BITS_N_STRINGS: BitsNStrings<'static> = BitsNStrings { - mybits: BITS, - mystring: STRING, -}; -``` - -## Static items - -A *static item* is similar to a *constant*, except that it represents a precise -memory location in the program. A static is never "inlined" at the usage site, -and all references to it refer to the same memory location. Static items have -the `static` lifetime, which outlives all other lifetimes in a Rust program. -Static items may be placed in read-only memory if they do not contain any -interior mutability. - -Statics may contain interior mutability through the `UnsafeCell` language item. -All access to a static is safe, but there are a number of restrictions on -statics: - -* Statics may not contain any destructors. -* The types of static values must ascribe to `Sync` to allow thread-safe - access. -* Statics may not refer to other statics by value, only by reference. -* Constants cannot refer to statics. - -Constants should in general be preferred over statics, unless large amounts of -data are being stored, or single-address and mutability properties are -required. 
- -### Mutable statics - -If a static item is declared with the `mut` keyword, then it is allowed to be -modified by the program. One of Rust's goals is to make concurrency bugs hard -to run into, and this is obviously a very large source of race conditions or -other bugs. For this reason, an `unsafe` block is required when either reading -or writing a mutable static variable. Care should be taken to ensure that -modifications to a mutable static are safe with respect to other threads -running in the same process. - -Mutable statics are still very useful, however. They can be used with C -libraries and can also be bound from C libraries (in an `extern` block). - -```rust -# fn atomic_add(_: &mut u32, _: u32) -> u32 { 2 } - -static mut LEVELS: u32 = 0; - -// This violates the idea of no shared state, and this doesn't internally -// protect against races, so this function is `unsafe` -unsafe fn bump_levels_unsafe1() -> u32 { - let ret = LEVELS; - LEVELS += 1; - return ret; -} - -// Assuming that we have an atomic_add function which returns the old value, -// this function is "safe" but the meaning of the return value may not be what -// callers expect, so it's still marked as `unsafe` -unsafe fn bump_levels_unsafe2() -> u32 { - return atomic_add(&mut LEVELS, 1); -} -``` - -Mutable statics have the same restrictions as normal statics, except that the -type of the value is not required to ascribe to `Sync`. - -#### `'static` lifetime elision - -Both constant and static declarations of reference types have *implicit* -`'static` lifetimes unless an explicit lifetime is specified. As such, the -constant declarations involving `'static` above may be written without the -lifetimes. Returning to our previous example: - -```rust -const BIT1: u32 = 1 << 0; -const BIT2: u32 = 1 << 1; - -const BITS: [u32; 2] = [BIT1, BIT2]; -const STRING: &str = "bitstring"; - -struct BitsNStrings<'a> { - mybits: [u32; 2], - mystring: &'a str, -} - -const BITS_N_STRINGS: BitsNStrings = BitsNStrings { - mybits: BITS, - mystring: STRING, -}; -``` - -Note that if the `static` or `const` items include function or closure -references, which themselves include references, the compiler will first try -the standard elision rules ([see discussion in the nomicon][elision-nomicon]). -If it is unable to resolve the lifetimes by its usual rules, it will default to -using the `'static` lifetime. By way of example: - -[elision-nomicon]: ../nomicon/lifetime-elision.html - -```rust,ignore -// Resolved as `fn<'a>(&'a str) -> &'a str`. -const RESOLVED_SINGLE: fn(&str) -> &str = .. - -// Resolved as `Fn<'a, 'b, 'c>(&'a Foo, &'b Bar, &'c Baz) -> usize`. -const RESOLVED_MULTIPLE: Fn(&Foo, &Bar, &Baz) -> usize = .. - -// There is insufficient information to bound the return reference lifetime -// relative to the argument lifetimes, so the signature is resolved as -// `Fn(&'static Foo, &'static Bar) -> &'static Baz`. -const RESOLVED_STATIC: Fn(&Foo, &Bar) -> &Baz = .. -``` - -## Traits - -A _trait_ describes an abstract interface that types can implement. This -interface consists of associated items, which come in three varieties: - -- [functions](#associated-functions-and-methods) -- [types](#associated-types) -- [constants](#associated-constants) - -All traits define an implicit type parameter `Self` that refers to "the type -that is implementing this interface". Traits may also contain additional type -parameters. These type parameters (including `Self`) may be constrained by -other traits and so forth as usual. 
- -Traits are implemented for specific types through separate [implementations]. - -### Associated functions and methods - -Associated functions whose first parameter is named `self` are called methods -and may be invoked using `.` notation (e.g., `x.foo()`) as well as the usual -function call notation (`foo(x)`). - -Consider the following trait: - -```rust -# type Surface = i32; -# type BoundingBox = i32; -trait Shape { - fn draw(&self, Surface); - fn bounding_box(&self) -> BoundingBox; -} -``` - -This defines a trait with two methods. All values that have [implementations] -of this trait in scope can have their `draw` and `bounding_box` methods called, -using `value.bounding_box()` [syntax]. Note that `&self` is short for `self: -&Self`, and similarly, `self` is short for `self: Self` and `&mut self` is -short for `self: &mut Self`. - -[trait object]: types.html#trait-objects -[implementations]: #implementations -[syntax]: expressions.html#method-call-expressions - -Traits can include default implementations of methods, as in: - -```rust -trait Foo { - fn bar(&self); - fn baz(&self) { println!("We called baz."); } -} -``` - -Here the `baz` method has a default implementation, so types that implement -`Foo` need only implement `bar`. It is also possible for implementing types to -override a method that has a default implementation. - -Type parameters can be specified for a trait to make it generic. These appear -after the trait name, using the same syntax used in [generic -functions](#generic-functions). - -```rust -trait Seq { - fn len(&self) -> u32; - fn elt_at(&self, n: u32) -> T; - fn iter(&self, F) where F: Fn(T); -} -``` - -Associated functions may lack a `self` argument, sometimes called 'static -methods'. This means that they can only be called with function call syntax -(`f(x)`) and not method call syntax (`obj.f()`). The way to refer to the name -of a static method is to qualify it with the trait name or type name, treating -the trait name like a module. For example: - -```rust -trait Num { - fn from_i32(n: i32) -> Self; -} -impl Num for f64 { - fn from_i32(n: i32) -> f64 { n as f64 } -} -let x: f64 = Num::from_i32(42); -let x: f64 = f64::from_i32(42); -``` - - -### Associated Types - -It is also possible to define associated types for a trait. Consider the -following example of a `Container` trait. Notice how the type is available for -use in the method signatures: - -```rust -trait Container { - type E; - fn empty() -> Self; - fn insert(&mut self, Self::E); -} -``` - -In order for a type to implement this trait, it must not only provide -implementations for every method, but it must specify the type `E`. Here's an -implementation of `Container` for the standard library type `Vec`: - -```rust -# trait Container { -# type E; -# fn empty() -> Self; -# fn insert(&mut self, Self::E); -# } -impl Container for Vec { - type E = T; - fn empty() -> Vec { Vec::new() } - fn insert(&mut self, x: T) { self.push(x); } -} -``` - -### Associated Constants - -A trait can define constants like this: - -```rust -trait Foo { - const ID: i32; -} - -impl Foo for i32 { - const ID: i32 = 1; -} - -fn main() { - assert_eq!(1, i32::ID); -} -``` - -Any implementor of `Foo` will have to define `ID`. 
Without the definition: - -```rust,compile_fail,E0046 -trait Foo { - const ID: i32; -} - -impl Foo for i32 { -} -``` - -gives - -```text -error: not all trait items implemented, missing: `ID` [E0046] - impl Foo for i32 { - } -``` - -A default value can be implemented as well: - -```rust -trait Foo { - const ID: i32 = 1; -} - -impl Foo for i32 { -} - -impl Foo for i64 { - const ID: i32 = 5; -} - -fn main() { - assert_eq!(1, i32::ID); - assert_eq!(5, i64::ID); -} -``` - -As you can see, when implementing `Foo`, you can leave it unimplemented, as -with `i32`. It will then use the default value. But, as in `i64`, we can also -add our own definition. - -Associated constants don’t have to be associated with a trait. An `impl` block -for a `struct` or an `enum` works fine too: - -```rust -struct Foo; - -impl Foo { - const FOO: u32 = 3; -} -``` - -### Trait bounds - -Generic functions may use traits as _bounds_ on their type parameters. This -will have three effects: - -- Only types that have the trait may instantiate the parameter. -- Within the generic function, the methods of the trait can be called on values - that have the parameter's type. Associated types can be used in the - function's signature, and associated constants can be used in expressions - within the function body. -- Generic functions and types with the same or weaker bounds can use the - generic type in the function body or signature. - -For example: - -```rust -# type Surface = i32; -# trait Shape { fn draw(&self, Surface); } -struct Figure(S, S); -fn draw_twice(surface: Surface, sh: T) { - sh.draw(surface); - sh.draw(surface); -} -fn draw_figure(surface: Surface, Figure(sh1, sh2): Figure) { - sh1.draw(surface); - draw_twice(surface, sh2); // Can call this since U: Shape -} -``` - -### Trait objects - -Traits also define a [trait object] with the same name as the trait. Values of -this type are created by coercing from a pointer of some specific type to a -pointer of trait type. For example, `&T` could be coerced to `&Shape` if `T: -Shape` holds (and similarly for `Box`). This coercion can either be implicit -or [explicit]. Here is an example of an explicit coercion: - -[trait object]: types.html#trait-objects -[explicit]: expressions.html#type-cast-expressions - -```rust -trait Shape { } -impl Shape for i32 { } -let mycircle = 0i32; -let myshape: Box = Box::new(mycircle) as Box; -``` - -The resulting value is a box containing the value that was cast, along with -information that identifies the methods of the implementation that was used. -Values with a trait type can have [methods called] on them, for any method in -the trait, and can be used to instantiate type parameters that are bounded by -the trait. - -[methods called]: expressions.html#method-call-expressions - -### Supertraits - - -Trait bounds on `Self` are considered "supertraits". These are required to be -acyclic. Supertraits are somewhat different from other constraints in that -they affect what methods are available in the vtable when the trait is used as -a [trait object]. Consider the following example: - -```rust -trait Shape { fn area(&self) -> f64; } -trait Circle : Shape { fn radius(&self) -> f64; } -``` - -The syntax `Circle : Shape` means that types that implement `Circle` must also -have an implementation for `Shape`. Multiple supertraits are separated by `+`, -`trait Circle : Shape + PartialEq { }`. 
In an implementation of `Circle` for a -given type `T`, methods can refer to `Shape` methods, since the typechecker -checks that any type with an implementation of `Circle` also has an -implementation of `Shape`: - -```rust -struct Foo; - -trait Shape { fn area(&self) -> f64; } -trait Circle : Shape { fn radius(&self) -> f64; } -impl Shape for Foo { - fn area(&self) -> f64 { - 0.0 - } -} -impl Circle for Foo { - fn radius(&self) -> f64 { - println!("calling area: {}", self.area()); - - 0.0 - } -} - -let c = Foo; -c.radius(); -``` - -In type-parameterized functions, methods of the supertrait may be called on -values of subtrait-bound type parameters. Referring to the previous example of -`trait Circle : Shape`: - -```rust -# trait Shape { fn area(&self) -> f64; } -# trait Circle : Shape { fn radius(&self) -> f64; } -fn radius_times_area(c: T) -> f64 { - // `c` is both a Circle and a Shape - c.radius() * c.area() -} -``` - -Likewise, supertrait methods may also be called on trait objects. - -```rust -# trait Shape { fn area(&self) -> f64; } -# trait Circle : Shape { fn radius(&self) -> f64; } -# impl Shape for i32 { fn area(&self) -> f64 { 0.0 } } -# impl Circle for i32 { fn radius(&self) -> f64 { 0.0 } } -# let mycircle = 0i32; -let mycircle = Box::new(mycircle) as Box; -let nonsense = mycircle.radius() * mycircle.area(); -``` - -## Implementations - -An _implementation_ is an item that can implement a [trait](#traits) for a -specific type. - -Implementations are defined with the keyword `impl`. - -```rust -# #[derive(Copy, Clone)] -# struct Point {x: f64, y: f64}; -# type Surface = i32; -# struct BoundingBox {x: f64, y: f64, width: f64, height: f64}; -# trait Shape { fn draw(&self, Surface); fn bounding_box(&self) -> BoundingBox; } -# fn do_draw_circle(s: Surface, c: Circle) { } -struct Circle { - radius: f64, - center: Point, -} - -impl Copy for Circle {} - -impl Clone for Circle { - fn clone(&self) -> Circle { *self } -} - -impl Shape for Circle { - fn draw(&self, s: Surface) { do_draw_circle(s, *self); } - fn bounding_box(&self) -> BoundingBox { - let r = self.radius; - BoundingBox { - x: self.center.x - r, - y: self.center.y - r, - width: 2.0 * r, - height: 2.0 * r, - } - } -} -``` - -It is possible to define an implementation without referring to a trait. The -methods in such an implementation can only be used as direct calls on the -values of the type that the implementation targets. In such an implementation, -the trait type and `for` after `impl` are omitted. Such implementations are -limited to nominal types (enums, structs, unions, trait objects), and the -implementation must appear in the same crate as the `Self` type: - -```rust -struct Point {x: i32, y: i32} - -impl Point { - fn log(&self) { - println!("Point is at ({}, {})", self.x, self.y); - } -} - -let my_point = Point {x: 10, y:11}; -my_point.log(); -``` - -When a trait _is_ specified in an `impl`, all methods declared as part of the -trait must be implemented, with matching types and type parameter counts. - -An implementation can take type and lifetime parameters, which can be used in -the rest of the implementation. Type parameters declared for an implementation -must be used at least once in either the trait or the type of an -implementation. Implementation parameters are written after the `impl` keyword. - -```rust -# trait Seq { fn dummy(&self, _: T) { } } -impl Seq for Vec { - /* ... 
*/ -} -impl Seq for u32 { - /* Treat the integer as a sequence of bits */ -} -``` - -## External blocks - -External blocks form the basis for Rust's foreign function interface. -Declarations in an external block describe symbols in external, non-Rust -libraries. - -Functions within external blocks are declared in the same way as other Rust -functions, with the exception that they may not have a body and are instead -terminated by a semicolon. - -Functions within external blocks may be called by Rust code, just like -functions defined in Rust. The Rust compiler automatically translates between -the Rust ABI and the foreign ABI. - -Functions within external blocks may be variadic by specifying `...` after one -or more named arguments in the argument list: - -```rust,ignore -extern { - fn foo(x: i32, ...); -} -``` - -A number of [attributes] control the behavior of external blocks. - -[attributes]: attributes.html#ffi-attributes - -By default external blocks assume that the library they are calling uses the -standard C ABI on the specific platform. Other ABIs may be specified using an -`abi` string, as shown here: - -```rust,ignore -// Interface to the Windows API -extern "stdcall" { } -``` - -There are three ABI strings which are cross-platform, and which all compilers -are guaranteed to support: - -* `extern "Rust"` -- The default ABI when you write a normal `fn foo()` in any - Rust code. -* `extern "C"` -- This is the same as `extern fn foo()`; whatever the default - your C compiler supports. -* `extern "system"` -- Usually the same as `extern "C"`, except on Win32, in - which case it's `"stdcall"`, or what you should use to link to the Windows - API itself - -There are also some platform-specific ABI strings: - -* `extern "cdecl"` -- The default for x86\_32 C code. -* `extern "stdcall"` -- The default for the Win32 API on x86\_32. -* `extern "win64"` -- The default for C code on x86\_64 Windows. -* `extern "sysv64"` -- The default for C code on non-Windows x86\_64. -* `extern "aapcs"` -- The default for ARM. -* `extern "fastcall"` -- The `fastcall` ABI -- corresponds to MSVC's - `__fastcall` and GCC and clang's `__attribute__((fastcall))` -* `extern "vectorcall"` -- The `vectorcall` ABI -- corresponds to MSVC's - `__vectorcall` and clang's `__attribute__((vectorcall))` - -Finally, there are some rustc-specific ABI strings: - -* `extern "rust-intrinsic"` -- The ABI of rustc intrinsics. -* `extern "rust-call"` -- The ABI of the Fn::call trait functions. -* `extern "platform-intrinsic"` -- Specific platform intrinsics -- like, for - example, `sqrt` -- have this ABI. You should never have to deal with it. - -The `link` attribute allows the name of the library to be specified. When -specified the compiler will attempt to link against the native library of the -specified name. - -```rust,ignore -#[link(name = "crypto")] -extern { } -``` - -The type of a function declared in an extern block is `extern "abi" fn(A1, ..., -An) -> R`, where `A1...An` are the declared types of its arguments and `R` is -the declared return type. - -It is valid to add the `link` attribute on an empty extern block. You can use -this to satisfy the linking requirements of extern blocks elsewhere in your -code (including upstream crates) instead of adding the attribute to each extern -block. 
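-
-As an illustrative sketch of the function type described above, and assuming
-that the C `abs` function is available from the platform's C library, an
-extern declaration can be used both as a value of the corresponding
-`extern "C" fn` type and as a callable (unsafe) function:
-
-```rust,ignore
-extern {
-    fn abs(input: i32) -> i32;
-}
-
-// The declared item has type `extern "C" fn(i32) -> i32` ...
-let f: extern "C" fn(i32) -> i32 = abs;
-
-// ... and calling a foreign function requires an `unsafe` block.
-let x = unsafe { f(-3) };
-assert_eq!(x, 3);
-```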
diff --git a/src/doc/reference/src/items/constant-items.md b/src/doc/reference/src/items/constant-items.md new file mode 100644 index 0000000000..182c7e0fb5 --- /dev/null +++ b/src/doc/reference/src/items/constant-items.md @@ -0,0 +1,42 @@ +# Constant items + +A *constant item* is a named _[constant value]_ which is not associated with a +specific memory location in the program. Constants are essentially inlined +wherever they are used, meaning that they are copied directly into the relevant +context when used. References to the same constant are not necessarily +guaranteed to refer to the same memory address. + +[constant value]: expressions.html#constant-expressions + +Constant values must not have destructors, and otherwise permit most forms of +data. Constants may refer to the address of other constants, in which case the +address will have elided lifetimes where applicable, otherwise – in most cases +– defaulting to the `static` lifetime. (See below on [static lifetime +elision].) The compiler is, however, still at liberty to translate the constant +many times, so the address referred to may not be stable. + +[static lifetime elision]: items/static-items.html#static-lifetime-elision + +Constants must be explicitly typed. The type may be any type that doesn't +implement [`Drop`] and has a `'static` lifetime: any references it contains +must have `'static` lifetimes. + +[`Drop`]: the-drop-trait.html + +```rust +const BIT1: u32 = 1 << 0; +const BIT2: u32 = 1 << 1; + +const BITS: [u32; 2] = [BIT1, BIT2]; +const STRING: &'static str = "bitstring"; + +struct BitsNStrings<'a> { + mybits: [u32; 2], + mystring: &'a str, +} + +const BITS_N_STRINGS: BitsNStrings<'static> = BitsNStrings { + mybits: BITS, + mystring: STRING, +}; +``` diff --git a/src/doc/reference/src/items/enumerations.md b/src/doc/reference/src/items/enumerations.md new file mode 100644 index 0000000000..81b92ec96c --- /dev/null +++ b/src/doc/reference/src/items/enumerations.md @@ -0,0 +1,69 @@ +# Enumerations + +An _enumeration_ is a simultaneous definition of a nominal [enumerated type] as +well as a set of *constructors*, that can be used to create or pattern-match +values of the corresponding enumerated type. + +[enumerated type]: types.html#enumerated-types + +Enumerations are declared with the keyword `enum`. + +An example of an `enum` item and its use: + +```rust +enum Animal { + Dog, + Cat, +} + +let mut a: Animal = Animal::Dog; +a = Animal::Cat; +``` + +Enumeration constructors can have either named or unnamed fields: + +```rust +enum Animal { + Dog (String, f64), + Cat { name: String, weight: f64 }, +} + +let mut a: Animal = Animal::Dog("Cocoa".to_string(), 37.2); +a = Animal::Cat { name: "Spotty".to_string(), weight: 2.7 }; +``` + +In this example, `Cat` is a _struct-like enum variant_, whereas `Dog` is simply +called an enum variant. Each enum instance has a _discriminant_ which is an +integer associated to it that is used to determine which variant it holds. + +## C-like Enumerations + +If there is no data attached to *any* of the variants of an enumeration it is +called a *c-like enumeration*. If a discriminant isn't specified, they start at +zero, and add one for each variant, in order. Each enum value is just its +discriminant which you can specify explicitly: + +```rust +enum Foo { + Bar, // 0 + Baz = 123, + Quux, // 124 +} +``` + +The right hand side of the specification is interpreted as an `isize` value, +but the compiler is allowed to use a smaller type in the actual memory layout. 
+The [`repr` attribute] can be added in order to change the type of the right +hand side and specify the memory layout. + +[`repr` attribute]: attributes.html#ffi-attributes + +You can also cast a c-like enum to get its discriminant: + +```rust +# enum Foo { Baz = 123 } +let x = Foo::Baz as u32; // x is now 123u32 +``` + +This only works as long as none of the variants have data attached. If it were +`Baz(i32)`, this is disallowed. diff --git a/src/doc/reference/src/items/extern-crates.md b/src/doc/reference/src/items/extern-crates.md new file mode 100644 index 0000000000..e12e37495c --- /dev/null +++ b/src/doc/reference/src/items/extern-crates.md @@ -0,0 +1,37 @@ +# Extern crate declarations + +An _`extern crate` declaration_ specifies a dependency on an external crate. +The external crate is then bound into the declaring scope as the `ident` +provided in the `extern_crate_decl`. + +The external crate is resolved to a specific `soname` at compile time, and a +runtime linkage requirement to that `soname` is passed to the linker for +loading at runtime. The `soname` is resolved at compile time by scanning the +compiler's library path and matching the optional `crateid` provided against +the `crateid` attributes that were declared on the external crate when it was +compiled. If no `crateid` is provided, a default `name` attribute is assumed, +equal to the `ident` given in the `extern_crate_decl`. + +Three examples of `extern crate` declarations: + +```rust,ignore +extern crate pcre; + +extern crate std; // equivalent to: extern crate std as std; + +extern crate std as ruststd; // linking to 'std' under another name +``` + +When naming Rust crates, hyphens are disallowed. However, Cargo packages may +make use of them. In such case, when `Cargo.toml` doesn't specify a crate name, +Cargo will transparently replace `-` with `_` (Refer to [RFC 940] for more +details). + +Here is an example: + +```rust,ignore +// Importing the Cargo package hello-world +extern crate hello_world; // hyphen replaced with an underscore +``` + +[RFC 940]: https://github.com/rust-lang/rfcs/blob/master/text/0940-hyphens-considered-harmful.md diff --git a/src/doc/reference/src/items/external-blocks.md b/src/doc/reference/src/items/external-blocks.md new file mode 100644 index 0000000000..edbe3be89f --- /dev/null +++ b/src/doc/reference/src/items/external-blocks.md @@ -0,0 +1,83 @@ +# External blocks + +External blocks form the basis for Rust's foreign function interface. +Declarations in an external block describe symbols in external, non-Rust +libraries. + +Functions within external blocks are declared in the same way as other Rust +functions, with the exception that they may not have a body and are instead +terminated by a semicolon. + +Functions within external blocks may be called by Rust code, just like +functions defined in Rust. The Rust compiler automatically translates between +the Rust ABI and the foreign ABI. + +Functions within external blocks may be variadic by specifying `...` after one +or more named arguments in the argument list: + +```rust,ignore +extern { + fn foo(x: i32, ...); +} +``` + +A number of [attributes] control the behavior of external blocks. + +[attributes]: attributes.html#ffi-attributes + +By default external blocks assume that the library they are calling uses the +standard C ABI on the specific platform. 
Other ABIs may be specified using an +`abi` string, as shown here: + +```rust,ignore +// Interface to the Windows API +extern "stdcall" { } +``` + +There are three ABI strings which are cross-platform, and which all compilers +are guaranteed to support: + +* `extern "Rust"` -- The default ABI when you write a normal `fn foo()` in any + Rust code. +* `extern "C"` -- This is the same as `extern fn foo()`; whatever the default + your C compiler supports. +* `extern "system"` -- Usually the same as `extern "C"`, except on Win32, in + which case it's `"stdcall"`, or what you should use to link to the Windows + API itself + +There are also some platform-specific ABI strings: + +* `extern "cdecl"` -- The default for x86\_32 C code. +* `extern "stdcall"` -- The default for the Win32 API on x86\_32. +* `extern "win64"` -- The default for C code on x86\_64 Windows. +* `extern "sysv64"` -- The default for C code on non-Windows x86\_64. +* `extern "aapcs"` -- The default for ARM. +* `extern "fastcall"` -- The `fastcall` ABI -- corresponds to MSVC's + `__fastcall` and GCC and clang's `__attribute__((fastcall))` +* `extern "vectorcall"` -- The `vectorcall` ABI -- corresponds to MSVC's + `__vectorcall` and clang's `__attribute__((vectorcall))` + +Finally, there are some rustc-specific ABI strings: + +* `extern "rust-intrinsic"` -- The ABI of rustc intrinsics. +* `extern "rust-call"` -- The ABI of the Fn::call trait functions. +* `extern "platform-intrinsic"` -- Specific platform intrinsics -- like, for + example, `sqrt` -- have this ABI. You should never have to deal with it. + +The `link` attribute allows the name of the library to be specified. When +specified the compiler will attempt to link against the native library of the +specified name. + +```rust,ignore +#[link(name = "crypto")] +extern { } +``` + +The type of a function declared in an extern block is `extern "abi" fn(A1, ..., +An) -> R`, where `A1...An` are the declared types of its arguments and `R` is +the declared return type. + +It is valid to add the `link` attribute on an empty extern block. You can use +this to satisfy the linking requirements of extern blocks elsewhere in your +code (including upstream crates) instead of adding the attribute to each extern +block. diff --git a/src/doc/reference/src/items/functions.md b/src/doc/reference/src/items/functions.md new file mode 100644 index 0000000000..7572ba331c --- /dev/null +++ b/src/doc/reference/src/items/functions.md @@ -0,0 +1,167 @@ +# Functions + +A _function_ consists of a [block], along with a name and a set of parameters. +Other than a name, all these are optional. Functions are declared with the +keyword `fn`. Functions may declare a set of *input* [*variables*][variables] +as parameters, through which the caller passes arguments into the function, and +the *output* [*type*][type] of the value the function will return to its caller +on completion. + +[block]: expressions/block-expr.html +[variables]: variables.html +[type]: types.html + +When referred to, a _function_ yields a first-class *value* of the +corresponding zero-sized [*function item type*][function item type], which +when called evaluates to a direct call to the function. 
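+
+For instance, as a minimal sketch (the function name here is purely
+illustrative), naming a function yields such a value, which can, among other
+things, be coerced to a function pointer:
+
+```rust
+fn add_one(x: i32) -> i32 { x + 1 }
+
+// Naming `add_one` yields a zero-sized function item value; here it is
+// coerced to the function pointer type `fn(i32) -> i32`.
+let f: fn(i32) -> i32 = add_one;
+assert_eq!(f(41), 42);
+```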
+ +[function item type]: types.html#function-item-types + +For example, this is a simple function: +```rust +fn answer_to_life_the_universe_and_everything() -> i32 { + return 42; +} +``` + +As with `let` bindings, function arguments are irrefutable patterns, so any +pattern that is valid in a let binding is also valid as an argument: + +```rust +fn first((value, _): (i32, i32)) -> i32 { value } +``` + +The block of a function is conceptually wrapped in a block that binds the +argument patterns and then `return`s the value of the function's block. This +means that the tail expression of the block, if evaluated, ends up being +returned to the caller. As usual, an explicit return expression within +the body of the function will short-cut that implicit return, if reached. + +For example, the function above behaves as if it was written as: + +```rust,ignore +// argument_0 is the actual first argument passed from the caller +let (value, _) = argument_0; +return { + value +}; +``` + +## Generic functions + +A _generic function_ allows one or more _parameterized types_ to appear in its +signature. Each type parameter must be explicitly declared in an +angle-bracket-enclosed and comma-separated list, following the function name. + +```rust +// foo is generic over A and B + +fn foo(x: A, y: B) { +# } +``` + +Inside the function signature and body, the name of the type parameter can be +used as a type name. [Trait](items/traits.html) bounds can be specified for type +parameters to allow methods with that trait to be called on values of that +type. This is specified using the `where` syntax: + +```rust +# use std::fmt::Debug; +fn foo(x: T) where T: Debug { +# } +``` + +When a generic function is referenced, its type is instantiated based on the +context of the reference. For example, calling the `foo` function here: + +```rust +use std::fmt::Debug; + +fn foo(x: &[T]) where T: Debug { + // details elided +} + +foo(&[1, 2]); +``` + +will instantiate type parameter `T` with `i32`. + +The type parameters can also be explicitly supplied in a trailing [path] +component after the function name. This might be necessary if there is not +sufficient context to determine the type parameters. For example, +`mem::size_of::() == 4`. + +[path]: paths.html + +## Diverging functions + +A special kind of function can be declared with a `!` character where the +output type would normally be. For example: + +```rust +fn my_err(s: &str) -> ! { + println!("{}", s); + panic!(); +} +``` + +We call such functions "diverging" because they never return a value to the +caller. Every control path in a diverging function must end with a `panic!()`, +a loop expression without an associated break expression, or a call to another +diverging function on every control path. The `!` annotation does *not* denote +a type. + +It might be necessary to declare a diverging function because as mentioned +previously, the typechecker checks that every control path in a function ends +with a [`return`] or diverging expression. So, if `my_err` were declared +without the `!` annotation, the following code would not typecheck: + +[`return`]: expressions/return-expr.html + +```rust +# fn my_err(s: &str) -> ! { panic!() } + +fn f(i: i32) -> i32 { + if i == 42 { + return 42; + } + else { + my_err("Bad number!"); + } +} +``` + +This will not compile without the `!` annotation on `my_err`, since the `else` +branch of the conditional in `f` does not return an `i32`, as required by the +signature of `f`. 
Adding the `!` annotation to `my_err` informs the typechecker +that, should control ever enter `my_err`, no further type judgments about `f` +need to hold, since control will never resume in any context that relies on +those judgments. Thus the return type on `f` only needs to reflect the `if` +branch of the conditional. + +## Extern functions + +Extern functions are part of Rust's foreign function interface, providing the +opposite functionality to [external blocks](items/external-blocks.html). Whereas external +blocks allow Rust code to call foreign code, extern functions with bodies +defined in Rust code _can be called by foreign code_. They are defined in the +same way as any other Rust function, except that they have the `extern` +modifier. + +```rust +// Declares an extern fn, the ABI defaults to "C" +extern fn new_i32() -> i32 { 0 } + +// Declares an extern fn with "stdcall" ABI +# #[cfg(target_arch = "x86_64")] +extern "stdcall" fn new_i32_stdcall() -> i32 { 0 } +``` + +Unlike normal functions, extern fns have type `extern "ABI" fn()`. This is the +same type as the functions declared in an extern block. + +```rust +# extern fn new_i32() -> i32 { 0 } +let fptr: extern "C" fn() -> i32 = new_i32; +``` + diff --git a/src/doc/reference/src/items/implementations.md b/src/doc/reference/src/items/implementations.md new file mode 100644 index 0000000000..51330570c0 --- /dev/null +++ b/src/doc/reference/src/items/implementations.md @@ -0,0 +1,76 @@ +# Implementations + +An _implementation_ is an item that can implement a [trait](items/traits.html) for a +specific type. + +Implementations are defined with the keyword `impl`. + +```rust +# #[derive(Copy, Clone)] +# struct Point {x: f64, y: f64}; +# type Surface = i32; +# struct BoundingBox {x: f64, y: f64, width: f64, height: f64}; +# trait Shape { fn draw(&self, Surface); fn bounding_box(&self) -> BoundingBox; } +# fn do_draw_circle(s: Surface, c: Circle) { } +struct Circle { + radius: f64, + center: Point, +} + +impl Copy for Circle {} + +impl Clone for Circle { + fn clone(&self) -> Circle { *self } +} + +impl Shape for Circle { + fn draw(&self, s: Surface) { do_draw_circle(s, *self); } + fn bounding_box(&self) -> BoundingBox { + let r = self.radius; + BoundingBox { + x: self.center.x - r, + y: self.center.y - r, + width: 2.0 * r, + height: 2.0 * r, + } + } +} +``` + +It is possible to define an implementation without referring to a trait. The +methods in such an implementation can only be used as direct calls on the +values of the type that the implementation targets. In such an implementation, +the trait type and `for` after `impl` are omitted. Such implementations are +limited to nominal types (enums, structs, unions, trait objects), and the +implementation must appear in the same crate as the `Self` type: + +```rust +struct Point {x: i32, y: i32} + +impl Point { + fn log(&self) { + println!("Point is at ({}, {})", self.x, self.y); + } +} + +let my_point = Point {x: 10, y:11}; +my_point.log(); +``` + +When a trait _is_ specified in an `impl`, all methods declared as part of the +trait must be implemented, with matching types and type parameter counts. + +An implementation can take type and lifetime parameters, which can be used in +the rest of the implementation. Type parameters declared for an implementation +must be used at least once in either the trait or the type of an +implementation. Implementation parameters are written after the `impl` keyword. + +```rust +# trait Seq { fn dummy(&self, _: T) { } } +impl Seq for Vec { + /* ... 
*/ +} +impl Seq for u32 { + /* Treat the integer as a sequence of bits */ +} +``` diff --git a/src/doc/reference/src/items/modules.md b/src/doc/reference/src/items/modules.md new file mode 100644 index 0000000000..4ab8ea1287 --- /dev/null +++ b/src/doc/reference/src/items/modules.md @@ -0,0 +1,63 @@ +# Modules + +A module is a container for zero or more [items]. + +[items]: items.html + +A _module item_ is a module, surrounded in braces, named, and prefixed with the +keyword `mod`. A module item introduces a new, named module into the tree of +modules making up a crate. Modules can nest arbitrarily. + +An example of a module: + +```rust +mod math { + type Complex = (f64, f64); + fn sin(f: f64) -> f64 { + /* ... */ +# panic!(); + } + fn cos(f: f64) -> f64 { + /* ... */ +# panic!(); + } + fn tan(f: f64) -> f64 { + /* ... */ +# panic!(); + } +} +``` + +Modules and types share the same namespace. Declaring a named type with the +same name as a module in scope is forbidden: that is, a type definition, trait, +struct, enumeration, union, type parameter or crate can't shadow the name of a +module in scope, or vice versa. Items brought into scope with `use` also have +this restriction. + +A module without a body is loaded from an external file, by default with the +same name as the module, plus the `.rs` extension. When a nested submodule is +loaded from an external file, it is loaded from a subdirectory path that +mirrors the module hierarchy. + +```rust,ignore +// Load the `vec` module from `vec.rs` +mod vec; + +mod thread { + // Load the `local_data` module from `thread/local_data.rs` + // or `thread/local_data/mod.rs`. + mod local_data; +} +``` + +The directories and files used for loading external file modules can be +influenced with the `path` attribute. + +```rust,ignore +#[path = "thread_files"] +mod thread { + // Load the `local_data` module from `thread_files/tls.rs` + #[path = "tls.rs"] + mod local_data; +} +``` diff --git a/src/doc/reference/src/items/static-items.md b/src/doc/reference/src/items/static-items.md new file mode 100644 index 0000000000..bac030477f --- /dev/null +++ b/src/doc/reference/src/items/static-items.md @@ -0,0 +1,105 @@ +# Static items + +A *static item* is similar to a *constant*, except that it represents a precise +memory location in the program. A static is never "inlined" at the usage site, +and all references to it refer to the same memory location. Static items have +the `static` lifetime, which outlives all other lifetimes in a Rust program. +Static items may be placed in read-only memory if they do not contain any +interior mutability. + +Statics may contain interior mutability through the `UnsafeCell` language item. +All access to a static is safe, but there are a number of restrictions on +statics: + +* Statics may not contain any destructors. +* The types of static values must ascribe to `Sync` to allow thread-safe + access. +* Statics may not refer to other statics by value, only by reference. +* Constants cannot refer to statics. + +Constants should in general be preferred over statics, unless large amounts of +data are being stored, or single-address and mutability properties are +required. + +## Mutable statics + +If a static item is declared with the `mut` keyword, then it is allowed to be +modified by the program. One of Rust's goals is to make concurrency bugs hard +to run into, and this is obviously a very large source of race conditions or +other bugs. 
For this reason, an `unsafe` block is required when either reading +or writing a mutable static variable. Care should be taken to ensure that +modifications to a mutable static are safe with respect to other threads +running in the same process. + +Mutable statics are still very useful, however. They can be used with C +libraries and can also be bound from C libraries (in an `extern` block). + +```rust +# fn atomic_add(_: &mut u32, _: u32) -> u32 { 2 } + +static mut LEVELS: u32 = 0; + +// This violates the idea of no shared state, and this doesn't internally +// protect against races, so this function is `unsafe` +unsafe fn bump_levels_unsafe1() -> u32 { + let ret = LEVELS; + LEVELS += 1; + return ret; +} + +// Assuming that we have an atomic_add function which returns the old value, +// this function is "safe" but the meaning of the return value may not be what +// callers expect, so it's still marked as `unsafe` +unsafe fn bump_levels_unsafe2() -> u32 { + return atomic_add(&mut LEVELS, 1); +} +``` + +Mutable statics have the same restrictions as normal statics, except that the +type of the value is not required to ascribe to `Sync`. + +## `'static` lifetime elision + +Both constant and static declarations of reference types have *implicit* +`'static` lifetimes unless an explicit lifetime is specified. As such, the +constant declarations involving `'static` above may be written without the +lifetimes. Returning to our previous example: + +```rust +const BIT1: u32 = 1 << 0; +const BIT2: u32 = 1 << 1; + +const BITS: [u32; 2] = [BIT1, BIT2]; +const STRING: &str = "bitstring"; + +struct BitsNStrings<'a> { + mybits: [u32; 2], + mystring: &'a str, +} + +const BITS_N_STRINGS: BitsNStrings = BitsNStrings { + mybits: BITS, + mystring: STRING, +}; +``` + +Note that if the `static` or `const` items include function or closure +references, which themselves include references, the compiler will first try +the standard elision rules ([see discussion in the nomicon][elision-nomicon]). +If it is unable to resolve the lifetimes by its usual rules, it will default to +using the `'static` lifetime. By way of example: + +[elision-nomicon]: ../nomicon/lifetime-elision.html + +```rust,ignore +// Resolved as `fn<'a>(&'a str) -> &'a str`. +const RESOLVED_SINGLE: fn(&str) -> &str = .. + +// Resolved as `Fn<'a, 'b, 'c>(&'a Foo, &'b Bar, &'c Baz) -> usize`. +const RESOLVED_MULTIPLE: Fn(&Foo, &Bar, &Baz) -> usize = .. + +// There is insufficient information to bound the return reference lifetime +// relative to the argument lifetimes, so the signature is resolved as +// `Fn(&'static Foo, &'static Bar) -> &'static Baz`. +const RESOLVED_STATIC: Fn(&Foo, &Bar) -> &Baz = .. +``` diff --git a/src/doc/reference/src/items/structs.md b/src/doc/reference/src/items/structs.md new file mode 100644 index 0000000000..1a863411a8 --- /dev/null +++ b/src/doc/reference/src/items/structs.md @@ -0,0 +1,45 @@ +# Structs + +A _struct_ is a nominal [struct type] defined with the keyword `struct`. + +An example of a `struct` item and its use: + +```rust +struct Point {x: i32, y: i32} +let p = Point {x: 10, y: 11}; +let px: i32 = p.x; +``` + +A _tuple struct_ is a nominal [tuple type], also defined with the keyword +`struct`. For example: + +[struct type]: types.html#struct-types +[tuple type]: types.html#tuple-types + +```rust +struct Point(i32, i32); +let p = Point(10, 11); +let px: i32 = match p { Point(x, _) => x }; +``` + +A _unit-like struct_ is a struct without any fields, defined by leaving off the +list of fields entirely. 
Such a struct implicitly defines a constant of its +type with the same name. For example: + +```rust +struct Cookie; +let c = [Cookie, Cookie {}, Cookie, Cookie {}]; +``` + +is equivalent to + +```rust +struct Cookie {} +const Cookie: Cookie = Cookie {}; +let c = [Cookie, Cookie {}, Cookie, Cookie {}]; +``` + +The precise memory layout of a struct is not specified. One can specify a +particular layout using the [`repr` attribute]. + +[`repr` attribute]: attributes.html#ffi-attributes diff --git a/src/doc/reference/src/items/traits.md b/src/doc/reference/src/items/traits.md new file mode 100644 index 0000000000..4819401674 --- /dev/null +++ b/src/doc/reference/src/items/traits.md @@ -0,0 +1,308 @@ +# Traits + +A _trait_ describes an abstract interface that types can implement. This +interface consists of associated items, which come in three varieties: + +- [functions](#associated-functions-and-methods) +- [types](#associated-types) +- [constants](#associated-constants) + +All traits define an implicit type parameter `Self` that refers to "the type +that is implementing this interface". Traits may also contain additional type +parameters. These type parameters (including `Self`) may be constrained by +other traits and so forth as usual. + +Traits are implemented for specific types through separate [implementations]. + +## Associated functions and methods + +Associated functions whose first parameter is named `self` are called methods +and may be invoked using `.` notation (e.g., `x.foo()`) as well as the usual +function call notation (`foo(x)`). + +Consider the following trait: + +```rust +# type Surface = i32; +# type BoundingBox = i32; +trait Shape { + fn draw(&self, Surface); + fn bounding_box(&self) -> BoundingBox; +} +``` + +This defines a trait with two methods. All values that have [implementations] +of this trait in scope can have their `draw` and `bounding_box` methods called, +using `value.bounding_box()` [syntax]. Note that `&self` is short for `self: +&Self`, and similarly, `self` is short for `self: Self` and `&mut self` is +short for `self: &mut Self`. + +[trait object]: types.html#trait-objects +[implementations]: items/implementations.html +[syntax]: expressions/method-call-expr.html + +Traits can include default implementations of methods, as in: + +```rust +trait Foo { + fn bar(&self); + fn baz(&self) { println!("We called baz."); } +} +``` + +Here the `baz` method has a default implementation, so types that implement +`Foo` need only implement `bar`. It is also possible for implementing types to +override a method that has a default implementation. + +Type parameters can be specified for a trait to make it generic. These appear +after the trait name, using the same syntax used in [generic +functions](items/functions.html#generic-functions). + +```rust +trait Seq { + fn len(&self) -> u32; + fn elt_at(&self, n: u32) -> T; + fn iter(&self, F) where F: Fn(T); +} +``` + +Associated functions may lack a `self` argument, sometimes called 'static +methods'. This means that they can only be called with function call syntax +(`f(x)`) and not method call syntax (`obj.f()`). The way to refer to the name +of a static method is to qualify it with the trait name or type name, treating +the trait name like a module. 
For example: + +```rust +trait Num { + fn from_i32(n: i32) -> Self; +} +impl Num for f64 { + fn from_i32(n: i32) -> f64 { n as f64 } +} +let x: f64 = Num::from_i32(42); +let x: f64 = f64::from_i32(42); +``` + +## Associated Types + +It is also possible to define associated types for a trait. Consider the +following example of a `Container` trait. Notice how the type is available for +use in the method signatures: + +```rust +trait Container { + type E; + fn empty() -> Self; + fn insert(&mut self, Self::E); +} +``` + +In order for a type to implement this trait, it must not only provide +implementations for every method, but it must specify the type `E`. Here's an +implementation of `Container` for the standard library type `Vec`: + +```rust +# trait Container { +# type E; +# fn empty() -> Self; +# fn insert(&mut self, Self::E); +# } +impl Container for Vec { + type E = T; + fn empty() -> Vec { Vec::new() } + fn insert(&mut self, x: T) { self.push(x); } +} +``` + +## Associated Constants + +A trait can define constants like this: + +```rust +trait Foo { + const ID: i32; +} + +impl Foo for i32 { + const ID: i32 = 1; +} + +fn main() { + assert_eq!(1, i32::ID); +} +``` + +Any implementor of `Foo` will have to define `ID`. Without the definition: + +```rust,compile_fail,E0046 +trait Foo { + const ID: i32; +} + +impl Foo for i32 { +} +``` + +gives + +```text +error: not all trait items implemented, missing: `ID` [E0046] + impl Foo for i32 { + } +``` + +A default value can be implemented as well: + +```rust +trait Foo { + const ID: i32 = 1; +} + +impl Foo for i32 { +} + +impl Foo for i64 { + const ID: i32 = 5; +} + +fn main() { + assert_eq!(1, i32::ID); + assert_eq!(5, i64::ID); +} +``` + +As you can see, when implementing `Foo`, you can leave it unimplemented, as +with `i32`. It will then use the default value. But, as in `i64`, we can also +add our own definition. + +Associated constants don’t have to be associated with a trait. An `impl` block +for a `struct` or an `enum` works fine too: + +```rust +struct Foo; + +impl Foo { + const FOO: u32 = 3; +} +``` + +## Trait bounds + +Generic functions may use traits as _bounds_ on their type parameters. This +will have three effects: + +- Only types that have the trait may instantiate the parameter. +- Within the generic function, the methods of the trait can be called on values + that have the parameter's type. Associated types can be used in the + function's signature, and associated constants can be used in expressions + within the function body. +- Generic functions and types with the same or weaker bounds can use the + generic type in the function body or signature. + +For example: + +```rust +# type Surface = i32; +# trait Shape { fn draw(&self, Surface); } +struct Figure(S, S); +fn draw_twice(surface: Surface, sh: T) { + sh.draw(surface); + sh.draw(surface); +} +fn draw_figure(surface: Surface, Figure(sh1, sh2): Figure) { + sh1.draw(surface); + draw_twice(surface, sh2); // Can call this since U: Shape +} +``` + +## Trait objects + +Traits also define a [trait object] with the same name as the trait. Values of +this type are created by coercing from a pointer of some specific type to a +pointer of trait type. For example, `&T` could be coerced to `&Shape` if `T: +Shape` holds (and similarly for `Box`). This coercion can either be implicit +or [explicit]. 
Here is an example of an explicit coercion: + +[trait object]: types.html#trait-objects +[explicit]: expressions/operator-expr.html#type-cast-expressions + +```rust +trait Shape { } +impl Shape for i32 { } +let mycircle = 0i32; +let myshape: Box = Box::new(mycircle) as Box; +``` + +The resulting value is a box containing the value that was cast, along with +information that identifies the methods of the implementation that was used. +Values with a trait type can have [methods called] on them, for any method in +the trait, and can be used to instantiate type parameters that are bounded by +the trait. + +[methods called]: expressions/method-call-expr.html + +## Supertraits + +Trait bounds on `Self` are considered "supertraits". These are required to be +acyclic. Supertraits are somewhat different from other constraints in that +they affect what methods are available in the vtable when the trait is used as +a [trait object]. Consider the following example: + +```rust +trait Shape { fn area(&self) -> f64; } +trait Circle : Shape { fn radius(&self) -> f64; } +``` + +The syntax `Circle : Shape` means that types that implement `Circle` must also +have an implementation for `Shape`. Multiple supertraits are separated by `+`, +`trait Circle : Shape + PartialEq { }`. In an implementation of `Circle` for a +given type `T`, methods can refer to `Shape` methods, since the typechecker +checks that any type with an implementation of `Circle` also has an +implementation of `Shape`: + +```rust +struct Foo; + +trait Shape { fn area(&self) -> f64; } +trait Circle : Shape { fn radius(&self) -> f64; } +impl Shape for Foo { + fn area(&self) -> f64 { + 0.0 + } +} +impl Circle for Foo { + fn radius(&self) -> f64 { + println!("calling area: {}", self.area()); + + 0.0 + } +} + +let c = Foo; +c.radius(); +``` + +In type-parameterized functions, methods of the supertrait may be called on +values of subtrait-bound type parameters. Referring to the previous example of +`trait Circle : Shape`: + +```rust +# trait Shape { fn area(&self) -> f64; } +# trait Circle : Shape { fn radius(&self) -> f64; } +fn radius_times_area(c: T) -> f64 { + // `c` is both a Circle and a Shape + c.radius() * c.area() +} +``` + +Likewise, supertrait methods may also be called on trait objects. + +```rust +# trait Shape { fn area(&self) -> f64; } +# trait Circle : Shape { fn radius(&self) -> f64; } +# impl Shape for i32 { fn area(&self) -> f64 { 0.0 } } +# impl Circle for i32 { fn radius(&self) -> f64 { 0.0 } } +# let mycircle = 0i32; +let mycircle = Box::new(mycircle) as Box; +let nonsense = mycircle.radius() * mycircle.area(); +``` diff --git a/src/doc/reference/src/items/type-aliases.md b/src/doc/reference/src/items/type-aliases.md new file mode 100644 index 0000000000..532254372c --- /dev/null +++ b/src/doc/reference/src/items/type-aliases.md @@ -0,0 +1,25 @@ +# Type aliases + +A _type alias_ defines a new name for an existing [type]. Type aliases are +declared with the keyword `type`. Every value has a single, specific type, but +may implement several different traits, or be compatible with several different +type constraints. 
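+
+As a minimal, illustrative sketch (the alias name is arbitrary), an alias is
+interchangeable with the type it names rather than being a distinct new type:
+
+```rust
+type Meters = u32;
+
+// `Meters` is just another name for `u32`, so the two are interchangeable.
+let distance: Meters = 5;
+let doubled: u32 = distance * 2;
+assert_eq!(doubled, 10);
+```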
+ +[type]: types.html + +For example, the following defines the type `Point` as a synonym for the type +`(u8, u8)`, the type of pairs of unsigned 8 bit integers: + +```rust +type Point = (u8, u8); +let p: Point = (41, 68); +``` + +A type alias to an enum type cannot be used to qualify the constructors: + +```rust +enum E { A } +type F = E; +let _: F = E::A; // OK +// let _: F = F::A; // Doesn't work +``` diff --git a/src/doc/reference/src/items/unions.md b/src/doc/reference/src/items/unions.md new file mode 100644 index 0000000000..90bce69842 --- /dev/null +++ b/src/doc/reference/src/items/unions.md @@ -0,0 +1,141 @@ +# Unions + +A union declaration uses the same syntax as a struct declaration, except with +`union` in place of `struct`. + +```rust +#[repr(C)] +union MyUnion { + f1: u32, + f2: f32, +} +``` + +The key property of unions is that all fields of a union share common storage. +As a result writes to one field of a union can overwrite its other fields, and +size of a union is determined by the size of its largest field. + +A value of a union type can be created using the same syntax that is used for +struct types, except that it must specify exactly one field: + +```rust +# union MyUnion { f1: u32, f2: f32 } +# +let u = MyUnion { f1: 1 }; +``` + +The expression above creates a value of type `MyUnion` with active field `f1`. +Active field of a union can be accessed using the same syntax as struct fields: + +```rust,ignore +let f = u.f1; +``` + +Inactive fields can be accessed as well (using the same syntax) if they are +sufficiently layout compatible with the current value kept by the union. +Reading incompatible fields results in undefined behavior. However, the active +field is not generally known statically, so all reads of union fields have to +be placed in `unsafe` blocks. + +```rust +# union MyUnion { f1: u32, f2: f32 } +# let u = MyUnion { f1: 1 }; +# +unsafe { + let f = u.f1; +} +``` + +Writes to `Copy` union fields do not require reads for running destructors, so +these writes don't have to be placed in `unsafe` blocks + +```rust +# union MyUnion { f1: u32, f2: f32 } +# let mut u = MyUnion { f1: 1 }; +# +u.f1 = 2; +``` + +Commonly, code using unions will provide safe wrappers around unsafe union +field accesses. + +Another way to access union fields is to use pattern matching. Pattern matching +on union fields uses the same syntax as struct patterns, except that the +pattern must specify exactly one field. Since pattern matching accesses +potentially inactive fields it has to be placed in `unsafe` blocks as well. + +```rust +# union MyUnion { f1: u32, f2: f32 } +# +fn f(u: MyUnion) { + unsafe { + match u { + MyUnion { f1: 10 } => { println!("ten"); } + MyUnion { f2 } => { println!("{}", f2); } + } + } +} +``` + +Pattern matching may match a union as a field of a larger structure. In +particular, when using a Rust union to implement a C tagged union via FFI, this +allows matching on the tag and the corresponding field simultaneously: + +```rust +#[repr(u32)] +enum Tag { I, F } + +#[repr(C)] +union U { + i: i32, + f: f32, +} + +#[repr(C)] +struct Value { + tag: Tag, + u: U, +} + +fn is_zero(v: Value) -> bool { + unsafe { + match v { + Value { tag: I, u: U { i: 0 } } => true, + Value { tag: F, u: U { f: 0.0 } } => true, + _ => false, + } + } +} +``` + +Since union fields share common storage, gaining write access to one field of a +union can give write access to all its remaining fields. Borrow checking rules +have to be adjusted to account for this fact. 
As a result, if one field of a +union is borrowed, all its remaining fields are borrowed as well for the same +lifetime. + +```rust,ignore +// ERROR: cannot borrow `u` (via `u.f2`) as mutable more than once at a time +fn test() { + let mut u = MyUnion { f1: 1 }; + unsafe { + let b1 = &mut u.f1; + ---- first mutable borrow occurs here (via `u.f1`) + let b2 = &mut u.f2; + ^^^^ second mutable borrow occurs here (via `u.f2`) + *b1 = 5; + } + - first borrow ends here + assert_eq!(unsafe { u.f1 }, 5); +} +``` + +As you could see, in many aspects (except for layouts, safety and ownership) +unions behave exactly like structs, largely as a consequence of inheriting +their syntactic shape from structs. This is also true for many unmentioned +aspects of Rust language (such as privacy, name resolution, type inference, +generics, trait implementations, inherent implementations, coherence, pattern +checking, etc etc etc). + +More detailed specification for unions, including unstable bits, can be found +in [RFC 1897 "Unions v1.2"](https://github.com/rust-lang/rfcs/pull/1897). diff --git a/src/doc/reference/src/items/use-declarations.md b/src/doc/reference/src/items/use-declarations.md new file mode 100644 index 0000000000..5ff173f897 --- /dev/null +++ b/src/doc/reference/src/items/use-declarations.md @@ -0,0 +1,113 @@ +# Use declarations + +A _use declaration_ creates one or more local name bindings synonymous with +some other [path]. Usually a `use` declaration is used to shorten the path +required to refer to a module item. These declarations may appear in [modules] +and [blocks], usually at the top. + +[path]: paths.html +[modules]: items/modules.html +[blocks]: expressions/block-expr.html + +> **Note**: Unlike in many languages, `use` declarations in Rust do *not* +> declare linkage dependency with external crates. Rather, [`extern crate` +> declarations](items/extern-crates.html) declare linkage dependencies. + +Use declarations support a number of convenient shortcuts: + +* Simultaneously binding a list of paths differing only in their final element, + using the glob-like brace syntax `use a::b::{c,d,e,f};` +* Simultaneously binding a list of paths differing only in their final element + and their immediate parent module, using the `self` keyword, such as `use + a::b::{self, c, d};` +* Rebinding the target name as a new local name, using the syntax `use p::q::r + as x;`. This can also be used with the last two features: `use a::b::{self as + ab, c as abc}`. +* Binding all paths matching a given prefix, using the asterisk wildcard syntax + `use a::b::*;` + +An example of `use` declarations: + +```rust +use std::option::Option::{Some, None}; +use std::collections::hash_map::{self, HashMap}; + +fn foo(_: T){} +fn bar(map1: HashMap, map2: hash_map::HashMap){} + +fn main() { + // Equivalent to 'foo(vec![std::option::Option::Some(1.0f64), + // std::option::Option::None]);' + foo(vec![Some(1.0f64), None]); + + // Both `hash_map` and `HashMap` are in scope. + let map1 = HashMap::new(); + let map2 = hash_map::HashMap::new(); + bar(map1, map2); +} +``` + +Like items, `use` declarations are private to the containing module, by +default. Also like items, a `use` declaration can be public, if qualified by +the `pub` keyword. Such a `use` declaration serves to _re-export_ a name. A +public `use` declaration can therefore _redirect_ some public name to a +different target definition: even a definition with a private canonical path, +inside a different module. 
If a sequence of such redirections form a cycle or +cannot be resolved unambiguously, they represent a compile-time error. + +An example of re-exporting: + +```rust +# fn main() { } +mod quux { + pub use quux::foo::{bar, baz}; + + pub mod foo { + pub fn bar() { } + pub fn baz() { } + } +} +``` + +In this example, the module `quux` re-exports two public names defined in +`foo`. + +Also note that the paths contained in `use` items are relative to the crate +root. So, in the previous example, the `use` refers to `quux::foo::{bar, baz}`, +and not simply to `foo::{bar, baz}`. This also means that top-level module +declarations should be at the crate root if direct usage of the declared +modules within `use` items is desired. It is also possible to use `self` and +`super` at the beginning of a `use` item to refer to the current and direct +parent modules respectively. All rules regarding accessing declared modules in +`use` declarations apply to both module declarations and `extern crate` +declarations. + +An example of what will and will not work for `use` items: + +```rust +# #![allow(unused_imports)] +use foo::baz::foobaz; // good: foo is at the root of the crate + +mod foo { + + mod example { + pub mod iter {} + } + + use foo::example::iter; // good: foo is at crate root +// use example::iter; // bad: example is not at the crate root + use self::baz::foobaz; // good: self refers to module 'foo' + use foo::bar::foobar; // good: foo is at crate root + + pub mod bar { + pub fn foobar() { } + } + + pub mod baz { + use super::bar::foobar; // good: super refers to module 'foo' + pub fn foobaz() { } + } +} + +fn main() {} +``` diff --git a/src/doc/reference/src/keywords.md b/src/doc/reference/src/keywords.md index bf743f6c42..41e708d865 100644 --- a/src/doc/reference/src/keywords.md +++ b/src/doc/reference/src/keywords.md @@ -1,6 +1,6 @@ # Keywords -Rust divides keywords in three categories: +Rust divides keywords into three categories: - [strict](#strict-keywords) - [weak](#weak-keywords) - [reserved](#reserved-keywords) @@ -45,7 +45,7 @@ not allowed to declare a variable with name `struct`. > KW_TYPE : `type` > KW_UNSAFE : `unsafe` > KW_USE : `use` -> KW_WHERE : `wher` +> KW_WHERE : `where` > KW_WHILE : `while` ## Weak keywords @@ -63,7 +63,7 @@ it is possible to declare a variable or method with the name `union`. These keywords aren't used yet, but they are reserved for future use. The reasoning behind this is to make current programs forward compatible with -future versions of rust by forbiding them to use these keywords. +future versions of Rust by forbidding them to use these keywords. > **Lexer** > KW_ABSTRACT : `abstract` diff --git a/src/doc/reference/src/macros-by-example.md b/src/doc/reference/src/macros-by-example.md index a007b232e4..5b224ca540 100644 --- a/src/doc/reference/src/macros-by-example.md +++ b/src/doc/reference/src/macros-by-example.md @@ -32,9 +32,9 @@ syntax named by _designator_. 
Valid designators are: * `meta`: the contents of an [attribute] [item]: items.html -[block]: expressions.html#block-expressions +[block]: expressions/block-expr.html [statement]: statements.html -[pattern]: expressions.html#match-expressions +[pattern]: expressions/match-expr.html [expression]: expressions.html [type]: types.html [identifier]: identifiers.html diff --git a/src/doc/reference/src/macros.md b/src/doc/reference/src/macros.md index df37e34b3c..53f73057a6 100644 --- a/src/doc/reference/src/macros.md +++ b/src/doc/reference/src/macros.md @@ -6,12 +6,12 @@ names, and invoked through a consistent syntax: `some_extension!(...)`. Users of `rustc` can define new macros in two ways: -* [Macros] define new syntax in a higher-level, +* [Macros by Example] define new syntax in a higher-level, declarative way. * [Procedural Macros] can be used to implement custom derive. And one unstable way: [compiler plugins]. -[Macros]: ../book/macros.html -[Procedural Macros]: ../book/procedural-macros.html +[Macros by Example]: macros-by-example.html +[Procedural Macros]: procedural-macros.html [compiler plugins]: ../unstable-book/language-features/plugin.html diff --git a/src/doc/reference/src/statements.md b/src/doc/reference/src/statements.md index ced9a850a1..56c7001e96 100644 --- a/src/doc/reference/src/statements.md +++ b/src/doc/reference/src/statements.md @@ -1,7 +1,7 @@ # Statements A _statement_ is a component of a block, which is in turn a component of an -outer [expression](expressions.html) or [function](items.html#functions). +outer [expression](expressions.html) or [function](items/functions.html). Rust has two kinds of statement: [declaration statements](#declaration-statements) and [expression @@ -40,7 +40,7 @@ An _expression statement_ is one that evaluates an [expression](expressions.html) and ignores its result. As a rule, an expression statement's purpose is to trigger the effects of evaluating its expression. An expression that consists of only a [block -expression](expressions.html#block-expressions) or control flow expression, +expression](expressions/block-expr.html) or control flow expression, that doesn't end a block and evaluates to `()` can also be used as an expression statement by omitting the trailing semicolon. diff --git a/src/doc/reference/src/string-table-productions.md b/src/doc/reference/src/string-table-productions.md index 3621f2d9ff..3fceb0847f 100644 --- a/src/doc/reference/src/string-table-productions.md +++ b/src/doc/reference/src/string-table-productions.md @@ -12,7 +12,7 @@ When such a string enclosed in double-quotes (`"`) occurs inside the grammar, it is an implicit reference to a single member of such a string table production. See [tokens] for more information. -[binary operators]: expressions.html#arithmetic-and-logical-binary-operators -[keywords]: ../grammar.html#keywords +[binary operators]: expressions/operator-expr.html#arithmetic-and-logical-binary-operators +[keywords]: keywords.html [tokens]: tokens.html -[unary operators]: expressions.html#borrow-operators \ No newline at end of file +[unary operators]: expressions/operator-expr.html#borrow-operators diff --git a/src/doc/reference/src/tokens.md b/src/doc/reference/src/tokens.md index 3163a9de6b..c5746459e6 100644 --- a/src/doc/reference/src/tokens.md +++ b/src/doc/reference/src/tokens.md @@ -21,13 +21,24 @@ evaluated (primarily) at compile time. 
| | Example | `#` sets | Characters | Escapes | |----------------------------------------------|-----------------|------------|-------------|---------------------| -| [Character](#character-literals) | `'H'` | `N/A` | All Unicode | [Quote](#quote-escapes) & [Byte](#byte-escapes) & [Unicode](#unicode-escapes) | -| [String](#string-literals) | `"hello"` | `N/A` | All Unicode | [Quote](#quote-escapes) & [Byte](#byte-escapes) & [Unicode](#unicode-escapes) | +| [Character](#character-literals) | `'H'` | `N/A` | All Unicode | [Quote](#quote-escapes) & [ASCII](#ascii-escapes) & [Unicode](#unicode-escapes) | +| [String](#string-literals) | `"hello"` | `N/A` | All Unicode | [Quote](#quote-escapes) & [ASCII](#ascii-escapes) & [Unicode](#unicode-escapes) | | [Raw](#raw-string-literals) | `r#"hello"#` | `0...` | All Unicode | `N/A` | | [Byte](#byte-literals) | `b'H'` | `N/A` | All ASCII | [Quote](#quote-escapes) & [Byte](#byte-escapes) | | [Byte string](#byte-string-literals) | `b"hello"` | `N/A` | All ASCII | [Quote](#quote-escapes) & [Byte](#byte-escapes) | | [Raw byte string](#raw-byte-string-literals) | `br#"hello"#` | `0...` | All ASCII | `N/A` | +#### ASCII escapes + +| | Name | +|---|------| +| `\x41` | 7-bit character code (exactly 2 digits, up to 0x7F) | +| `\n` | Newline | +| `\r` | Carriage return | +| `\t` | Tab | +| `\\` | Backslash | +| `\0` | Null | + #### Byte escapes | | Name | @@ -74,12 +85,39 @@ evaluated (primarily) at compile time. #### Character literals +> **Lexer** +> CHAR_LITERAL : +>    `'` ( ~[`'` `\` \\n \\r \\t] | QUOTE_ESCAPE | ASCII_ESCAPE | UNICODE_ESCAPE ) `'` +> +> QUOTE_ESCAPE : +>    `\'` | `\"` +> +> ASCII_ESCAPE : +>       `\x` OCT_DIGIT HEX_DIGIT +>    | `\n` | `\r` | `\t` | `\\` | `\0` +> +> UNICODE_ESCAPE : +>    `\u{` ( HEX_DIGIT `_`\* )1..6 `}` + A _character literal_ is a single Unicode character enclosed within two `U+0027` (single-quote) characters, with the exception of `U+0027` itself, which must be _escaped_ by a preceding `U+005C` character (`\`). #### String literals +> **Lexer** +> STRING_LITERAL : +>    `"` ( +>       ~[`"` `\` _IsolatedCR_] +>       | QUOTE_ESCAPE +>       | ASCII_ESCAPE +>       | UNICODE_ESCAPE +>       | STRING_CONTINUE +>    )\* `"` +> +> STRING_CONTINUE : +>    `\` _followed by_ \\n + A _string literal_ is a sequence of any Unicode characters enclosed within two `U+0022` (double-quote) characters, with the exception of `U+0022` itself, which must be _escaped_ by a preceding `U+005C` character (`\`). @@ -120,6 +158,14 @@ following forms: #### Raw string literals +> **Lexer** +> RAW_STRING_LITERAL : +>    `r` RAW_STRING_CONTENT +> +> RAW_STRING_CONTENT : +>       `"` ( ~ _IsolatedCR_ )* (non-greedy) `"` +>    | `#` RAW_STRING_CONTENT `#` + Raw string literals do not process any escapes. They start with the character `U+0072` (`r`), followed by zero or more of the character `U+0023` (`#`) and a `U+0022` (double-quote) character. The _raw string body_ can contain any sequence @@ -149,6 +195,17 @@ r##"foo #"# bar"##; // foo #"# bar #### Byte literals +> **Lexer** +> BYTE_LITERAL : +>    `b'` ( ASCII_FOR_CHAR | BYTE_ESCAPE ) `'` +> +> ASCII_FOR_CHAR : +>    _any ASCII (i.e. 0x00 to 0x7F), except_ `'`, `/`, \\n, \\r or \\t +> +> BYTE_ESCAPE : +>       `\x` HEX_DIGIT HEX_DIGIT +>    | `\n` | `\r` | `\t` | `\\` | `\0` + A _byte literal_ is a single ASCII character (in the `U+0000` to `U+007F` range) or a single _escape_ preceded by the characters `U+0062` (`b`) and `U+0027` (single-quote), and followed by the character `U+0027`. 
If the character @@ -158,6 +215,13 @@ _number literal_. #### Byte string literals +> **Lexer** +> BYTE_STRING_LITERAL : +>    `b"` ( ASCII_FOR_STRING | BYTE_ESCAPE | STRING_CONTINUE )\* `"` +> +> ASCII_FOR_STRING : +>    _any ASCII (i.e 0x00 to 0x7F), except_ `"`, `/` _and IsolatedCR_ + A non-raw _byte string literal_ is a sequence of ASCII characters and _escapes_, preceded by the characters `U+0062` (`b`) and `U+0022` (double-quote), and followed by the character `U+0022`. If the character `U+0022` is present within @@ -183,6 +247,17 @@ following forms: #### Raw byte string literals +> **Lexer** +> RAW_BYTE_STRING_LITERAL : +>    `br` RAW_BYTE_STRING_CONTENT +> +> RAW_BYTE_STRING_CONTENT : +>       `"` ASCII* (non-greedy) `"` +>    | `#` RAW_STRING_CONTENT `#` +> +> ASCII : +>    _any ASCII (i.e. 0x00 to 0x7F)_ + Raw byte string literals do not process any escapes. They start with the character `U+0062` (`b`), followed by `U+0072` (`r`), followed by zero or more of the character `U+0023` (`#`), and a `U+0022` (double-quote) character. The @@ -216,16 +291,51 @@ literal_. The grammar for recognizing the two kinds of literals is mixed. #### Integer literals +> **Lexer** +> INTEGER_LITERAL : +>    ( DEC_LITERAL | BIN_LITERAL | OCT_LITERAL | HEX_LITERAL ) +> INTEGER_SUFFIX? +> +> DEC_LITERAL : +>    DEC_DIGIT (DEC_DIGIT|`_`)\* +> +> BIN_LITERAL : +>    `0b` (BIN_DIGIT|`_`)\* BIN_DIGIT (BIN_DIGIT|`_`)\* +> +> OCT_LITERAL : +>    `0o` (OCT_DIGIT|`_`)\* OCT_DIGIT (OCT_DIGIT|`_`)\* +> +> HEX_LITERAL : +>    `0x` (HEX_DIGIT|`_`)\* HEX_DIGIT (HEX_DIGIT|`_`)\* +> +> BIN_DIGIT : [`0`-`1`] +> +> OCT_DIGIT : [`0`-`7`] +> +> DEC_DIGIT : [`0`-`9`] +> +> HEX_DIGIT : [`0`-`9` `a`-`f` `A`-`F`] +> +> INTEGER_SUFFIX : +>       `u8` | `u16` | `u32` | `u64` | `usize` +>    | `i8` | `u16` | `i32` | `i64` | `usize` + + + + An _integer literal_ has one of four forms: * A _decimal literal_ starts with a *decimal digit* and continues with any mixture of *decimal digits* and _underscores_. * A _hex literal_ starts with the character sequence `U+0030` `U+0078` - (`0x`) and continues as any mixture of hex digits and underscores. + (`0x`) and continues as any mixture (with at least one digit) of hex digits + and underscores. * An _octal literal_ starts with the character sequence `U+0030` `U+006F` - (`0o`) and continues as any mixture of octal digits and underscores. + (`0o`) and continues as any mixture (with at least one digit) of octal digits + and underscores. * A _binary literal_ starts with the character sequence `U+0030` `U+0062` - (`0b`) and continues as any mixture of binary digits and underscores. + (`0b`) and continues as any mixture (with at least one digit) of binary digits + and underscores. 
Like any literal, an integer literal may be followed (immediately, without any spaces) by an _integer suffix_, which forcibly sets the @@ -247,23 +357,73 @@ The type of an _unsuffixed_ integer literal is determined by type inference: Examples of integer literals of various forms: ```rust +123; // type i32 123i32; // type i32 123u32; // type u32 123_u32; // type u32 +let a: u64 = 123; // type u64 + +0xff; // type i32 0xff_u8; // type u8 + +0o70; // type i32 0o70_i16; // type i16 -0b1111_1111_1001_0000_i32; // type i32 + +0b1111_1111_1001_0000; // type i32 +0b1111_1111_1001_0000i32; // type i64 +0b________1; // type i32 + 0usize; // type usize ``` +Examples of invalid integer literals: + +```rust,ignore +// invalid suffixes + +0invalidSuffix; + +// uses numbers of the wrong base + +123AFB43; +0b0102; +0o0581; + +// integers too big for their type (they overflow) + +128_i8; +256_u8; + +// bin, hex and octal literals must have at least one digit + +0b_; +0b____; +``` + Note that the Rust syntax considers `-1i8` as an application of the [unary minus operator] to an integer literal `1i8`, rather than a single integer literal. -[unary minus operator]: expressions.html#negation-operators +[unary minus operator]: expressions/operator-expr.html#negation-operators #### Floating-point literals +> **Lexer** +> FLOAT_LITERAL : +>       DEC_LITERAL `.` +> _(not immediately followed by `.`, `_` or an identifier_) +>    | DEC_LITERAL FLOAT_EXPONENT +>    | DEC_LITERAL `.` DEC_LITERAL FLOAT_EXPONENT? +>    | DEC_LITERAL (`.` DEC_LITERAL)? +> FLOAT_EXPONENT? FLOAT_SUFFIX +> +> FLOAT_EXPONENT : +>    (`e`|`E`) (`+`|`-`)? +> (DEC_DIGIT|`_`)\* DEC_DIGIT (DEC_DIGIT|`_`)\* +> +> FLOAT_SUFFIX : +>    `f32` | `f64` + A _floating-point literal_ has one of two forms: * A _decimal literal_ followed by a period character `U+002E` (`.`). This is @@ -309,6 +469,11 @@ The representation semantics of floating-point numbers are described in ### Boolean literals +> **Lexer** +> BOOLEAN_LITERAL : +>       `true` +>    | `false` + The two values of the boolean type are written `true` and `false`. ## Symbols @@ -320,8 +485,8 @@ otherwise appear as [unary operators], [binary operators], or [keywords]. They are catalogued in [the Symbols section][symbols] of the Grammar document. -[unary operators]: expressions.html#borrow-operators -[binary operators]: expressions.html#arithmetic-and-logical-binary-operators +[unary operators]: expressions/operator-expr.html#borrow-operators +[binary operators]: expressions/operator-expr.html#arithmetic-and-logical-binary-operators [tokens]: #tokens [symbols]: ../grammar.html#symbols -[keywords]: ../grammar.html#keywords +[keywords]: keywords.html diff --git a/src/doc/reference/src/types.md b/src/doc/reference/src/types.md index 10d655f7c6..2a18617da3 100644 --- a/src/doc/reference/src/types.md +++ b/src/doc/reference/src/types.md @@ -3,21 +3,44 @@ Every variable, item and value in a Rust program has a type. The _type_ of a *value* defines the interpretation of the memory holding it. -Built-in types and type-constructors are tightly integrated into the language, -in nontrivial ways that are not possible to emulate in user-defined types. -User-defined types have limited capabilities. +Built-in types are tightly integrated into the language, in nontrivial ways +that are not possible to emulate in user-defined types. User-defined types have +limited capabilities. 
## Primitive types -The primitive types are the following: +Some types are defined by the language, rather than as part of the standard +library, these are called _primitive types_. Some of these are individual +types: * The boolean type `bool` with values `true` and `false`. -* The machine types (integer and floating-point). -* The machine-dependent integer types. -* Arrays -* Tuples -* Slices -* Function pointers +* The [machine types] (integer and floating-point). +* The [machine-dependent integer types]. +* The [textual types] `char` and `str`. + +There are also some primitive constructs for generic types built in to the +language: + +* [Tuples] +* [Arrays] +* [Slices] +* [Function pointers] +* [References] +* [Pointers] + +[machine types]: #machine-types +[machine-dependent integer types]: #machine-dependent-integer-types +[textual types]: #textual-types +[Tuples]: #tuple-types +[Arrays]: #array-and-slice-types +[Slices]: #array-and-slice-types +[References]: #pointer-types +[Pointers]: #raw-pointers-const-and-mut +[Function pointers]: #function-pointer-types +[function]: #function-types +[closure]: #closure-types + +## Numeric types ### Machine types @@ -52,14 +75,14 @@ The types `char` and `str` hold textual data. A value of type `char` is a [Unicode scalar value]( http://www.unicode.org/glossary/#unicode_scalar_value) (i.e. a code point that -is not a surrogate) represented as a 32-bit unsigned word in the 0x0000 to -0xD7FF or 0xE000 to 0x10FFFF range. A `[char]` array is effectively a UCS-4 / -UTF-32 string. +is not a surrogate), represented as a 32-bit unsigned word in the 0x0000 to +0xD7FF or 0xE000 to 0x10FFFF range. A `[char]` is effectively a UCS-4 / UTF-32 +string. A value of type `str` is a Unicode string, represented as an array of 8-bit -unsigned bytes holding a sequence of UTF-8 code points. Since `str` is of -unknown size, it is not a _first-class_ type, but can only be instantiated -through a pointer type, such as `&str`. +unsigned bytes holding a sequence of UTF-8 code points. Since `str` is a +[dynamically sized type], it is not a _first-class_ type, but can only be +instantiated through a pointer type, such as `&str`. ## Tuple types @@ -70,8 +93,8 @@ Tuple types and values are denoted by listing the types or values of their elements, respectively, in a parenthesized, comma-separated list. Because tuple elements don't have a name, they can only be accessed by -pattern-matching or by using `N` directly as a field to access the -`N`th element. +pattern-matching or by using `N` directly as a field to access the `N`th +element. An example of a tuple type and its use: @@ -94,13 +117,18 @@ is often called ‘unit’ or ‘the unit type’. Rust has two different types for a list of items: * `[T; N]`, an 'array' -* `&[T]`, a 'slice' +* `[T]`, a 'slice' An array has a fixed size, and can be allocated on either the stack or the heap. -A slice is a 'view' into an array. It doesn't own the data it points -to, it borrows it. +A slice is a [dynamically sized type] representing a 'view' into an array. To +use a slice type it generally has to be used behind a pointer for example as + +* `&[T]`, a 'shared slice', often just called a 'slice', it doesn't own the + data it points to, it borrows it. +* `&mut [T]`, a 'mutable slice', mutably borrows the data it points to. 
+* `Box<[T]>`, a 'boxed slice' Examples: @@ -108,86 +136,104 @@ Examples: // A stack-allocated array let array: [i32; 3] = [1, 2, 3]; -// A heap-allocated array -let vector: Vec = vec![1, 2, 3]; +// A heap-allocated array, coerced to a slice +let boxed_array: Box<[i32]> = Box::new([1, 2, 3]); -// A slice into an array -let slice: &[i32] = &vector[..]; +// A (shared) slice into an array +let slice: &[i32] = &boxed_array[..]; ``` -As you can see, the `vec!` macro allows you to create a `Vec` easily. The -`vec!` macro is also part of the standard library, rather than the language. +All elements of arrays and slices are always initialized, and access to an +array or slice is always bounds-checked in safe methods and operators. + +The [`Vec`] standard library type provides a heap allocated resizable array +type. -All in-bounds elements of arrays and slices are always initialized, and access -to an array or slice is always bounds-checked. +[dynamically sized type]: dynamically-sized-types.html +[`Vec`]: ../std/vec/struct.Vec.html ## Struct types A `struct` *type* is a heterogeneous product of other types, called the *fields* of the type.[^structtype] -[^structtype]: `struct` types are analogous to `struct` types in C, - the *record* types of the ML family, - or the *struct* types of the Lisp family. - New instances of a `struct` can be constructed with a [struct -expression](expressions.html#struct-expressions). +expression](expressions/struct-expr.html). The memory layout of a `struct` is undefined by default to allow for compiler optimizations like field reordering, but it can be fixed with the -`#[repr(...)]` attribute. In either case, fields may be given in any order in -a corresponding struct *expression*; the resulting `struct` value will always +`#[repr(...)]` attribute. In either case, fields may be given in any order in a +corresponding struct *expression*; the resulting `struct` value will always have the same memory layout. The fields of a `struct` may be qualified by [visibility -modifiers](visibility-and-privacy.html), to allow access to data in a -struct outside a module. +modifiers](visibility-and-privacy.html), to allow access to data in a struct +outside a module. A _tuple struct_ type is just like a struct type, except that the fields are anonymous. -A _unit-like struct_ type is like a struct type, except that it has no -fields. The one value constructed by the associated [struct -expression](expressions.html#struct-expressions) is the only value that inhabits such a -type. +A _unit-like struct_ type is like a struct type, except that it has no fields. +The one value constructed by the associated [struct +expression](expressions/struct-expr.html) is the only value that +inhabits such a type. + +[^structtype]: `struct` types are analogous to `struct` types in C, the + *record* types of the ML family, or the *struct* types of the Lisp family. ## Enumerated types An *enumerated type* is a nominal, heterogeneous disjoint union type, denoted -by the name of an [`enum` item](items.html#enumerations). [^enumtype] +by the name of an [`enum` item](items/enumerations.html). [^enumtype] + +An [`enum` item](items/enumerations.html) declares both the type and a number +of *variants*, each of which is independently named and has the syntax of a +struct, tuple struct or unit-like struct. + +New instances of an `enum` can be constructed in an [enumeration variant +expression](expressions/enum-variant-expr.html). 
+ +Any `enum` value consumes as much memory as the largest variant for its +corresponding `enum` type, as well as the size needed to store a discriminant. + +Enum types cannot be denoted *structurally* as types, but must be denoted by +named reference to an [`enum` item](items/enumerations.html). [^enumtype]: The `enum` type is analogous to a `data` constructor declaration in ML, or a *pick ADT* in Limbo. -An [`enum` item](items.html#enumerations) declares both the type and a number of *variant -constructors*, each of which is independently named and takes an optional tuple -of arguments. +## Union types -New instances of an `enum` can be constructed by calling one of the variant -constructors, in a [call expression](expressions.html#call-expressions). +A *union type* is a nominal, heterogeneous C-like union, denoted by the name of +a [`union` item](items/unions.html). -Any `enum` value consumes as much memory as the largest variant constructor for -its corresponding `enum` type. +A union contains the value of any one of its fields. Since the accessing the +wrong field can cause unexpected or undefined behaviour, `unsafe` is required +to read from a union field or to write to a field that doesn't implement +[`Copy`]. -Enum types cannot be denoted *structurally* as types, but must be denoted by -named reference to an [`enum` item](items.html#enumerations). +The memory layout of a `union` is undefined by default, but the `#[repr(...)]` +attribute can be used to fix a layout. + +[`Copy`]: the-copy-trait.html ## Recursive types -Nominal types — [enumerations](#enumerated-types) and -[structs](#struct-types) — may be recursive. That is, each `enum` -constructor or `struct` field may refer, directly or indirectly, to the -enclosing `enum` or `struct` type itself. Such recursion has restrictions: - -* Recursive types must include a nominal type in the recursion - (not mere [type definitions](../grammar.html#type-definitions), - or other structural types such as [arrays](#array-and-slice-types) or [tuples](#tuple-types)). -* A recursive `enum` item must have at least one non-recursive constructor - (in order to give the recursion a basis case). -* The size of a recursive type must be finite; - in other words the recursive fields of the type must be [pointer types](#pointer-types). -* Recursive type definitions can cross module boundaries, but not module *visibility* boundaries, - or crate boundaries (in order to simplify the module system and type checker). +Nominal types — [structs](#struct-types), +[enumerations](#enumerated-types) and [unions](#union-types) — may be +recursive. That is, each `enum` variant or `struct` or `union` field may refer, +directly or indirectly, to the enclosing `enum` or `struct` type itself. Such +recursion has restrictions: + +* Recursive types must include a nominal type in the recursion (not mere [type + definitions](../grammar.html#type-definitions), or other structural types + such as [arrays](#array-and-slice-types) or [tuples](#tuple-types)). So + `type Rec = &'static [Rec]` is not allowed. +* The size of a recursive type must be finite; in other words the recursive + fields of the type must be [pointer types](#pointer-types). +* Recursive type definitions can cross module boundaries, but not module + *visibility* boundaries, or crate boundaries (in order to simplify the module + system and type checker). 
An example of a *recursive* type and its use:
@@ -202,62 +248,78 @@ let a: List = List::Cons(7, Box::new(List::Cons(13, Box::new(List::Nil))));
 
 ## Pointer types
 
-All pointers in Rust are explicit first-class values. They can be copied,
-stored into data structs, and returned from functions. There are two
-varieties of pointer in Rust:
-
-* References (`&`)
-  : These point to memory _owned by some other value_.
-    A reference type is written `&type`,
-    or `&'a type` when you need to specify an explicit lifetime.
-    Copying a reference is a "shallow" operation:
-    it involves only copying the pointer itself.
-    Releasing a reference has no effect on the value it points to,
-    but a reference of a temporary value will keep it alive during the scope
-    of the reference itself.
-
-* Raw pointers (`*`)
-  : Raw pointers are pointers without safety or liveness guarantees.
-    Raw pointers are written as `*const T` or `*mut T`,
-    for example `*const i32` means a raw pointer to a 32-bit integer.
-    Copying or dropping a raw pointer has no effect on the lifecycle of any
-    other value. Dereferencing a raw pointer or converting it to any other
-    pointer type is an [`unsafe` operation](unsafe-functions.html).
-    Raw pointers are generally discouraged in Rust code;
-    they exist to support interoperability with foreign code,
-    and writing performance-critical or low-level functions.
+All pointers in Rust are explicit first-class values. They can be moved or
+copied, stored into data structs, and returned from functions.
+
+### Shared references (`&`)
+
+These point to memory _owned by some other value_. When a shared reference to a
+value is created, it prevents direct mutation of the value. [Interior
+mutability](interior-mutability.html) provides an exception for this in certain
+circumstances. As the name suggests, any number of shared references to a value
+may exist. A shared reference type is written `&type`, or `&'a type` when you
+need to specify an explicit lifetime. Copying a reference is a "shallow"
+operation: it involves only copying the pointer itself, that is, pointers are
+`Copy`. Releasing a reference has no effect on the value it points to, but a
+reference to a [temporary value](expressions.html#temporary-lifetimes) will
+keep it alive during the scope of the reference itself.
+
+### Mutable references (`&mut`)
+
+These also point to memory owned by some other value. A mutable reference type
+is written `&mut type` or `&'a mut type`. A mutable reference (that hasn't been
+borrowed) is the only way to access the value it points to, so it is not `Copy`.
+
+### Raw pointers (`*const` and `*mut`)
+
+Raw pointers are pointers without safety or liveness guarantees. Raw pointers
+are written as `*const T` or `*mut T`; for example, `*const i32` means a raw
+pointer to a 32-bit integer. Copying or dropping a raw pointer has no effect on
+the lifecycle of any other value. Dereferencing a raw pointer is an [`unsafe`
+operation](unsafe-functions.html); this can also be used to convert a raw
+pointer to a reference by reborrowing it (`&*` or `&mut *`). Raw pointers are
+generally discouraged in Rust code; they exist to support interoperability with
+foreign code, and for writing performance-critical or low-level functions.
+
+Pointers are compared by their address, rather than by what they point to. When
+comparing pointers to [dynamically sized
+types](dynamically-sized-types.html), their additional data is also compared.
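+
+For illustration, a minimal (non-normative) sketch of these operations:
+
+```rust
+let x = 7;
+let y = 7;
+
+// Creating a raw pointer from a reference is safe...
+let p: *const i32 = &x;
+let q: *const i32 = &y;
+
+unsafe {
+    // ...but dereferencing it is an `unsafe` operation,
+    assert_eq!(*p, 7);
+    // as is reborrowing it back into a shared reference.
+    let r: &i32 = &*p;
+    assert_eq!(*r, 7);
+}
+
+// Raw pointers are compared by address: `p` and `q` point to equal
+// values stored at different locations, so they compare unequal.
+assert!(p != q);
+```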
+
+### Smart Pointers
+
 The standard library contains additional 'smart pointer' types beyond
 references and raw pointers.
 
 ## Function item types
 
-When referred to, a function item yields a zero-sized value of its
-_function item type_. That type explicitly identifies the function - its name,
-its type arguments, and its early-bound lifetime arguments (but not its
-late-bound lifetime arguments, which are only assigned when the function
-is called) - so the value does not need to contain an actual function pointer,
-and no indirection is needed when the function is called.
+When referred to, a function item, or the constructor of a tuple-like struct or
+enum variant, yields a zero-sized value of its _function item type_. That type
+explicitly identifies the function - its name, its type arguments, and its
+early-bound lifetime arguments (but not its late-bound lifetime arguments,
+which are only assigned when the function is called) - so the value does not
+need to contain an actual function pointer, and no indirection is needed when
+the function is called.
 
 There is currently no syntax that directly refers to a function item type, but
-the compiler will display the type as something like `fn() {foo::<u32>}` in error
-messages.
+the compiler will display the type as something like `fn() {foo::<u32>}` in
+error messages.
 
 Because the function item type explicitly identifies the function, the item
 types of different functions - different items, or the same item with different
 generics - are distinct, and mixing them will create a type error:
 
-```rust,ignore
+```rust,compile_fail,E0308
 fn foo<T>() { }
 let x = &mut foo::<i32>;
 *x = foo::<u32>; //~ ERROR mismatched types
 ```
 
-However, there is a [coercion] from function items to [function pointers](#function-pointer-types)
-with the same signature, which is triggered not only when a function item
-is used when a function pointer is directly expected, but also when different
-function item types with the same signature meet in different arms of the same
-`if` or `match`:
+However, there is a [coercion] from function items to [function
+pointers](#function-pointer-types) with the same signature, which is triggered
+not only when a function item is used when a function pointer is directly
+expected, but also when different function item types with the same signature
+meet in different arms of the same `if` or `match`:
 
 [coercion]: type-coercions.html
 
@@ -278,16 +340,16 @@ let foo_ptr_2 = if want_i32 {
 
 ## Function pointer types
 
-Function pointer types, created using the `fn` type constructor, refer
-to a function whose identity is not necessarily known at compile-time. They
-can be created via a coercion from both [function items](#function-item-types)
-and non-capturing [closures](#closure-types).
+Function pointer types, written using the `fn` keyword, refer to a function
+whose identity is not necessarily known at compile-time. They can be created
+via a coercion from both [function items](#function-item-types) and
+non-capturing [closures](#closure-types).
 
 A function pointer type consists of a possibly-empty set of function-type
 modifiers (such as `unsafe` or `extern`), a sequence of input types and an
 output type.
-An example of a `fn` type: +An example where `Binop` is defined as a function pointer type: ```rust fn add(x: i32, y: i32) -> i32 { @@ -303,30 +365,31 @@ x = bo(5,7); ## Closure types -A [closure expression](expressions.html#closure-expressions) produces a closure +A [closure expression](expressions/closure-expr.html) produces a closure value with a unique, anonymous type that cannot be written out. Depending on the requirements of the closure, its type implements one or more of the closure traits: * `FnOnce` - : The closure can be called once. A closure called as `FnOnce` - can move out values from its environment. + : The closure can be called once. A closure called as `FnOnce` can move out + of its captured values. * `FnMut` : The closure can be called multiple times as mutable. A closure called as `FnMut` can mutate values from its environment. `FnMut` inherits from `FnOnce` (i.e. anything implementing `FnMut` also implements `FnOnce`). -* `Fn` - : The closure can be called multiple times through a shared reference. - A closure called as `Fn` can neither move out from nor mutate values - from its environment, but read-only access to such values is allowed. - `Fn` inherits from `FnMut`, which itself inherits from `FnOnce`. +* `Fn` : The closure can be called multiple times through a shared reference. A + closure called as `Fn` can neither move out from nor mutate captured + variables, but read-only access to such values is allowed. Using `move` to + capture variables by value is allowed so long as they aren't mutated or + moved in the body of the closure. `Fn` inherits from `FnMut`, which itself + inherits from `FnOnce`. -Closures that don't use anything from their environment ("non capturing closures") -can be coerced to function pointers (`fn`) with the matching signature. -To adopt the example from the section above: +Closures that don't use anything from their environment ("non capturing +closures") can be coerced to function pointers (`fn`) with the matching +signature. To adopt the example from the section above: ```rust let add = |x, y| x + y; @@ -340,8 +403,12 @@ x = bo(5,7); ## Trait objects -In Rust, a type like `&SomeTrait` or `Box` is called a _trait object_. -Each instance of a trait object includes: +In Rust, trait names also refer to [dynamically sized types] called _trait +objects_. Like all DSTs, trait +objects are used behind some kind of pointer: `&SomeTrait` or `Box`. +Each instance of a pointer to a trait object includes: + +[dynamically sized types]: dynamically-sized-types.html - a pointer to an instance of a type `T` that implements `SomeTrait` - a _virtual method table_, often just called a _vtable_, which contains, for @@ -354,11 +421,18 @@ function pointer is loaded from the trait object vtable and invoked indirectly. The actual implementation for each vtable entry can vary on an object-by-object basis. -Note that for a trait object to be instantiated, the trait must be -_object-safe_. Object safety rules are defined in [RFC 255]. +Note that trait object types only exist for _object-safe_ traits ([RFC 255]): [RFC 255]: https://github.com/rust-lang/rfcs/blob/master/text/0255-object-safety.md +* It must not require `Self: Sized` +* All associated functions must either have a `where Self: Sized` bound or + * Not have any type parameters (lifetime parameters are allowed) + * Must be a method: its first parameter must be called self, with type + `Self`, `&Self`, `&mut Self`, `Box`. + * `Self` may only be used in the type of the receiver. 
* It must not have any associated constants.
+
 Given a pointer-typed expression `E` of type `&T` or `Box<T>`, where `T`
 implements trait `R`, casting `E` to the corresponding pointer type `&R` or
 `Box<R>` results in a value of the _trait object_ `R`. This result is
@@ -396,28 +470,45 @@ These were defined in [RFC 599] and amended in [RFC 1156].
 
 [RFC 599]: https://github.com/rust-lang/rfcs/blob/master/text/0599-default-object-bound.md
 [RFC 1156]: https://github.com/rust-lang/rfcs/blob/master/text/1156-adjust-default-object-bounds.md
 
-For traits that themselves have no lifetime parameters, the default bound is
-based on what kind of trait object is used:
+For traits that themselves have no lifetime parameters:
+* If there is a unique bound from the containing type then that is the default
+* If there is more than one bound from the containing type then an explicit bound must
+  be specified
+* Otherwise the default bound is `'static`
 
 ```rust,ignore
 // For the following trait...
 trait Foo { }
 
-// ...these two are the same:
+// These two are the same as Box<T> has no lifetime bound on T
 Box<Foo>
 Box<Foo + 'static>
 
 // ...and so are these:
+impl Foo {}
+impl Foo + 'static {}
+
+// ...so are these, because &'a T requires T: 'a
 &'a Foo
 &'a (Foo + 'a)
+
+// std::cell::Ref<'a, T> also requires T: 'a, so these are the same
+std::cell::Ref<'a, Foo>
+std::cell::Ref<'a, Foo + 'a>
+
+// This is an error:
+struct TwoBounds<'a, 'b, T: ?Sized + 'a + 'b>
+TwoBounds<'a, 'b, Foo> // Error: the lifetime bound for this object type cannot
+                       // be deduced from context
+
 ```
 
 The `+ 'static` and `+ 'a` refer to the default bounds of those kinds of trait
 objects, and also to how you can directly override them. Note that the
 innermost object sets the bound, so `&'a Box<Foo>` is still `&'a Box<Foo + 'static>`.
 
-For traits that have lifetime parameters of their own, the default bound is
-based on that lifetime parameter:
+For traits that have a single lifetime _bound_ of their own, that bound is used
+as the default instead of inferring `'static`:
 
 ```rust,ignore
 // For the following trait...
@@ -426,15 +517,17 @@ trait Bar<'a>: 'a { }
 
 // ...these two are the same:
 Box<Bar<'a>>
 Box<Bar<'a> + 'a>
-```
 
-The default for user-defined trait objects is based on the object type itself.
-If a type parameter has a lifetime bound, then that lifetime bound becomes the
-default bound for trait objects of that type. For example, `std::cell::Ref<'a,
-T>` contains a `T: 'a` bound, therefore trait objects of type `Ref<'a,
-SomeTrait>` are the same as `Ref<'a, (SomeTrait + 'a)>`.
+// ...and so are these:
+impl<'a> Bar<'a> {}
+impl<'a> Bar<'a> + 'a {}
+
+// This is still an error:
+struct TwoBounds<'a, 'b, T: ?Sized + 'a + 'b>
+TwoBounds<'a, 'b, Bar<'c>>
+```
 
-### Type parameters
+## Type parameters
 
 Within the body of an item that has type parameter declarations, the names of
 its type parameters are types:
 
@@ -451,14 +544,13 @@ fn to_vec(xs: &[A]) -> Vec
{ } ``` -Here, `first` has type `A`, referring to `to_vec`'s `A` type parameter; and `rest` -has type `Vec`, a vector with element type `A`. +Here, `first` has type `A`, referring to `to_vec`'s `A` type parameter; and +`rest` has type `Vec`, a vector with element type `A`. ## Self types -The special type `Self` has a meaning within traits and impls. In a trait definition, it refers -to an implicit type parameter representing the "implementing" type. In an impl, -it is an alias for the implementing type. For example, in: +The special type `Self` has a meaning within traits and impls: it refers to +the implementing type. For example, in: ```rust pub trait From { @@ -472,8 +564,8 @@ impl From for String { } ``` -The notation `Self` in the impl refers to the implementing type: `String`. In another -example: +The notation `Self` in the impl refers to the implementing type: `String`. In +another example: ```rust trait Printable { @@ -487,6 +579,4 @@ impl Printable for String { } ``` -The notation `&self` is a shorthand for `self: &Self`. In this case, -in the impl, `Self` refers to the value of type `String` that is the -receiver for a call to the method `make_string`. +The notation `&self` is a shorthand for `self: &Self`. diff --git a/src/doc/reference/src/unsafety.md b/src/doc/reference/src/unsafety.md index abb7a9eec5..a68353aede 100644 --- a/src/doc/reference/src/unsafety.md +++ b/src/doc/reference/src/unsafety.md @@ -7,5 +7,8 @@ The following language level features cannot be used in the safe subset of Rust: - Dereferencing a [raw pointer](types.html#pointer-types). -- Reading or writing a [mutable static variable](items.html#mutable-statics). +- Reading or writing a [mutable static variable](items/static-items.html#mutable-statics). +- Reading a field of a [`union`](items/unions.html), or writing to a field of a + union that isn't [`Copy`](the-copy-trait.html). - Calling an unsafe function (including an intrinsic or foreign function). +- Implementing an unsafe trait. diff --git a/src/doc/reference/theme/index.hbs b/src/doc/reference/theme/index.hbs new file mode 100644 index 0000000000..4af25fc10d --- /dev/null +++ b/src/doc/reference/theme/index.hbs @@ -0,0 +1,227 @@ + + + + + {{ title }} + + + + + + + + + + + + + + + + + + + + + + {{#each additional_css}} + + {{/each}} + + {{#if mathjax_support}} + + + {{/if}} + + + + + + + + + + + + + + {{#each additional_js}} + + {{/each}} + + + + + + + + + + + +
+ + + + {{#previous}} + + {{/previous}} + + {{#next}} + + {{/next}} + +
+ + + + + + + {{{livereload}}} + + {{#if google_analytics}} + + {{/if}} + + {{#if playpens_editable}} + + + + + + {{/if}} + + {{#if is_print}} + + {{/if}} + + + + + diff --git a/src/doc/rustdoc/src/documentation-tests.md b/src/doc/rustdoc/src/documentation-tests.md index 4f7736d8df..eb3e6a9dd5 100644 --- a/src/doc/rustdoc/src/documentation-tests.md +++ b/src/doc/rustdoc/src/documentation-tests.md @@ -5,7 +5,7 @@ that your tests are up to date and working. The basic idea is this: -```rust,ignore +```ignore /// # Examples /// /// ``` @@ -16,6 +16,19 @@ The basic idea is this: The triple backticks start and end code blocks. If this were in a file named `foo.rs`, running `rustdoc --test foo.rs` will extract this example, and then run it as a test. +Please note that by default, if no language is set for the block code, `rustdoc` +assumes it is `Rust` code. So the following: + +```rust +let x = 5; +``` + +is strictly equivalent to: + +``` +let x = 5; +``` + There's some subtlety though! Read on for more details. ## Pre-processing examples @@ -106,7 +119,7 @@ our source code: ```text First, we set `x` to five: - ```rust + ``` let x = 5; # let y = 6; # println!("{}", x + y); @@ -114,7 +127,7 @@ our source code: Next, we set `y` to six: - ```rust + ``` # let x = 5; let y = 6; # println!("{}", x + y); @@ -122,7 +135,7 @@ our source code: Finally, we print the sum of `x` and `y`: - ```rust + ``` # let x = 5; # let y = 6; println!("{}", x + y); @@ -136,7 +149,7 @@ explanation. Another case where the use of `#` is handy is when you want to ignore error handling. Lets say you want the following, -```rust,ignore +```ignore /// use std::io; /// let mut input = String::new(); /// io::stdin().read_line(&mut input)?; @@ -145,7 +158,7 @@ error handling. Lets say you want the following, The problem is that `?` returns a `Result` and test functions don't return anything so this will give a mismatched types error. -```rust,ignore +```ignore /// A doc test using ? /// /// ``` @@ -179,7 +192,7 @@ Here’s an example of documenting a macro: /// # } /// ``` /// -/// ```rust,should_panic +/// ```should_panic /// # #[macro_use] extern crate foo; /// # fn main() { /// panic_unless!(true == false, “I’m broken.”); @@ -224,7 +237,7 @@ only shows the part you care about. `should_panic` tells `rustdoc` that the code should compile correctly, but not actually pass as a test. -```rust +```text /// ```no_run /// loop { /// println!("Hello, world"); @@ -233,6 +246,18 @@ not actually pass as a test. # fn foo() {} ``` +`compile_fail` tells `rustdoc` that the compilation should fail. If it +compiles, then the test will fail. However please note that code failing +with the current Rust release may work in a future release, as new features +are added. + +```text +/// ```compile_fail +/// let x = 5; +/// x += 2; // shouldn't compile! +/// ``` +``` + The `no_run` attribute will compile your code, but not run it. This is important for examples such as "Here's how to retrieve a web page," which you would want to ensure compiles, but might be run in a test diff --git a/src/doc/unstable-book/src/language-features/doc-masked.md b/src/doc/unstable-book/src/language-features/doc-masked.md new file mode 100644 index 0000000000..609939bfc2 --- /dev/null +++ b/src/doc/unstable-book/src/language-features/doc-masked.md @@ -0,0 +1,24 @@ +# `doc_masked` + +The tracking issue for this feature is: [#44027] + +----- + +The `doc_masked` feature allows a crate to exclude types from a given crate from appearing in lists +of trait implementations. 
The specifics of the feature are as follows: + +1. When rustdoc encounters an `extern crate` statement annotated with a `#[doc(masked)]` attribute, + it marks the crate as being masked. + +2. When listing traits a given type implements, rustdoc ensures that traits from masked crates are + not emitted into the documentation. + +3. When listing types that implement a given trait, rustdoc ensures that types from masked crates + are not emitted into the documentation. + +This feature was introduced in PR [#44026] to ensure that compiler-internal and +implementation-specific types and traits were not included in the standard library's documentation. +Such types would introduce broken links into the documentation. + +[#44026]: https://github.com/rust-lang/rust/pull/44026 +[#44027]: https://github.com/rust-lang/rust/pull/44027 diff --git a/src/doc/unstable-book/src/language-features/fn-must-use.md b/src/doc/unstable-book/src/language-features/fn-must-use.md new file mode 100644 index 0000000000..71b6cd663a --- /dev/null +++ b/src/doc/unstable-book/src/language-features/fn-must-use.md @@ -0,0 +1,30 @@ +# `fn_must_use` + +The tracking issue for this feature is [#43302]. + +[#43302]: https://github.com/rust-lang/rust/issues/43302 + +------------------------ + +The `fn_must_use` feature allows functions and methods to be annotated with +`#[must_use]`, indicating that the `unused_must_use` lint should require their +return values to be used (similarly to how types annotated with `must_use`, +most notably `Result`, are linted if not used). + +## Examples + +```rust +#![feature(fn_must_use)] + +#[must_use] +fn double(x: i32) -> i32 { + 2 * x +} + +fn main() { + double(4); // warning: unused return value of `double` which must be used + + let _ = double(4); // (no warning) +} + +``` diff --git a/src/doc/unstable-book/src/language-features/generators.md b/src/doc/unstable-book/src/language-features/generators.md new file mode 100644 index 0000000000..7a559a7bec --- /dev/null +++ b/src/doc/unstable-book/src/language-features/generators.md @@ -0,0 +1,245 @@ +# `generators` + +The tracking issue for this feature is: [#43122] + +[#43122]: https://github.com/rust-lang/rust/issues/43122 + +------------------------ + +The `generators` feature gate in Rust allows you to define generator or +coroutine literals. A generator is a "resumable function" that syntactically +resembles a closure but compiles to much different semantics in the compiler +itself. The primary feature of a generator is that it can be suspended during +execution to be resumed at a later date. Generators use the `yield` keyword to +"return", and then the caller can `resume` a generator to resume execution just +after the `yield` keyword. + +Generators are an extra-unstable feature in the compiler right now. Added in +[RFC 2033] they're mostly intended right now as a information/constraint +gathering phase. The intent is that experimentation can happen on the nightly +compiler before actual stabilization. A further RFC will be required to +stabilize generators/coroutines and will likely contain at least a few small +tweaks to the overall design. 
+ +[RFC 2033]: https://github.com/rust-lang/rfcs/pull/2033 + +A syntactical example of a generator is: + +```rust +#![feature(generators, generator_trait)] + +use std::ops::{Generator, GeneratorState}; + +fn main() { + let mut generator = || { + yield 1; + return "foo" + }; + + match generator.resume() { + GeneratorState::Yielded(1) => {} + _ => panic!("unexpected value from resume"), + } + match generator.resume() { + GeneratorState::Complete("foo") => {} + _ => panic!("unexpected value from resume"), + } +} +``` + +Generators are closure-like literals which can contain a `yield` statement. The +`yield` statement takes an optional expression of a value to yield out of the +generator. All generator literals implement the `Generator` trait in the +`std::ops` module. The `Generator` trait has one main method, `resume`, which +resumes execution of the generator at the previous suspension point. + +An example of the control flow of generators is that the following example +prints all numbers in order: + +```rust +#![feature(generators, generator_trait)] + +use std::ops::Generator; + +fn main() { + let mut generator = || { + println!("2"); + yield; + println!("4"); + }; + + println!("1"); + generator.resume(); + println!("3"); + generator.resume(); + println!("5"); +} +``` + +At this time the main intended use case of generators is an implementation +primitive for async/await syntax, but generators will likely be extended to +ergonomic implementations of iterators and other primitives in the future. +Feedback on the design and usage is always appreciated! + +### The `Generator` trait + +The `Generator` trait in `std::ops` currently looks like: + +``` +# #![feature(generator_trait)] +# use std::ops::GeneratorState; + +pub trait Generator { + type Yield; + type Return; + fn resume(&mut self) -> GeneratorState; +} +``` + +The `Generator::Yield` type is the type of values that can be yielded with the +`yield` statement. The `Generator::Return` type is the returned type of the +generator. This is typically the last expression in a generator's definition or +any value passed to `return` in a generator. The `resume` function is the entry +point for executing the `Generator` itself. + +The return value of `resume`, `GeneratorState`, looks like: + +``` +pub enum GeneratorState { + Yielded(Y), + Complete(R), +} +``` + +The `Yielded` variant indicates that the generator can later be resumed. This +corresponds to a `yield` point in a generator. The `Complete` variant indicates +that the generator is complete and cannot be resumed again. Calling `resume` +after a generator has returned `Complete` will likely result in a panic of the +program. + +### Closure-like semantics + +The closure-like syntax for generators alludes to the fact that they also have +closure-like semantics. Namely: + +* When created, a generator executes no code. A closure literal does not + actually execute any of the closure's code on construction, and similarly a + generator literal does not execute any code inside the generator when + constructed. + +* Generators can capture outer variables by reference or by move, and this can + be tweaked with the `move` keyword at the beginning of the closure. Like + closures all generators will have an implicit environment which is inferred by + the compiler. Outer variables can be moved into a generator for use as the + generator progresses. + +* Generator literals produce a value with a unique type which implements the + `std::ops::Generator` trait. 
This allows actual execution of the generator + through the `Generator::resume` method as well as also naming it in return + types and such. + +* Traits like `Send` and `Sync` are automatically implemented for a `Generator` + depending on the captured variables of the environment. Unlike closures though + generators also depend on variables live across suspension points. This means + that although the ambient environment may be `Send` or `Sync`, the generator + itself may not be due to internal variables live across `yield` points being + not-`Send` or not-`Sync`. Note, though, that generators, like closures, do + not implement traits like `Copy` or `Clone` automatically. + +* Whenever a generator is dropped it will drop all captured environment + variables. + +Note that unlike closures generators at this time cannot take any arguments. +That is, generators must always look like `|| { ... }`. This restriction may be +lifted at a future date, the design is ongoing! + +### Generators as state machines + +In the compiler generators are currently compiled as state machines. Each +`yield` expression will correspond to a different state that stores all live +variables over that suspension point. Resumption of a generator will dispatch on +the current state and then execute internally until a `yield` is reached, at +which point all state is saved off in the generator and a value is returned. + +Let's take a look at an example to see what's going on here: + +```rust +#![feature(generators, generator_trait)] + +use std::ops::Generator; + +fn main() { + let ret = "foo"; + let mut generator = move || { + yield 1; + return ret + }; + + generator.resume(); + generator.resume(); +} +``` + +This generator literal will compile down to something similar to: + +```rust +#![feature(generators, generator_trait)] + +use std::ops::{Generator, GeneratorState}; + +fn main() { + let ret = "foo"; + let mut generator = { + enum __Generator { + Start(&'static str), + Yield1(&'static str), + Done, + } + + impl Generator for __Generator { + type Yield = i32; + type Return = &'static str; + + fn resume(&mut self) -> GeneratorState { + use std::mem; + match mem::replace(self, __Generator::Done) { + __Generator::Start(s) => { + *self = __Generator::Yield1(s); + GeneratorState::Yielded(1) + } + + __Generator::Yield1(s) => { + *self = __Generator::Done; + GeneratorState::Complete(s) + } + + __Generator::Done => { + panic!("generator resumed after completion") + } + } + } + } + + __Generator::Start(ret) + }; + + generator.resume(); + generator.resume(); +} +``` + +Notably here we can see that the compiler is generating a fresh type, +`__Generator` in this case. This type has a number of states (represented here +as an `enum`) corresponding to each of the conceptual states of the generator. +At the beginning we're closing over our outer variable `foo` and then that +variable is also live over the `yield` point, so it's stored in both states. + +When the generator starts it'll immediately yield 1, but it saves off its state +just before it does so indicating that it has reached the yield point. Upon +resuming again we'll execute the `return ret` which returns the `Complete` +state. + +Here we can also note that the `Done` state, if resumed, panics immediately as +it's invalid to resume a completed generator. It's also worth noting that this +is just a rough desugaring, not a normative specification for what the compiler +does. 
diff --git a/src/doc/unstable-book/src/language-features/inclusive-range-syntax.md b/src/doc/unstable-book/src/language-features/inclusive-range-syntax.md index 255445c318..56f5880315 100644 --- a/src/doc/unstable-book/src/language-features/inclusive-range-syntax.md +++ b/src/doc/unstable-book/src/language-features/inclusive-range-syntax.md @@ -7,13 +7,13 @@ The tracking issue for this feature is: [#28237] ------------------------ To get a range that goes from 0 to 10 and includes the value 10, you -can write `0...10`: +can write `0..=10`: ```rust #![feature(inclusive_range_syntax)] fn main() { - for i in 0...10 { + for i in 0..=10 { println!("{}", i); } } diff --git a/src/doc/unstable-book/src/language-features/match-beginning-vert.md b/src/doc/unstable-book/src/language-features/match-beginning-vert.md new file mode 100644 index 0000000000..f0a51af7fd --- /dev/null +++ b/src/doc/unstable-book/src/language-features/match-beginning-vert.md @@ -0,0 +1,23 @@ +# `match_beginning_vert` + +The tracking issue for this feature is [#44101]. + +With this feature enabled, you are allowed to add a '|' to the beginning of a +match arm: + +```rust +#![feature(match_beginning_vert)] + +enum Foo { A, B, C } + +fn main() { + let x = Foo::A; + match x { + | Foo::A + | Foo::B => println!("AB"), + | Foo::C => println!("C"), + } +} +``` + +[#44101]: https://github.com/rust-lang/rust/issues/44101 \ No newline at end of file diff --git a/src/doc/unstable-book/src/language-features/match_default_bindings.md b/src/doc/unstable-book/src/language-features/match_default_bindings.md new file mode 100644 index 0000000000..cc542931cb --- /dev/null +++ b/src/doc/unstable-book/src/language-features/match_default_bindings.md @@ -0,0 +1,58 @@ +# `match_default_bindings` + +The tracking issue for this feature is: [#42640] + +[#42640]: https://github.com/rust-lang/rust/issues/42640 + +------------------------ + +Match default bindings (also called "default binding modes in match") improves ergonomics for +pattern-matching on references by introducing automatic dereferencing (and a corresponding shift +in binding modes) for large classes of patterns that would otherwise not compile. + +For example, under match default bindings, + +```rust +#![feature(match_default_bindings)] + +fn main() { + let x: &Option<_> = &Some(0); + + match x { + Some(y) => { + println!("y={}", *y); + }, + None => {}, + } +} +``` + +compiles and is equivalent to either of the below: + +```rust +fn main() { + let x: &Option<_> = &Some(0); + + match *x { + Some(ref y) => { + println!("y={}", *y); + }, + None => {}, + } +} +``` + +or + +```rust +fn main() { + let x: &Option<_> = &Some(0); + + match x { + &Some(ref y) => { + println!("y={}", *y); + }, + &None => {}, + } +} +``` diff --git a/src/doc/unstable-book/src/language-features/trace-macros.md b/src/doc/unstable-book/src/language-features/trace-macros.md new file mode 100644 index 0000000000..41aa286e69 --- /dev/null +++ b/src/doc/unstable-book/src/language-features/trace-macros.md @@ -0,0 +1,39 @@ +# `trace_macros` + +The tracking issue for this feature is [#29598]. + +[#29598]: https://github.com/rust-lang/rust/issues/29598 + +------------------------ + +With `trace_macros` you can trace the expansion of macros in your code. 
+ +## Examples + +```rust +#![feature(trace_macros)] + +fn main() { + trace_macros!(true); + println!("Hello, Rust!"); + trace_macros!(false); +} +``` + +The `cargo build` output: + +```txt +note: trace_macro + --> src/main.rs:5:5 + | +5 | println!("Hello, Rust!"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: expanding `println! { "Hello, Rust!" }` + = note: to `print ! ( concat ! ( "Hello, Rust!" , "\n" ) )` + = note: expanding `print! { concat ! ( "Hello, Rust!" , "\n" ) }` + = note: to `$crate :: io :: _print ( format_args ! ( concat ! ( "Hello, Rust!" , "\n" ) ) + )` + + Finished dev [unoptimized + debuginfo] target(s) in 0.60 secs +``` diff --git a/src/doc/unstable-book/src/library-features/entry-and-modify.md b/src/doc/unstable-book/src/library-features/entry-and-modify.md new file mode 100644 index 0000000000..1280c71e83 --- /dev/null +++ b/src/doc/unstable-book/src/library-features/entry-and-modify.md @@ -0,0 +1,77 @@ +# `entry_and_modify` + +The tracking issue for this feature is: [#44733] + +[#44733]: https://github.com/rust-lang/rust/issues/44733 + +------------------------ + +This introduces a new method for the Entry API of maps +(`std::collections::HashMap` and `std::collections::BTreeMap`), so that +occupied entries can be modified before any potential inserts into the +map. + +For example: + +```rust +#![feature(entry_and_modify)] +# fn main() { +use std::collections::HashMap; + +struct Foo { + new: bool, +} + +let mut map: HashMap<&str, Foo> = HashMap::new(); + +map.entry("quux") + .and_modify(|e| e.new = false) + .or_insert(Foo { new: true }); +# } +``` + +This is not possible with the stable API alone since inserting a default +_before_ modifying the `new` field would mean we would lose the default state: + +```rust +# fn main() { +use std::collections::HashMap; + +struct Foo { + new: bool, +} + +let mut map: HashMap<&str, Foo> = HashMap::new(); + +map.entry("quux").or_insert(Foo { new: true }).new = false; +# } +``` + +In the above code the `new` field will never be `true`, even though we only +intended to update that field to `false` for previously extant entries. + +To achieve the same effect as `and_modify` we would have to manually match +against the `Occupied` and `Vacant` variants of the `Entry` enum, which is +a little less user-friendly, and much more verbose: + +```rust +# fn main() { +use std::collections::HashMap; +use std::collections::hash_map::Entry; + +struct Foo { + new: bool, +} + +let mut map: HashMap<&str, Foo> = HashMap::new(); + +match map.entry("quux") { + Entry::Occupied(entry) => { + entry.into_mut().new = false; + }, + Entry::Vacant(entry) => { + entry.insert(Foo { new: true }); + }, +}; +# } +``` diff --git a/src/doc/unstable-book/src/library-features/entry-or-default.md b/src/doc/unstable-book/src/library-features/entry-or-default.md new file mode 100644 index 0000000000..f8c8a2a7a7 --- /dev/null +++ b/src/doc/unstable-book/src/library-features/entry-or-default.md @@ -0,0 +1,13 @@ +# `entry_or_default` + +The tracking issue for this feature is: [#44324] + +[#44324]: https://github.com/rust-lang/rust/issues/44324 + +------------------------ + +The `entry_or_default` feature adds a new method to `hash_map::Entry` +and `btree_map::Entry`, `or_default`, when `V: Default`. This method is +semantically identical to `or_insert_with(Default::default)`, and will +insert the default value for the type if no entry exists for the current +key. 
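+
+A minimal sketch of how this might be used, here with a hypothetical
+`HashMap<&str, u32>` of counts:
+
+```rust
+#![feature(entry_or_default)]
+# fn main() {
+use std::collections::HashMap;
+
+let mut counts: HashMap<&str, u32> = HashMap::new();
+
+// `or_default` inserts `u32::default()` (that is, 0) for a missing key and
+// returns a mutable reference to the value either way.
+*counts.entry("quux").or_default() += 1;
+*counts.entry("quux").or_default() += 1;
+
+assert_eq!(counts["quux"], 2);
+# }
+```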
diff --git a/src/doc/unstable-book/src/library-features/splice.md b/src/doc/unstable-book/src/library-features/splice.md index 3d33f02076..2e4bb1a525 100644 --- a/src/doc/unstable-book/src/library-features/splice.md +++ b/src/doc/unstable-book/src/library-features/splice.md @@ -17,7 +17,6 @@ let mut s = String::from("α is alpha, β is beta"); let beta_offset = s.find('β').unwrap_or(s.len()); // Replace the range up until the β from the string -let t: String = s.splice(..beta_offset, "Α is capital alpha; ").collect(); -assert_eq!(t, "α is alpha, "); +s.splice(..beta_offset, "Α is capital alpha; "); assert_eq!(s, "Α is capital alpha; β is beta"); ``` diff --git a/src/etc/htmldocck.py b/src/etc/htmldocck.py index 7e8fde2034..8a11c6f7cf 100644 --- a/src/etc/htmldocck.py +++ b/src/etc/htmldocck.py @@ -99,6 +99,8 @@ There are a number of supported commands: * `@count PATH XPATH COUNT' checks for the occurrence of given XPath in the given file. The number of occurrences must match the given count. +* `@has-dir PATH` checks for the existence of the given directory. + All conditions can be negated with `!`. `@!has foo/type.NoSuch.html` checks if the given file does not exist, for example. @@ -308,6 +310,12 @@ class CachedFiles(object): self.trees[path] = tree return self.trees[path] + def get_dir(self, path): + path = self.resolve_path(path) + abspath = os.path.join(self.root, path) + if not(os.path.exists(abspath) and os.path.isdir(abspath)): + raise FailedCheck('Directory does not exist {!r}'.format(path)) + def check_string(data, pat, regexp): if not pat: @@ -407,6 +415,16 @@ def check_command(c, cache): ret = expected == found else: raise InvalidCheck('Invalid number of @{} arguments'.format(c.cmd)) + elif c.cmd == 'has-dir': # has-dir test + if len(c.args) == 1: # @has-dir = has-dir test + try: + cache.get_dir(c.args[0]) + ret = True + except FailedCheck as err: + cerr = str(err) + ret = False + else: + raise InvalidCheck('Invalid number of @{} arguments'.format(c.cmd)) elif c.cmd == 'valid-html': raise InvalidCheck('Unimplemented @valid-html') diff --git a/src/etc/platform-intrinsics/powerpc.json b/src/etc/platform-intrinsics/powerpc.json index d615037b63..acb6813887 100644 --- a/src/etc/platform-intrinsics/powerpc.json +++ b/src/etc/platform-intrinsics/powerpc.json @@ -156,6 +156,139 @@ "llvm": "vupkh{1.kind}{1.data_type_short}", "ret": "s(16-32)", "args": ["0N"] + }, + { + "intrinsic": "madds", + "width": [128], + "llvm": "vmhaddshs", + "ret": "s16", + "args": ["0", "0", "0"] + }, + { + "intrinsic": "msumu{1.data_type_short}m", + "width": [128], + "llvm": "vmsumu{1.data_type_short}m", + "ret": "u32", + "args": ["u(8-16)", "1", "u32"] + }, + { + "intrinsic": "msummbm", + "width": [128], + "llvm": "vmsummbm", + "ret": "s32", + "args": ["s8", "u8", "s32"] + }, + { + "intrinsic": "msumshm", + "width": [128], + "llvm": "vmsumshm", + "ret": "s32", + "args": ["s16", "s16", "s32"] + }, + { + "intrinsic": "msum{0.kind}hs", + "width": [128], + "llvm": "vmsum{0.kind}hs", + "ret": "i32", + "args": ["0N", "0N", "0"] + }, + { + "intrinsic": "sum2s", + "width": [128], + "llvm": "vsum2sws", + "ret": "s32", + "args": ["0", "0"] + }, + { + "intrinsic": "sum4{0.kind}bs", + "width": [128], + "llvm": "vsum4{0.kind}bs", + "ret": "i32", + "args": ["0NN", "0"] + }, + { + "intrinsic": "sum4shs", + "width": [128], + "llvm": "vsum4shs", + "ret": "s32", + "args": ["0N", "0"] + }, + { + "intrinsic": "sums", + "width": [128], + "llvm": "vsumsws", + "ret": "s32", + "args": ["0", "0"] + }, + { + "intrinsic": "madd", + 
"width": [128], + "llvm": "vmaddfp", + "ret": "f32", + "args": ["0", "0", "0"] + }, + { + "intrinsic": "nmsub", + "width": [128], + "llvm": "vnmsubfp", + "ret": "f32", + "args": ["0", "0", "0"] + }, + { + "intrinsic": "expte", + "width": [128], + "llvm": "vexptefp", + "ret": "f32", + "args": ["0"] + }, + { + "intrinsic": "floor", + "width": [128], + "llvm": "vrfim", + "ret": "f32", + "args": ["0"] + }, + { + "intrinsic": "ceil", + "width": [128], + "llvm": "vrfip", + "ret": "f32", + "args": ["0"] + }, + { + "intrinsic": "round", + "width": [128], + "llvm": "vrfin", + "ret": "f32", + "args": ["0"] + }, + { + "intrinsic": "trunc", + "width": [128], + "llvm": "vrfiz", + "ret": "f32", + "args": ["0"] + }, + { + "intrinsic": "loge", + "width": [128], + "llvm": "vlogefp", + "ret": "f32", + "args": ["0"] + }, + { + "intrinsic": "re", + "width": [128], + "llvm": "vrefp", + "ret": "f32", + "args": ["0"] + }, + { + "intrinsic": "rsqrte", + "width": [128], + "llvm": "vrsqrtefp", + "ret": "f32", + "args": ["0"] } ] } diff --git a/src/liballoc/arc.rs b/src/liballoc/arc.rs index d734ae6a2c..9481cd4e1a 100644 --- a/src/liballoc/arc.rs +++ b/src/liballoc/arc.rs @@ -22,7 +22,7 @@ use core::borrow; use core::fmt; use core::cmp::Ordering; use core::intrinsics::abort; -use core::mem::{self, size_of_val, uninitialized}; +use core::mem::{self, align_of_val, size_of_val, uninitialized}; use core::ops::Deref; use core::ops::CoerceUnsized; use core::ptr::{self, Shared}; @@ -52,8 +52,10 @@ const MAX_REFCOUNT: usize = (isize::MAX) as usize; /// also destroyed. /// /// Shared references in Rust disallow mutation by default, and `Arc` is no -/// exception. If you need to mutate through an `Arc`, use [`Mutex`][mutex], -/// [`RwLock`][rwlock], or one of the [`Atomic`][atomic] types. +/// exception: you cannot generally obtain a mutable reference to something +/// inside an `Arc`. If you need to mutate through an `Arc`, use +/// [`Mutex`][mutex], [`RwLock`][rwlock], or one of the [`Atomic`][atomic] +/// types. /// /// ## Thread Safety /// @@ -72,13 +74,13 @@ const MAX_REFCOUNT: usize = (isize::MAX) as usize; /// first: after all, isn't the point of `Arc` thread safety? The key is /// this: `Arc` makes it thread safe to have multiple ownership of the same /// data, but it doesn't add thread safety to its data. Consider -/// `Arc>`. `RefCell` isn't [`Sync`], and if `Arc` was always -/// [`Send`], `Arc>` would be as well. But then we'd have a problem: -/// `RefCell` is not thread safe; it keeps track of the borrowing count using +/// `Arc<`[`RefCell`]`>`. [`RefCell`] isn't [`Sync`], and if `Arc` was always +/// [`Send`], `Arc<`[`RefCell`]`>` would be as well. But then we'd have a problem: +/// [`RefCell`] is not thread safe; it keeps track of the borrowing count using /// non-atomic operations. /// /// In the end, this means that you may need to pair `Arc` with some sort of -/// `std::sync` type, usually `Mutex`. +/// [`std::sync`] type, usually [`Mutex`][mutex]. /// /// ## Breaking cycles with `Weak` /// @@ -106,7 +108,7 @@ const MAX_REFCOUNT: usize = (isize::MAX) as usize; /// // a and b both point to the same memory location as foo. /// ``` /// -/// The `Arc::clone(&from)` syntax is the most idiomatic because it conveys more explicitly +/// The [`Arc::clone(&from)`] syntax is the most idiomatic because it conveys more explicitly /// the meaning of the code. In the example above, this syntax makes it easier to see that /// this code is creating a new reference rather than copying the whole content of foo. 
/// @@ -141,6 +143,9 @@ const MAX_REFCOUNT: usize = (isize::MAX) as usize; /// [upgrade]: struct.Weak.html#method.upgrade /// [`None`]: ../../std/option/enum.Option.html#variant.None /// [assoc]: ../../book/first-edition/method-syntax.html#associated-functions +/// [`RefCell`]: ../../std/cell/struct.RefCell.html +/// [`std::sync`]: ../../std/sync/index.html +/// [`Arc::clone(&from)`]: #method.clone /// /// # Examples /// @@ -324,7 +329,9 @@ impl Arc { Ok(elem) } } +} +impl Arc { /// Consumes the `Arc`, returning the wrapped pointer. /// /// To avoid a memory leak the pointer must be converted back to an `Arc` using @@ -378,16 +385,21 @@ impl Arc { /// ``` #[stable(feature = "rc_raw", since = "1.17.0")] pub unsafe fn from_raw(ptr: *const T) -> Self { - // To find the corresponding pointer to the `ArcInner` we need to subtract the offset of the - // `data` field from the pointer. - let ptr = (ptr as *const u8).offset(-offset_of!(ArcInner, data)); + // Align the unsized value to the end of the ArcInner. + // Because it is ?Sized, it will always be the last field in memory. + let align = align_of_val(&*ptr); + let layout = Layout::new::>(); + let offset = (layout.size() + layout.padding_needed_for(align)) as isize; + + // Reverse the offset to find the original ArcInner. + let fake_ptr = ptr as *mut ArcInner; + let arc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset)); + Arc { - ptr: Shared::new_unchecked(ptr as *mut u8 as *mut _), + ptr: Shared::new_unchecked(arc_ptr), } } -} -impl Arc { /// Creates a new [`Weak`][weak] pointer to this value. /// /// [weak]: struct.Weak.html @@ -1491,6 +1503,28 @@ mod tests { } } + #[test] + fn test_into_from_raw_unsized() { + use std::fmt::Display; + use std::string::ToString; + + let arc: Arc = Arc::from("foo"); + + let ptr = Arc::into_raw(arc.clone()); + let arc2 = unsafe { Arc::from_raw(ptr) }; + + assert_eq!(unsafe { &*ptr }, "foo"); + assert_eq!(arc, arc2); + + let arc: Arc = Arc::new(123); + + let ptr = Arc::into_raw(arc.clone()); + let arc2 = unsafe { Arc::from_raw(ptr) }; + + assert_eq!(unsafe { &*ptr }.to_string(), "123"); + assert_eq!(arc2.to_string(), "123"); + } + #[test] fn test_cowarc_clone_make_mut() { let mut cow0 = Arc::new(75); diff --git a/src/liballoc/binary_heap.rs b/src/liballoc/binary_heap.rs index 57640af816..94bbaf92ce 100644 --- a/src/liballoc/binary_heap.rs +++ b/src/liballoc/binary_heap.rs @@ -926,7 +926,7 @@ impl<'a, T: 'a + fmt::Debug> fmt::Debug for Iter<'a, T> { } } -// FIXME(#19839) Remove in favor of `#[derive(Clone)]` +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Clone for Iter<'a, T> { fn clone(&self) -> Iter<'a, T> { diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs index 82aac4dbf6..35c8530b4d 100644 --- a/src/liballoc/boxed.rs +++ b/src/liballoc/boxed.rs @@ -62,11 +62,11 @@ use core::any::Any; use core::borrow; use core::cmp::Ordering; use core::fmt; -use core::hash::{self, Hash}; +use core::hash::{self, Hash, Hasher}; use core::iter::FusedIterator; use core::marker::{self, Unsize}; use core::mem; -use core::ops::{CoerceUnsized, Deref, DerefMut}; +use core::ops::{CoerceUnsized, Deref, DerefMut, Generator, GeneratorState}; use core::ops::{BoxPlace, Boxed, InPlace, Place, Placer}; use core::ptr::{self, Unique}; use core::convert::From; @@ -456,6 +456,52 @@ impl Hash for Box { } } +#[stable(feature = "indirect_hasher_impl", since = "1.22.0")] +impl Hasher for Box { + fn finish(&self) -> u64 { + (**self).finish() + } + fn write(&mut 
self, bytes: &[u8]) { + (**self).write(bytes) + } + fn write_u8(&mut self, i: u8) { + (**self).write_u8(i) + } + fn write_u16(&mut self, i: u16) { + (**self).write_u16(i) + } + fn write_u32(&mut self, i: u32) { + (**self).write_u32(i) + } + fn write_u64(&mut self, i: u64) { + (**self).write_u64(i) + } + fn write_u128(&mut self, i: u128) { + (**self).write_u128(i) + } + fn write_usize(&mut self, i: usize) { + (**self).write_usize(i) + } + fn write_i8(&mut self, i: i8) { + (**self).write_i8(i) + } + fn write_i16(&mut self, i: i16) { + (**self).write_i16(i) + } + fn write_i32(&mut self, i: i32) { + (**self).write_i32(i) + } + fn write_i64(&mut self, i: i64) { + (**self).write_i64(i) + } + fn write_i128(&mut self, i: i128) { + (**self).write_i128(i) + } + fn write_isize(&mut self, i: isize) { + (**self).write_isize(i) + } +} + #[stable(feature = "from_for_ptrs", since = "1.6.0")] impl From for Box { fn from(t: T) -> Self { @@ -482,9 +528,7 @@ impl<'a> From<&'a str> for Box { #[stable(feature = "boxed_str_conv", since = "1.19.0")] impl From> for Box<[u8]> { fn from(s: Box) -> Self { - unsafe { - mem::transmute(s) - } + unsafe { Box::from_raw(Box::into_raw(s) as *mut [u8]) } } } @@ -784,3 +828,14 @@ impl AsMut for Box { &mut **self } } + +#[unstable(feature = "generator_trait", issue = "43122")] +impl Generator for Box + where T: Generator + ?Sized +{ + type Yield = T::Yield; + type Return = T::Return; + fn resume(&mut self) -> GeneratorState { + (**self).resume() + } +} diff --git a/src/liballoc/btree/map.rs b/src/liballoc/btree/map.rs index f733c3332e..b114dc640f 100644 --- a/src/liballoc/btree/map.rs +++ b/src/liballoc/btree/map.rs @@ -2102,6 +2102,67 @@ impl<'a, K: Ord, V> Entry<'a, K, V> { Vacant(ref entry) => entry.key(), } } + + /// Provides in-place mutable access to an occupied entry before any + /// potential inserts into the map. + /// + /// # Examples + /// + /// ``` + /// #![feature(entry_and_modify)] + /// use std::collections::BTreeMap; + /// + /// let mut map: BTreeMap<&str, usize> = BTreeMap::new(); + /// + /// map.entry("poneyland") + /// .and_modify(|e| { *e += 1 }) + /// .or_insert(42); + /// assert_eq!(map["poneyland"], 42); + /// + /// map.entry("poneyland") + /// .and_modify(|e| { *e += 1 }) + /// .or_insert(42); + /// assert_eq!(map["poneyland"], 43); + /// ``` + #[unstable(feature = "entry_and_modify", issue = "44733")] + pub fn and_modify(self, mut f: F) -> Self + where F: FnMut(&mut V) + { + match self { + Occupied(mut entry) => { + f(entry.get_mut()); + Occupied(entry) + }, + Vacant(entry) => Vacant(entry), + } + } +} + +impl<'a, K: Ord, V: Default> Entry<'a, K, V> { + #[unstable(feature = "entry_or_default", issue = "44324")] + /// Ensures a value is in the entry by inserting the default value if empty, + /// and returns a mutable reference to the value in the entry. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(entry_or_default)] + /// # fn main() { + /// use std::collections::BTreeMap; + /// + /// let mut map: BTreeMap<&str, Option> = BTreeMap::new(); + /// map.entry("poneyland").or_default(); + /// + /// assert_eq!(map["poneyland"], None); + /// # } + /// ``` + pub fn or_default(self) -> &'a mut V { + match self { + Occupied(entry) => entry.into_mut(), + Vacant(entry) => entry.insert(Default::default()), + } + } + } impl<'a, K: Ord, V> VacantEntry<'a, K, V> { diff --git a/src/liballoc/btree/set.rs b/src/liballoc/btree/set.rs index d32460da93..7da6371cc1 100644 --- a/src/liballoc/btree/set.rs +++ b/src/liballoc/btree/set.rs @@ -1110,15 +1110,13 @@ impl<'a, T: Ord> Iterator for Union<'a, T> { type Item = &'a T; fn next(&mut self) -> Option<&'a T> { - loop { - match cmp_opt(self.a.peek(), self.b.peek(), Greater, Less) { - Less => return self.a.next(), - Equal => { - self.b.next(); - return self.a.next(); - } - Greater => return self.b.next(), + match cmp_opt(self.a.peek(), self.b.peek(), Greater, Less) { + Less => self.a.next(), + Equal => { + self.b.next(); + self.a.next() } + Greater => self.b.next(), } } diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs index 8b0dd4e56c..d51aaa23c6 100644 --- a/src/liballoc/lib.rs +++ b/src/liballoc/lib.rs @@ -80,6 +80,7 @@ #![cfg_attr(not(test), feature(core_float))] #![cfg_attr(not(test), feature(exact_size_is_empty))] #![cfg_attr(not(test), feature(slice_rotate))] +#![cfg_attr(not(test), feature(generator_trait))] #![cfg_attr(test, feature(rand, test))] #![feature(allow_internal_unstable)] #![feature(box_patterns)] @@ -97,6 +98,7 @@ #![feature(generic_param_attrs)] #![feature(i128_type)] #![feature(inclusive_range)] +#![feature(iter_rfold)] #![feature(lang_items)] #![feature(needs_allocator)] #![feature(nonzero)] @@ -120,7 +122,7 @@ #![feature(unsize)] #![feature(allocator_internals)] -#![cfg_attr(not(test), feature(fused, fn_traits, placement_new_protocol, swap_with_slice))] +#![cfg_attr(not(test), feature(fused, fn_traits, placement_new_protocol, swap_with_slice, i128))] #![cfg_attr(test, feature(test, box_heap))] // Allow testing this library diff --git a/src/liballoc/linked_list.rs b/src/liballoc/linked_list.rs index f9512cbe97..f897feb7af 100644 --- a/src/liballoc/linked_list.rs +++ b/src/liballoc/linked_list.rs @@ -80,7 +80,7 @@ impl<'a, T: 'a + fmt::Debug> fmt::Debug for Iter<'a, T> { } } -// FIXME #19839: deriving is too aggressive on the bounds (T doesn't need to be Clone). +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Clone for Iter<'a, T> { fn clone(&self) -> Self { diff --git a/src/liballoc/macros.rs b/src/liballoc/macros.rs index 763f04fcd0..c2a3019515 100644 --- a/src/liballoc/macros.rs +++ b/src/liballoc/macros.rs @@ -8,12 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/// Creates a `Vec` containing the arguments. +/// Creates a [`Vec`] containing the arguments. /// /// `vec!` allows `Vec`s to be defined with the same syntax as array expressions. 
/// There are two forms of this macro: /// -/// - Create a `Vec` containing a given list of elements: +/// - Create a [`Vec`] containing a given list of elements: /// /// ``` /// let v = vec![1, 2, 3]; @@ -22,7 +22,7 @@ /// assert_eq!(v[2], 3); /// ``` /// -/// - Create a `Vec` from a given element and size: +/// - Create a [`Vec`] from a given element and size: /// /// ``` /// let v = vec![1; 3]; @@ -30,14 +30,17 @@ /// ``` /// /// Note that unlike array expressions this syntax supports all elements -/// which implement `Clone` and the number of elements doesn't have to be +/// which implement [`Clone`] and the number of elements doesn't have to be /// a constant. /// -/// This will use `clone()` to duplicate an expression, so one should be careful +/// This will use `clone` to duplicate an expression, so one should be careful /// using this with types having a nonstandard `Clone` implementation. For /// example, `vec![Rc::new(1); 5]` will create a vector of five references /// to the same boxed integer value, not five references pointing to independently /// boxed integers. +/// +/// [`Vec`]: ../std/vec/struct.Vec.html +/// [`Clone`]: ../std/clone/trait.Clone.html #[cfg(not(test))] #[macro_export] #[stable(feature = "rust1", since = "1.0.0")] @@ -67,10 +70,22 @@ macro_rules! vec { ($($x:expr,)*) => (vec![$($x),*]) } -/// Use the syntax described in `std::fmt` to create a value of type `String`. -/// See [`std::fmt`][fmt] for more information. +/// Creates a `String` using interpolation of runtime expressions. +/// +/// The first argument `format!` receives is a format string. This must be a string +/// literal. The power of the formatting string is in the `{}`s contained within it. +/// +/// Additional parameters passed to `format!` replace the `{}`s within the +/// formatting string in the order given unless named or positional parameters +/// are used; see [`std::fmt`][fmt] for more information. +/// +/// A common use for `format!` is concatenation and interpolation of strings. +/// The same convention is used with [`print!`] and [`write!`] macros, +/// depending on the intended destination of the string. /// /// [fmt]: ../std/fmt/index.html +/// [`print!`]: ../std/macro.print.html +/// [`write!`]: ../std/macro.write.html /// /// # Panics /// @@ -90,22 +105,3 @@ macro_rules! vec { macro_rules! format { ($($arg:tt)*) => ($crate::fmt::format(format_args!($($arg)*))) } - -// Private macro to get the offset of a struct field in bytes from the address of the struct. -macro_rules! offset_of { - ($container:path, $field:ident) => {{ - // Make sure the field actually exists. This line ensures that a compile-time error is - // generated if $field is accessed through a Deref impl. - let $container { $field : _, .. }; - - // Create an (invalid) instance of the container and calculate the offset to its - // field. Using a null pointer might be UB if `&(*(0 as *const T)).field` is interpreted to - // be nullptr deref. - let invalid: $container = ::core::mem::uninitialized(); - let offset = &invalid.$field as *const _ as usize - &invalid as *const _ as usize; - - // Do not run destructors on the made up invalid instance. - ::core::mem::forget(invalid); - offset as isize - }}; -} diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs index 47f537caf3..2f8620cc75 100644 --- a/src/liballoc/rc.rs +++ b/src/liballoc/rc.rs @@ -19,7 +19,7 @@ //! given value is destroyed, the pointed-to value is also destroyed. //! //! Shared references in Rust disallow mutation by default, and [`Rc`] -//!
is no exception: you cannot obtain a mutable reference to +//! is no exception: you cannot generally obtain a mutable reference to //! something inside an [`Rc`]. If you need mutability, put a [`Cell`] //! or [`RefCell`] inside the [`Rc`]; see [an example of mutability //! inside an Rc][mutability]. @@ -244,6 +244,7 @@ use boxed::Box; #[cfg(test)] use std::boxed::Box; +use core::any::Any; use core::borrow; use core::cell::Cell; use core::cmp::Ordering; @@ -252,7 +253,7 @@ use core::hash::{Hash, Hasher}; use core::intrinsics::abort; use core::marker; use core::marker::Unsize; -use core::mem::{self, forget, size_of_val, uninitialized}; +use core::mem::{self, align_of_val, forget, size_of_val, uninitialized}; use core::ops::Deref; use core::ops::CoerceUnsized; use core::ptr::{self, Shared}; @@ -358,7 +359,9 @@ impl Rc { Err(this) } } +} +impl Rc { /// Consumes the `Rc`, returning the wrapped pointer. /// /// To avoid a memory leak the pointer must be converted back to an `Rc` using @@ -412,17 +415,21 @@ impl Rc { /// ``` #[stable(feature = "rc_raw", since = "1.17.0")] pub unsafe fn from_raw(ptr: *const T) -> Self { - // To find the corresponding pointer to the `RcBox` we need to subtract the offset of the - // `value` field from the pointer. + // Align the unsized value to the end of the RcBox. + // Because it is ?Sized, it will always be the last field in memory. + let align = align_of_val(&*ptr); + let layout = Layout::new::>(); + let offset = (layout.size() + layout.padding_needed_for(align)) as isize; + + // Reverse the offset to find the original RcBox. + let fake_ptr = ptr as *mut RcBox; + let rc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset)); - let ptr = (ptr as *const u8).offset(-offset_of!(RcBox, value)); Rc { - ptr: Shared::new_unchecked(ptr as *mut u8 as *mut _) + ptr: Shared::new_unchecked(rc_ptr), } } -} -impl Rc { /// Creates a new [`Weak`][weak] pointer to this value. /// /// [weak]: struct.Weak.html @@ -608,6 +615,46 @@ impl Rc { } } +impl Rc { + #[inline] + #[unstable(feature = "rc_downcast", issue = "44608")] + /// Attempt to downcast the `Rc` to a concrete type. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(rc_downcast)] + /// use std::any::Any; + /// use std::rc::Rc; + /// + /// fn print_if_string(value: Rc) { + /// if let Ok(string) = value.downcast::() { + /// println!("String ({}): {}", string.len(), string); + /// } + /// } + /// + /// fn main() { + /// let my_string = "Hello World".to_string(); + /// print_if_string(Rc::new(my_string)); + /// print_if_string(Rc::new(0i8)); + /// } + /// ``` + pub fn downcast(self) -> Result, Rc> { + if (*self).is::() { + // avoid the pointer arithmetic in from_raw + unsafe { + let raw: *const RcBox = self.ptr.as_ptr(); + forget(self); + Ok(Rc { + ptr: Shared::new_unchecked(raw as *const RcBox as *mut _), + }) + } + } else { + Err(self) + } + } +} + impl Rc { // Allocates an `RcBox` with sufficient space for an unsized value unsafe fn allocate_for_ptr(ptr: *const T) -> *mut RcBox { @@ -1481,6 +1528,28 @@ mod tests { } } + #[test] + fn test_into_from_raw_unsized() { + use std::fmt::Display; + use std::string::ToString; + + let rc: Rc = Rc::from("foo"); + + let ptr = Rc::into_raw(rc.clone()); + let rc2 = unsafe { Rc::from_raw(ptr) }; + + assert_eq!(unsafe { &*ptr }, "foo"); + assert_eq!(rc, rc2); + + let rc: Rc = Rc::new(123); + + let ptr = Rc::into_raw(rc.clone()); + let rc2 = unsafe { Rc::from_raw(ptr) }; + + assert_eq!(unsafe { &*ptr }.to_string(), "123"); + assert_eq!(rc2.to_string(), "123"); + } + #[test] fn get_mut() { let mut x = Rc::new(3); @@ -1696,6 +1765,26 @@ mod tests { assert_eq!(&r[..], [1, 2, 3]); } + + #[test] + fn test_downcast() { + use std::any::Any; + + let r1: Rc = Rc::new(i32::max_value()); + let r2: Rc = Rc::new("abc"); + + assert!(r1.clone().downcast::().is_err()); + + let r1i32 = r1.downcast::(); + assert!(r1i32.is_ok()); + assert_eq!(r1i32.unwrap(), Rc::new(i32::max_value())); + + assert!(r2.clone().downcast::().is_err()); + + let r2str = r2.downcast::<&'static str>(); + assert!(r2str.is_ok()); + assert_eq!(r2str.unwrap(), Rc::new("abc")); + } } #[stable(feature = "rust1", since = "1.0.0")] diff --git a/src/liballoc/slice.rs b/src/liballoc/slice.rs index 7787ace944..2045d5ddd9 100644 --- a/src/liballoc/slice.rs +++ b/src/liballoc/slice.rs @@ -671,10 +671,25 @@ impl [T] { /// # Examples /// /// ``` - /// let v = [10, 40, 30, 20, 50]; - /// let (v1, v2) = v.split_at(2); - /// assert_eq!([10, 40], v1); - /// assert_eq!([30, 20, 50], v2); + /// let v = [1, 2, 3, 4, 5, 6]; + /// + /// { + /// let (left, right) = v.split_at(0); + /// assert!(left == []); + /// assert!(right == [1, 2, 3, 4, 5, 6]); + /// } + /// + /// { + /// let (left, right) = v.split_at(2); + /// assert!(left == [1, 2]); + /// assert!(right == [3, 4, 5, 6]); + /// } + /// + /// { + /// let (left, right) = v.split_at(6); + /// assert!(left == [1, 2, 3, 4, 5, 6]); + /// assert!(right == []); + /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -695,26 +710,16 @@ impl [T] { /// # Examples /// /// ``` - /// let mut v = [1, 2, 3, 4, 5, 6]; - /// + /// let mut v = [1, 0, 3, 0, 5, 6]; /// // scoped to restrict the lifetime of the borrows /// { - /// let (left, right) = v.split_at_mut(0); - /// assert!(left == []); - /// assert!(right == [1, 2, 3, 4, 5, 6]); - /// } - /// - /// { /// let (left, right) = v.split_at_mut(2); - /// assert!(left == [1, 2]); - /// assert!(right == [3, 4, 5, 6]); - /// } - /// - /// { - /// let (left, right) = v.split_at_mut(6); - /// assert!(left == [1, 2, 3, 4, 5, 6]); - /// assert!(right == []); + /// assert!(left == [1, 0]); + /// assert!(right == [3, 0, 5, 6]); + /// 
left[1] = 2; + /// right[1] = 4; /// } + /// assert!(v == [1, 2, 3, 4, 5, 6]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] diff --git a/src/liballoc/str.rs b/src/liballoc/str.rs index 79b2bbce2a..830128f2b9 100644 --- a/src/liballoc/str.rs +++ b/src/liballoc/str.rs @@ -297,6 +297,34 @@ impl str { /// [`str::from_utf8_mut`] function. /// /// [`str::from_utf8_mut`]: ./str/fn.from_utf8_mut.html + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let mut s = String::from("Hello"); + /// let bytes = unsafe { s.as_bytes_mut() }; + /// + /// assert_eq!(b"Hello", bytes); + /// ``` + /// + /// Mutability: + /// + /// ``` + /// let mut s = String::from("🗻∈🌏"); + /// + /// unsafe { + /// let bytes = s.as_bytes_mut(); + /// + /// bytes[0] = 0xF0; + /// bytes[1] = 0x9F; + /// bytes[2] = 0x8D; + /// bytes[3] = 0x94; + /// } + /// + /// assert_eq!("🍔∈🌏", s); + /// ``` #[stable(feature = "str_mut_extras", since = "1.20.0")] #[inline(always)] pub unsafe fn as_bytes_mut(&mut self) -> &mut [u8] { @@ -362,16 +390,25 @@ impl str { /// # Examples /// /// ``` - /// let mut v = String::from("🗻∈🌏"); - /// - /// assert_eq!(Some("🗻"), v.get_mut(0..4).map(|v| &*v)); - /// - /// // indices not on UTF-8 sequence boundaries - /// assert!(v.get_mut(1..).is_none()); - /// assert!(v.get_mut(..8).is_none()); + /// use std::ascii::AsciiExt; /// + /// let mut v = String::from("hello"); + /// // correct length + /// assert!(v.get_mut(0..5).is_some()); /// // out of bounds /// assert!(v.get_mut(..42).is_none()); + /// assert_eq!(Some("he"), v.get_mut(0..2).map(|v| &*v)); + /// + /// assert_eq!("hello", v); + /// { + /// let s = v.get_mut(0..2); + /// let s = s.map(|s| { + /// s.make_ascii_uppercase(); + /// &*s + /// }); + /// assert_eq!(Some("HE"), s); + /// } + /// assert_eq!("HEllo", v); /// ``` #[stable(feature = "str_checked_slicing", since = "1.20.0")] #[inline] @@ -818,6 +855,19 @@ impl str { } /// Returns an iterator of `u16` over the string encoded as UTF-16. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let text = "Zażółć gęślą jaźń"; + /// + /// let utf8_len = text.len(); + /// let utf16_len = text.encode_utf16().count(); + /// + /// assert!(utf16_len <= utf8_len); + /// ``` #[stable(feature = "encode_utf16", since = "1.8.0")] pub fn encode_utf16(&self) -> EncodeUtf16 { EncodeUtf16 { encoder: Utf16Encoder::new(self[..].chars()) } @@ -1746,6 +1796,17 @@ impl str { } /// Converts a `Box` into a `Box<[u8]>` without copying or allocating. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s = "this is a string"; + /// let boxed_str = s.to_owned().into_boxed_str(); + /// let boxed_bytes = boxed_str.into_boxed_bytes(); + /// assert_eq!(*boxed_bytes, *s.as_bytes()); + /// ``` #[stable(feature = "str_box_extras", since = "1.20.0")] pub fn into_boxed_bytes(self: Box) -> Box<[u8]> { self.into() @@ -1986,10 +2047,8 @@ impl str { /// ``` #[stable(feature = "box_str", since = "1.4.0")] pub fn into_string(self: Box) -> String { - unsafe { - let slice = mem::transmute::, Box<[u8]>>(self); - String::from_utf8_unchecked(slice.into_vec()) - } + let slice = Box::<[u8]>::from(self); + unsafe { String::from_utf8_unchecked(slice.into_vec()) } } /// Create a [`String`] by repeating a string `n` times. @@ -2013,7 +2072,18 @@ impl str { /// Converts a boxed slice of bytes to a boxed string slice without checking /// that the string contains valid UTF-8. 
+/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// let smile_utf8 = Box::new([226, 152, 186]); +/// let smile = unsafe { std::str::from_boxed_utf8_unchecked(smile_utf8) }; +/// +/// assert_eq!("☺", &*smile); +/// ``` #[stable(feature = "str_box_extras", since = "1.20.0")] pub unsafe fn from_boxed_utf8_unchecked(v: Box<[u8]>) -> Box { - mem::transmute(v) + Box::from_raw(Box::into_raw(v) as *mut str) } diff --git a/src/liballoc/string.rs b/src/liballoc/string.rs index db8b4f1f23..6d0bb264df 100644 --- a/src/liballoc/string.rs +++ b/src/liballoc/string.rs @@ -622,6 +622,13 @@ impl String { /// Decode a UTF-16 encoded slice `v` into a `String`, replacing /// invalid data with the replacement character (U+FFFD). /// + /// Unlike [`from_utf8_lossy`] which returns a [`Cow<'a, str>`], + /// `from_utf16_lossy` returns a `String` since the UTF-16 to UTF-8 + /// conversion requires a memory allocation. + /// + /// [`from_utf8_lossy`]: #method.from_utf8_lossy + /// [`Cow<'a, str>`]: ../borrow/enum.Cow.html + /// /// # Examples /// /// Basic usage: @@ -743,13 +750,38 @@ impl String { } /// Extracts a string slice containing the entire string. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s = String::from("foo"); + /// + /// assert_eq!("foo", s.as_str()); + /// ``` #[inline] #[stable(feature = "string_as_str", since = "1.7.0")] pub fn as_str(&self) -> &str { self } - /// Extracts a string slice containing the entire string. + /// Converts a `String` into a mutable string slice. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::ascii::AsciiExt; + /// + /// let mut s = String::from("foobar"); + /// let s_mut_str = s.as_mut_str(); + /// + /// s_mut_str.make_ascii_uppercase(); + /// + /// assert_eq!("FOOBAR", s_mut_str); + /// ``` #[inline] #[stable(feature = "string_as_str", since = "1.7.0")] pub fn as_mut_str(&mut self) -> &mut str { @@ -1392,11 +1424,11 @@ impl String { } /// Creates a splicing iterator that removes the specified range in the string, - /// replaces with the given string, and yields the removed chars. - /// The given string doesn’t need to be the same length as the range. + /// and replaces it with the given string. + /// The given string doesn't need to be the same length as the range. /// - /// Note: The element range is removed when the [`Splice`] is dropped, - /// even if the iterator is not consumed until the end. + /// Note: Unlike [`Vec::splice`], the replacement happens eagerly, and this + /// method does not return the removed chars. /// /// # Panics /// @@ -1404,7 +1436,7 @@ impl String { /// boundary, or if they're out of bounds. 
/// /// [`char`]: ../../std/primitive.char.html - /// [`Splice`]: ../../std/string/struct.Splice.html + /// [`Vec::splice`]: ../../std/vec/struct.Vec.html#method.splice /// /// # Examples /// @@ -1416,45 +1448,32 @@ impl String { /// let beta_offset = s.find('β').unwrap_or(s.len()); /// /// // Replace the range up until the β from the string - /// let t: String = s.splice(..beta_offset, "Α is capital alpha; ").collect(); - /// assert_eq!(t, "α is alpha, "); + /// s.splice(..beta_offset, "Α is capital alpha; "); /// assert_eq!(s, "Α is capital alpha; β is beta"); /// ``` #[unstable(feature = "splice", reason = "recently added", issue = "44643")] - pub fn splice<'a, 'b, R>(&'a mut self, range: R, replace_with: &'b str) -> Splice<'a, 'b> + pub fn splice(&mut self, range: R, replace_with: &str) where R: RangeArgument { // Memory safety // // The String version of Splice does not have the memory safety issues // of the vector version. The data is just plain bytes. - // Because the range removal happens in Drop, if the Splice iterator is leaked, - // the removal will not happen. - let len = self.len(); - let start = match range.start() { - Included(&n) => n, - Excluded(&n) => n + 1, - Unbounded => 0, + + match range.start() { + Included(&n) => assert!(self.is_char_boundary(n)), + Excluded(&n) => assert!(self.is_char_boundary(n + 1)), + Unbounded => {}, }; - let end = match range.end() { - Included(&n) => n + 1, - Excluded(&n) => n, - Unbounded => len, + match range.end() { + Included(&n) => assert!(self.is_char_boundary(n + 1)), + Excluded(&n) => assert!(self.is_char_boundary(n)), + Unbounded => {}, }; - // Take out two simultaneous borrows. The &mut String won't be accessed - // until iteration is over, in Drop. - let self_ptr = self as *mut _; - // slicing does the appropriate bounds checks - let chars_iter = self[start..end].chars(); - - Splice { - start, - end, - iter: chars_iter, - string: self_ptr, - replace_with, - } + unsafe { + self.as_mut_vec() + }.splice(range, replace_with.bytes()); } /// Converts this `String` into a [`Box`]`<`[`str`]`>`. @@ -2241,61 +2260,3 @@ impl<'a> DoubleEndedIterator for Drain<'a> { #[unstable(feature = "fused", issue = "35602")] impl<'a> FusedIterator for Drain<'a> {} - -/// A splicing iterator for `String`. -/// -/// This struct is created by the [`splice()`] method on [`String`]. See its -/// documentation for more. 
-/// -/// [`splice()`]: struct.String.html#method.splice -/// [`String`]: struct.String.html -#[derive(Debug)] -#[unstable(feature = "splice", reason = "recently added", issue = "44643")] -pub struct Splice<'a, 'b> { - /// Will be used as &'a mut String in the destructor - string: *mut String, - /// Start of part to remove - start: usize, - /// End of part to remove - end: usize, - /// Current remaining range to remove - iter: Chars<'a>, - replace_with: &'b str, -} - -#[unstable(feature = "splice", reason = "recently added", issue = "44643")] -unsafe impl<'a, 'b> Sync for Splice<'a, 'b> {} -#[unstable(feature = "splice", reason = "recently added", issue = "44643")] -unsafe impl<'a, 'b> Send for Splice<'a, 'b> {} - -#[unstable(feature = "splice", reason = "recently added", issue = "44643")] -impl<'a, 'b> Drop for Splice<'a, 'b> { - fn drop(&mut self) { - unsafe { - let vec = (*self.string).as_mut_vec(); - vec.splice(self.start..self.end, self.replace_with.bytes()); - } - } -} - -#[unstable(feature = "splice", reason = "recently added", issue = "44643")] -impl<'a, 'b> Iterator for Splice<'a, 'b> { - type Item = char; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next() - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} - -#[unstable(feature = "splice", reason = "recently added", issue = "44643")] -impl<'a, 'b> DoubleEndedIterator for Splice<'a, 'b> { - #[inline] - fn next_back(&mut self) -> Option { - self.iter.next_back() - } -} diff --git a/src/liballoc/tests/btree/map.rs b/src/liballoc/tests/btree/map.rs index 2c899d9694..2393101040 100644 --- a/src/liballoc/tests/btree/map.rs +++ b/src/liballoc/tests/btree/map.rs @@ -182,7 +182,7 @@ fn test_range_small() { fn test_range_inclusive() { let size = 500; - let map: BTreeMap<_, _> = (0...size).map(|i| (i, i)).collect(); + let map: BTreeMap<_, _> = (0..=size).map(|i| (i, i)).collect(); fn check<'a, L, R>(lhs: L, rhs: R) where L: IntoIterator, @@ -193,18 +193,18 @@ fn test_range_inclusive() { assert_eq!(lhs, rhs); } - check(map.range(size + 1...size + 1), vec![]); - check(map.range(size...size), vec![(&size, &size)]); - check(map.range(size...size + 1), vec![(&size, &size)]); - check(map.range(0...0), vec![(&0, &0)]); - check(map.range(0...size - 1), map.range(..size)); - check(map.range(-1...-1), vec![]); - check(map.range(-1...size), map.range(..)); - check(map.range(...size), map.range(..)); - check(map.range(...200), map.range(..201)); - check(map.range(5...8), vec![(&5, &5), (&6, &6), (&7, &7), (&8, &8)]); - check(map.range(-1...0), vec![(&0, &0)]); - check(map.range(-1...2), vec![(&0, &0), (&1, &1), (&2, &2)]); + check(map.range(size + 1..=size + 1), vec![]); + check(map.range(size..=size), vec![(&size, &size)]); + check(map.range(size..=size + 1), vec![(&size, &size)]); + check(map.range(0..=0), vec![(&0, &0)]); + check(map.range(0..=size - 1), map.range(..size)); + check(map.range(-1..=-1), vec![]); + check(map.range(-1..=size), map.range(..)); + check(map.range(..=size), map.range(..)); + check(map.range(..=200), map.range(..201)); + check(map.range(5..=8), vec![(&5, &5), (&6, &6), (&7, &7), (&8, &8)]); + check(map.range(-1..=0), vec![(&0, &0)]); + check(map.range(-1..=2), vec![(&0, &0), (&1, &1), (&2, &2)]); } #[test] @@ -212,7 +212,7 @@ fn test_range_inclusive_max_value() { let max = ::std::usize::MAX; let map: BTreeMap<_, _> = vec![(max, 0)].into_iter().collect(); - assert_eq!(map.range(max...max).collect::>(), &[(&max, &0)]); + assert_eq!(map.range(max..=max).collect::>(), &[(&max, &0)]); } 
#[test] diff --git a/src/liballoc/tests/lib.rs b/src/liballoc/tests/lib.rs index 8f3e71ef79..c5beb63d12 100644 --- a/src/liballoc/tests/lib.rs +++ b/src/liballoc/tests/lib.rs @@ -50,3 +50,19 @@ fn hash(t: &T) -> u64 { t.hash(&mut s); s.finish() } + +// FIXME: Instantiated functions with i128 in the signature is not supported in Emscripten. +// See https://github.com/kripken/emscripten-fastcomp/issues/169 +#[cfg(not(target_os = "emscripten"))] +#[test] +fn test_boxed_hasher() { + let ordinary_hash = hash(&5u32); + + let mut hasher_1 = Box::new(DefaultHasher::new()); + 5u32.hash(&mut hasher_1); + assert_eq!(ordinary_hash, hasher_1.finish()); + + let mut hasher_2 = Box::new(DefaultHasher::new()) as Box; + 5u32.hash(&mut hasher_2); + assert_eq!(ordinary_hash, hasher_2.finish()); +} diff --git a/src/liballoc/tests/str.rs b/src/liballoc/tests/str.rs index 9d8ca38b20..b317806450 100644 --- a/src/liballoc/tests/str.rs +++ b/src/liballoc/tests/str.rs @@ -361,13 +361,13 @@ fn test_slice_fail() { #[test] #[should_panic] fn test_str_slice_rangetoinclusive_max_panics() { - &"hello"[...usize::max_value()]; + &"hello"[..=usize::max_value()]; } #[test] #[should_panic] fn test_str_slice_rangeinclusive_max_panics() { - &"hello"[1...usize::max_value()]; + &"hello"[1..=usize::max_value()]; } #[test] @@ -375,7 +375,7 @@ fn test_str_slice_rangeinclusive_max_panics() { fn test_str_slicemut_rangetoinclusive_max_panics() { let mut s = "hello".to_owned(); let s: &mut str = &mut s; - &mut s[...usize::max_value()]; + &mut s[..=usize::max_value()]; } #[test] @@ -383,7 +383,7 @@ fn test_str_slicemut_rangetoinclusive_max_panics() { fn test_str_slicemut_rangeinclusive_max_panics() { let mut s = "hello".to_owned(); let s: &mut str = &mut s; - &mut s[1...usize::max_value()]; + &mut s[1..=usize::max_value()]; } #[test] @@ -391,13 +391,13 @@ fn test_str_get_maxinclusive() { let mut s = "hello".to_owned(); { let s: &str = &s; - assert_eq!(s.get(...usize::max_value()), None); - assert_eq!(s.get(1...usize::max_value()), None); + assert_eq!(s.get(..=usize::max_value()), None); + assert_eq!(s.get(1..=usize::max_value()), None); } { let s: &mut str = &mut s; - assert_eq!(s.get(...usize::max_value()), None); - assert_eq!(s.get(1...usize::max_value()), None); + assert_eq!(s.get(..=usize::max_value()), None); + assert_eq!(s.get(1..=usize::max_value()), None); } } diff --git a/src/liballoc/tests/string.rs b/src/liballoc/tests/string.rs index f5c124c6b4..ef6f5e10a7 100644 --- a/src/liballoc/tests/string.rs +++ b/src/liballoc/tests/string.rs @@ -442,9 +442,8 @@ fn test_drain() { #[test] fn test_splice() { let mut s = "Hello, world!".to_owned(); - let t: String = s.splice(7..12, "世界").collect(); + s.splice(7..12, "世界"); assert_eq!(s, "Hello, 世界!"); - assert_eq!(t, "world"); } #[test] @@ -457,12 +456,10 @@ fn test_splice_char_boundary() { #[test] fn test_splice_inclusive_range() { let mut v = String::from("12345"); - let t: String = v.splice(2...3, "789").collect(); + v.splice(2..=3, "789"); assert_eq!(v, "127895"); - assert_eq!(t, "34"); - let t2: String = v.splice(1...2, "A").collect(); + v.splice(1..=2, "A"); assert_eq!(v, "1A895"); - assert_eq!(t2, "27"); } #[test] @@ -476,30 +473,21 @@ fn test_splice_out_of_bounds() { #[should_panic] fn test_splice_inclusive_out_of_bounds() { let mut s = String::from("12345"); - s.splice(5...5, "789"); + s.splice(5..=5, "789"); } #[test] fn test_splice_empty() { let mut s = String::from("12345"); - let t: String = s.splice(1..2, "").collect(); + s.splice(1..2, ""); assert_eq!(s, "1345"); - 
assert_eq!(t, "2"); } #[test] fn test_splice_unbounded() { let mut s = String::from("12345"); - let t: String = s.splice(.., "").collect(); + s.splice(.., ""); assert_eq!(s, ""); - assert_eq!(t, "12345"); -} - -#[test] -fn test_splice_forget() { - let mut s = String::from("12345"); - ::std::mem::forget(s.splice(2..4, "789")); - assert_eq!(s, "12345"); } #[test] diff --git a/src/liballoc/tests/vec.rs b/src/liballoc/tests/vec.rs index 670ea8089f..0e25da5bd3 100644 --- a/src/liballoc/tests/vec.rs +++ b/src/liballoc/tests/vec.rs @@ -537,27 +537,27 @@ fn test_drain_range() { #[test] fn test_drain_inclusive_range() { let mut v = vec!['a', 'b', 'c', 'd', 'e']; - for _ in v.drain(1...3) { + for _ in v.drain(1..=3) { } assert_eq!(v, &['a', 'e']); - let mut v: Vec<_> = (0...5).map(|x| x.to_string()).collect(); - for _ in v.drain(1...5) { + let mut v: Vec<_> = (0..=5).map(|x| x.to_string()).collect(); + for _ in v.drain(1..=5) { } assert_eq!(v, &["0".to_string()]); - let mut v: Vec = (0...5).map(|x| x.to_string()).collect(); - for _ in v.drain(0...5) { + let mut v: Vec = (0..=5).map(|x| x.to_string()).collect(); + for _ in v.drain(0..=5) { } assert_eq!(v, Vec::::new()); - let mut v: Vec<_> = (0...5).map(|x| x.to_string()).collect(); - for _ in v.drain(0...3) { + let mut v: Vec<_> = (0..=5).map(|x| x.to_string()).collect(); + for _ in v.drain(0..=3) { } assert_eq!(v, &["4".to_string(), "5".to_string()]); - let mut v: Vec<_> = (0...1).map(|x| x.to_string()).collect(); - for _ in v.drain(...0) { + let mut v: Vec<_> = (0..=1).map(|x| x.to_string()).collect(); + for _ in v.drain(..=0) { } assert_eq!(v, &["1".to_string()]); } @@ -572,7 +572,7 @@ fn test_drain_max_vec_size() { let mut v = Vec::<()>::with_capacity(usize::max_value()); unsafe { v.set_len(usize::max_value()); } - for _ in v.drain(usize::max_value() - 1...usize::max_value() - 1) { + for _ in v.drain(usize::max_value() - 1..=usize::max_value() - 1) { } assert_eq!(v.len(), usize::max_value() - 1); } @@ -581,7 +581,7 @@ fn test_drain_max_vec_size() { #[should_panic] fn test_drain_inclusive_out_of_bounds() { let mut v = vec![1, 2, 3, 4, 5]; - v.drain(5...5); + v.drain(5..=5); } #[test] @@ -598,10 +598,10 @@ fn test_splice() { fn test_splice_inclusive_range() { let mut v = vec![1, 2, 3, 4, 5]; let a = [10, 11, 12]; - let t1: Vec<_> = v.splice(2...3, a.iter().cloned()).collect(); + let t1: Vec<_> = v.splice(2..=3, a.iter().cloned()).collect(); assert_eq!(v, &[1, 2, 10, 11, 12, 5]); assert_eq!(t1, &[3, 4]); - let t2: Vec<_> = v.splice(1...2, Some(20)).collect(); + let t2: Vec<_> = v.splice(1..=2, Some(20)).collect(); assert_eq!(v, &[1, 20, 11, 12, 5]); assert_eq!(t2, &[2, 10]); } @@ -619,7 +619,7 @@ fn test_splice_out_of_bounds() { fn test_splice_inclusive_out_of_bounds() { let mut v = vec![1, 2, 3, 4, 5]; let a = [10, 11, 12]; - v.splice(5...5, a.iter().cloned()); + v.splice(5..=5, a.iter().cloned()); } #[test] diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs index 3bfd78e6c0..861291194f 100644 --- a/src/liballoc/vec.rs +++ b/src/liballoc/vec.rs @@ -370,6 +370,7 @@ impl Vec { /// /// * `ptr` needs to have been previously allocated via [`String`]/`Vec` /// (at least, it's highly likely to be incorrect if it wasn't). + /// * `ptr`'s `T` needs to have the same size and alignment as it was allocated with. /// * `length` needs to be less than or equal to `capacity`. /// * `capacity` needs to be the capacity that the pointer was allocated with. /// @@ -506,13 +507,9 @@ impl Vec { /// Converts the vector into [`Box<[T]>`][owned slice]. 
/// - /// Note that this will drop any excess capacity. Calling this and - /// converting back to a vector with [`into_vec`] is equivalent to calling - /// [`shrink_to_fit`]. + /// Note that this will drop any excess capacity. /// /// [owned slice]: ../../std/boxed/struct.Box.html - /// [`into_vec`]: ../../std/primitive.slice.html#method.into_vec - /// [`shrink_to_fit`]: #method.shrink_to_fit /// /// # Examples /// @@ -1968,16 +1965,19 @@ impl Vec { /// Using this method is equivalent to the following code: /// /// ``` - /// # let some_predicate = |x: &mut i32| { *x == 2 }; - /// # let mut vec = vec![1, 2, 3, 4, 5]; + /// # let some_predicate = |x: &mut i32| { *x == 2 || *x == 3 || *x == 6 }; + /// # let mut vec = vec![1, 2, 3, 4, 5, 6]; /// let mut i = 0; /// while i != vec.len() { /// if some_predicate(&mut vec[i]) { /// let val = vec.remove(i); /// // your code here + /// } else { + /// i += 1; /// } - /// i += 1; /// } + /// + /// # assert_eq!(vec, vec![1, 4, 5]); /// ``` /// /// But `drain_filter` is easier to use. `drain_filter` is also more efficient, @@ -2690,7 +2690,13 @@ impl<'a, T, F> Iterator for DrainFilter<'a, T, F> self.del += 1; return Some(ptr::read(&v[i])); } else if self.del > 0 { - v.swap(i - self.del, i); + let del = self.del; + let src: *const T = &v[i]; + let dst: *mut T = &mut v[i - del]; + // This is safe because self.vec has length 0 + // thus its elements will not have Drop::drop + // called on them in the event of a panic. + ptr::copy_nonoverlapping(src, dst, 1); } } None diff --git a/src/liballoc/vec_deque.rs b/src/liballoc/vec_deque.rs index cddfb4d019..f56aa23a4e 100644 --- a/src/liballoc/vec_deque.rs +++ b/src/liballoc/vec_deque.rs @@ -459,10 +459,12 @@ impl VecDeque { /// /// `i` and `j` may be equal. /// - /// Fails if there is no element with either index. - /// /// Element at index 0 is the front of the queue. /// + /// # Panics + /// + /// Panics if either index is out of bounds. 
+ /// /// # Examples /// /// ``` @@ -1920,7 +1922,7 @@ impl<'a, T: 'a + fmt::Debug> fmt::Debug for Iter<'a, T> { } } -// FIXME(#19839) Remove in favor of `#[derive(Clone)]` +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Clone for Iter<'a, T> { fn clone(&self) -> Iter<'a, T> { @@ -1971,6 +1973,14 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> { self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len()); unsafe { Some(self.ring.get_unchecked(self.head)) } } + + fn rfold(self, mut accum: Acc, mut f: F) -> Acc + where F: FnMut(Acc, Self::Item) -> Acc + { + let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); + accum = back.iter().rfold(accum, &mut f); + front.iter().rfold(accum, &mut f) + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -2056,6 +2066,14 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { Some(&mut *(elem as *mut _)) } } + + fn rfold(self, mut accum: Acc, mut f: F) -> Acc + where F: FnMut(Acc, Self::Item) -> Acc + { + let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); + accum = back.iter_mut().rfold(accum, &mut f); + front.iter_mut().rfold(accum, &mut f) + } } #[stable(feature = "rust1", since = "1.0.0")] diff --git a/src/liballoc_jemalloc/Cargo.toml b/src/liballoc_jemalloc/Cargo.toml index 94700cf447..4042c4d2d4 100644 --- a/src/liballoc_jemalloc/Cargo.toml +++ b/src/liballoc_jemalloc/Cargo.toml @@ -19,7 +19,7 @@ libc = { path = "../rustc/libc_shim" } [build-dependencies] build_helper = { path = "../build_helper" } -gcc = "0.3.50" +cc = "1.0" [features] debug = [] diff --git a/src/liballoc_jemalloc/build.rs b/src/liballoc_jemalloc/build.rs index 41193f6a41..7dd85ddcc7 100644 --- a/src/liballoc_jemalloc/build.rs +++ b/src/liballoc_jemalloc/build.rs @@ -11,12 +11,12 @@ #![deny(warnings)] extern crate build_helper; -extern crate gcc; +extern crate cc; use std::env; use std::path::PathBuf; use std::process::Command; -use build_helper::{run, native_lib_boilerplate}; +use build_helper::{run, native_lib_boilerplate, BuildExpectation}; fn main() { // FIXME: This is a hack to support building targets that don't @@ -63,7 +63,7 @@ fn main() { _ => return, }; - let compiler = gcc::Config::new().get_compiler(); + let compiler = cc::Build::new().get_compiler(); // only msvc returns None for ar so unwrap is okay let ar = build_helper::cc2ar(compiler.path(), &target).unwrap(); let cflags = compiler.args() @@ -111,9 +111,11 @@ fn main() { cmd.arg("--with-jemalloc-prefix=je_"); } - if cfg!(feature = "debug-jemalloc") { - cmd.arg("--enable-debug"); - } + // FIXME: building with jemalloc assertions is currently broken. + // See . + //if cfg!(feature = "debug") { + // cmd.arg("--enable-debug"); + //} cmd.arg(format!("--host={}", build_helper::gnu_target(&target))); cmd.arg(format!("--build={}", build_helper::gnu_target(&host))); @@ -124,7 +126,7 @@ fn main() { cmd.arg("--with-lg-quantum=4"); } - run(&mut cmd); + run(&mut cmd, BuildExpectation::None); let mut make = Command::new(build_helper::make(&host)); make.current_dir(&native.out_dir) @@ -141,14 +143,14 @@ fn main() { .arg(env::var("NUM_JOBS").expect("NUM_JOBS was not set")); } - run(&mut make); + run(&mut make, BuildExpectation::None); // The pthread_atfork symbols is used by jemalloc on android but the really // old android we're building on doesn't have them defined, so just make // sure the symbols are available. 
if target.contains("androideabi") { println!("cargo:rerun-if-changed=pthread_atfork_dummy.c"); - gcc::Config::new() + cc::Build::new() .flag("-fvisibility=hidden") .file("pthread_atfork_dummy.c") .compile("libpthread_atfork_dummy.a"); diff --git a/src/liballoc_jemalloc/lib.rs b/src/liballoc_jemalloc/lib.rs index efefabc974..d153f19c46 100644 --- a/src/liballoc_jemalloc/lib.rs +++ b/src/liballoc_jemalloc/lib.rs @@ -9,6 +9,7 @@ // except according to those terms. #![no_std] +#![allow(unused_attributes)] #![unstable(feature = "alloc_jemalloc", reason = "this library is unlikely to be stabilized in its current \ form or name", @@ -19,8 +20,10 @@ #![feature(libc)] #![feature(linkage)] #![feature(staged_api)] +#![feature(rustc_attrs)] #![cfg_attr(dummy_jemalloc, allow(dead_code, unused_extern_crates))] #![cfg_attr(not(dummy_jemalloc), feature(allocator_api))] +#![rustc_alloc_kind = "exe"] extern crate alloc; extern crate alloc_system; diff --git a/src/liballoc_system/lib.rs b/src/liballoc_system/lib.rs index 1defe30871..2eb659699e 100644 --- a/src/liballoc_system/lib.rs +++ b/src/liballoc_system/lib.rs @@ -9,6 +9,7 @@ // except according to those terms. #![no_std] +#![allow(unused_attributes)] #![deny(warnings)] #![unstable(feature = "alloc_system", reason = "this library is unlikely to be stabilized in its current \ @@ -19,7 +20,9 @@ #![feature(alloc)] #![feature(core_intrinsics)] #![feature(staged_api)] +#![feature(rustc_attrs)] #![cfg_attr(any(unix, target_os = "redox"), feature(libc))] +#![rustc_alloc_kind = "lib"] // The minimum alignment guaranteed by the architecture. This value is used to // add fast paths for low alignment values. In practice, the alignment is a @@ -221,7 +224,7 @@ mod platform { } } - #[cfg(any(target_os = "android", target_os = "redox"))] + #[cfg(any(target_os = "android", target_os = "redox", target_os = "solaris"))] #[inline] unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 { // On android we currently target API level 9 which unfortunately @@ -244,7 +247,7 @@ mod platform { libc::memalign(layout.align(), layout.size()) as *mut u8 } - #[cfg(not(any(target_os = "android", target_os = "redox")))] + #[cfg(not(any(target_os = "android", target_os = "redox", target_os = "solaris")))] #[inline] unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 { let mut out = ptr::null_mut(); diff --git a/src/libbacktrace/configure b/src/libbacktrace/configure index 47df55c22d..0f301f2b76 100755 --- a/src/libbacktrace/configure +++ b/src/libbacktrace/configure @@ -11844,6 +11844,9 @@ elf*) FORMAT_FILE="elf.lo" ;; pecoff) FORMAT_FILE="pecoff.lo" backtrace_supports_data=no ;; +macho*) FORMAT_FILE="macho.lo" + backtrace_supports_data=no + ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: could not determine output file type" >&5 $as_echo "$as_me: WARNING: could not determine output file type" >&2;} FORMAT_FILE="unknown.lo" @@ -12320,6 +12323,12 @@ fi fi fi + +case "${host_os}" in +darwin*) + have_mmap=no ;; +esac + if test "$have_mmap" = "no"; then VIEW_FILE=read.lo ALLOC_FILE=alloc.lo diff --git a/src/libbacktrace/configure.ac b/src/libbacktrace/configure.ac index 71e85187ef..ea1b27d807 100644 --- a/src/libbacktrace/configure.ac +++ b/src/libbacktrace/configure.ac @@ -231,6 +231,9 @@ elf*) FORMAT_FILE="elf.lo" ;; pecoff) FORMAT_FILE="pecoff.lo" backtrace_supports_data=no ;; +macho*) FORMAT_FILE="macho.lo" + backtrace_supports_data=no + ;; *) AC_MSG_WARN([could not determine output file type]) FORMAT_FILE="unknown.lo" backtrace_supported=no @@ -280,6 +283,12 @@ else 
AC_CHECK_FUNC(mmap, [have_mmap=yes], [have_mmap=no]) fi fi + +case "${host_os}" in +darwin*) + have_mmap=no ;; +esac + if test "$have_mmap" = "no"; then VIEW_FILE=read.lo ALLOC_FILE=alloc.lo diff --git a/src/libbacktrace/filetype.awk b/src/libbacktrace/filetype.awk index 57bab797a9..a5f6c8cc18 100644 --- a/src/libbacktrace/filetype.awk +++ b/src/libbacktrace/filetype.awk @@ -3,3 +3,9 @@ /\177ELF\002/ { if (NR == 1) { print "elf64"; exit } } /\114\001/ { if (NR == 1) { print "pecoff"; exit } } /\144\206/ { if (NR == 1) { print "pecoff"; exit } } +/\xFE\xED\xFA\xCE/ { if (NR == 1) { print "macho32"; exit } } +/\xCE\xFA\xED\xFE/ { if (NR == 1) { print "macho32"; exit } } +/\xFE\xED\xFA\xCF/ { if (NR == 1) { print "macho64"; exit } } +/\xCF\xFA\xED\xFE/ { if (NR == 1) { print "macho64"; exit } } +/\xCA\xFE\xBA\xBE/ { if (NR == 1) { print "macho-fat"; exit } } +/\xBE\xBA\xFE\xCA/ { if (NR == 1) { print "macho-fat"; exit } } diff --git a/src/libbacktrace/macho.c b/src/libbacktrace/macho.c new file mode 100644 index 0000000000..9af14e724b --- /dev/null +++ b/src/libbacktrace/macho.c @@ -0,0 +1,1416 @@ +/* macho.c -- Get debug data from an Mach-O file for backtraces. + Copyright (C) 2012-2016 Free Software Foundation, Inc. + Written by John Colanduoni. + + Pending upstream pull request: + https://github.com/ianlancetaylor/libbacktrace/pull/2 + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + (1) Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + (2) Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + (3) The name of the author may not be used to + endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. */ + +#include "config.h" + +/* We can't use autotools to detect the pointer width of our program because + we may be building a fat Mach-O file containing both 32-bit and 64-bit + variants. However Mach-O runs a limited set of platforms so detection + via preprocessor is not difficult. 
*/ + +#if defined(__MACH__) +#if defined(__LP64__) +#define BACKTRACE_BITS 64 +#else +#define BACKTRACE_BITS 32 +#endif +#else +#error Attempting to build Mach-O support on incorrect platform +#endif + +#if defined(__x86_64__) +#define NATIVE_CPU_TYPE CPU_TYPE_X86_64 +#elif defined(__i386__) +#define NATIVE_CPU_TYPE CPU_TYPE_X86 +#elif defined(__aarch64__) +#define NATIVE_CPU_TYPE CPU_TYPE_ARM64 +#elif defined(__arm__) +#define NATIVE_CPU_TYPE CPU_TYPE_ARM +#else +#error Could not detect native Mach-O cpu_type_t +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "backtrace.h" +#include "internal.h" + +struct macho_commands_view +{ + struct backtrace_view view; + uint32_t commands_count; + uint32_t commands_total_size; + int bytes_swapped; + size_t base_offset; +}; + +enum debug_section +{ + DEBUG_INFO, + DEBUG_LINE, + DEBUG_ABBREV, + DEBUG_RANGES, + DEBUG_STR, + DEBUG_MAX +}; + +static const char *const debug_section_names[DEBUG_MAX] = + { + "__debug_info", + "__debug_line", + "__debug_abbrev", + "__debug_ranges", + "__debug_str" + }; + +struct found_dwarf_section +{ + uint32_t file_offset; + uintptr_t file_size; + const unsigned char *data; +}; + +/* Mach-O symbols don't have a length. As a result we have to infer it + by sorting the symbol addresses for each image and recording the + memory range attributed to each image. */ +struct macho_symbol +{ + uintptr_t addr; + size_t size; + const char *name; +}; + +struct macho_syminfo_data +{ + struct macho_syminfo_data *next; + struct macho_symbol *symbols; + size_t symbol_count; + uintptr_t min_addr; + uintptr_t max_addr; +}; + +uint16_t +macho_file_to_host_u16 (int file_bytes_swapped, uint16_t input) +{ + if (file_bytes_swapped) + return (input >> 8) | (input << 8); + else + return input; +} + +uint32_t +macho_file_to_host_u32 (int file_bytes_swapped, uint32_t input) +{ + if (file_bytes_swapped) + { + return ((input >> 24) & 0x000000FF) + | ((input >> 8) & 0x0000FF00) + | ((input << 8) & 0x00FF0000) + | ((input << 24) & 0xFF000000); + } + else + { + return input; + } +} + +uint64_t +macho_file_to_host_u64 (int file_bytes_swapped, uint64_t input) +{ + if (file_bytes_swapped) + { + return macho_file_to_host_u32 (file_bytes_swapped, + (uint32_t) (input >> 32)) + | (((uint64_t) macho_file_to_host_u32 (file_bytes_swapped, + (uint32_t) input)) << 32); + } + else + { + return input; + } +} + +#if BACKTRACE_BITS == 64 +#define macho_file_to_host_usize macho_file_to_host_u64 +typedef struct mach_header_64 mach_header_native_t; +#define LC_SEGMENT_NATIVE LC_SEGMENT_64 +typedef struct segment_command_64 segment_command_native_t; +typedef struct nlist_64 nlist_native_t; +typedef struct section_64 section_native_t; +#else /* BACKTRACE_BITS == 32 */ +#define macho_file_to_host_usize macho_file_to_host_u32 +typedef struct mach_header mach_header_native_t; +#define LC_SEGMENT_NATIVE LC_SEGMENT +typedef struct segment_command segment_command_native_t; +typedef struct nlist nlist_native_t; +typedef struct section section_native_t; +#endif + +// Gets a view into a Mach-O image, taking any slice offset into account +int +macho_get_view (struct backtrace_state *state, int descriptor, + off_t offset, size_t size, + backtrace_error_callback error_callback, + void *data, struct macho_commands_view *commands_view, + struct backtrace_view *view) +{ + return backtrace_get_view (state, descriptor, + commands_view->base_offset + offset, size, + error_callback, data, view); +} + +int 
+macho_get_commands (struct backtrace_state *state, int descriptor, + backtrace_error_callback error_callback, + void *data, struct macho_commands_view *commands_view, + int *incompatible) +{ + int ret = 0; + int is_fat = 0; + struct backtrace_view file_header_view; + int file_header_view_valid = 0; + struct backtrace_view fat_archs_view; + int fat_archs_view_valid = 0; + const mach_header_native_t *file_header; + uint64_t commands_offset; + + *incompatible = 0; + + if (!backtrace_get_view (state, descriptor, 0, sizeof (mach_header_native_t), + error_callback, data, &file_header_view)) + goto end; + file_header_view_valid = 1; + + switch (*(uint32_t *) file_header_view.data) + { + case MH_MAGIC: + if (BACKTRACE_BITS == 32) + commands_view->bytes_swapped = 0; + else + { + *incompatible = 1; + goto end; + } + break; + case MH_CIGAM: + if (BACKTRACE_BITS == 32) + commands_view->bytes_swapped = 1; + else + { + *incompatible = 1; + goto end; + } + break; + case MH_MAGIC_64: + if (BACKTRACE_BITS == 64) + commands_view->bytes_swapped = 0; + else + { + *incompatible = 1; + goto end; + } + break; + case MH_CIGAM_64: + if (BACKTRACE_BITS == 64) + commands_view->bytes_swapped = 1; + else + { + *incompatible = 1; + goto end; + } + break; + case FAT_MAGIC: + is_fat = 1; + commands_view->bytes_swapped = 0; + break; + case FAT_CIGAM: + is_fat = 1; + commands_view->bytes_swapped = 1; + break; + default: + goto end; + } + + if (is_fat) + { + uint32_t native_slice_offset; + size_t archs_total_size; + uint32_t arch_count; + const struct fat_header *fat_header; + const struct fat_arch *archs; + uint32_t i; + + fat_header = file_header_view.data; + arch_count = + macho_file_to_host_u32 (commands_view->bytes_swapped, + fat_header->nfat_arch); + + archs_total_size = arch_count * sizeof (struct fat_arch); + + if (!backtrace_get_view (state, descriptor, sizeof (struct fat_header), + archs_total_size, error_callback, + data, &fat_archs_view)) + goto end; + fat_archs_view_valid = 1; + + native_slice_offset = 0; + archs = fat_archs_view.data; + for (i = 0; i < arch_count; i++) + { + const struct fat_arch *raw_arch = archs + i; + int cpu_type = + (int) macho_file_to_host_u32 (commands_view->bytes_swapped, + (uint32_t) raw_arch->cputype); + + if (cpu_type == NATIVE_CPU_TYPE) + { + native_slice_offset = + macho_file_to_host_u32 (commands_view->bytes_swapped, + raw_arch->offset); + + break; + } + } + + if (native_slice_offset == 0) + { + *incompatible = 1; + goto end; + } + + backtrace_release_view (state, &file_header_view, error_callback, data); + file_header_view_valid = 0; + if (!backtrace_get_view (state, descriptor, native_slice_offset, + sizeof (mach_header_native_t), error_callback, + data, &file_header_view)) + goto end; + file_header_view_valid = 1; + + // The endianess of the slice may be different than the fat image + switch (*(uint32_t *) file_header_view.data) + { + case MH_MAGIC: + if (BACKTRACE_BITS == 32) + commands_view->bytes_swapped = 0; + else + goto end; + break; + case MH_CIGAM: + if (BACKTRACE_BITS == 32) + commands_view->bytes_swapped = 1; + else + goto end; + break; + case MH_MAGIC_64: + if (BACKTRACE_BITS == 64) + commands_view->bytes_swapped = 0; + else + goto end; + break; + case MH_CIGAM_64: + if (BACKTRACE_BITS == 64) + commands_view->bytes_swapped = 1; + else + goto end; + break; + default: + goto end; + } + + commands_view->base_offset = native_slice_offset; + } + else + commands_view->base_offset = 0; + + file_header = file_header_view.data; + commands_view->commands_count = + 
macho_file_to_host_u32 (commands_view->bytes_swapped, + file_header->ncmds); + commands_view->commands_total_size = + macho_file_to_host_u32 (commands_view->bytes_swapped, + file_header->sizeofcmds); + commands_offset = + commands_view->base_offset + sizeof (mach_header_native_t); + + if (!backtrace_get_view (state, descriptor, commands_offset, + commands_view->commands_total_size, error_callback, + data, &commands_view->view)) + goto end; + + ret = 1; + +end: + if (file_header_view_valid) + backtrace_release_view (state, &file_header_view, error_callback, data); + if (fat_archs_view_valid) + backtrace_release_view (state, &fat_archs_view, error_callback, data); + return ret; +} + +int +macho_get_uuid (struct backtrace_state *state ATTRIBUTE_UNUSED, + int descriptor ATTRIBUTE_UNUSED, + backtrace_error_callback error_callback, + void *data, struct macho_commands_view *commands_view, + uuid_t *uuid) +{ + size_t offset = 0; + uint32_t i = 0; + + for (i = 0; i < commands_view->commands_count; i++) + { + const struct load_command *raw_command; + struct load_command command; + + if (offset + sizeof (struct load_command) + > commands_view->commands_total_size) + { + error_callback (data, + "executable file contains out of range command offset", + 0); + return 0; + } + + raw_command = + commands_view->view.data + offset; + command.cmd = macho_file_to_host_u32 (commands_view->bytes_swapped, + raw_command->cmd); + command.cmdsize = macho_file_to_host_u32 (commands_view->bytes_swapped, + raw_command->cmdsize); + + if (command.cmd == LC_UUID) + { + const struct uuid_command *uuid_command; + + if (offset + sizeof (struct uuid_command) + > commands_view->commands_total_size) + { + error_callback (data, + "executable file contains out of range command offset", + 0); + return 0; + } + + uuid_command = + (struct uuid_command *) raw_command; + memcpy (uuid, uuid_command->uuid, sizeof (uuid_t)); + return 1; + } + + offset += command.cmdsize; + } + + error_callback (data, "executable file is missing an identifying UUID", 0); + return 0; +} + +/* Returns the base address of a Mach-O image, as encoded in the file header. + * WARNING: This does not take ASLR into account, which is ubiquitous on recent + * Darwin platforms. 
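+ * Callers compensate by adding the image's ASLR slide to the values returned + * here, e.g. image_actual_base_address = image_file_base_address + vmslide in + * macho_add below.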
+ */ +int +macho_get_addr_range (struct backtrace_state *state ATTRIBUTE_UNUSED, + int descriptor ATTRIBUTE_UNUSED, + backtrace_error_callback error_callback, + void *data, struct macho_commands_view *commands_view, + uintptr_t *base_address, uintptr_t *max_address) +{ + size_t offset = 0; + int found_text = 0; + uint32_t i = 0; + + *max_address = 0; + + for (i = 0; i < commands_view->commands_count; i++) + { + const struct load_command *raw_command; + struct load_command command; + + if (offset + sizeof (struct load_command) + > commands_view->commands_total_size) + { + error_callback (data, + "executable file contains out of range command offset", + 0); + return 0; + } + + raw_command = commands_view->view.data + offset; + command.cmd = macho_file_to_host_u32 (commands_view->bytes_swapped, + raw_command->cmd); + command.cmdsize = macho_file_to_host_u32 (commands_view->bytes_swapped, + raw_command->cmdsize); + + if (command.cmd == LC_SEGMENT_NATIVE) + { + const segment_command_native_t *raw_segment; + uintptr_t segment_vmaddr; + uintptr_t segment_vmsize; + uintptr_t segment_maxaddr; + uintptr_t text_fileoff; + + if (offset + sizeof (segment_command_native_t) + > commands_view->commands_total_size) + { + error_callback (data, + "executable file contains out of range command offset", + 0); + return 0; + } + + raw_segment = (segment_command_native_t *) raw_command; + + segment_vmaddr = macho_file_to_host_usize ( + commands_view->bytes_swapped, raw_segment->vmaddr); + segment_vmsize = macho_file_to_host_usize ( + commands_view->bytes_swapped, raw_segment->vmsize); + segment_maxaddr = segment_vmaddr + segment_vmsize; + + if (strncmp (raw_segment->segname, "__TEXT", + sizeof (raw_segment->segname)) == 0) + { + text_fileoff = macho_file_to_host_usize ( + commands_view->bytes_swapped, raw_segment->fileoff); + *base_address = segment_vmaddr - text_fileoff; + + found_text = 1; + } + + if (segment_maxaddr > *max_address) + *max_address = segment_maxaddr; + } + + offset += command.cmdsize; + } + + if (found_text) + return 1; + else + { + error_callback (data, "executable is missing __TEXT segment", 0); + return 0; + } +} + +static int +macho_symbol_compare_addr (const void *left_raw, const void *right_raw) +{ + const struct macho_symbol *left = left_raw; + const struct macho_symbol *right = right_raw; + + if (left->addr > right->addr) + return 1; + else if (left->addr < right->addr) + return -1; + else + return 0; +} + +int +macho_symbol_type_relevant (uint8_t type) +{ + uint8_t type_field = (uint8_t) (type & N_TYPE); + + return !(type & N_EXT) && + (type_field == N_ABS || type_field == N_SECT); +} + +int +macho_add_symtab (struct backtrace_state *state, + backtrace_error_callback error_callback, + void *data, int descriptor, + struct macho_commands_view *commands_view, + uintptr_t base_address, uintptr_t max_image_address, + intptr_t vmslide, int *found_sym) +{ + struct macho_syminfo_data *syminfo_data; + + int ret = 0; + size_t offset = 0; + struct backtrace_view symtab_view; + int symtab_view_valid = 0; + struct backtrace_view strtab_view; + int strtab_view_valid = 0; + size_t syminfo_index = 0; + size_t function_count = 0; + uint32_t i = 0; + uint32_t j = 0; + uint32_t symtab_index = 0; + + *found_sym = 0; + + for (i = 0; i < commands_view->commands_count; i++) + { + const struct load_command *raw_command; + struct load_command command; + + if (offset + sizeof (struct load_command) + > commands_view->commands_total_size) + { + error_callback (data, + "executable file contains out of range command 
offset", + 0); + return 0; + } + + raw_command = commands_view->view.data + offset; + command.cmd = macho_file_to_host_u32 (commands_view->bytes_swapped, + raw_command->cmd); + command.cmdsize = macho_file_to_host_u32 (commands_view->bytes_swapped, + raw_command->cmdsize); + + if (command.cmd == LC_SYMTAB) + { + const struct symtab_command *symtab_command; + uint32_t symbol_table_offset; + uint32_t symbol_count; + uint32_t string_table_offset; + uint32_t string_table_size; + + if (offset + sizeof (struct symtab_command) + > commands_view->commands_total_size) + { + error_callback (data, + "executable file contains out of range command offset", + 0); + return 0; + } + + symtab_command = (struct symtab_command *) raw_command; + + symbol_table_offset = macho_file_to_host_u32 ( + commands_view->bytes_swapped, symtab_command->symoff); + symbol_count = macho_file_to_host_u32 ( + commands_view->bytes_swapped, symtab_command->nsyms); + string_table_offset = macho_file_to_host_u32 ( + commands_view->bytes_swapped, symtab_command->stroff); + string_table_size = macho_file_to_host_u32 ( + commands_view->bytes_swapped, symtab_command->strsize); + + + if (!macho_get_view (state, descriptor, symbol_table_offset, + symbol_count * sizeof (nlist_native_t), + error_callback, data, commands_view, + &symtab_view)) + goto end; + symtab_view_valid = 1; + + if (!macho_get_view (state, descriptor, string_table_offset, + string_table_size, error_callback, data, + commands_view, &strtab_view)) + goto end; + strtab_view_valid = 1; + + // Count functions first + for (j = 0; j < symbol_count; j++) + { + const nlist_native_t *raw_sym = + ((const nlist_native_t *) symtab_view.data) + j; + + if (macho_symbol_type_relevant (raw_sym->n_type)) + { + function_count += 1; + } + } + + // Allocate space for the: + // (a) macho_syminfo_data for this image + // (b) macho_symbol entries + syminfo_data = + backtrace_alloc (state, + sizeof (struct macho_syminfo_data), + error_callback, data); + if (syminfo_data == NULL) + goto end; + + syminfo_data->symbols = backtrace_alloc ( + state, function_count * sizeof (struct macho_symbol), + error_callback, data); + if (syminfo_data->symbols == NULL) + goto end; + + syminfo_data->symbol_count = function_count; + syminfo_data->next = NULL; + syminfo_data->min_addr = base_address; + syminfo_data->max_addr = max_image_address; + + for (symtab_index = 0; + symtab_index < symbol_count; symtab_index++) + { + const nlist_native_t *raw_sym = + ((const nlist_native_t *) symtab_view.data) + + symtab_index; + + if (macho_symbol_type_relevant (raw_sym->n_type)) + { + size_t strtab_index; + const char *name; + size_t max_len_plus_one; + + syminfo_data->symbols[syminfo_index].addr = + macho_file_to_host_usize (commands_view->bytes_swapped, + raw_sym->n_value) + vmslide; + + strtab_index = macho_file_to_host_u32 ( + commands_view->bytes_swapped, + raw_sym->n_un.n_strx); + + // Check the range of the supposed "string" we've been + // given + if (strtab_index >= string_table_size) + { + error_callback ( + data, + "dSYM file contains out of range string table index", + 0); + goto end; + } + + name = strtab_view.data + strtab_index; + max_len_plus_one = string_table_size - strtab_index; + + if (strnlen (name, max_len_plus_one) >= max_len_plus_one) + { + error_callback ( + data, + "dSYM file contains unterminated string", + 0); + goto end; + } + + // Remove underscore prefixes + if (name[0] == '_') + { + name = name + 1; + } + + syminfo_data->symbols[syminfo_index].name = name; + + syminfo_index += 1; + } + } 
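+ + // Mach-O symbols carry no size field; sort by address so that each entry's + // size can be inferred from the distance to its successor (computed below).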
+ + backtrace_qsort (syminfo_data->symbols, + syminfo_data->symbol_count, + sizeof (struct macho_symbol), + macho_symbol_compare_addr); + + // Calculate symbol sizes + for (syminfo_index = 0; + syminfo_index < syminfo_data->symbol_count; syminfo_index++) + { + if (syminfo_index + 1 < syminfo_data->symbol_count) + { + syminfo_data->symbols[syminfo_index].size = + syminfo_data->symbols[syminfo_index + 1].addr - + syminfo_data->symbols[syminfo_index].addr; + } + else + { + syminfo_data->symbols[syminfo_index].size = + max_image_address - + syminfo_data->symbols[syminfo_index].addr; + } + } + + if (!state->threaded) + { + struct macho_syminfo_data **pp; + + for (pp = (struct macho_syminfo_data **) (void *) &state->syminfo_data; + *pp != NULL; + pp = &(*pp)->next); + *pp = syminfo_data; + } + else + { + while (1) + { + struct macho_syminfo_data **pp; + + pp = (struct macho_syminfo_data **) (void *) &state->syminfo_data; + + while (1) + { + struct macho_syminfo_data *p; + + p = backtrace_atomic_load_pointer (pp); + + if (p == NULL) + break; + + pp = &p->next; + } + + if (__sync_bool_compare_and_swap (pp, NULL, syminfo_data)) + break; + } + } + + strtab_view_valid = 0; // We need to keep string table around + *found_sym = 1; + ret = 1; + goto end; + } + + offset += command.cmdsize; + } + + // No symbol table here + ret = 1; + goto end; + +end: + if (symtab_view_valid) + backtrace_release_view (state, &symtab_view, error_callback, data); + if (strtab_view_valid) + backtrace_release_view (state, &strtab_view, error_callback, data); + return ret; +} + +int +macho_try_dwarf (struct backtrace_state *state, + backtrace_error_callback error_callback, + void *data, fileline *fileline_fn, uuid_t *executable_uuid, + uintptr_t base_address, uintptr_t max_image_address, + intptr_t vmslide, char *dwarf_filename, int *matched, + int *found_sym, int *found_dwarf) +{ + uuid_t dwarf_uuid; + + int ret = 0; + int dwarf_descriptor; + int dwarf_descriptor_valid = 0; + struct macho_commands_view commands_view; + int commands_view_valid = 0; + struct backtrace_view dwarf_view; + int dwarf_view_valid = 0; + size_t offset = 0; + struct found_dwarf_section dwarf_sections[DEBUG_MAX]; + uintptr_t min_dwarf_offset = 0; + uintptr_t max_dwarf_offset = 0; + uint32_t i = 0; + uint32_t j = 0; + int k = 0; + + *matched = 0; + *found_sym = 0; + *found_dwarf = 0; + + if ((dwarf_descriptor = backtrace_open (dwarf_filename, error_callback, + data, NULL)) == 0) + goto end; + dwarf_descriptor_valid = 1; + + int incompatible; + if (!macho_get_commands (state, dwarf_descriptor, error_callback, data, + &commands_view, &incompatible)) + { + // Failing to read the header here is fine, because this dSYM may be + // for a different architecture + if (incompatible) + { + ret = 1; + } + goto end; + } + commands_view_valid = 1; + + // Get dSYM UUID and compare + if (!macho_get_uuid (state, dwarf_descriptor, error_callback, data, + &commands_view, &dwarf_uuid)) + { + error_callback (data, "dSYM file is missing an identifying uuid", 0); + goto end; + } + if (memcmp (executable_uuid, &dwarf_uuid, sizeof (uuid_t)) != 0) + { + // DWARF doesn't belong to desired executable + ret = 1; + goto end; + } + + *matched = 1; + + // Read symbol table + if (!macho_add_symtab (state, error_callback, data, dwarf_descriptor, + &commands_view, base_address, max_image_address, + vmslide, found_sym)) + goto end; + + // Get DWARF sections + + memset (dwarf_sections, 0, sizeof (dwarf_sections)); + offset = 0; + for (i = 0; i < commands_view.commands_count; i++) + { + 
const struct load_command *raw_command; + struct load_command command; + + if (offset + sizeof (struct load_command) + > commands_view.commands_total_size) + { + error_callback (data, + "dSYM file contains out of range command offset", 0); + goto end; + } + + raw_command = commands_view.view.data + offset; + command.cmd = macho_file_to_host_u32 (commands_view.bytes_swapped, + raw_command->cmd); + command.cmdsize = macho_file_to_host_u32 (commands_view.bytes_swapped, + raw_command->cmdsize); + + if (command.cmd == LC_SEGMENT_NATIVE) + { + uint32_t section_count; + size_t section_offset; + const segment_command_native_t *raw_segment; + + if (offset + sizeof (segment_command_native_t) + > commands_view.commands_total_size) + { + error_callback (data, + "dSYM file contains out of range command offset", + 0); + goto end; + } + + raw_segment = (const segment_command_native_t *) raw_command; + + if (strncmp (raw_segment->segname, "__DWARF", + sizeof (raw_segment->segname)) == 0) + { + section_count = macho_file_to_host_u32 ( + commands_view.bytes_swapped, + raw_segment->nsects); + + section_offset = offset + sizeof (segment_command_native_t); + + // Search sections for relevant DWARF section names + for (j = 0; j < section_count; j++) + { + const section_native_t *raw_section; + + if (section_offset + sizeof (section_native_t) > + commands_view.commands_total_size) + { + error_callback (data, + "dSYM file contains out of range command offset", + 0); + goto end; + } + + raw_section = commands_view.view.data + section_offset; + + for (k = 0; k < DEBUG_MAX; k++) + { + uintptr_t dwarf_section_end; + + if (strncmp (raw_section->sectname, + debug_section_names[k], + sizeof (raw_section->sectname)) == 0) + { + *found_dwarf = 1; + + dwarf_sections[k].file_offset = + macho_file_to_host_u32 ( + commands_view.bytes_swapped, + raw_section->offset); + dwarf_sections[k].file_size = + macho_file_to_host_usize ( + commands_view.bytes_swapped, + raw_section->size); + + if (min_dwarf_offset == 0 || + dwarf_sections[k].file_offset < + min_dwarf_offset) + min_dwarf_offset = dwarf_sections[k].file_offset; + + dwarf_section_end = + dwarf_sections[k].file_offset + + dwarf_sections[k].file_size; + if (dwarf_section_end > max_dwarf_offset) + max_dwarf_offset = dwarf_section_end; + + break; + } + } + + section_offset += sizeof (section_native_t); + } + + break; + } + } + + offset += command.cmdsize; + } + + if (!*found_dwarf) + { + // No DWARF in this file + ret = 1; + goto end; + } + + if (!macho_get_view (state, dwarf_descriptor, (off_t) min_dwarf_offset, + max_dwarf_offset - min_dwarf_offset, error_callback, + data, &commands_view, &dwarf_view)) + goto end; + dwarf_view_valid = 1; + + for (i = 0; i < DEBUG_MAX; i++) + { + if (dwarf_sections[i].file_offset == 0) + dwarf_sections[i].data = NULL; + else + dwarf_sections[i].data = + dwarf_view.data + dwarf_sections[i].file_offset - min_dwarf_offset; + } + + if (!backtrace_dwarf_add (state, vmslide, + dwarf_sections[DEBUG_INFO].data, + dwarf_sections[DEBUG_INFO].file_size, + dwarf_sections[DEBUG_LINE].data, + dwarf_sections[DEBUG_LINE].file_size, + dwarf_sections[DEBUG_ABBREV].data, + dwarf_sections[DEBUG_ABBREV].file_size, + dwarf_sections[DEBUG_RANGES].data, + dwarf_sections[DEBUG_RANGES].file_size, + dwarf_sections[DEBUG_STR].data, + dwarf_sections[DEBUG_STR].file_size, + ((__DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN) + ^ commands_view.bytes_swapped), + error_callback, data, fileline_fn)) + goto end; + + // Don't release the DWARF view because it is still in use + 
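// Clearing the *_valid flags below keeps the cleanup code at `end' from + // closing the descriptor or releasing the view. +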
dwarf_descriptor_valid = 0; + dwarf_view_valid = 0; + ret = 1; + +end: + if (dwarf_descriptor_valid) + backtrace_close (dwarf_descriptor, error_callback, data); + if (commands_view_valid) + backtrace_release_view (state, &commands_view.view, error_callback, data); + if (dwarf_view_valid) + backtrace_release_view (state, &dwarf_view, error_callback, data); + return ret; +} + +int +macho_try_dsym (struct backtrace_state *state, + backtrace_error_callback error_callback, + void *data, fileline *fileline_fn, uuid_t *executable_uuid, + uintptr_t base_address, uintptr_t max_image_address, + intptr_t vmslide, char *dsym_filename, int *matched, + int *found_sym, int *found_dwarf) +{ + int ret = 0; + char dwarf_image_dir_path[PATH_MAX]; + DIR *dwarf_image_dir; + int dwarf_image_dir_valid = 0; + struct dirent *directory_entry; + char dwarf_filename[PATH_MAX]; + int dwarf_matched; + int dwarf_had_sym; + int dwarf_had_dwarf; + + *matched = 0; + *found_sym = 0; + *found_dwarf = 0; + + strncpy (dwarf_image_dir_path, dsym_filename, PATH_MAX); + strncat (dwarf_image_dir_path, "/Contents/Resources/DWARF", PATH_MAX); + + if (!(dwarf_image_dir = opendir (dwarf_image_dir_path))) + { + error_callback (data, "could not open DWARF directory in dSYM", + 0); + goto end; + } + dwarf_image_dir_valid = 1; + + while ((directory_entry = readdir (dwarf_image_dir))) + { + if (directory_entry->d_type != DT_REG) + continue; + + strncpy (dwarf_filename, dwarf_image_dir_path, PATH_MAX); + strncat (dwarf_filename, "/", PATH_MAX); + strncat (dwarf_filename, directory_entry->d_name, PATH_MAX); + + if (!macho_try_dwarf (state, error_callback, data, fileline_fn, + executable_uuid, base_address, max_image_address, + vmslide, dwarf_filename, + &dwarf_matched, &dwarf_had_sym, &dwarf_had_dwarf)) + goto end; + + if (dwarf_matched) + { + *matched = 1; + *found_sym = dwarf_had_sym; + *found_dwarf = dwarf_had_dwarf; + ret = 1; + goto end; + } + } + + // No matching DWARF in this dSYM + ret = 1; + goto end; + +end: + if (dwarf_image_dir_valid) + closedir (dwarf_image_dir); + return ret; +} + +int +macho_add (struct backtrace_state *state, + backtrace_error_callback error_callback, void *data, int descriptor, + const char *filename, fileline *fileline_fn, intptr_t vmslide, + int *found_sym, int *found_dwarf) +{ + uuid_t image_uuid; + uintptr_t image_file_base_address; + uintptr_t image_file_max_address; + uintptr_t image_actual_base_address = 0; + uintptr_t image_actual_max_address = 0; + + int ret = 0; + struct macho_commands_view commands_view; + int commands_view_valid = 0; + char executable_dirname[PATH_MAX]; + size_t filename_len; + DIR *executable_dir = NULL; + int executable_dir_valid = 0; + struct dirent *directory_entry; + char dsym_full_path[PATH_MAX]; + static const char *extension; + size_t extension_len; + ssize_t i; + + *found_sym = 0; + *found_dwarf = 0; + + // Find Mach-O commands list + int incompatible; + if (!macho_get_commands (state, descriptor, error_callback, data, + &commands_view, &incompatible)) + goto end; + commands_view_valid = 1; + + // First we need to get the uuid of our file so we can hunt down the correct + // dSYM + if (!macho_get_uuid (state, descriptor, error_callback, data, &commands_view, + &image_uuid)) + goto end; + + // Now we need to find the in memory base address. 
Step one is to find out + // what the executable thinks the base address is + if (!macho_get_addr_range (state, descriptor, error_callback, data, + &commands_view, + &image_file_base_address, + &image_file_max_address)) + goto end; + + image_actual_base_address = + image_file_base_address + vmslide; + image_actual_max_address = + image_file_max_address + vmslide; + + if (image_actual_base_address == 0) + { + error_callback (data, "executable file is not loaded", 0); + goto end; + } + + // Look for dSYM in our executable's directory + strncpy (executable_dirname, filename, PATH_MAX); + filename_len = strlen (executable_dirname); + for (i = filename_len - 1; i >= 0; i--) + { + if (executable_dirname[i] == '/') + { + executable_dirname[i] = '\0'; + break; + } + else if (i == 0) + { + executable_dirname[0] = '.'; + executable_dirname[1] = '\0'; + break; + } + } + + if (!(executable_dir = opendir (executable_dirname))) + { + error_callback (data, "could not open directory containing executable", + 0); + goto end; + } + executable_dir_valid = 1; + + extension = ".dSYM"; + extension_len = strlen (extension); + while ((directory_entry = readdir (executable_dir))) + { + if (directory_entry->d_namlen < extension_len) + continue; + if (strncasecmp (directory_entry->d_name + directory_entry->d_namlen + - extension_len, extension, extension_len) == 0) + { + int matched; + int dsym_had_sym; + int dsym_had_dwarf; + + // Found a dSYM + strncpy (dsym_full_path, executable_dirname, PATH_MAX); + strncat (dsym_full_path, "/", PATH_MAX); + strncat (dsym_full_path, directory_entry->d_name, PATH_MAX); + + if (!macho_try_dsym (state, error_callback, data, + fileline_fn, &image_uuid, + image_actual_base_address, + image_actual_max_address, vmslide, + dsym_full_path, + &matched, &dsym_had_sym, &dsym_had_dwarf)) + goto end; + + if (matched) + { + *found_sym = dsym_had_sym; + *found_dwarf = dsym_had_dwarf; + ret = 1; + goto end; + } + } + } + + // No matching dSYM + ret = 1; + goto end; + +end: + if (commands_view_valid) + backtrace_release_view (state, &commands_view.view, error_callback, + data); + if (executable_dir_valid) + closedir (executable_dir); + return ret; +} + +static int +macho_symbol_search (const void *vkey, const void *ventry) +{ + const uintptr_t *key = (const uintptr_t *) vkey; + const struct macho_symbol *entry = (const struct macho_symbol *) ventry; + uintptr_t addr; + + addr = *key; + if (addr < entry->addr) + return -1; + else if (addr >= entry->addr + entry->size) + return 1; + else + return 0; +} + +static void +macho_syminfo (struct backtrace_state *state, + uintptr_t addr, + backtrace_syminfo_callback callback, + backtrace_error_callback error_callback ATTRIBUTE_UNUSED, + void *data) +{ + struct macho_syminfo_data *edata; + struct macho_symbol *sym = NULL; + + if (!state->threaded) + { + for (edata = (struct macho_syminfo_data *) state->syminfo_data; + edata != NULL; + edata = edata->next) + { + if (addr >= edata->min_addr && addr <= edata->max_addr) + { + sym = ((struct macho_symbol *) + bsearch (&addr, edata->symbols, edata->symbol_count, + sizeof (struct macho_symbol), macho_symbol_search)); + if (sym != NULL) + break; + } + } + } + else + { + struct macho_syminfo_data **pp; + + pp = (struct macho_syminfo_data **) (void *) &state->syminfo_data; + while (1) + { + edata = backtrace_atomic_load_pointer (pp); + if (edata == NULL) + break; + + if (addr >= edata->min_addr && addr <= edata->max_addr) + { + sym = ((struct macho_symbol *) + bsearch (&addr, edata->symbols, edata->symbol_count, + 
sizeof (struct macho_symbol), macho_symbol_search)); + if (sym != NULL) + break; + } + + pp = &edata->next; + } + } + + if (sym == NULL) + callback (data, addr, NULL, 0, 0); + else + callback (data, addr, sym->name, sym->addr, sym->size); +} + + +static int +macho_nodebug (struct backtrace_state *state ATTRIBUTE_UNUSED, + uintptr_t pc ATTRIBUTE_UNUSED, + backtrace_full_callback callback ATTRIBUTE_UNUSED, + backtrace_error_callback error_callback, void *data) +{ + error_callback (data, "no debug info in Mach-O executable", -1); + return 0; +} + +static void +macho_nosyms (struct backtrace_state *state ATTRIBUTE_UNUSED, + uintptr_t addr ATTRIBUTE_UNUSED, + backtrace_syminfo_callback callback ATTRIBUTE_UNUSED, + backtrace_error_callback error_callback, void *data) +{ + error_callback (data, "no symbol table in Mach-O executable", -1); +} + +int +backtrace_initialize (struct backtrace_state *state, int descriptor, + backtrace_error_callback error_callback, + void *data, fileline *fileline_fn) +{ + int ret; + fileline macho_fileline_fn = macho_nodebug; + int found_sym = 0; + int found_dwarf = 0; + uint32_t i = 0; + uint32_t loaded_image_count; + + // Add all loaded images + loaded_image_count = _dyld_image_count (); + for (i = 0; i < loaded_image_count; i++) + { + int current_found_sym; + int current_found_dwarf; + int current_descriptor; + intptr_t current_vmslide; + const char *current_name; + + current_vmslide = _dyld_get_image_vmaddr_slide (i); + current_name = _dyld_get_image_name (i); + + if (current_name == NULL || (i != 0 && current_vmslide == 0)) + continue; + + if (!(current_descriptor = + backtrace_open (current_name, error_callback, data, NULL))) + { + continue; + } + + if (macho_add (state, error_callback, data, current_descriptor, + current_name, &macho_fileline_fn, current_vmslide, + ¤t_found_sym, ¤t_found_dwarf)) + { + found_sym = found_sym || current_found_sym; + found_dwarf = found_dwarf || current_found_dwarf; + } + + backtrace_close (current_descriptor, error_callback, data); + } + + if (!state->threaded) + { + if (found_sym) + state->syminfo_fn = macho_syminfo; + else if (state->syminfo_fn == NULL) + state->syminfo_fn = macho_nosyms; + } + else + { + if (found_sym) + backtrace_atomic_store_pointer (&state->syminfo_fn, macho_syminfo); + else + (void) __sync_bool_compare_and_swap (&state->syminfo_fn, NULL, + macho_nosyms); + } + + if (!state->threaded) + { + if (state->fileline_fn == NULL || state->fileline_fn == macho_nodebug) + *fileline_fn = macho_fileline_fn; + } + else + { + fileline current_fn; + + current_fn = backtrace_atomic_load_pointer (&state->fileline_fn); + if (current_fn == NULL || current_fn == macho_nodebug) + *fileline_fn = macho_fileline_fn; + } + + return 1; +} + diff --git a/src/libcompiler_builtins/Cargo.toml b/src/libcompiler_builtins/Cargo.toml index 070d8f8c7a..b7743c2ce0 100644 --- a/src/libcompiler_builtins/Cargo.toml +++ b/src/libcompiler_builtins/Cargo.toml @@ -8,16 +8,15 @@ version = "0.1.0" cast = { version = "0.2.2", features = ["x128"], optional = true } rand = { version = "0.3.15", optional = true } -[build-dependencies.gcc] +[build-dependencies.cc] optional = true -version = "0.3.36" +version = "1.0" [features] -c = ["gcc"] +c = ["cc"] compiler-builtins = [] default = ["compiler-builtins"] mem = [] -rustbuild = ["compiler-builtins"] mangled-names = [] # generate tests diff --git a/src/libcompiler_builtins/README.md b/src/libcompiler_builtins/README.md index a0e2a29ed1..6356996434 100644 --- a/src/libcompiler_builtins/README.md +++ 
b/src/libcompiler_builtins/README.md @@ -51,7 +51,7 @@ features = ["c"] ## Contributing -1. Pick one or more intrinsics from the [pending list][#progress]. +1. Pick one or more intrinsics from the [pending list](#progress). 2. Fork this repository 3. Port the intrinsic(s) and their corresponding [unit tests][1] from their [C implementation][2] to Rust. @@ -159,8 +159,8 @@ features = ["c"] - [x] floatunsisf.c - [ ] i386/ashldi3.S - [ ] i386/ashrdi3.S -- [ ] i386/chkstk.S -- [ ] i386/chkstk2.S +- [x] i386/chkstk.S +- [x] i386/chkstk2.S - [ ] i386/divdi3.S - [ ] i386/lshrdi3.S - [ ] i386/moddi3.S diff --git a/src/libcompiler_builtins/appveyor.yml b/src/libcompiler_builtins/appveyor.yml index e51ddab740..bb78ad36a2 100644 --- a/src/libcompiler_builtins/appveyor.yml +++ b/src/libcompiler_builtins/appveyor.yml @@ -14,8 +14,8 @@ environment: # `rust_eh_unwind_resume` in the debug LTO builds that aren't optimized out, # so we skip that test for now. Would be great to not skip it! - TARGET: i686-pc-windows-gnu - MINGW_URL: https://s3.amazonaws.com/rust-lang-ci - MINGW_ARCHIVE: i686-4.9.2-release-win32-dwarf-rt_v4-rev4.7z + MINGW_URL: https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror + MINGW_ARCHIVE: i686-6.3.0-release-win32-dwarf-rt_v5-rev1.7z MINGW_DIR: mingw32 DEBUG_LTO_BUILD_DOESNT_WORK: 1 - TARGET: x86_64-pc-windows-gnu diff --git a/src/libcompiler_builtins/build.rs b/src/libcompiler_builtins/build.rs index 25cc5204d2..1895201e8f 100644 --- a/src/libcompiler_builtins/build.rs +++ b/src/libcompiler_builtins/build.rs @@ -3957,7 +3957,7 @@ macro_rules! panic { #[cfg(feature = "c")] mod c { - extern crate gcc; + extern crate cc; use std::collections::BTreeMap; use std::env; @@ -4008,7 +4008,9 @@ mod c { let target_os = env::var("CARGO_CFG_TARGET_OS").unwrap(); let target_vendor = env::var("CARGO_CFG_TARGET_VENDOR").unwrap(); - let cfg = &mut gcc::Config::new(); + let cfg = &mut cc::Build::new(); + + cfg.warnings(false); if target_env == "msvc" { // Don't pull in extra libraries on MSVC @@ -4104,11 +4106,15 @@ mod c { // also needs to satisfy intrinsics that jemalloc or C in general may // need, so include a few more that aren't typically needed by // LLVM/Rust. - sources.extend(&[ - "ffsdi2.c", - ]); + if cfg!(feature = "rustbuild") { + sources.extend(&[ + "ffsdi2.c", + ]); + } - if target_os != "ios" { + // On iOS and 32-bit OSX these are all just empty intrinsics, no need to + // include them. + if target_os != "ios" && (target_vendor != "apple" || target_arch != "x86") { sources.extend( &[ "absvti2.c", @@ -4172,8 +4178,6 @@ mod c { &[ "i386/ashldi3.S", "i386/ashrdi3.S", - "i386/chkstk.S", - "i386/chkstk2.S", "i386/divdi3.S", "i386/floatdidf.S", "i386/floatdisf.S", @@ -4357,7 +4361,9 @@ mod c { sources.remove(&["aeabi_cdcmp", "aeabi_cfcmp"]); } - let root = if env::var_os("CARGO_FEATURE_RUSTBUILD").is_some() { + // When compiling in rustbuild (the rust-lang/rust repo) this build + // script runs from a directory other than this root directory. + let root = if cfg!(feature = "rustbuild") { Path::new("../../libcompiler_builtins") } else { Path::new(".") diff --git a/src/libcompiler_builtins/src/float/add.rs b/src/libcompiler_builtins/src/float/add.rs index a4b763b7e7..d355cb1ff2 100644 --- a/src/libcompiler_builtins/src/float/add.rs +++ b/src/libcompiler_builtins/src/float/add.rs @@ -1,196 +1,196 @@ -use core::num::Wrapping; - +use int::{Int, CastInto}; use float::Float; /// Returns `a + b` -macro_rules! 
add { - ($a:expr, $b:expr, $ty:ty) => ({ - let a = $a; - let b = $b; - let one = Wrapping(1 as <$ty as Float>::Int); - let zero = Wrapping(0 as <$ty as Float>::Int); - - let bits = Wrapping(<$ty>::bits() as <$ty as Float>::Int); - let significand_bits = Wrapping(<$ty>::significand_bits() as <$ty as Float>::Int); - let exponent_bits = bits - significand_bits - one; - let max_exponent = (one << exponent_bits.0 as usize) - one; - - let implicit_bit = one << significand_bits.0 as usize; - let significand_mask = implicit_bit - one; - let sign_bit = one << (significand_bits + exponent_bits).0 as usize; - let abs_mask = sign_bit - one; - let exponent_mask = abs_mask ^ significand_mask; - let inf_rep = exponent_mask; - let quiet_bit = implicit_bit >> 1; - let qnan_rep = exponent_mask | quiet_bit; - - let mut a_rep = Wrapping(a.repr()); - let mut b_rep = Wrapping(b.repr()); - let a_abs = a_rep & abs_mask; - let b_abs = b_rep & abs_mask; - - // Detect if a or b is zero, infinity, or NaN. - if a_abs - one >= inf_rep - one || - b_abs - one >= inf_rep - one { - // NaN + anything = qNaN - if a_abs > inf_rep { - return <$ty as Float>::from_repr((a_abs | quiet_bit).0); - } - // anything + NaN = qNaN - if b_abs > inf_rep { - return <$ty as Float>::from_repr((b_abs | quiet_bit).0); - } - - if a_abs == inf_rep { - // +/-infinity + -/+infinity = qNaN - if (a.repr() ^ b.repr()) == sign_bit.0 { - return <$ty as Float>::from_repr(qnan_rep.0); - } else { - // +/-infinity + anything remaining = +/- infinity - return a; - } - } +fn add(a: F, b: F) -> F where + u32: CastInto, + F::Int: CastInto, + i32: CastInto, + F::Int: CastInto, +{ + let one = F::Int::ONE; + let zero = F::Int::ZERO; + + let bits = F::BITS.cast(); + let significand_bits = F::SIGNIFICAND_BITS; + let max_exponent = F::EXPONENT_MAX; + + let implicit_bit = F::IMPLICIT_BIT; + let significand_mask = F::SIGNIFICAND_MASK; + let sign_bit = F::SIGN_MASK as F::Int; + let abs_mask = sign_bit - one; + let exponent_mask = F::EXPONENT_MASK; + let inf_rep = exponent_mask; + let quiet_bit = implicit_bit >> 1; + let qnan_rep = exponent_mask | quiet_bit; + + let mut a_rep = a.repr(); + let mut b_rep = b.repr(); + let a_abs = a_rep & abs_mask; + let b_abs = b_rep & abs_mask; + + // Detect if a or b is zero, infinity, or NaN. + if a_abs.wrapping_sub(one) >= inf_rep - one || + b_abs.wrapping_sub(one) >= inf_rep - one { + // NaN + anything = qNaN + if a_abs > inf_rep { + return F::from_repr(a_abs | quiet_bit); + } + // anything + NaN = qNaN + if b_abs > inf_rep { + return F::from_repr(b_abs | quiet_bit); + } - // anything remaining + +/-infinity = +/-infinity - if b_abs == inf_rep { - return b; + if a_abs == inf_rep { + // +/-infinity + -/+infinity = qNaN + if (a.repr() ^ b.repr()) == sign_bit { + return F::from_repr(qnan_rep); + } else { + // +/-infinity + anything remaining = +/- infinity + return a; } + } - // zero + anything = anything - if a_abs.0 == 0 { - // but we need to get the sign right for zero + zero - if b_abs.0 == 0 { - return <$ty as Float>::from_repr(a.repr() & b.repr()); - } else { - return b; - } - } + // anything remaining + +/-infinity = +/-infinity + if b_abs == inf_rep { + return b; + } - // anything + zero = anything - if b_abs.0 == 0 { - return a; + // zero + anything = anything + if a_abs == Int::ZERO { + // but we need to get the sign right for zero + zero + if b_abs == Int::ZERO { + return F::from_repr(a.repr() & b.repr()); + } else { + return b; } } - // Swap a and b if necessary so that a has the larger absolute value. 
- if b_abs > a_abs { - // Don't use mem::swap because it may generate references to memcpy in unoptimized code. - let tmp = a_rep; - a_rep = b_rep; - b_rep = tmp; + // anything + zero = anything + if b_abs == Int::ZERO { + return a; } + } + + // Swap a and b if necessary so that a has the larger absolute value. + if b_abs > a_abs { + // Don't use mem::swap because it may generate references to memcpy in unoptimized code. + let tmp = a_rep; + a_rep = b_rep; + b_rep = tmp; + } + + // Extract the exponent and significand from the (possibly swapped) a and b. + let mut a_exponent: i32 = ((a_rep & exponent_mask) >> significand_bits).cast(); + let mut b_exponent: i32 = ((b_rep & exponent_mask) >> significand_bits).cast(); + let mut a_significand = a_rep & significand_mask; + let mut b_significand = b_rep & significand_mask; + + // normalize any denormals, and adjust the exponent accordingly. + if a_exponent == 0 { + let (exponent, significand) = F::normalize(a_significand); + a_exponent = exponent; + a_significand = significand; + } + if b_exponent == 0 { + let (exponent, significand) = F::normalize(b_significand); + b_exponent = exponent; + b_significand = significand; + } - // Extract the exponent and significand from the (possibly swapped) a and b. - let mut a_exponent = Wrapping((a_rep >> significand_bits.0 as usize & max_exponent).0 as i32); - let mut b_exponent = Wrapping((b_rep >> significand_bits.0 as usize & max_exponent).0 as i32); - let mut a_significand = a_rep & significand_mask; - let mut b_significand = b_rep & significand_mask; - - // normalize any denormals, and adjust the exponent accordingly. - if a_exponent.0 == 0 { - let (exponent, significand) = <$ty>::normalize(a_significand.0); - a_exponent = Wrapping(exponent); - a_significand = Wrapping(significand); + // The sign of the result is the sign of the larger operand, a. If they + // have opposite signs, we are performing a subtraction; otherwise addition. + let result_sign = a_rep & sign_bit; + let subtraction = ((a_rep ^ b_rep) & sign_bit) != zero; + + // Shift the significands to give us round, guard and sticky, and or in the + // implicit significand bit. (If we fell through from the denormal path it + // was already set by normalize(), but setting it twice won't hurt + // anything.) + a_significand = (a_significand | implicit_bit) << 3; + b_significand = (b_significand | implicit_bit) << 3; + + // Shift the significand of b by the difference in exponents, with a sticky + // bottom bit to get rounding correct. + let align = a_exponent.wrapping_sub(b_exponent).cast(); + if align != Int::ZERO { + if align < bits { + let sticky = F::Int::from_bool(b_significand << bits.wrapping_sub(align).cast() != Int::ZERO); + b_significand = (b_significand >> align.cast()) | sticky; + } else { + b_significand = one; // sticky; b is known to be non-zero. } - if b_exponent.0 == 0 { - let (exponent, significand) = <$ty>::normalize(b_significand.0); - b_exponent = Wrapping(exponent); - b_significand = Wrapping(significand); + } + if subtraction { + a_significand = a_significand.wrapping_sub(b_significand); + // If a == -b, return +zero. + if a_significand == Int::ZERO { + return F::from_repr(Int::ZERO); } - // The sign of the result is the sign of the larger operand, a. If they - // have opposite signs, we are performing a subtraction; otherwise addition. 
- let result_sign = a_rep & sign_bit; - let subtraction = ((a_rep ^ b_rep) & sign_bit) != zero; - - // Shift the significands to give us round, guard and sticky, and or in the - // implicit significand bit. (If we fell through from the denormal path it - // was already set by normalize(), but setting it twice won't hurt - // anything.) - a_significand = (a_significand | implicit_bit) << 3; - b_significand = (b_significand | implicit_bit) << 3; - - // Shift the significand of b by the difference in exponents, with a sticky - // bottom bit to get rounding correct. - let align = Wrapping((a_exponent - b_exponent).0 as <$ty as Float>::Int); - if align.0 != 0 { - if align < bits { - let sticky = ((b_significand << (bits - align).0 as usize).0 != 0) as <$ty as Float>::Int; - b_significand = (b_significand >> align.0 as usize) | Wrapping(sticky); - } else { - b_significand = one; // sticky; b is known to be non-zero. - } + // If partial cancellation occured, we need to left-shift the result + // and adjust the exponent: + if a_significand < implicit_bit << 3 { + let shift = a_significand.leading_zeros() as i32 + - (implicit_bit << 3).leading_zeros() as i32; + a_significand <<= shift; + a_exponent -= shift; } - if subtraction { - a_significand -= b_significand; - // If a == -b, return +zero. - if a_significand.0 == 0 { - return <$ty as Float>::from_repr(0); - } - - // If partial cancellation occured, we need to left-shift the result - // and adjust the exponent: - if a_significand < implicit_bit << 3 { - let shift = a_significand.0.leading_zeros() as i32 - - (implicit_bit << 3).0.leading_zeros() as i32; - a_significand <<= shift as usize; - a_exponent -= Wrapping(shift); - } - } else /* addition */ { - a_significand += b_significand; - - // If the addition carried up, we need to right-shift the result and - // adjust the exponent: - if (a_significand & implicit_bit << 4).0 != 0 { - let sticky = ((a_significand & one).0 != 0) as <$ty as Float>::Int; - a_significand = a_significand >> 1 | Wrapping(sticky); - a_exponent += Wrapping(1); - } + } else /* addition */ { + a_significand += b_significand; + + // If the addition carried up, we need to right-shift the result and + // adjust the exponent: + if a_significand & implicit_bit << 4 != Int::ZERO { + let sticky = F::Int::from_bool(a_significand & one != Int::ZERO); + a_significand = a_significand >> 1 | sticky; + a_exponent += 1; } + } - // If we have overflowed the type, return +/- infinity: - if a_exponent >= Wrapping(max_exponent.0 as i32) { - return <$ty>::from_repr((inf_rep | result_sign).0); - } + // If we have overflowed the type, return +/- infinity: + if a_exponent >= max_exponent as i32 { + return F::from_repr(inf_rep | result_sign); + } - if a_exponent.0 <= 0 { - // Result is denormal before rounding; the exponent is zero and we - // need to shift the significand. - let shift = Wrapping((Wrapping(1) - a_exponent).0 as <$ty as Float>::Int); - let sticky = ((a_significand << (bits - shift).0 as usize).0 != 0) as <$ty as Float>::Int; - a_significand = a_significand >> shift.0 as usize | Wrapping(sticky); - a_exponent = Wrapping(0); - } + if a_exponent <= 0 { + // Result is denormal before rounding; the exponent is zero and we + // need to shift the significand. + let shift = (1 - a_exponent).cast(); + let sticky = F::Int::from_bool((a_significand << bits.wrapping_sub(shift).cast()) != Int::ZERO); + a_significand = a_significand >> shift.cast() | sticky; + a_exponent = 0; + } - // Low three bits are round, guard, and sticky. 
- let round_guard_sticky: i32 = (a_significand.0 & 0x7) as i32; + // Low three bits are round, guard, and sticky. + let a_significand_i32: i32 = a_significand.cast(); + let round_guard_sticky: i32 = a_significand_i32 & 0x7; - // Shift the significand into place, and mask off the implicit bit. - let mut result = a_significand >> 3 & significand_mask; + // Shift the significand into place, and mask off the implicit bit. + let mut result = a_significand >> 3 & significand_mask; - // Insert the exponent and sign. - result |= Wrapping(a_exponent.0 as <$ty as Float>::Int) << significand_bits.0 as usize; - result |= result_sign; + // Insert the exponent and sign. + result |= a_exponent.cast() << significand_bits; + result |= result_sign; - // Final rounding. The result may overflow to infinity, but that is the - // correct result in that case. - if round_guard_sticky > 0x4 { result += one; } - if round_guard_sticky == 0x4 { result += result & one; } + // Final rounding. The result may overflow to infinity, but that is the + // correct result in that case. + if round_guard_sticky > 0x4 { result += one; } + if round_guard_sticky == 0x4 { result += result & one; } - <$ty>::from_repr(result.0) - }) + F::from_repr(result) } intrinsics! { #[aapcs_on_arm] #[arm_aeabi_alias = __aeabi_fadd] pub extern "C" fn __addsf3(a: f32, b: f32) -> f32 { - add!(a, b, f32) + add(a, b) } #[aapcs_on_arm] #[arm_aeabi_alias = __aeabi_dadd] pub extern "C" fn __adddf3(a: f64, b: f64) -> f64 { - add!(a, b, f64) + add(a, b) } } diff --git a/src/libcompiler_builtins/src/float/conv.rs b/src/libcompiler_builtins/src/float/conv.rs index f2fd01d37b..33644ce56a 100644 --- a/src/libcompiler_builtins/src/float/conv.rs +++ b/src/libcompiler_builtins/src/float/conv.rs @@ -8,10 +8,10 @@ macro_rules! int_to_float { return 0.0 } - let mant_dig = <$fty>::significand_bits() + 1; - let exponent_bias = <$fty>::exponent_bias(); + let mant_dig = <$fty>::SIGNIFICAND_BITS + 1; + let exponent_bias = <$fty>::EXPONENT_BIAS; - let n = <$ity>::bits(); + let n = <$ity>::BITS; let (s, a) = i.extract_sign(); let mut a = a; @@ -21,7 +21,7 @@ macro_rules! int_to_float { // exponent let mut e = sd - 1; - if <$ity>::bits() < mant_dig { + if <$ity>::BITS < mant_dig { return <$fty>::from_parts(s, (e + exponent_bias) as <$fty as Float>::Int, (a as <$fty as Float>::Int) << (mant_dig - e - 1)) @@ -142,12 +142,12 @@ macro_rules! float_to_int { let f = $f; let fixint_min = <$ity>::min_value(); let fixint_max = <$ity>::max_value(); - let fixint_bits = <$ity>::bits() as usize; + let fixint_bits = <$ity>::BITS as usize; let fixint_unsigned = fixint_min == 0; - let sign_bit = <$fty>::sign_mask(); - let significand_bits = <$fty>::significand_bits() as usize; - let exponent_bias = <$fty>::exponent_bias() as usize; + let sign_bit = <$fty>::SIGN_MASK; + let significand_bits = <$fty>::SIGNIFICAND_BITS as usize; + let exponent_bias = <$fty>::EXPONENT_BIAS as usize; //let exponent_max = <$fty>::exponent_max() as usize; // Break a into sign, exponent, significand @@ -157,7 +157,7 @@ macro_rules! 
float_to_int { // this is used to work around -1 not being available for unsigned let sign = if (a_rep & sign_bit) == 0 { Sign::Positive } else { Sign::Negative }; let mut exponent = (a_abs >> significand_bits) as usize; - let significand = (a_abs & <$fty>::significand_mask()) | <$fty>::implicit_bit(); + let significand = (a_abs & <$fty>::SIGNIFICAND_MASK) | <$fty>::IMPLICIT_BIT; // if < 1 or unsigned & negative if exponent < exponent_bias || diff --git a/src/libcompiler_builtins/src/float/mod.rs b/src/libcompiler_builtins/src/float/mod.rs index 46e3e5d958..23aef32e5d 100644 --- a/src/libcompiler_builtins/src/float/mod.rs +++ b/src/libcompiler_builtins/src/float/mod.rs @@ -1,4 +1,7 @@ use core::mem; +use core::ops; + +use super::int::Int; pub mod conv; pub mod add; @@ -6,41 +9,49 @@ pub mod pow; pub mod sub; /// Trait for some basic operations on floats -pub trait Float: Sized + Copy { +pub trait Float: + Copy + + PartialEq + + PartialOrd + + ops::AddAssign + + ops::MulAssign + + ops::Add + + ops::Sub + + ops::Div + + ops::Rem + +{ /// A uint of the same with as the float - type Int; + type Int: Int; - /// Returns the bitwidth of the float type - fn bits() -> u32; + const ZERO: Self; + const ONE: Self; - /// Returns the bitwidth of the significand - fn significand_bits() -> u32; + /// The bitwidth of the float type + const BITS: u32; - /// Returns the bitwidth of the exponent - fn exponent_bits() -> u32 { - Self::bits() - Self::significand_bits() - 1 - } - /// Returns the maximum value of the exponent - fn exponent_max() -> u32 { - (1 << Self::exponent_bits()) - 1 - } + /// The bitwidth of the significand + const SIGNIFICAND_BITS: u32; - /// Returns the exponent bias value - fn exponent_bias() -> u32 { - Self::exponent_max() >> 1 - } + /// The bitwidth of the exponent + const EXPONENT_BITS: u32 = Self::BITS - Self::SIGNIFICAND_BITS - 1; - /// Returns a mask for the sign bit - fn sign_mask() -> Self::Int; + /// The maximum value of the exponent + const EXPONENT_MAX: u32 = (1 << Self::EXPONENT_BITS) - 1; - /// Returns a mask for the significand - fn significand_mask() -> Self::Int; + /// The exponent bias value + const EXPONENT_BIAS: u32 = Self::EXPONENT_MAX >> 1; - // Returns the implicit bit of the float format - fn implicit_bit() -> Self::Int; + /// A mask for the sign bit + const SIGN_MASK: Self::Int; - /// Returns a mask for the exponent - fn exponent_mask() -> Self::Int; + /// A mask for the significand + const SIGNIFICAND_MASK: Self::Int; + + // The implicit bit of the float format + const IMPLICIT_BIT: Self::Int; + + /// A mask for the exponent + const EXPONENT_MASK: Self::Int; /// Returns `self` transmuted to `Self::Int` fn repr(self) -> Self::Int; @@ -63,94 +74,48 @@ pub trait Float: Sized + Copy { // FIXME: Some of this can be removed if RFC Issue #1424 is resolved // https://github.com/rust-lang/rfcs/issues/1424 -impl Float for f32 { - type Int = u32; - fn bits() -> u32 { - 32 - } - fn significand_bits() -> u32 { - 23 - } - fn implicit_bit() -> Self::Int { - 1 << Self::significand_bits() - } - fn sign_mask() -> Self::Int { - 1 << (Self::bits() - 1) - } - fn significand_mask() -> Self::Int { - (1 << Self::significand_bits()) - 1 - } - fn exponent_mask() -> Self::Int { - !(Self::sign_mask() | Self::significand_mask()) - } - fn repr(self) -> Self::Int { - unsafe { mem::transmute(self) } - } - #[cfg(test)] - fn eq_repr(self, rhs: Self) -> bool { - if self.is_nan() && rhs.is_nan() { - true - } else { - self.repr() == rhs.repr() - } - } - fn from_repr(a: Self::Int) -> Self { - unsafe { 
mem::transmute(a) } - } - fn from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self { - Self::from_repr(((sign as Self::Int) << (Self::bits() - 1)) | - ((exponent << Self::significand_bits()) & Self::exponent_mask()) | - (significand & Self::significand_mask())) - } - fn normalize(significand: Self::Int) -> (i32, Self::Int) { - let shift = significand.leading_zeros() - .wrapping_sub((1u32 << Self::significand_bits()).leading_zeros()); - (1i32.wrapping_sub(shift as i32), significand << shift as Self::Int) - } -} -impl Float for f64 { - type Int = u64; - fn bits() -> u32 { - 64 - } - fn significand_bits() -> u32 { - 52 - } - // Returns the implicit bit of the float format - fn implicit_bit() -> Self::Int { - 1 << Self::significand_bits() - } - fn sign_mask() -> Self::Int { - 1 << (Self::bits() - 1) - } - fn significand_mask() -> Self::Int { - (1 << Self::significand_bits()) - 1 - } - fn exponent_mask() -> Self::Int { - !(Self::sign_mask() | Self::significand_mask()) - } - fn repr(self) -> Self::Int { - unsafe { mem::transmute(self) } - } - #[cfg(test)] - fn eq_repr(self, rhs: Self) -> bool { - if self.is_nan() && rhs.is_nan() { - true - } else { - self.repr() == rhs.repr() +macro_rules! float_impl { + ($ty:ident, $ity:ident, $bits:expr, $significand_bits:expr) => { + impl Float for $ty { + type Int = $ity; + const ZERO: Self = 0.0; + const ONE: Self = 1.0; + + const BITS: u32 = $bits; + const SIGNIFICAND_BITS: u32 = $significand_bits; + + const SIGN_MASK: Self::Int = 1 << (Self::BITS - 1); + const SIGNIFICAND_MASK: Self::Int = (1 << Self::SIGNIFICAND_BITS) - 1; + const IMPLICIT_BIT: Self::Int = 1 << Self::SIGNIFICAND_BITS; + const EXPONENT_MASK: Self::Int = !(Self::SIGN_MASK | Self::SIGNIFICAND_MASK); + + fn repr(self) -> Self::Int { + unsafe { mem::transmute(self) } + } + #[cfg(test)] + fn eq_repr(self, rhs: Self) -> bool { + if self.is_nan() && rhs.is_nan() { + true + } else { + self.repr() == rhs.repr() + } + } + fn from_repr(a: Self::Int) -> Self { + unsafe { mem::transmute(a) } + } + fn from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self { + Self::from_repr(((sign as Self::Int) << (Self::BITS - 1)) | + ((exponent << Self::SIGNIFICAND_BITS) & Self::EXPONENT_MASK) | + (significand & Self::SIGNIFICAND_MASK)) + } + fn normalize(significand: Self::Int) -> (i32, Self::Int) { + let shift = significand.leading_zeros() + .wrapping_sub((Self::Int::ONE << Self::SIGNIFICAND_BITS).leading_zeros()); + (1i32.wrapping_sub(shift as i32), significand << shift as Self::Int) + } } } - fn from_repr(a: Self::Int) -> Self { - unsafe { mem::transmute(a) } - } - fn from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self { - Self::from_repr(((sign as Self::Int) << (Self::bits() - 1)) | - ((exponent << Self::significand_bits()) & Self::exponent_mask()) | - (significand & Self::significand_mask())) - } - fn normalize(significand: Self::Int) -> (i32, Self::Int) { - let shift = significand.leading_zeros() - .wrapping_sub((1u64 << Self::significand_bits()).leading_zeros()); - (1i32.wrapping_sub(shift as i32), significand << shift as Self::Int) - } } + +float_impl!(f32, u32, 32, 23); +float_impl!(f64, u64, 64, 52); diff --git a/src/libcompiler_builtins/src/float/pow.rs b/src/libcompiler_builtins/src/float/pow.rs index bc15dc0888..f879c1a1f8 100644 --- a/src/libcompiler_builtins/src/float/pow.rs +++ b/src/libcompiler_builtins/src/float/pow.rs @@ -1,11 +1,12 @@ use int::Int; +use float::Float; -/// Returns `a` raised to the power `b` -macro_rules! 
pow { - ($a: expr, $b: expr) => ({ - let (mut a, mut b) = ($a, $b); +trait Pow: Float { + /// Returns `a` raised to the power `b` + fn pow(self, mut b: i32) -> Self { + let mut a = self; let recip = b < 0; - let mut r = 1.0; + let mut r = Self::ONE; loop { if (b & 1) != 0 { r *= a; @@ -18,19 +19,22 @@ macro_rules! pow { } if recip { - 1.0 / r + Self::ONE / r } else { r } - }) + } } +impl Pow for f32 {} +impl Pow for f64 {} + intrinsics! { pub extern "C" fn __powisf2(a: f32, b: i32) -> f32 { - pow!(a, b) + a.pow(b) } pub extern "C" fn __powidf2(a: f64, b: i32) -> f64 { - pow!(a, b) + a.pow(b) } } diff --git a/src/libcompiler_builtins/src/float/sub.rs b/src/libcompiler_builtins/src/float/sub.rs index 4fa436db61..ed7dd2c0aa 100644 --- a/src/libcompiler_builtins/src/float/sub.rs +++ b/src/libcompiler_builtins/src/float/sub.rs @@ -3,11 +3,11 @@ use float::Float; intrinsics! { #[arm_aeabi_alias = __aeabi_fsub] pub extern "C" fn __subsf3(a: f32, b: f32) -> f32 { - a + f32::from_repr(b.repr() ^ f32::sign_mask()) + a + f32::from_repr(b.repr() ^ f32::SIGN_MASK) } #[arm_aeabi_alias = __aeabi_dsub] pub extern "C" fn __subdf3(a: f64, b: f64) -> f64 { - a + f64::from_repr(b.repr() ^ f64::sign_mask()) + a + f64::from_repr(b.repr() ^ f64::SIGN_MASK) } } diff --git a/src/libcompiler_builtins/src/int/mod.rs b/src/libcompiler_builtins/src/int/mod.rs index 0334a4acb8..24b27b1fae 100644 --- a/src/libcompiler_builtins/src/int/mod.rs +++ b/src/libcompiler_builtins/src/int/mod.rs @@ -23,6 +23,10 @@ pub trait Int: PartialEq + PartialOrd + ops::AddAssign + + ops::BitAndAssign + + ops::BitOrAssign + + ops::ShlAssign + + ops::ShrAssign + ops::Add + ops::Sub + ops::Div + @@ -31,7 +35,6 @@ pub trait Int: ops::BitOr + ops::BitXor + ops::BitAnd + - ops::BitAndAssign + ops::Not + { /// Type with the same width but other signedness @@ -39,11 +42,11 @@ pub trait Int: /// Unsigned version of Self type UnsignedInt: Int; - /// Returns the bitwidth of the int type - fn bits() -> u32; + /// The bitwidth of the int type + const BITS: u32; - fn zero() -> Self; - fn one() -> Self; + const ZERO: Self; + const ONE: Self; /// Extracts the sign from self and returns a tuple. /// @@ -60,14 +63,18 @@ pub trait Int: fn unsigned(self) -> Self::UnsignedInt; fn from_unsigned(unsigned: Self::UnsignedInt) -> Self; + fn from_bool(b: bool) -> Self; + // copied from primitive integers, but put in a trait fn max_value() -> Self; fn min_value() -> Self; fn wrapping_add(self, other: Self) -> Self; fn wrapping_mul(self, other: Self) -> Self; fn wrapping_sub(self, other: Self) -> Self; + fn wrapping_shl(self, other: u32) -> Self; fn aborting_div(self, other: Self) -> Self; fn aborting_rem(self, other: Self) -> Self; + fn leading_zeros(self) -> u32; } fn unwrap(t: Option) -> T { @@ -77,34 +84,15 @@ fn unwrap(t: Option) -> T { } } -macro_rules! int_impl { - ($ity:ty, $uty:ty, $bits:expr) => { - impl Int for $uty { - type OtherSign = $ity; - type UnsignedInt = $uty; +macro_rules! int_impl_common { + ($ty:ty, $bits:expr) => { + const BITS: u32 = $bits; - fn zero() -> Self { - 0 - } - - fn one() -> Self { - 1 - } - - fn bits() -> u32 { - $bits - } - - fn extract_sign(self) -> (bool, $uty) { - (false, self) - } - - fn unsigned(self) -> $uty { - self - } + const ZERO: Self = 0; + const ONE: Self = 1; - fn from_unsigned(me: $uty) -> Self { - me + fn from_bool(b: bool) -> Self { + b as $ty } fn max_value() -> Self { @@ -127,6 +115,10 @@ macro_rules! 
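The old `pow!` macro becomes a default method on a `Pow` trait; the loop itself is ordinary binary exponentiation (square and multiply). A self-contained sketch of the same loop written for plain `f64` rather than the crate's generic trait; `powi` here is an illustrative free function, not the intrinsic:

```rust
fn powi(mut a: f64, mut b: i32) -> f64 {
    let recip = b < 0;
    let mut r = 1.0;
    loop {
        // Fold `a` into the result whenever the current low bit of `b` is
        // set, then square `a` and move on to the next bit.
        if (b & 1) != 0 {
            r *= a;
        }
        b /= 2;
        if b == 0 {
            break;
        }
        a *= a;
    }
    // A negative exponent is handled by a single reciprocal at the end.
    if recip { 1.0 / r } else { r }
}

fn main() {
    assert_eq!(powi(2.0, 10), 1024.0);
    assert_eq!(powi(2.0, -2), 0.25);
    assert_eq!(powi(3.0, 0), 1.0);
    println!("ok");
}
```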
int_impl { ::wrapping_sub(self, other) } + fn wrapping_shl(self, other: u32) -> Self { + ::wrapping_shl(self, other) + } + fn aborting_div(self, other: Self) -> Self { unwrap(::checked_div(self, other)) } @@ -134,24 +126,38 @@ macro_rules! int_impl { fn aborting_rem(self, other: Self) -> Self { unwrap(::checked_rem(self, other)) } - } - impl Int for $ity { - type OtherSign = $uty; + fn leading_zeros(self) -> u32 { + ::leading_zeros(self) + } + } +} + +macro_rules! int_impl { + ($ity:ty, $uty:ty, $bits:expr) => { + impl Int for $uty { + type OtherSign = $ity; type UnsignedInt = $uty; - fn bits() -> u32 { - $bits + fn extract_sign(self) -> (bool, $uty) { + (false, self) } - fn zero() -> Self { - 0 + fn unsigned(self) -> $uty { + self } - fn one() -> Self { - 1 + fn from_unsigned(me: $uty) -> Self { + me } + int_impl_common!($uty, $bits); + } + + impl Int for $ity { + type OtherSign = $uty; + type UnsignedInt = $uty; + fn extract_sign(self) -> (bool, $uty) { if self < 0 { (true, (!(self as $uty)).wrapping_add(1)) @@ -168,33 +174,7 @@ macro_rules! int_impl { me as $ity } - fn max_value() -> Self { - ::max_value() - } - - fn min_value() -> Self { - ::min_value() - } - - fn wrapping_add(self, other: Self) -> Self { - ::wrapping_add(self, other) - } - - fn wrapping_mul(self, other: Self) -> Self { - ::wrapping_mul(self, other) - } - - fn wrapping_sub(self, other: Self) -> Self { - ::wrapping_sub(self, other) - } - - fn aborting_div(self, other: Self) -> Self { - unwrap(::checked_div(self, other)) - } - - fn aborting_rem(self, other: Self) -> Self { - unwrap(::checked_rem(self, other)) - } + int_impl_common!($ity, $bits); } } } @@ -244,3 +224,28 @@ large_int!(u64, u32, u32, 32); large_int!(i64, u32, i32, 32); large_int!(u128, u64, u64, 64); large_int!(i128, u64, i64, 64); + +/// Trait to express (possibly lossy) casting of integers +pub trait CastInto: Copy { + fn cast(self) -> T; +} + +macro_rules! 
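The `int_impl_common!` layering above works because a macro invoked inside an `impl` block may expand to associated items, so the shared method bodies are written once and spliced into both the signed and the unsigned impl. A toy illustration of the same trick; `Bits`, `bits_common!`, and `bits_impl!` are invented names:

```rust
trait Bits {
    const BITS: u32;
    fn lz(self) -> u32;
}

macro_rules! bits_common {
    ($ty:ty, $bits:expr) => {
        const BITS: u32 = $bits;
        fn lz(self) -> u32 {
            <$ty>::leading_zeros(self)
        }
    };
}

macro_rules! bits_impl {
    ($ity:ty, $uty:ty, $bits:expr) => {
        // The impls differ only in what the outer macro adds; the common
        // part is spliced in by `bits_common!`.
        impl Bits for $uty { bits_common!($uty, $bits); }
        impl Bits for $ity { bits_common!($ity, $bits); }
    };
}

bits_impl!(i32, u32, 32);
bits_impl!(i64, u64, 64);

fn main() {
    assert_eq!(1u32.lz(), 31);
    assert_eq!((-1i64).lz(), 0);
    println!("ok");
}
```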
cast_into { + ($ty:ty) => { + cast_into!($ty; usize, isize, u32, i32, u64, i64, u128, i128); + }; + ($ty:ty; $($into:ty),*) => {$( + impl CastInto<$into> for $ty { + fn cast(self) -> $into { + self as $into + } + } + )*}; +} + +cast_into!(u32); +cast_into!(i32); +cast_into!(u64); +cast_into!(i64); +cast_into!(u128); +cast_into!(i128); diff --git a/src/libcompiler_builtins/src/int/mul.rs b/src/libcompiler_builtins/src/int/mul.rs index 98a8987e1d..a4b2ebd7fb 100644 --- a/src/libcompiler_builtins/src/int/mul.rs +++ b/src/libcompiler_builtins/src/int/mul.rs @@ -5,8 +5,8 @@ use int::Int; trait Mul: LargeInt { fn mul(self, other: Self) -> Self { - let half_bits = Self::bits() / 4; - let lower_mask = !<::LowHalf>::zero() >> half_bits; + let half_bits = Self::BITS / 4; + let lower_mask = !<::LowHalf>::ZERO >> half_bits; let mut low = (self.low() & lower_mask).wrapping_mul(other.low() & lower_mask); let mut t = low >> half_bits; low &= lower_mask; @@ -33,23 +33,23 @@ trait Mulo: Int + ops::Neg { *overflow = 0; let result = self.wrapping_mul(other); if self == Self::min_value() { - if other != Self::zero() && other != Self::one() { + if other != Self::ZERO && other != Self::ONE { *overflow = 1; } return result; } if other == Self::min_value() { - if self != Self::zero() && self != Self::one() { + if self != Self::ZERO && self != Self::ONE { *overflow = 1; } return result; } - let sa = self >> (Self::bits() - 1); + let sa = self >> (Self::BITS - 1); let abs_a = (self ^ sa) - sa; - let sb = other >> (Self::bits() - 1); + let sb = other >> (Self::BITS - 1); let abs_b = (other ^ sb) - sb; - let two = Self::one() + Self::one(); + let two = Self::ONE + Self::ONE; if abs_a < two || abs_b < two { return result; } diff --git a/src/libcompiler_builtins/src/int/sdiv.rs b/src/libcompiler_builtins/src/int/sdiv.rs index c949c3394d..ff8fa61e4b 100644 --- a/src/libcompiler_builtins/src/int/sdiv.rs +++ b/src/libcompiler_builtins/src/int/sdiv.rs @@ -3,9 +3,9 @@ use int::Int; trait Div: Int { /// Returns `a / b` fn div(self, other: Self) -> Self { - let s_a = self >> (Self::bits() - 1); - let s_b = other >> (Self::bits() - 1); - // NOTE it's OK to overflow here because of the `as $uty` cast below + let s_a = self >> (Self::BITS - 1); + let s_b = other >> (Self::BITS - 1); + // NOTE it's OK to overflow here because of the `.unsigned()` below. // This whole operation is computing the absolute value of the inputs // So some overflow will happen when dealing with e.g. 
`i64::MIN` // where the absolute value is `(-i64::MIN) as u64` @@ -25,10 +25,10 @@ impl Div for i128 {} trait Mod: Int { /// Returns `a % b` fn mod_(self, other: Self) -> Self { - let s = other >> (Self::bits() - 1); + let s = other >> (Self::BITS - 1); // NOTE(wrapping_sub) see comment in the `div` let b = (other ^ s).wrapping_sub(s); - let s = self >> (Self::bits() - 1); + let s = self >> (Self::BITS - 1); let a = (self ^ s).wrapping_sub(s); let r = a.unsigned().aborting_rem(b.unsigned()); diff --git a/src/libcompiler_builtins/src/int/shift.rs b/src/libcompiler_builtins/src/int/shift.rs index a9b6c05d3d..805d705962 100644 --- a/src/libcompiler_builtins/src/int/shift.rs +++ b/src/libcompiler_builtins/src/int/shift.rs @@ -1,13 +1,13 @@ use int::{Int, LargeInt}; trait Ashl: Int + LargeInt { - /// Returns `a << b`, requires `b < $ty::bits()` + /// Returns `a << b`, requires `b < Self::BITS` fn ashl(self, offset: u32) -> Self where Self: LargeInt::LowHalf>, { - let half_bits = Self::bits() / 2; + let half_bits = Self::BITS / 2; if offset & half_bits != 0 { - Self::from_parts(Int::zero(), self.low() << (offset - half_bits)) + Self::from_parts(Int::ZERO, self.low() << (offset - half_bits)) } else if offset == 0 { self } else { @@ -22,11 +22,11 @@ impl Ashl for u64 {} impl Ashl for u128 {} trait Ashr: Int + LargeInt { - /// Returns arithmetic `a >> b`, requires `b < $ty::bits()` + /// Returns arithmetic `a >> b`, requires `b < Self::BITS` fn ashr(self, offset: u32) -> Self where Self: LargeInt::HighHalf as Int>::UnsignedInt>, { - let half_bits = Self::bits() / 2; + let half_bits = Self::BITS / 2; if offset & half_bits != 0 { Self::from_parts((self.high() >> (offset - half_bits)).unsigned(), self.high() >> (half_bits - 1)) @@ -44,13 +44,13 @@ impl Ashr for i64 {} impl Ashr for i128 {} trait Lshr: Int + LargeInt { - /// Returns logical `a >> b`, requires `b < $ty::bits()` + /// Returns logical `a >> b`, requires `b < Self::BITS` fn lshr(self, offset: u32) -> Self where Self: LargeInt::LowHalf>, { - let half_bits = Self::bits() / 2; + let half_bits = Self::BITS / 2; if offset & half_bits != 0 { - Self::from_parts(self.high() >> (offset - half_bits), Int::zero()) + Self::from_parts(self.high() >> (offset - half_bits), Int::ZERO) } else if offset == 0 { self } else { diff --git a/src/libcompiler_builtins/src/int/udiv.rs b/src/libcompiler_builtins/src/int/udiv.rs index b8d949128d..74a2ac3387 100644 --- a/src/libcompiler_builtins/src/int/udiv.rs +++ b/src/libcompiler_builtins/src/int/udiv.rs @@ -63,7 +63,7 @@ macro_rules! udivmod_inner { sr = d.high().leading_zeros().wrapping_sub(n.high().leading_zeros()); // D > N - if sr > ::bits() - 2 { + if sr > ::BITS - 2 { if let Some(rem) = rem { *rem = n; } @@ -72,8 +72,8 @@ macro_rules! udivmod_inner { sr += 1; - // 1 <= sr <= ::bits() - 1 - q = n << (<$ty>::bits() - sr); + // 1 <= sr <= ::BITS - 1 + q = n << (<$ty>::BITS - sr); r = n >> sr; } else if d.high() == 0 { // K X @@ -92,10 +92,10 @@ macro_rules! udivmod_inner { }; } - sr = 1 + ::bits() + d.low().leading_zeros() - n.high().leading_zeros(); + sr = 1 + ::BITS + d.low().leading_zeros() - n.high().leading_zeros(); - // 2 <= sr <= u64::bits() - 1 - q = n << (<$ty>::bits() - sr); + // 2 <= sr <= u64::BITS - 1 + q = n << (<$ty>::BITS - sr); r = n >> sr; } else { // K X @@ -104,7 +104,7 @@ macro_rules! 
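The `(self ^ s).wrapping_sub(s)` dance in `Div`/`Mod` above is the usual branch-free absolute value: after the arithmetic shift, `s` is all zeros for non-negative input and all ones for negative input, so the xor-and-subtract flips and increments exactly when needed. Shown standalone, with the same wrapping behaviour at `i32::MIN` that the patch's comment mentions:

```rust
fn abs_branchless(a: i32) -> u32 {
    let s = a >> 31; // 0 or -1 (arithmetic shift)
    (a ^ s).wrapping_sub(s) as u32
}

fn main() {
    assert_eq!(abs_branchless(5), 5);
    assert_eq!(abs_branchless(-5), 5);
    assert_eq!(abs_branchless(i32::MIN), 2_147_483_648);
    println!("ok");
}
```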
udivmod_inner { sr = d.high().leading_zeros().wrapping_sub(n.high().leading_zeros()); // D > N - if sr > ::bits() - 1 { + if sr > ::BITS - 1 { if let Some(rem) = rem { *rem = n; } @@ -113,16 +113,16 @@ macro_rules! udivmod_inner { sr += 1; - // 1 <= sr <= ::bits() - q = n << (<$ty>::bits() - sr); + // 1 <= sr <= ::BITS + q = n << (<$ty>::BITS - sr); r = n >> sr; } // Not a special case // q and r are initialized with - // q = n << (u64::bits() - sr) + // q = n << (u64::BITS - sr) // r = n >> sr - // 1 <= sr <= u64::bits() - 1 + // 1 <= sr <= u64::BITS - 1 let mut carry = 0; // Don't use a range because they may generate references to memcpy in unoptimized code @@ -131,7 +131,7 @@ macro_rules! udivmod_inner { i += 1; // r:q = ((r:q) << 1) | carry - r = (r << 1) | (q >> (<$ty>::bits() - 1)); + r = (r << 1) | (q >> (<$ty>::BITS - 1)); q = (q << 1) | carry as $ty; // carry = 0 @@ -139,7 +139,7 @@ macro_rules! udivmod_inner { // r -= d; // carry = 1; // } - let s = (d.wrapping_sub(r).wrapping_sub(1)) as os_ty!($ty) >> (<$ty>::bits() - 1); + let s = (d.wrapping_sub(r).wrapping_sub(1)) as os_ty!($ty) >> (<$ty>::BITS - 1); carry = (s & 1) as hty!($ty); r -= d & s as $ty; } @@ -169,19 +169,19 @@ intrinsics! { let mut sr = d.leading_zeros().wrapping_sub(n.leading_zeros()); // d > n - if sr > u32::bits() - 1 { + if sr > u32::BITS - 1 { return 0; } // d == 1 - if sr == u32::bits() - 1 { + if sr == u32::BITS - 1 { return n; } sr += 1; - // 1 <= sr <= u32::bits() - 1 - let mut q = n << (u32::bits() - sr); + // 1 <= sr <= u32::BITS - 1 + let mut q = n << (u32::BITS - sr); let mut r = n >> sr; let mut carry = 0; @@ -192,7 +192,7 @@ intrinsics! { i += 1; // r:q = ((r:q) << 1) | carry - r = (r << 1) | (q >> (u32::bits() - 1)); + r = (r << 1) | (q >> (u32::BITS - 1)); q = (q << 1) | carry; // carry = 0; @@ -201,7 +201,7 @@ intrinsics! { // carry = 1; // } - let s = (d.wrapping_sub(r).wrapping_sub(1)) as i32 >> (u32::bits() - 1); + let s = (d.wrapping_sub(r).wrapping_sub(1)) as i32 >> (u32::BITS - 1); carry = (s & 1) as u32; r -= d & s as u32; } diff --git a/src/libcompiler_builtins/src/lib.rs b/src/libcompiler_builtins/src/lib.rs index b8cf83e212..d30a218e3d 100644 --- a/src/libcompiler_builtins/src/lib.rs +++ b/src/libcompiler_builtins/src/lib.rs @@ -51,6 +51,9 @@ pub mod mem; #[cfg(target_arch = "arm")] pub mod arm; +#[cfg(target_arch = "x86")] +pub mod x86; + #[cfg(target_arch = "x86_64")] pub mod x86_64; diff --git a/src/libcompiler_builtins/src/probestack.rs b/src/libcompiler_builtins/src/probestack.rs index 329e272651..13ea5a5f03 100644 --- a/src/libcompiler_builtins/src/probestack.rs +++ b/src/libcompiler_builtins/src/probestack.rs @@ -44,8 +44,8 @@ #![cfg(not(windows))] // Windows already has builtins to do this #[naked] -#[cfg_attr(not(feature = "mangled-names"), no_mangle)] -#[cfg(target_arch = "x86_64")] +#[no_mangle] +#[cfg(all(target_arch = "x86_64", not(feature = "mangled-names")))] pub unsafe extern fn __rust_probestack() { // Our goal here is to touch each page between %rsp+8 and %rsp+8-%rax, // ensuring that if any pages are unmapped we'll make a page fault. @@ -87,8 +87,8 @@ pub unsafe extern fn __rust_probestack() { } #[naked] -#[cfg_attr(not(feature = "mangled-names"), no_mangle)] -#[cfg(target_arch = "x86")] +#[no_mangle] +#[cfg(all(target_arch = "x86", not(feature = "mangled-names")))] pub unsafe extern fn __rust_probestack() { // This is the same as x86_64 above, only translated for 32-bit sizes. 
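`__udivsi3` above is a textbook restoring divider: shift one numerator bit into `r`, then use the sign of `d - r - 1` to decide, without a branch, both the quotient bit and whether to subtract `d`. A standalone sketch for `u32`, restricted for simplicity to divisors below 2^31 so the sign test stays valid in this simplified setup (the real routine's initial shift setup handles the full range):

```rust
fn udiv_restoring(n: u32, d: u32) -> (u32, u32) {
    assert!(d != 0 && d <= i32::MAX as u32);
    let mut q = 0u32;
    let mut r = 0u32;
    for i in (0..32).rev() {
        // r:q = (r:q) << 1, shifting in bit i of n
        r = (r << 1) | ((n >> i) & 1);
        // s is all ones exactly when r >= d
        let s = ((d.wrapping_sub(r).wrapping_sub(1) as i32) >> 31) as u32;
        q = (q << 1) | (s & 1);
        r -= d & s;
    }
    (q, r) // (quotient, remainder)
}

fn main() {
    assert_eq!(udiv_restoring(1000, 7), (142, 6));
    assert_eq!(udiv_restoring(u32::MAX, 3), (u32::MAX / 3, u32::MAX % 3));
    println!("ok");
}
```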
Note // that on Unix we're expected to restore everything as it was, this diff --git a/src/libcompiler_builtins/src/x86.rs b/src/libcompiler_builtins/src/x86.rs new file mode 100644 index 0000000000..3e540e21ef --- /dev/null +++ b/src/libcompiler_builtins/src/x86.rs @@ -0,0 +1,71 @@ +#![allow(unused_imports)] + +use core::intrinsics; + +// NOTE These functions are implemented using assembly because they using a custom +// calling convention which can't be implemented using a normal Rust function + +// NOTE These functions are never mangled as they are not tested against compiler-rt +// and mangling ___chkstk would break the `jmp ___chkstk` instruction in __alloca + +#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))] +#[naked] +#[no_mangle] +pub unsafe fn ___chkstk_ms() { + asm!(" + push %ecx + push %eax + cmp $$0x1000,%eax + lea 12(%esp),%ecx + jb 1f + 2: + sub $$0x1000,%ecx + test %ecx,(%ecx) + sub $$0x1000,%eax + cmp $$0x1000,%eax + ja 2b + 1: + sub %eax,%ecx + test %ecx,(%ecx) + pop %eax + pop %ecx + ret"); + intrinsics::unreachable(); +} + +// FIXME: __alloca should be an alias to __chkstk +#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))] +#[naked] +#[no_mangle] +pub unsafe fn __alloca() { + asm!("jmp ___chkstk // Jump to ___chkstk since fallthrough may be unreliable"); + intrinsics::unreachable(); +} + +#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))] +#[naked] +#[no_mangle] +pub unsafe fn ___chkstk() { + asm!(" + push %ecx + cmp $$0x1000,%eax + lea 8(%esp),%ecx // esp before calling this routine -> ecx + jb 1f + 2: + sub $$0x1000,%ecx + test %ecx,(%ecx) + sub $$0x1000,%eax + cmp $$0x1000,%eax + ja 2b + 1: + sub %eax,%ecx + test %ecx,(%ecx) + + lea 4(%esp),%eax // load pointer to the return address into eax + mov %ecx,%esp // install the new top of stack pointer into esp + mov -4(%eax),%ecx // restore ecx + push (%eax) // push return address onto the stack + sub %esp,%eax // restore the original value in eax + ret"); + intrinsics::unreachable(); +} diff --git a/src/libcompiler_builtins/src/x86_64.rs b/src/libcompiler_builtins/src/x86_64.rs index bc3c4ee4c5..09b0081526 100644 --- a/src/libcompiler_builtins/src/x86_64.rs +++ b/src/libcompiler_builtins/src/x86_64.rs @@ -8,62 +8,64 @@ use core::intrinsics; // NOTE These functions are never mangled as they are not tested against compiler-rt // and mangling ___chkstk would break the `jmp ___chkstk` instruction in __alloca -#[cfg(all(windows, target_env = "gnu"))] +#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))] #[naked] -#[cfg_attr(not(feature = "mangled-names"), no_mangle)] +#[no_mangle] pub unsafe fn ___chkstk_ms() { - asm!("push %rcx - push %rax - cmp $$0x1000,%rax - lea 24(%rsp),%rcx - jb 1f - 2: - sub $$0x1000,%rcx - test %rcx,(%rcx) - sub $$0x1000,%rax - cmp $$0x1000,%rax - ja 2b - 1: - sub %rax,%rcx - test %rcx,(%rcx) - pop %rax - pop %rcx - ret"); + asm!(" + push %rcx + push %rax + cmp $$0x1000,%rax + lea 24(%rsp),%rcx + jb 1f + 2: + sub $$0x1000,%rcx + test %rcx,(%rcx) + sub $$0x1000,%rax + cmp $$0x1000,%rax + ja 2b + 1: + sub %rax,%rcx + test %rcx,(%rcx) + pop %rax + pop %rcx + ret"); intrinsics::unreachable(); } -#[cfg(all(windows, target_env = "gnu"))] +#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))] #[naked] -#[cfg_attr(not(feature = "mangled-names"), no_mangle)] +#[no_mangle] pub unsafe fn __alloca() { asm!("mov %rcx,%rax // x64 _alloca is a normal function with parameter in rcx jmp ___chkstk // Jump to 
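The `__rust_probestack`/`___chkstk` routines above walk the stack one 4 KiB page at a time, touching a word in each page so the guard page is hit before the stack pointer jumps past it. A rough sketch of just the address arithmetic; no memory is probed here, and the function and numbers are made up for illustration:

```rust
fn pages_probed(stack_top: usize, requested: usize) -> Vec<usize> {
    const PAGE: usize = 0x1000;
    let mut probes = Vec::new();
    let mut addr = stack_top;
    let mut remaining = requested;
    while remaining >= PAGE {
        addr -= PAGE;      // sub $0x1000, %ecx
        probes.push(addr); // test %ecx, (%ecx) -- touch the page
        remaining -= PAGE; // sub $0x1000, %eax
    }
    if remaining > 0 {
        addr -= remaining; // final sub %eax, %ecx
        probes.push(addr); // final test
    }
    probes
}

fn main() {
    // A hypothetical 10 KiB frame: two full pages plus a final partial page.
    let probes = pages_probed(0x8000_0000, 10 * 1024);
    assert_eq!(probes.len(), 3);
    println!("{:x?}", probes);
}
```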
___chkstk since fallthrough may be unreliable"); intrinsics::unreachable(); } -#[cfg(all(windows, target_env = "gnu"))] +#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))] #[naked] -#[cfg_attr(not(feature = "mangled-names"), no_mangle)] +#[no_mangle] pub unsafe fn ___chkstk() { - asm!("push %rcx - cmp $$0x1000,%rax - lea 16(%rsp),%rcx // rsp before calling this routine -> rcx - jb 1f - 2: - sub $$0x1000,%rcx - test %rcx,(%rcx) - sub $$0x1000,%rax - cmp $$0x1000,%rax - ja 2b - 1: - sub %rax,%rcx - test %rcx,(%rcx) + asm!(" + push %rcx + cmp $$0x1000,%rax + lea 16(%rsp),%rcx // rsp before calling this routine -> rcx + jb 1f + 2: + sub $$0x1000,%rcx + test %rcx,(%rcx) + sub $$0x1000,%rax + cmp $$0x1000,%rax + ja 2b + 1: + sub %rax,%rcx + test %rcx,(%rcx) - lea 8(%rsp),%rax // load pointer to the return address into rax - mov %rcx,%rsp // install the new top of stack pointer into rsp - mov -8(%rax),%rcx // restore rcx - push (%rax) // push return address onto the stack - sub %rsp,%rax // restore the original value in rax - ret"); + lea 8(%rsp),%rax // load pointer to the return address into rax + mov %rcx,%rsp // install the new top of stack pointer into rsp + mov -8(%rax),%rcx // restore rcx + push (%rax) // push return address onto the stack + sub %rsp,%rax // restore the original value in rax + ret"); intrinsics::unreachable(); } diff --git a/src/libcompiler_builtins/tests/floattidf.rs b/src/libcompiler_builtins/tests/floattidf.rs index 4e4f84ddb0..c2e798a996 100644 --- a/src/libcompiler_builtins/tests/floattidf.rs +++ b/src/libcompiler_builtins/tests/floattidf.rs @@ -4,5 +4,6 @@ not(any(target_env = "gnu", target_env = "musl")), target_os = "linux", test), no_std)] +#![cfg(not(target_arch = "mips"))] // FIXME(#168) include!(concat!(env!("OUT_DIR"), "/floattidf.rs")); diff --git a/src/libcore/array.rs b/src/libcore/array.rs index 4b7706bac3..6a7926fecd 100644 --- a/src/libcore/array.rs +++ b/src/libcore/array.rs @@ -123,14 +123,6 @@ macro_rules! array_impls { } } - #[stable(feature = "rust1", since = "1.0.0")] - #[cfg(stage0)] - impl Clone for [T; $N] { - fn clone(&self) -> [T; $N] { - *self - } - } - #[stable(feature = "rust1", since = "1.0.0")] impl Hash for [T; $N] { fn hash(&self, state: &mut H) { diff --git a/src/libcore/benches/iter.rs b/src/libcore/benches/iter.rs index 5b06229c21..1f16f5b1df 100644 --- a/src/libcore/benches/iter.rs +++ b/src/libcore/benches/iter.rs @@ -146,3 +146,132 @@ fn bench_for_each_chain_ref_fold(b: &mut Bencher) { acc }); } + + +/// Helper to benchmark `sum` for iterators taken by value which +/// can optimize `fold`, and by reference which cannot. +macro_rules! bench_sums { + ($bench_sum:ident, $bench_ref_sum:ident, $iter:expr) => { + #[bench] + fn $bench_sum(b: &mut Bencher) { + b.iter(|| -> i64 { + $iter.map(black_box).sum() + }); + } + + #[bench] + fn $bench_ref_sum(b: &mut Bencher) { + b.iter(|| -> i64 { + $iter.map(black_box).by_ref().sum() + }); + } + } +} + +bench_sums! { + bench_flat_map_sum, + bench_flat_map_ref_sum, + (0i64..1000).flat_map(|x| x..x+1000) +} + +bench_sums! { + bench_flat_map_chain_sum, + bench_flat_map_chain_ref_sum, + (0i64..1000000).flat_map(|x| once(x).chain(once(x))) +} + +bench_sums! { + bench_enumerate_sum, + bench_enumerate_ref_sum, + (0i64..1000000).enumerate().map(|(i, x)| x * i as i64) +} + +bench_sums! { + bench_enumerate_chain_sum, + bench_enumerate_chain_ref_sum, + (0i64..1000000).chain(0..1000000).enumerate().map(|(i, x)| x * i as i64) +} + +bench_sums! 
{ + bench_filter_sum, + bench_filter_ref_sum, + (0i64..1000000).filter(|x| x % 2 == 0) +} + +bench_sums! { + bench_filter_chain_sum, + bench_filter_chain_ref_sum, + (0i64..1000000).chain(0..1000000).filter(|x| x % 2 == 0) +} + +bench_sums! { + bench_filter_map_sum, + bench_filter_map_ref_sum, + (0i64..1000000).filter_map(|x| x.checked_mul(x)) +} + +bench_sums! { + bench_filter_map_chain_sum, + bench_filter_map_chain_ref_sum, + (0i64..1000000).chain(0..1000000).filter_map(|x| x.checked_mul(x)) +} + +bench_sums! { + bench_fuse_sum, + bench_fuse_ref_sum, + (0i64..1000000).fuse() +} + +bench_sums! { + bench_fuse_chain_sum, + bench_fuse_chain_ref_sum, + (0i64..1000000).chain(0..1000000).fuse() +} + +bench_sums! { + bench_inspect_sum, + bench_inspect_ref_sum, + (0i64..1000000).inspect(|_| {}) +} + +bench_sums! { + bench_inspect_chain_sum, + bench_inspect_chain_ref_sum, + (0i64..1000000).chain(0..1000000).inspect(|_| {}) +} + +bench_sums! { + bench_peekable_sum, + bench_peekable_ref_sum, + (0i64..1000000).peekable() +} + +bench_sums! { + bench_peekable_chain_sum, + bench_peekable_chain_ref_sum, + (0i64..1000000).chain(0..1000000).peekable() +} + +bench_sums! { + bench_skip_sum, + bench_skip_ref_sum, + (0i64..1000000).skip(1000) +} + +bench_sums! { + bench_skip_chain_sum, + bench_skip_chain_ref_sum, + (0i64..1000000).chain(0..1000000).skip(1000) +} + +bench_sums! { + bench_skip_while_sum, + bench_skip_while_ref_sum, + (0i64..1000000).skip_while(|&x| x < 1000) +} + +bench_sums! { + bench_skip_while_chain_sum, + bench_skip_while_chain_ref_sum, + (0i64..1000000).chain(0..1000000).skip_while(|&x| x < 1000) +} diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs index e0a3b8d52f..b9c5ff10f8 100644 --- a/src/libcore/cell.rs +++ b/src/libcore/cell.rs @@ -329,6 +329,7 @@ impl Cell { /// let c = Cell::new(5); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_cell_new"))] #[inline] pub const fn new(value: T) -> Cell { Cell { @@ -543,6 +544,7 @@ impl RefCell { /// let c = RefCell::new(5); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_refcell_new"))] #[inline] pub const fn new(value: T) -> RefCell { RefCell { @@ -1188,6 +1190,7 @@ impl UnsafeCell { /// let uc = UnsafeCell::new(5); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_unsafe_cell_new"))] #[inline] pub const fn new(value: T) -> UnsafeCell { UnsafeCell { value: value } diff --git a/src/libcore/clone.rs b/src/libcore/clone.rs index 2dc51718b9..826420a0c0 100644 --- a/src/libcore/clone.rs +++ b/src/libcore/clone.rs @@ -88,7 +88,7 @@ /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] -#[cfg_attr(not(stage0), lang = "clone")] +#[lang = "clone"] pub trait Clone : Sized { /// Returns a copy of the value. /// @@ -130,45 +130,3 @@ pub struct AssertParamIsClone { _field: ::marker::PhantomData reason = "deriving hack, should not be public", issue = "0")] pub struct AssertParamIsCopy { _field: ::marker::PhantomData } - -#[stable(feature = "rust1", since = "1.0.0")] -#[cfg(stage0)] -impl<'a, T: ?Sized> Clone for &'a T { - /// Returns a shallow copy of the reference. - #[inline] - fn clone(&self) -> &'a T { *self } -} - -macro_rules! clone_impl { - ($t:ty) => { - #[stable(feature = "rust1", since = "1.0.0")] - #[cfg(stage0)] - impl Clone for $t { - /// Returns a deep copy of the value. 
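The paired `bench_sums!` benchmarks above exist because consuming an adaptor by value lets its specialized `fold` run, while going through `by_ref()` forces the element-by-element path. Outside the benchmark harness the two spellings are simply equivalent ways to compute the same sum:

```rust
fn main() {
    let by_value: i64 = (0i64..1000).filter(|x| x % 2 == 0).sum();

    let mut iter = (0i64..1000).filter(|x| x % 2 == 0);
    let by_ref: i64 = iter.by_ref().sum();

    assert_eq!(by_value, by_ref);
    println!("sum = {}", by_value);
}
```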
- #[inline] - fn clone(&self) -> $t { *self } - } - } -} - -clone_impl! { isize } -clone_impl! { i8 } -clone_impl! { i16 } -clone_impl! { i32 } -clone_impl! { i64 } -clone_impl! { i128 } - -clone_impl! { usize } -clone_impl! { u8 } -clone_impl! { u16 } -clone_impl! { u32 } -clone_impl! { u64 } -clone_impl! { u128 } - -clone_impl! { f32 } -clone_impl! { f64 } - -clone_impl! { ! } -clone_impl! { () } -clone_impl! { bool } -clone_impl! { char } diff --git a/src/libcore/cmp.rs b/src/libcore/cmp.rs index e012cbd76f..cc71e09cae 100644 --- a/src/libcore/cmp.rs +++ b/src/libcore/cmp.rs @@ -162,8 +162,8 @@ pub trait PartialEq { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub trait Eq: PartialEq { - // FIXME #13101: this method is used solely by #[deriving] to - // assert that every component of a type implements #[deriving] + // this method is used solely by #[deriving] to assert + // that every component of a type implements #[deriving] // itself, the current deriving infrastructure means doing this // assertion without using a method on this trait is nearly // impossible. diff --git a/src/libcore/convert.rs b/src/libcore/convert.rs index 6f3c3863fa..e815d72d36 100644 --- a/src/libcore/convert.rs +++ b/src/libcore/convert.rs @@ -48,8 +48,25 @@ #![stable(feature = "rust1", since = "1.0.0")] -use str::FromStr; +use fmt; +/// A type used as the error type for implementations of fallible conversion +/// traits in cases where conversions cannot actually fail. +/// +/// Because `Infallible` has no variants, a value of this type can never exist. +/// It is used only to satisfy trait signatures that expect an error type, and +/// signals to both the compiler and the user that the error case is impossible. +#[unstable(feature = "try_from", issue = "33417")] +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub enum Infallible {} + +#[unstable(feature = "try_from", issue = "33417")] +impl fmt::Display for Infallible { + fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { + match *self { + } + } +} /// A cheap reference-to-reference conversion. Used to convert a value to a /// reference value within generic code. /// @@ -417,6 +434,17 @@ impl TryInto for T where U: TryFrom } } +// Infallible conversions are semantically equivalent to fallible conversions +// with an uninhabited error type. +#[unstable(feature = "try_from", issue = "33417")] +impl TryFrom for T where T: From { + type Error = Infallible; + + fn try_from(value: U) -> Result { + Ok(T::from(value)) + } +} + //////////////////////////////////////////////////////////////////////////////// // CONCRETE IMPLS //////////////////////////////////////////////////////////////////////////////// @@ -442,14 +470,3 @@ impl AsRef for str { self } } - -// FromStr implies TryFrom<&str> -#[unstable(feature = "try_from", issue = "33417")] -impl<'a, T> TryFrom<&'a str> for T where T: FromStr -{ - type Error = ::Err; - - fn try_from(s: &'a str) -> Result { - FromStr::from_str(s) - } -} diff --git a/src/libcore/fmt/builders.rs b/src/libcore/fmt/builders.rs index b594c886b6..60b9eeb128 100644 --- a/src/libcore/fmt/builders.rs +++ b/src/libcore/fmt/builders.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
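The new blanket `impl TryFrom<U> for T where T: From<U>` above means any infallible conversion can also be driven through the fallible interface, with `Infallible` as an error that can never materialize. A small sketch against today's stable `std`, where this impl and `try_from` have since been stabilized; the `Meters` type is invented for the example:

```rust
use std::convert::TryFrom;

struct Meters(f64);

impl From<f64> for Meters {
    fn from(v: f64) -> Meters {
        Meters(v)
    }
}

fn main() {
    // `Meters: From<f64>` holds, so the blanket impl supplies
    // `TryFrom<f64> for Meters` with an error type that has no values.
    match Meters::try_from(3.5) {
        Ok(Meters(v)) => println!("got {} m", v),
        Err(never) => match never {}, // Infallible: this arm can never run
    }
}
```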
-use fmt::{self, FlagV1}; +use fmt; struct PadAdapter<'a, 'b: 'a> { fmt: &'a mut fmt::Formatter<'b>, @@ -140,7 +140,7 @@ impl<'a, 'b: 'a> DebugStruct<'a, 'b> { } fn is_pretty(&self) -> bool { - self.fmt.flags() & (1 << (FlagV1::Alternate as usize)) != 0 + self.fmt.alternate() } } @@ -233,7 +233,7 @@ impl<'a, 'b: 'a> DebugTuple<'a, 'b> { } fn is_pretty(&self) -> bool { - self.fmt.flags() & (1 << (FlagV1::Alternate as usize)) != 0 + self.fmt.alternate() } } @@ -277,7 +277,7 @@ impl<'a, 'b: 'a> DebugInner<'a, 'b> { } fn is_pretty(&self) -> bool { - self.fmt.flags() & (1 << (FlagV1::Alternate as usize)) != 0 + self.fmt.alternate() } } @@ -519,6 +519,6 @@ impl<'a, 'b: 'a> DebugMap<'a, 'b> { } fn is_pretty(&self) -> bool { - self.fmt.flags() & (1 << (FlagV1::Alternate as usize)) != 0 + self.fmt.alternate() } } diff --git a/src/libcore/fmt/mod.rs b/src/libcore/fmt/mod.rs index cf6262bda9..1e45af5b10 100644 --- a/src/libcore/fmt/mod.rs +++ b/src/libcore/fmt/mod.rs @@ -322,7 +322,6 @@ impl<'a> ArgumentV1<'a> { // flags available in the v1 format of format_args #[derive(Copy, Clone)] -#[allow(dead_code)] // SignMinus isn't currently used enum FlagV1 { SignPlus, SignMinus, Alternate, SignAwareZeroPad, } impl<'a> Arguments<'a> { @@ -427,7 +426,7 @@ impl<'a> Display for Arguments<'a> { } } -/// Format trait for the `?` character. +/// `?` formatting. /// /// `Debug` should format the output in a programmer-facing, debugging context. /// @@ -488,13 +487,14 @@ impl<'a> Display for Arguments<'a> { /// The origin is: Point { x: 0, y: 0 } /// ``` /// -/// There are a number of `debug_*` methods on `Formatter` to help you with manual +/// There are a number of `debug_*` methods on [`Formatter`] to help you with manual /// implementations, such as [`debug_struct`][debug_struct]. /// /// `Debug` implementations using either `derive` or the debug builder API -/// on `Formatter` support pretty printing using the alternate flag: `{:#?}`. +/// on [`Formatter`] support pretty printing using the alternate flag: `{:#?}`. /// /// [debug_struct]: ../../std/fmt/struct.Formatter.html#method.debug_struct +/// [`Formatter`]: ../../std/fmt/struct.Formatter.html /// /// Pretty printing with `#?`: /// @@ -592,7 +592,7 @@ pub trait Display { fn fmt(&self, f: &mut Formatter) -> Result; } -/// Format trait for the `o` character. +/// `o` formatting. /// /// The `Octal` trait should format its output as a number in base-8. /// @@ -639,7 +639,7 @@ pub trait Octal { fn fmt(&self, f: &mut Formatter) -> Result; } -/// Format trait for the `b` character. +/// `b` formatting. /// /// The `Binary` trait should format its output as a number in binary. /// @@ -686,7 +686,7 @@ pub trait Binary { fn fmt(&self, f: &mut Formatter) -> Result; } -/// Format trait for the `x` character. +/// `x` formatting. /// /// The `LowerHex` trait should format its output as a number in hexadecimal, with `a` through `f` /// in lower case. @@ -734,7 +734,7 @@ pub trait LowerHex { fn fmt(&self, f: &mut Formatter) -> Result; } -/// Format trait for the `X` character. +/// `X` formatting. /// /// The `UpperHex` trait should format its output as a number in hexadecimal, with `A` through `F` /// in upper case. @@ -782,7 +782,7 @@ pub trait UpperHex { fn fmt(&self, f: &mut Formatter) -> Result; } -/// Format trait for the `p` character. +/// `p` formatting. /// /// The `Pointer` trait should format its output as a memory location. This is commonly presented /// as hexadecimal. 
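The builder changes above just route `is_pretty` through `Formatter::alternate()`, which is the `#` in `{:#?}`. For reference, this is how a manual `Debug` impl drives those builders; the `Point` type is illustrative only, and `{:?}` and `{:#?}` run the same code, differing only in what `alternate()` reports:

```rust
use std::fmt;

struct Point {
    x: i32,
    y: i32,
}

impl fmt::Debug for Point {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Point")
            .field("x", &self.x)
            .field("y", &self.y)
            .finish()
    }
}

fn main() {
    let p = Point { x: 1, y: 2 };
    assert_eq!(format!("{:?}", p), "Point { x: 1, y: 2 }");
    // The alternate flag switches the builder to multi-line output.
    println!("{:#?}", p);
}
```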
@@ -827,7 +827,7 @@ pub trait Pointer { fn fmt(&self, f: &mut Formatter) -> Result; } -/// Format trait for the `e` character. +/// `e` formatting. /// /// The `LowerExp` trait should format its output in scientific notation with a lower-case `e`. /// @@ -870,7 +870,7 @@ pub trait LowerExp { fn fmt(&self, f: &mut Formatter) -> Result; } -/// Format trait for the `E` character. +/// `E` formatting. /// /// The `UpperExp` trait should format its output in scientific notation with an upper-case `E`. /// @@ -1275,7 +1275,7 @@ impl<'a> Formatter<'a> { write(self.buf, fmt) } - /// Flags for formatting (packed version of rt::Flag) + /// Flags for formatting #[stable(feature = "rust1", since = "1.0.0")] pub fn flags(&self) -> u32 { self.flags } @@ -1321,8 +1321,11 @@ impl<'a> Formatter<'a> { self.flags & (1 << FlagV1::SignAwareZeroPad as u32) != 0 } - /// Creates a `DebugStruct` builder designed to assist with creation of - /// `fmt::Debug` implementations for structs. + /// Creates a [`DebugStruct`] builder designed to assist with creation of + /// [`fmt::Debug`] implementations for structs. + /// + /// [`DebugStruct`]: ../../std/fmt/struct.DebugStruct.html + /// [`fmt::Debug`]: ../../std/fmt/trait.Debug.html /// /// # Examples /// @@ -1700,8 +1703,18 @@ impl Debug for RefCell { .finish() } Err(_) => { + // The RefCell is mutably borrowed so we can't look at its value + // here. Show a placeholder instead. + struct BorrowedPlaceholder; + + impl Debug for BorrowedPlaceholder { + fn fmt(&self, f: &mut Formatter) -> Result { + f.write_str("") + } + } + f.debug_struct("RefCell") - .field("value", &"") + .field("value", &BorrowedPlaceholder) .finish() } } diff --git a/src/libcore/fmt/num.rs b/src/libcore/fmt/num.rs index 4ca303dee4..c821817258 100644 --- a/src/libcore/fmt/num.rs +++ b/src/libcore/fmt/num.rs @@ -12,7 +12,6 @@ #![allow(deprecated)] -// FIXME: #6220 Implement floating point formatting use fmt; use ops::{Div, Rem, Sub}; @@ -242,7 +241,7 @@ macro_rules! impl_Display { // decode last 1 or 2 chars if n < 10 { curr -= 1; - *buf_ptr.offset(curr) = (n as u8) + 48; + *buf_ptr.offset(curr) = (n as u8) + b'0'; } else { let d1 = n << 1; curr -= 2; diff --git a/src/libcore/hash/mod.rs b/src/libcore/hash/mod.rs index a8b84203d6..bc1b911cd7 100644 --- a/src/libcore/hash/mod.rs +++ b/src/libcore/hash/mod.rs @@ -359,6 +359,52 @@ pub trait Hasher { } } +#[stable(feature = "indirect_hasher_impl", since = "1.22.0")] +impl<'a, H: Hasher + ?Sized> Hasher for &'a mut H { + fn finish(&self) -> u64 { + (**self).finish() + } + fn write(&mut self, bytes: &[u8]) { + (**self).write(bytes) + } + fn write_u8(&mut self, i: u8) { + (**self).write_u8(i) + } + fn write_u16(&mut self, i: u16) { + (**self).write_u16(i) + } + fn write_u32(&mut self, i: u32) { + (**self).write_u32(i) + } + fn write_u64(&mut self, i: u64) { + (**self).write_u64(i) + } + fn write_u128(&mut self, i: u128) { + (**self).write_u128(i) + } + fn write_usize(&mut self, i: usize) { + (**self).write_usize(i) + } + fn write_i8(&mut self, i: i8) { + (**self).write_i8(i) + } + fn write_i16(&mut self, i: i16) { + (**self).write_i16(i) + } + fn write_i32(&mut self, i: i32) { + (**self).write_i32(i) + } + fn write_i64(&mut self, i: i64) { + (**self).write_i64(i) + } + fn write_i128(&mut self, i: i128) { + (**self).write_i128(i) + } + fn write_isize(&mut self, i: isize) { + (**self).write_isize(i) + } +} + /// A trait for creating instances of [`Hasher`]. /// /// A `BuildHasher` is typically used (e.g. 
by [`HashMap`]) to create diff --git a/src/libcore/internal_macros.rs b/src/libcore/internal_macros.rs index 9a7914064f..cb215a38e5 100644 --- a/src/libcore/internal_macros.rs +++ b/src/libcore/internal_macros.rs @@ -68,3 +68,22 @@ macro_rules! forward_ref_binop { } } } + +// implements "T op= &U", based on "T op= U" +// where U is expected to be `Copy`able +macro_rules! forward_ref_op_assign { + (impl $imp:ident, $method:ident for $t:ty, $u:ty) => { + forward_ref_op_assign!(impl $imp, $method for $t, $u, + #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]); + }; + (impl $imp:ident, $method:ident for $t:ty, $u:ty, #[$attr:meta]) => { + #[$attr] + impl<'a> $imp<&'a $u> for $t { + #[inline] + fn $method(&mut self, other: &'a $u) { + $imp::$method(self, *other); + } + } + } +} + diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs index ad776c8605..bc82f0230e 100644 --- a/src/libcore/intrinsics.rs +++ b/src/libcore/intrinsics.rs @@ -848,12 +848,12 @@ extern "rust-intrinsic" { /// // The no-copy, unsafe way, still using transmute, but not UB. /// // This is equivalent to the original, but safer, and reuses the /// // same Vec internals. Therefore the new inner type must have the - /// // exact same size, and the same or lesser alignment, as the old - /// // type. The same caveats exist for this method as transmute, for + /// // exact same size, and the same alignment, as the old type. + /// // The same caveats exist for this method as transmute, for /// // the original inner type (`&i32`) to the converted inner type /// // (`Option<&i32>`), so read the nomicon pages linked above. /// let v_from_raw = unsafe { - /// Vec::from_raw_parts(v_orig.as_mut_ptr(), + /// Vec::from_raw_parts(v_orig.as_mut_ptr() as *mut Option<&i32>, /// v_orig.len(), /// v_orig.capacity()) /// }; @@ -1343,4 +1343,50 @@ extern "rust-intrinsic" { /// on MSVC it's `*mut [usize; 2]`. For more information see the compiler's /// source as well as std's catch implementation. pub fn try(f: fn(*mut u8), data: *mut u8, local_ptr: *mut u8) -> i32; + + /// Computes the byte offset that needs to be applied to `ptr` in order to + /// make it aligned to `align`. + /// If it is not possible to align `ptr`, the implementation returns + /// `usize::max_value()`. + /// + /// There are no guarantees whatsover that offsetting the pointer will not + /// overflow or go beyond the allocation that `ptr` points into. + /// It is up to the caller to ensure that the returned offset is correct + /// in all terms other than alignment. 
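The new `impl<'a, H: Hasher + ?Sized> Hasher for &'a mut H` and the `forward_ref_op_assign!` helper are both plain delegation through a reference, so generic code that takes the trait by value also accepts a mutable borrow. The same pattern on a toy trait; `Sink` and `Collect` are invented names for the example:

```rust
trait Sink {
    fn put(&mut self, byte: u8);
}

// Forward the whole trait through a mutable reference.
impl<'a, S: Sink + ?Sized> Sink for &'a mut S {
    fn put(&mut self, byte: u8) {
        (**self).put(byte)
    }
}

struct Collect(Vec<u8>);

impl Sink for Collect {
    fn put(&mut self, byte: u8) {
        self.0.push(byte)
    }
}

fn feed<S: Sink>(mut sink: S) {
    sink.put(1);
    sink.put(2);
}

fn main() {
    let mut c = Collect(Vec::new());
    feed(&mut c); // works only because of the forwarding impl
    c.put(3);
    assert_eq!(c.0, [1, 2, 3]);
    println!("ok");
}
```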
+ /// + /// # Examples + /// + /// Accessing adjacent `u8` as `u16` + /// + /// ``` + /// # #![feature(core_intrinsics)] + /// # fn foo(n: usize) { + /// # use std::intrinsics::align_offset; + /// # use std::mem::align_of; + /// # unsafe { + /// let x = [5u8, 6u8, 7u8, 8u8, 9u8]; + /// let ptr = &x[n] as *const u8; + /// let offset = align_offset(ptr as *const (), align_of::()); + /// if offset < x.len() - n - 1 { + /// let u16_ptr = ptr.offset(offset as isize) as *const u16; + /// assert_ne!(*u16_ptr, 500); + /// } else { + /// // while the pointer can be aligned via `offset`, it would point + /// // outside the allocation + /// } + /// # } } + /// ``` + #[cfg(not(stage0))] + pub fn align_offset(ptr: *const (), align: usize) -> usize; +} + +#[cfg(stage0)] +/// remove me after the next release +pub unsafe fn align_offset(ptr: *const (), align: usize) -> usize { + let offset = ptr as usize % align; + if offset == 0 { + 0 + } else { + align - offset + } } diff --git a/src/libcore/iter/iterator.rs b/src/libcore/iter/iterator.rs index ceb2a3f1d5..e9e31065cf 100644 --- a/src/libcore/iter/iterator.rs +++ b/src/libcore/iter/iterator.rs @@ -1337,7 +1337,7 @@ pub trait Iterator { (left, right) } - /// An iterator adaptor that applies a function, producing a single, final value. + /// An iterator method that applies a function, producing a single, final value. /// /// `fold()` takes two arguments: an initial value, and a closure with two /// arguments: an 'accumulator', and an element. The closure returns the value that diff --git a/src/libcore/iter/mod.rs b/src/libcore/iter/mod.rs index ebedfe1d74..8d2521b053 100644 --- a/src/libcore/iter/mod.rs +++ b/src/libcore/iter/mod.rs @@ -359,6 +359,12 @@ impl Iterator for Rev where I: DoubleEndedIterator { #[inline] fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } + fn fold(self, init: Acc, f: F) -> Acc + where F: FnMut(Acc, Self::Item) -> Acc, + { + self.iter.rfold(init, f) + } + #[inline] fn find
(&mut self, predicate: P) -> Option where P: FnMut(&Self::Item) -> bool @@ -379,6 +385,12 @@ impl DoubleEndedIterator for Rev where I: DoubleEndedIterator { #[inline] fn next_back(&mut self) -> Option<::Item> { self.iter.next() } + fn rfold(self, init: Acc, f: F) -> Acc + where F: FnMut(Acc, Self::Item) -> Acc, + { + self.iter.fold(init, f) + } + fn rfind
(&mut self, predicate: P) -> Option where P: FnMut(&Self::Item) -> bool { @@ -449,6 +461,12 @@ impl<'a, I, T: 'a> DoubleEndedIterator for Cloned fn next_back(&mut self) -> Option { self.it.next_back().cloned() } + + fn rfold(self, init: Acc, mut f: F) -> Acc + where F: FnMut(Acc, Self::Item) -> Acc, + { + self.it.rfold(init, move |acc, elt| f(acc, elt.clone())) + } } #[stable(feature = "iter_cloned", since = "1.1.0")] @@ -470,7 +488,7 @@ impl<'a, I, T: 'a> FusedIterator for Cloned {} #[doc(hidden)] -unsafe impl<'a, I, T: 'a> TrustedRandomAccess for Cloned +default unsafe impl<'a, I, T: 'a> TrustedRandomAccess for Cloned where I: TrustedRandomAccess, T: Clone { unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item { @@ -481,6 +499,18 @@ unsafe impl<'a, I, T: 'a> TrustedRandomAccess for Cloned fn may_have_side_effect() -> bool { true } } +#[doc(hidden)] +unsafe impl<'a, I, T: 'a> TrustedRandomAccess for Cloned + where I: TrustedRandomAccess, T: Copy +{ + unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item { + *self.it.get_unchecked(i) + } + + #[inline] + fn may_have_side_effect() -> bool { false } +} + #[unstable(feature = "trusted_len", issue = "37572")] unsafe impl<'a, I, T: 'a> TrustedLen for Cloned where I: TrustedLen, @@ -528,7 +558,7 @@ impl Iterator for Cycle where I: Clone + Iterator { #[unstable(feature = "fused", issue = "35602")] impl FusedIterator for Cycle where I: Clone + Iterator {} -/// An adapter for stepping iterators by a custom amount. +/// An iterator for stepping iterators by a custom amount. /// /// This `struct` is created by the [`step_by`] method on [`Iterator`]. See /// its documentation for more. @@ -761,6 +791,26 @@ impl DoubleEndedIterator for Chain where ChainState::Back => self.b.next_back(), } } + + fn rfold(self, init: Acc, mut f: F) -> Acc + where F: FnMut(Acc, Self::Item) -> Acc, + { + let mut accum = init; + match self.state { + ChainState::Both | ChainState::Back => { + accum = self.b.rfold(accum, &mut f); + } + _ => { } + } + match self.state { + ChainState::Both | ChainState::Front => { + accum = self.a.rfold(accum, &mut f); + } + _ => { } + } + accum + } + } // Note: *both* must be fused to handle double-ended iterators. 
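The `Chain::rfold` added above folds the back iterator first and then the front, so the reverse order is preserved under bulk folds. Observable from the outside (using `rfold`, which has since been stabilized):

```rust
fn main() {
    let a = [1, 2];
    let b = [3, 4];
    let order: Vec<i32> = a.iter()
        .chain(b.iter())
        .rfold(Vec::new(), |mut acc, &x| {
            acc.push(x);
            acc
        });
    assert_eq!(order, [4, 3, 2, 1]);
    println!("{:?}", order);
}
```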
@@ -1094,6 +1144,13 @@ impl DoubleEndedIterator for Map where fn next_back(&mut self) -> Option { self.iter.next_back().map(&mut self.f) } + + fn rfold(self, init: Acc, mut g: G) -> Acc + where G: FnMut(Acc, Self::Item) -> Acc, + { + let mut f = self.f; + self.iter.rfold(init, move |acc, elt| g(acc, f(elt))) + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -1193,6 +1250,18 @@ impl Iterator for Filter where P: FnMut(&I::Item) -> bool } count } + + #[inline] + fn fold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + let mut predicate = self.predicate; + self.iter.fold(init, move |acc, item| if predicate(&item) { + fold(acc, item) + } else { + acc + }) + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -1208,6 +1277,18 @@ impl DoubleEndedIterator for Filter } None } + + #[inline] + fn rfold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + let mut predicate = self.predicate; + self.iter.rfold(init, move |acc, item| if predicate(&item) { + fold(acc, item) + } else { + acc + }) + } } #[unstable(feature = "fused", issue = "35602")] @@ -1259,6 +1340,17 @@ impl Iterator for FilterMap let (_, upper) = self.iter.size_hint(); (0, upper) // can't know a lower bound, due to the predicate } + + #[inline] + fn fold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + let mut f = self.f; + self.iter.fold(init, move |acc, item| match f(item) { + Some(x) => fold(acc, x), + None => acc, + }) + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -1274,6 +1366,17 @@ impl DoubleEndedIterator for FilterMap } None } + + #[inline] + fn rfold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + let mut f = self.f; + self.iter.rfold(init, move |acc, item| match f(item) { + Some(x) => fold(acc, x), + None => acc, + }) + } } #[unstable(feature = "fused", issue = "35602")] @@ -1338,6 +1441,19 @@ impl Iterator for Enumerate where I: Iterator { fn count(self) -> usize { self.iter.count() } + + #[inline] + #[rustc_inherit_overflow_checks] + fn fold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + let mut count = self.count; + self.iter.fold(init, move |acc, item| { + let acc = fold(acc, (count, item)); + count += 1; + acc + }) + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -1353,6 +1469,19 @@ impl DoubleEndedIterator for Enumerate where (self.count + len, a) }) } + + #[inline] + fn rfold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + // Can safely add and subtract the count, as `ExactSizeIterator` promises + // that the number of elements fits into a `usize`. 
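All of the adaptor `fold`/`rfold` overrides above share one shape: keep `next()` for the general case, but hand bulk traversal to the inner iterator with the adaptor's own work wrapped into the closure. A minimal hypothetical adaptor in the same shape:

```rust
struct AddOffset<I> {
    iter: I,
    offset: i32,
}

impl<I: Iterator<Item = i32>> Iterator for AddOffset<I> {
    type Item = i32;

    fn next(&mut self) -> Option<i32> {
        self.iter.next().map(|x| x + self.offset)
    }

    // Bulk consumers (`sum`, `for_each`, ...) go through this override and
    // so benefit from whatever `fold` the inner iterator provides.
    fn fold<Acc, F>(self, init: Acc, mut f: F) -> Acc
    where
        F: FnMut(Acc, i32) -> Acc,
    {
        let offset = self.offset;
        self.iter.fold(init, move |acc, x| f(acc, x + offset))
    }
}

fn main() {
    let sum: i32 = AddOffset { iter: 0..4, offset: 10 }.sum();
    assert_eq!(sum, 46); // 10 + 11 + 12 + 13
    println!("ok");
}
```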
+ let mut count = self.count + self.iter.len(); + self.iter.rfold(init, move |acc, item| { + count -= 1; + fold(acc, (count, item)) + }) + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -1464,6 +1593,18 @@ impl Iterator for Peekable { let hi = hi.and_then(|x| x.checked_add(peek_len)); (lo, hi) } + + #[inline] + fn fold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + let acc = match self.peeked { + Some(None) => return init, + Some(Some(v)) => fold(init, v), + None => init, + }; + self.iter.fold(acc, fold) + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -1572,6 +1713,19 @@ impl Iterator for SkipWhile let (_, upper) = self.iter.size_hint(); (0, upper) // can't know a lower bound, due to the predicate } + + #[inline] + fn fold(mut self, mut init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + if !self.flag { + match self.next() { + Some(v) => init = fold(init, v), + None => return init, + } + } + self.iter.fold(init, fold) + } } #[unstable(feature = "fused", issue = "35602")] @@ -1712,6 +1866,19 @@ impl Iterator for Skip where I: Iterator { (lower, upper) } + + #[inline] + fn fold(mut self, init: Acc, fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + if self.n > 0 { + // nth(n) skips n+1 + if self.iter.nth(self.n - 1).is_none() { + return init; + } + } + self.iter.fold(init, fold) + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -1902,6 +2069,16 @@ impl Iterator for FlatMap _ => (lo, None) } } + + #[inline] + fn fold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.frontiter.into_iter() + .chain(self.iter.map(self.f).map(U::into_iter)) + .chain(self.backiter) + .fold(init, |acc, iter| iter.fold(acc, &mut fold)) + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -1924,6 +2101,16 @@ impl DoubleEndedIterator for FlatMap wher } } } + + #[inline] + fn rfold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.frontiter.into_iter() + .chain(self.iter.map(self.f).map(U::into_iter)) + .chain(self.backiter) + .rfold(init, |acc, iter| iter.rfold(acc, &mut fold)) + } } #[unstable(feature = "fused", issue = "35602")] @@ -2001,6 +2188,17 @@ impl Iterator for Fuse where I: Iterator { self.iter.size_hint() } } + + #[inline] + default fn fold(self, init: Acc, fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + if self.done { + init + } else { + self.iter.fold(init, fold) + } + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -2015,6 +2213,17 @@ impl DoubleEndedIterator for Fuse where I: DoubleEndedIterator { next } } + + #[inline] + default fn rfold(self, init: Acc, fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + if self.done { + init + } else { + self.iter.rfold(init, fold) + } + } } unsafe impl TrustedRandomAccess for Fuse @@ -2055,6 +2264,13 @@ impl Iterator for Fuse where I: FusedIterator { fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } + + #[inline] + fn fold(self, init: Acc, fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.iter.fold(init, fold) + } } #[unstable(feature = "fused", reason = "recently added", issue = "35602")] @@ -2065,6 +2281,13 @@ impl DoubleEndedIterator for Fuse fn next_back(&mut self) -> Option<::Item> { self.iter.next_back() } + + #[inline] + fn rfold(self, init: Acc, fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.iter.rfold(init, fold) + } } @@ -2129,6 +2352,14 @@ 
impl Iterator for Inspect where F: FnMut(&I::Item) { fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } + + #[inline] + fn fold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + let mut f = self.f; + self.iter.fold(init, move |acc, item| { f(&item); fold(acc, item) }) + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -2140,6 +2371,14 @@ impl DoubleEndedIterator for Inspect let next = self.iter.next_back(); self.do_inspect(next) } + + #[inline] + fn rfold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + let mut f = self.f; + self.iter.rfold(init, move |acc, item| { f(&item); fold(acc, item) }) + } } #[stable(feature = "rust1", since = "1.0.0")] diff --git a/src/libcore/iter/range.rs b/src/libcore/iter/range.rs index 73d518b570..e9aee4a467 100644 --- a/src/libcore/iter/range.rs +++ b/src/libcore/iter/range.rs @@ -89,6 +89,7 @@ macro_rules! step_impl_unsigned { } #[inline] + #[allow(unreachable_patterns)] fn add_usize(&self, n: usize) -> Option { match <$t>::try_from(n) { Ok(n_as_t) => self.checked_add(n_as_t), @@ -120,6 +121,7 @@ macro_rules! step_impl_signed { } #[inline] + #[allow(unreachable_patterns)] fn add_usize(&self, n: usize) -> Option { match <$unsigned>::try_from(n) { Ok(n_as_unsigned) => { diff --git a/src/libcore/iter/traits.rs b/src/libcore/iter/traits.rs index 2af129a67b..28236d193c 100644 --- a/src/libcore/iter/traits.rs +++ b/src/libcore/iter/traits.rs @@ -196,6 +196,23 @@ pub trait FromIterator: Sized { /// assert_eq!(i as i32, n); /// } /// ``` +/// +/// It is common to use `IntoIterator` as a trait bound. This allows +/// the input collection type to change, so long as it is still an +/// iterator. Additional bounds can be specified by restricting on +/// `Item`: +/// +/// ```rust +/// fn collect_as_strings(collection: T) -> Vec +/// where T: IntoIterator, +/// T::Item : std::fmt::Debug, +/// { +/// collection +/// .into_iter() +/// .map(|item| format!("{:?}", item)) +/// .collect() +/// } +/// ``` #[stable(feature = "rust1", since = "1.0.0")] pub trait IntoIterator { /// The type of the elements being iterated over. @@ -398,6 +415,70 @@ pub trait DoubleEndedIterator: Iterator { #[stable(feature = "rust1", since = "1.0.0")] fn next_back(&mut self) -> Option; + /// An iterator method that reduces the iterator's elements to a single, + /// final value, starting from the back. + /// + /// This is the reverse version of [`fold()`]: it takes elements starting from + /// the back of the iterator. + /// + /// `rfold()` takes two arguments: an initial value, and a closure with two + /// arguments: an 'accumulator', and an element. The closure returns the value that + /// the accumulator should have for the next iteration. + /// + /// The initial value is the value the accumulator will have on the first + /// call. + /// + /// After applying this closure to every element of the iterator, `rfold()` + /// returns the accumulator. + /// + /// This operation is sometimes called 'reduce' or 'inject'. + /// + /// Folding is useful whenever you have a collection of something, and want + /// to produce a single value from it. 
+ /// + /// [`fold()`]: trait.Iterator.html#method.fold + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(iter_rfold)] + /// let a = [1, 2, 3]; + /// + /// // the sum of all of the elements of a + /// let sum = a.iter() + /// .rfold(0, |acc, &x| acc + x); + /// + /// assert_eq!(sum, 6); + /// ``` + /// + /// This example builds a string, starting with an initial value + /// and continuing with each element from the back until the front: + /// + /// ``` + /// #![feature(iter_rfold)] + /// let numbers = [1, 2, 3, 4, 5]; + /// + /// let zero = "0".to_string(); + /// + /// let result = numbers.iter().rfold(zero, |acc, &x| { + /// format!("({} + {})", x, acc) + /// }); + /// + /// assert_eq!(result, "(1 + (2 + (3 + (4 + (5 + 0)))))"); + /// ``` + #[inline] + #[unstable(feature = "iter_rfold", issue = "44705")] + fn rfold(mut self, mut accum: B, mut f: F) -> B where + Self: Sized, F: FnMut(B, Self::Item) -> B, + { + while let Some(x) = self.next_back() { + accum = f(accum, x); + } + accum + } + /// Searches for an element of an iterator from the right that satisfies a predicate. /// /// `rfind()` takes a closure that returns `true` or `false`. It applies diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs index c270c6ae0d..69612bd2a3 100644 --- a/src/libcore/lib.rs +++ b/src/libcore/lib.rs @@ -85,12 +85,30 @@ #![feature(prelude_import)] #![feature(repr_simd, platform_intrinsics)] #![feature(rustc_attrs)] +#![cfg_attr(not(stage0), feature(rustc_const_unstable))] #![feature(specialization)] #![feature(staged_api)] #![feature(unboxed_closures)] #![feature(untagged_unions)] #![feature(unwind_attributes)] +#![cfg_attr(not(stage0), feature(const_min_value))] +#![cfg_attr(not(stage0), feature(const_max_value))] +#![cfg_attr(not(stage0), feature(const_atomic_bool_new))] +#![cfg_attr(not(stage0), feature(const_atomic_isize_new))] +#![cfg_attr(not(stage0), feature(const_atomic_usize_new))] +#![cfg_attr(not(stage0), feature(const_atomic_i8_new))] +#![cfg_attr(not(stage0), feature(const_atomic_u8_new))] +#![cfg_attr(not(stage0), feature(const_atomic_i16_new))] +#![cfg_attr(not(stage0), feature(const_atomic_u16_new))] +#![cfg_attr(not(stage0), feature(const_atomic_i32_new))] +#![cfg_attr(not(stage0), feature(const_atomic_u32_new))] +#![cfg_attr(not(stage0), feature(const_atomic_i64_new))] +#![cfg_attr(not(stage0), feature(const_atomic_u64_new))] +#![cfg_attr(not(stage0), feature(const_unsafe_cell_new))] +#![cfg_attr(not(stage0), feature(const_cell_new))] +#![cfg_attr(not(stage0), feature(const_nonzero_new))] + #[prelude_import] #[allow(unused)] use prelude::v1::*; diff --git a/src/libcore/macros.rs b/src/libcore/macros.rs index 684b81a27f..d64c984ea7 100644 --- a/src/libcore/macros.rs +++ b/src/libcore/macros.rs @@ -8,16 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#[macro_export] -// This stability attribute is totally useless. -#[stable(feature = "rust1", since = "1.0.0")] -#[cfg(stage0)] -macro_rules! __rust_unstable_column { - () => { - column!() - } -} - /// Entry point of thread panic, for details, see std::macros #[macro_export] #[allow_internal_unstable] @@ -27,18 +17,11 @@ macro_rules! 
panic { panic!("explicit panic") ); ($msg:expr) => ({ - static _MSG_FILE_LINE_COL: (&'static str, &'static str, u32, u32) = - ($msg, file!(), line!(), __rust_unstable_column!()); - $crate::panicking::panic(&_MSG_FILE_LINE_COL) + $crate::panicking::panic(&($msg, file!(), line!(), __rust_unstable_column!())) }); ($fmt:expr, $($arg:tt)*) => ({ - // The leading _'s are to avoid dead code warnings if this is - // used inside a dead function. Just `#[allow(dead_code)]` is - // insufficient, since the user may have - // `#[forbid(dead_code)]` and which cannot be overridden. - static _MSG_FILE_LINE_COL: (&'static str, u32, u32) = - (file!(), line!(), __rust_unstable_column!()); - $crate::panicking::panic_fmt(format_args!($fmt, $($arg)*), &_MSG_FILE_LINE_COL) + $crate::panicking::panic_fmt(format_args!($fmt, $($arg)*), + &(file!(), line!(), __rust_unstable_column!())) }); } @@ -62,11 +45,13 @@ macro_rules! panic { /// # Custom Messages /// /// This macro has a second form, where a custom panic message can -/// be provided with or without arguments for formatting. +/// be provided with or without arguments for formatting. See [`std::fmt`] +/// for syntax for this form. /// /// [`panic!`]: macro.panic.html /// [`debug_assert!`]: macro.debug_assert.html -/// [testing]: ../book/first-edition/testing.html +/// [testing]: ../book/second-edition/ch11-01-writing-tests.html#checking-results-with-the-assert-macro +/// [`std::fmt`]: ../std/fmt/index.html /// /// # Examples /// @@ -252,13 +237,15 @@ macro_rules! debug_assert { /// On panic, this macro will print the values of the expressions with their /// debug representations. /// -/// Unlike `assert_eq!`, `debug_assert_eq!` statements are only enabled in non +/// Unlike [`assert_eq!`], `debug_assert_eq!` statements are only enabled in non /// optimized builds by default. An optimized build will omit all /// `debug_assert_eq!` statements unless `-C debug-assertions` is passed to the /// compiler. This makes `debug_assert_eq!` useful for checks that are too /// expensive to be present in a release build but may be helpful during /// development. /// +/// [`assert_eq!`]: ../std/macro.assert_eq.html +/// /// # Examples /// /// ``` @@ -277,13 +264,15 @@ macro_rules! debug_assert_eq { /// On panic, this macro will print the values of the expressions with their /// debug representations. /// -/// Unlike `assert_ne!`, `debug_assert_ne!` statements are only enabled in non +/// Unlike [`assert_ne!`], `debug_assert_ne!` statements are only enabled in non /// optimized builds by default. An optimized build will omit all /// `debug_assert_ne!` statements unless `-C debug-assertions` is passed to the /// compiler. This makes `debug_assert_ne!` useful for checks that are too /// expensive to be present in a release build but may be helpful during /// development. /// +/// [`assert_ne!`]: ../std/macro.assert_ne.html +/// /// # Examples /// /// ``` @@ -300,10 +289,9 @@ macro_rules! debug_assert_ne { /// Helper macro for reducing boilerplate code for matching `Result` together /// with converting downstream errors. /// -/// Prefer using `?` syntax to `try!`. `?` is built in to the language and is -/// more succinct than `try!`. It is the standard method for error propagation. +/// The `?` operator was added to replace `try!` and should be used instead. /// -/// `try!` matches the given `Result`. In case of the `Ok` variant, the +/// `try!` matches the given [`Result`]. In case of the `Ok` variant, the /// expression has the value of the wrapped value. 
/// /// In case of the `Err` variant, it retrieves the inner error. `try!` then @@ -312,7 +300,9 @@ macro_rules! debug_assert_ne { /// error is then immediately returned. /// /// Because of the early return, `try!` can only be used in functions that -/// return `Result`. +/// return [`Result`]. +/// +/// [`Result`]: ../std/result/enum.Result.html /// /// # Examples /// @@ -331,12 +321,19 @@ macro_rules! debug_assert_ne { /// } /// } /// +/// // The preferred method of quickly returning errors +/// fn write_to_file_question() -> Result<(), MyError> { +/// let mut file = File::create("my_best_friends.txt")?; +/// Ok(()) +/// } +/// +/// // The previous method of quickly returning errors /// fn write_to_file_using_try() -> Result<(), MyError> { /// let mut file = try!(File::create("my_best_friends.txt")); /// try!(file.write_all(b"This is a list of my best friends.")); -/// println!("I wrote to the file"); /// Ok(()) /// } +/// /// // This is equivalent to: /// fn write_to_file_using_match() -> Result<(), MyError> { /// let mut file = try!(File::create("my_best_friends.txt")); @@ -344,7 +341,6 @@ macro_rules! debug_assert_ne { /// Ok(v) => v, /// Err(e) => return Err(From::from(e)), /// } -/// println!("I wrote to the file"); /// Ok(()) /// } /// ``` @@ -365,7 +361,7 @@ macro_rules! try { /// formatted according to the specified format string and the result will be passed to the writer. /// The writer may be any value with a `write_fmt` method; generally this comes from an /// implementation of either the [`std::fmt::Write`] or the [`std::io::Write`] trait. The macro -/// returns whatever the 'write_fmt' method returns; commonly a [`std::fmt::Result`], or an +/// returns whatever the `write_fmt` method returns; commonly a [`std::fmt::Result`], or an /// [`io::Result`]. /// /// See [`std::fmt`] for more information on the format string syntax. @@ -470,10 +466,20 @@ macro_rules! writeln { /// * Loops that dynamically terminate. /// * Iterators that dynamically terminate. /// +/// If the determination that the code is unreachable proves incorrect, the +/// program immediately terminates with a [`panic!`]. The function [`unreachable`], +/// which belongs to the [`std::intrinsics`] module, informs the compiler to +/// optimize the code out of the release version entirely. +/// +/// [`panic!`]: ../std/macro.panic.html +/// [`unreachable`]: ../std/intrinsics/fn.unreachable.html +/// [`std::intrinsics`]: ../std/intrinsics/index.html +/// /// # Panics /// -/// This will always [panic!](macro.panic.html) +/// This will always [`panic!`]. /// +/// [`panic!`]: ../std/macro.panic.html /// # Examples /// /// Match arms: @@ -516,13 +522,16 @@ macro_rules! unreachable { }); } -/// A standardized placeholder for marking unfinished code. It panics with the -/// message `"not yet implemented"` when executed. +/// A standardized placeholder for marking unfinished code. /// /// This can be useful if you are prototyping and are just looking to have your /// code typecheck, or if you're implementing a trait that requires multiple /// methods, and you're only planning on using one of them. /// +/// # Panics +/// +/// This will always [panic!](macro.panic.html). +/// +/// # Examples /// /// Here's an example of some in-progress code.
We have a trait `Foo`: diff --git a/src/libcore/marker.rs b/src/libcore/marker.rs index e8fd729b63..f56a9a4033 100644 --- a/src/libcore/marker.rs +++ b/src/libcore/marker.rs @@ -122,7 +122,7 @@ pub trait Sized { /// [RFC982]: https://github.com/rust-lang/rfcs/blob/master/text/0982-dst-coercion.md /// [nomicon-coerce]: ../../nomicon/coercions.html #[unstable(feature = "unsize", issue = "27732")] -#[lang="unsize"] +#[lang = "unsize"] pub trait Unsize { // Empty. } diff --git a/src/libcore/mem.rs b/src/libcore/mem.rs index bd08bd1a8f..e085d427b8 100644 --- a/src/libcore/mem.rs +++ b/src/libcore/mem.rs @@ -22,6 +22,7 @@ use hash; use intrinsics; use marker::{Copy, PhantomData, Sized}; use ptr; +use ops::{Deref, DerefMut}; #[stable(feature = "rust1", since = "1.0.0")] pub use intrinsics::transmute; @@ -176,38 +177,141 @@ pub fn forget(t: T) { /// Returns the size of a type in bytes. /// -/// More specifically, this is the offset in bytes between successive -/// items of the same type, including alignment padding. +/// More specifically, this is the offset in bytes between successive elements +/// in an array with that item type including alignment padding. Thus, for any +/// type `T` and length `n`, `[T; n]` has a size of `n * size_of::()`. +/// +/// In general, the size of a type is not stable across compilations, but +/// specific types such as primitives are. +/// +/// The following table gives the size for primitives. +/// +/// Type | size_of::\() +/// ---- | --------------- +/// () | 0 +/// u8 | 1 +/// u16 | 2 +/// u32 | 4 +/// u64 | 8 +/// i8 | 1 +/// i16 | 2 +/// i32 | 4 +/// i64 | 8 +/// f32 | 4 +/// f64 | 8 +/// char | 4 +/// +/// Furthermore, `usize` and `isize` have the same size. +/// +/// The types `*const T`, `&T`, `Box`, `Option<&T>`, and `Option>` all have +/// the same size. If `T` is Sized, all of those types have the same size as `usize`. +/// +/// The mutability of a pointer does not change its size. As such, `&T` and `&mut T` +/// have the same size. Likewise for `*const T` and `*mut T`. +/// +/// # Size of `#[repr(C)]` items +/// +/// The `C` representation for items has a defined layout. With this layout, +/// the size of items is also stable as long as all fields have a stable size. +/// +/// ## Size of Structs +/// +/// For `structs`, the size is determined by the following algorithm. +/// +/// For each field in the struct ordered by declaration order: +/// +/// 1. Add the size of the field. +/// 2. Round up the current size to the nearest multiple of the next field's [alignment]. +/// +/// Finally, round the size of the struct to the nearest multiple of its [alignment]. +/// +/// Unlike `C`, zero sized structs are not rounded up to one byte in size. +/// +/// ## Size of Enums +/// +/// Enums that carry no data other than the discriminant have the same size as C enums +/// on the platform they are compiled for. +/// +/// ## Size of Unions +/// +/// The size of a union is the size of its largest field. +/// +/// Unlike `C`, zero sized unions are not rounded up to one byte in size. /// /// # Examples /// /// ``` /// use std::mem; /// +/// // Some primitives /// assert_eq!(4, mem::size_of::()); -/// ``` -#[inline] -#[stable(feature = "rust1", since = "1.0.0")] -#[cfg(stage0)] -pub fn size_of() -> usize { - unsafe { intrinsics::size_of::() } -} - -/// Returns the size of a type in bytes.
+/// assert_eq!(8, mem::size_of::()); +/// assert_eq!(0, mem::size_of::<()>()); /// -/// More specifically, this is the offset in bytes between successive -/// items of the same type, including alignment padding. +/// // Some arrays +/// assert_eq!(8, mem::size_of::<[i32; 2]>()); +/// assert_eq!(12, mem::size_of::<[i32; 3]>()); +/// assert_eq!(0, mem::size_of::<[i32; 0]>()); /// -/// # Examples +/// +/// // Pointer size equality +/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<*const i32>()); +/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::>()); +/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::>()); +/// assert_eq!(mem::size_of::>(), mem::size_of::>>()); +/// ``` +/// +/// Using `#[repr(C)]`. /// /// ``` /// use std::mem; /// -/// assert_eq!(4, mem::size_of::()); +/// #[repr(C)] +/// struct FieldStruct { +/// first: u8, +/// second: u16, +/// third: u8 +/// } +/// +/// // The size of the first field is 1, so add 1 to the size. Size is 1. +/// // The alignment of the second field is 2, so add 1 to the size for padding. Size is 2. +/// // The size of the second field is 2, so add 2 to the size. Size is 4. +/// // The alignment of the third field is 1, so add 0 to the size for padding. Size is 4. +/// // The size of the third field is 1, so add 1 to the size. Size is 5. +/// // Finally, the alignment of the struct is 2, so add 1 to the size for padding. Size is 6. +/// assert_eq!(6, mem::size_of::()); +/// +/// #[repr(C)] +/// struct TupleStruct(u8, u16, u8); +/// +/// // Tuple structs follow the same rules. +/// assert_eq!(6, mem::size_of::()); +/// +/// // Note that reordering the fields can lower the size. We can remove both padding bytes +/// // by putting `third` before `second`. +/// #[repr(C)] +/// struct FieldStructOptimized { +/// first: u8, +/// third: u8, +/// second: u16 +/// } +/// +/// assert_eq!(4, mem::size_of::()); +/// +/// // Union size is the size of the largest field. +/// #[repr(C)] +/// union ExampleUnion { +/// smaller: u8, +/// larger: u16 +/// } +/// +/// assert_eq!(2, mem::size_of::()); /// ``` +/// +/// [alignment]: ./fn.align_of.html #[inline] #[stable(feature = "rust1", since = "1.0.0")] -#[cfg(not(stage0))] +#[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_size_of"))] pub const fn size_of() -> usize { unsafe { intrinsics::size_of::() } } @@ -299,29 +403,7 @@ pub fn min_align_of_val(val: &T) -> usize { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] -#[cfg(stage0)] -pub fn align_of() -> usize { - unsafe { intrinsics::min_align_of::() } -} - -/// Returns the [ABI]-required minimum alignment of a type. -/// -/// Every reference to a value of the type `T` must be a multiple of this number. -/// -/// This is the alignment used for struct fields. It may be smaller than the preferred alignment. 
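The `#[repr(C)]` sizing algorithm described above can be checked directly with `mem::size_of` and `mem::align_of`: a struct's alignment is the largest alignment among its fields, and its final size is rounded up to a multiple of that alignment. A small sketch, assuming the usual 4-byte alignment of `u32` on mainstream targets; the `Mixed` struct is illustrative only:

```rust
use std::mem;

#[repr(C)]
struct Mixed {
    a: u8,  // size 1, align 1
    b: u32, // size 4, align 4 -> 3 bytes of padding are inserted before `b`
    c: u16, // size 2, align 2
}

fn main() {
    // The struct's alignment is the maximum field alignment.
    assert_eq!(mem::align_of::<Mixed>(), 4);
    // 1 + 3 (padding) + 4 + 2 = 10, rounded up to a multiple of 4 -> 12.
    assert_eq!(mem::size_of::<Mixed>(), 12);
}
```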
-/// -/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface -/// -/// # Examples -/// -/// ``` -/// use std::mem; -/// -/// assert_eq!(4, mem::align_of::()); -/// ``` -#[inline] -#[stable(feature = "rust1", since = "1.0.0")] -#[cfg(not(stage0))] +#[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_align_of"))] pub const fn align_of() -> usize { unsafe { intrinsics::min_align_of::() } } @@ -863,6 +945,7 @@ pub fn discriminant(v: &T) -> Discriminant { /// ``` #[stable(feature = "manually_drop", since = "1.20.0")] #[allow(unions_with_drop_fields)] +#[derive(Copy)] pub union ManuallyDrop{ value: T } impl ManuallyDrop { @@ -912,7 +995,7 @@ impl ManuallyDrop { } #[stable(feature = "manually_drop", since = "1.20.0")] -impl ::ops::Deref for ManuallyDrop { +impl Deref for ManuallyDrop { type Target = T; #[inline] fn deref(&self) -> &Self::Target { @@ -923,7 +1006,7 @@ impl ::ops::Deref for ManuallyDrop { } #[stable(feature = "manually_drop", since = "1.20.0")] -impl ::ops::DerefMut for ManuallyDrop { +impl DerefMut for ManuallyDrop { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { unsafe { @@ -941,6 +1024,75 @@ impl ::fmt::Debug for ManuallyDrop { } } +#[stable(feature = "manually_drop", since = "1.20.0")] +impl Clone for ManuallyDrop { + fn clone(&self) -> Self { + ManuallyDrop::new(self.deref().clone()) + } + + fn clone_from(&mut self, source: &Self) { + self.deref_mut().clone_from(source); + } +} + +#[stable(feature = "manually_drop", since = "1.20.0")] +impl Default for ManuallyDrop { + fn default() -> Self { + ManuallyDrop::new(Default::default()) + } +} + +#[stable(feature = "manually_drop", since = "1.20.0")] +impl PartialEq for ManuallyDrop { + fn eq(&self, other: &Self) -> bool { + self.deref().eq(other) + } + + fn ne(&self, other: &Self) -> bool { + self.deref().ne(other) + } +} + +#[stable(feature = "manually_drop", since = "1.20.0")] +impl Eq for ManuallyDrop {} + +#[stable(feature = "manually_drop", since = "1.20.0")] +impl PartialOrd for ManuallyDrop { + fn partial_cmp(&self, other: &Self) -> Option<::cmp::Ordering> { + self.deref().partial_cmp(other) + } + + fn lt(&self, other: &Self) -> bool { + self.deref().lt(other) + } + + fn le(&self, other: &Self) -> bool { + self.deref().le(other) + } + + fn gt(&self, other: &Self) -> bool { + self.deref().gt(other) + } + + fn ge(&self, other: &Self) -> bool { + self.deref().ge(other) + } +} + +#[stable(feature = "manually_drop", since = "1.20.0")] +impl Ord for ManuallyDrop { + fn cmp(&self, other: &Self) -> ::cmp::Ordering { + self.deref().cmp(other) + } +} + +#[stable(feature = "manually_drop", since = "1.20.0")] +impl ::hash::Hash for ManuallyDrop { + fn hash(&self, state: &mut H) { + self.deref().hash(state); + } +} + /// Tells LLVM that this point in the code is not reachable, enabling further /// optimizations. /// diff --git a/src/libcore/nonzero.rs b/src/libcore/nonzero.rs index 3ff1068b93..f075d825f5 100644 --- a/src/libcore/nonzero.rs +++ b/src/libcore/nonzero.rs @@ -68,6 +68,10 @@ pub struct NonZero(T); impl NonZero { /// Creates an instance of NonZero with the provided value. /// You must indeed ensure that the value is actually "non-zero". 
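The `ManuallyDrop` trait impls added above all delegate to the wrapped value, so equality, ordering and hashing behave exactly as they do on `T` itself. A short sketch of typical usage; the explicit `ManuallyDrop::drop` call at the end is what opts back in to running the destructors:

```rust
use std::mem::ManuallyDrop;

fn main() {
    let mut a = ManuallyDrop::new(String::from("hello"));
    let mut b = ManuallyDrop::new(String::from("hello"));

    // PartialEq/PartialOrd forward to the inner `String` values.
    assert_eq!(a, b);
    assert!(a <= b);
    // Deref lets a `ManuallyDrop<String>` be used like a `String`.
    assert_eq!(a.len(), 5);

    // Nothing is dropped automatically; release the strings explicitly.
    // Each value must be dropped at most once, hence the `unsafe` block.
    unsafe {
        ManuallyDrop::drop(&mut a);
        ManuallyDrop::drop(&mut b);
    }
}
```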
+ #[unstable(feature = "nonzero", + reason = "needs an RFC to flesh out the design", + issue = "27730")] + #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_nonzero_new"))] #[inline] pub const unsafe fn new_unchecked(inner: T) -> Self { NonZero(inner) diff --git a/src/libcore/num/dec2flt/mod.rs b/src/libcore/num/dec2flt/mod.rs index f353770a73..f93564c284 100644 --- a/src/libcore/num/dec2flt/mod.rs +++ b/src/libcore/num/dec2flt/mod.rs @@ -121,7 +121,6 @@ macro_rules! from_str_float_impl { /// * '-3.14' /// * '2.5E10', or equivalently, '2.5e10' /// * '2.5E-10' - /// * '.' (understood as 0) /// * '5.' /// * '.5', or, equivalently, '0.5' /// * 'inf', '-inf', 'NaN' diff --git a/src/libcore/num/f32.rs b/src/libcore/num/f32.rs index 5068375368..43d38926c9 100644 --- a/src/libcore/num/f32.rs +++ b/src/libcore/num/f32.rs @@ -10,8 +10,6 @@ //! Operations and constants for 32-bits floats (`f32` type) -#![cfg_attr(stage0, allow(overflowing_literals))] - #![stable(feature = "rust1", since = "1.0.0")] use intrinsics; diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs index c5175287cc..85be8a0872 100644 --- a/src/libcore/num/mod.rs +++ b/src/libcore/num/mod.rs @@ -12,7 +12,7 @@ #![stable(feature = "rust1", since = "1.0.0")] -use convert::TryFrom; +use convert::{Infallible, TryFrom}; use fmt; use intrinsics; use str::FromStr; @@ -109,6 +109,7 @@ macro_rules! int_impl { /// assert_eq!(i8::min_value(), -128); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_min_value"))] #[inline] pub const fn min_value() -> Self { !0 ^ ((!0 as $UnsignedT) >> 1) as Self @@ -122,6 +123,7 @@ macro_rules! int_impl { /// assert_eq!(i8::max_value(), 127); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_max_value"))] #[inline] pub const fn max_value() -> Self { !Self::min_value() @@ -1280,6 +1282,7 @@ macro_rules! uint_impl { /// assert_eq!(u8::min_value(), 0); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_min_value"))] #[inline] pub const fn min_value() -> Self { 0 } @@ -1291,6 +1294,7 @@ macro_rules! uint_impl { /// assert_eq!(u8::max_value(), 255); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_max_value"))] #[inline] pub const fn max_value() -> Self { !0 } @@ -2503,16 +2507,24 @@ impl fmt::Display for TryFromIntError { } } +#[unstable(feature = "try_from", issue = "33417")] +impl From for TryFromIntError { + fn from(infallible: Infallible) -> TryFromIntError { + match infallible { + } + } +} + // no possible bounds violation macro_rules! try_from_unbounded { ($source:ty, $($target:ty),*) => {$( #[unstable(feature = "try_from", issue = "33417")] impl TryFrom<$source> for $target { - type Error = TryFromIntError; + type Error = Infallible; #[inline] - fn try_from(u: $source) -> Result<$target, TryFromIntError> { - Ok(u as $target) + fn try_from(value: $source) -> Result { + Ok(value as $target) } } )*} @@ -2584,31 +2596,17 @@ macro_rules! 
rev { } /// intra-sign conversions -try_from_unbounded!(u8, u8, u16, u32, u64, u128); -try_from_unbounded!(u16, u16, u32, u64, u128); -try_from_unbounded!(u32, u32, u64, u128); -try_from_unbounded!(u64, u64, u128); -try_from_unbounded!(u128, u128); try_from_upper_bounded!(u16, u8); try_from_upper_bounded!(u32, u16, u8); try_from_upper_bounded!(u64, u32, u16, u8); try_from_upper_bounded!(u128, u64, u32, u16, u8); -try_from_unbounded!(i8, i8, i16, i32, i64, i128); -try_from_unbounded!(i16, i16, i32, i64, i128); -try_from_unbounded!(i32, i32, i64, i128); -try_from_unbounded!(i64, i64, i128); -try_from_unbounded!(i128, i128); try_from_both_bounded!(i16, i8); try_from_both_bounded!(i32, i16, i8); try_from_both_bounded!(i64, i32, i16, i8); try_from_both_bounded!(i128, i64, i32, i16, i8); // unsigned-to-signed -try_from_unbounded!(u8, i16, i32, i64, i128); -try_from_unbounded!(u16, i32, i64, i128); -try_from_unbounded!(u32, i64, i128); -try_from_unbounded!(u64, i128); try_from_upper_bounded!(u8, i8); try_from_upper_bounded!(u16, i8, i16); try_from_upper_bounded!(u32, i8, i16, i32); @@ -2627,15 +2625,13 @@ try_from_both_bounded!(i64, u32, u16, u8); try_from_both_bounded!(i128, u64, u32, u16, u8); // usize/isize -try_from_unbounded!(usize, usize); try_from_upper_bounded!(usize, isize); try_from_lower_bounded!(isize, usize); -try_from_unbounded!(isize, isize); #[cfg(target_pointer_width = "16")] mod ptr_try_from_impls { use super::TryFromIntError; - use convert::TryFrom; + use convert::{Infallible, TryFrom}; try_from_upper_bounded!(usize, u8); try_from_unbounded!(usize, u16, u32, u64, u128); @@ -2647,21 +2643,21 @@ mod ptr_try_from_impls { try_from_both_bounded!(isize, i8); try_from_unbounded!(isize, i16, i32, i64, i128); - rev!(try_from_unbounded, usize, u8, u16); + rev!(try_from_unbounded, usize, u16); rev!(try_from_upper_bounded, usize, u32, u64, u128); rev!(try_from_lower_bounded, usize, i8, i16); rev!(try_from_both_bounded, usize, i32, i64, i128); rev!(try_from_unbounded, isize, u8); rev!(try_from_upper_bounded, isize, u16, u32, u64, u128); - rev!(try_from_unbounded, isize, i8, i16); + rev!(try_from_unbounded, isize, i16); rev!(try_from_both_bounded, isize, i32, i64, i128); } #[cfg(target_pointer_width = "32")] mod ptr_try_from_impls { use super::TryFromIntError; - use convert::TryFrom; + use convert::{Infallible, TryFrom}; try_from_upper_bounded!(usize, u8, u16); try_from_unbounded!(usize, u32, u64, u128); @@ -2673,21 +2669,21 @@ mod ptr_try_from_impls { try_from_both_bounded!(isize, i8, i16); try_from_unbounded!(isize, i32, i64, i128); - rev!(try_from_unbounded, usize, u8, u16, u32); + rev!(try_from_unbounded, usize, u16, u32); rev!(try_from_upper_bounded, usize, u64, u128); rev!(try_from_lower_bounded, usize, i8, i16, i32); rev!(try_from_both_bounded, usize, i64, i128); rev!(try_from_unbounded, isize, u8, u16); rev!(try_from_upper_bounded, isize, u32, u64, u128); - rev!(try_from_unbounded, isize, i8, i16, i32); + rev!(try_from_unbounded, isize, i16, i32); rev!(try_from_both_bounded, isize, i64, i128); } #[cfg(target_pointer_width = "64")] mod ptr_try_from_impls { use super::TryFromIntError; - use convert::TryFrom; + use convert::{Infallible, TryFrom}; try_from_upper_bounded!(usize, u8, u16, u32); try_from_unbounded!(usize, u64, u128); @@ -2699,14 +2695,14 @@ mod ptr_try_from_impls { try_from_both_bounded!(isize, i8, i16, i32); try_from_unbounded!(isize, i64, i128); - rev!(try_from_unbounded, usize, u8, u16, u32, u64); + rev!(try_from_unbounded, usize, u16, u32, u64); 
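The reshuffled impls above give the integer `TryFrom` conversions two error types: narrowing conversions keep `TryFromIntError`, while conversions that can never fail use `Infallible` and so have no real error path. A hedged sketch of both cases, written for a toolchain where `TryFrom` is usable (it sat behind `#![feature(try_from)]` when this patch landed and was stabilized later):

```rust
use std::convert::TryFrom;

fn main() {
    // Narrowing conversions can fail and report a `TryFromIntError`.
    assert!(u8::try_from(300u16).is_err());
    assert_eq!(u8::try_from(200u16), Ok(200u8));

    // Widening conversions always succeed, so unwrapping never panics;
    // for the impls above the error type in such cases is `Infallible`.
    let x = u32::try_from(200u8).unwrap();
    assert_eq!(x, 200);
}
```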
rev!(try_from_upper_bounded, usize, u128); rev!(try_from_lower_bounded, usize, i8, i16, i32, i64); rev!(try_from_both_bounded, usize, i128); rev!(try_from_unbounded, isize, u8, u16, u32); rev!(try_from_upper_bounded, isize, u64, u128); - rev!(try_from_unbounded, isize, i8, i16, i32, i64); + rev!(try_from_unbounded, isize, i16, i32, i64); rev!(try_from_both_bounded, isize, i128); } diff --git a/src/libcore/num/wrapping.rs b/src/libcore/num/wrapping.rs index acdf685e85..ae1b0b3ce1 100644 --- a/src/libcore/num/wrapping.rs +++ b/src/libcore/num/wrapping.rs @@ -36,6 +36,7 @@ macro_rules! sh_impl_signed { *self = *self << other; } } + forward_ref_op_assign! { impl ShlAssign, shl_assign for Wrapping<$t>, $f } #[stable(feature = "rust1", since = "1.0.0")] impl Shr<$f> for Wrapping<$t> { @@ -58,6 +59,7 @@ macro_rules! sh_impl_signed { *self = *self >> other; } } + forward_ref_op_assign! { impl ShrAssign, shr_assign for Wrapping<$t>, $f } ) } @@ -80,6 +82,7 @@ macro_rules! sh_impl_unsigned { *self = *self << other; } } + forward_ref_op_assign! { impl ShlAssign, shl_assign for Wrapping<$t>, $f } #[stable(feature = "rust1", since = "1.0.0")] impl Shr<$f> for Wrapping<$t> { @@ -98,6 +101,7 @@ macro_rules! sh_impl_unsigned { *self = *self >> other; } } + forward_ref_op_assign! { impl ShrAssign, shr_assign for Wrapping<$t>, $f } ) } @@ -142,6 +146,7 @@ macro_rules! wrapping_impl { *self = *self + other; } } + forward_ref_op_assign! { impl AddAssign, add_assign for Wrapping<$t>, Wrapping<$t> } #[stable(feature = "rust1", since = "1.0.0")] impl Sub for Wrapping<$t> { @@ -162,6 +167,7 @@ macro_rules! wrapping_impl { *self = *self - other; } } + forward_ref_op_assign! { impl SubAssign, sub_assign for Wrapping<$t>, Wrapping<$t> } #[stable(feature = "rust1", since = "1.0.0")] impl Mul for Wrapping<$t> { @@ -182,6 +188,7 @@ macro_rules! wrapping_impl { *self = *self * other; } } + forward_ref_op_assign! { impl MulAssign, mul_assign for Wrapping<$t>, Wrapping<$t> } #[stable(feature = "wrapping_div", since = "1.3.0")] impl Div for Wrapping<$t> { @@ -202,6 +209,7 @@ macro_rules! wrapping_impl { *self = *self / other; } } + forward_ref_op_assign! { impl DivAssign, div_assign for Wrapping<$t>, Wrapping<$t> } #[stable(feature = "wrapping_impls", since = "1.7.0")] impl Rem for Wrapping<$t> { @@ -222,6 +230,7 @@ macro_rules! wrapping_impl { *self = *self % other; } } + forward_ref_op_assign! { impl RemAssign, rem_assign for Wrapping<$t>, Wrapping<$t> } #[stable(feature = "rust1", since = "1.0.0")] impl Not for Wrapping<$t> { @@ -254,6 +263,7 @@ macro_rules! wrapping_impl { *self = *self ^ other; } } + forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for Wrapping<$t>, Wrapping<$t> } #[stable(feature = "rust1", since = "1.0.0")] impl BitOr for Wrapping<$t> { @@ -274,6 +284,7 @@ macro_rules! wrapping_impl { *self = *self | other; } } + forward_ref_op_assign! { impl BitOrAssign, bitor_assign for Wrapping<$t>, Wrapping<$t> } #[stable(feature = "rust1", since = "1.0.0")] impl BitAnd for Wrapping<$t> { @@ -294,6 +305,7 @@ macro_rules! wrapping_impl { *self = *self & other; } } + forward_ref_op_assign! { impl BitAndAssign, bitand_assign for Wrapping<$t>, Wrapping<$t> } #[stable(feature = "wrapping_neg", since = "1.10.0")] impl Neg for Wrapping<$t> { diff --git a/src/libcore/ops/arith.rs b/src/libcore/ops/arith.rs index 62007caedd..8b3d662a6d 100644 --- a/src/libcore/ops/arith.rs +++ b/src/libcore/ops/arith.rs @@ -662,6 +662,8 @@ macro_rules! 
add_assign_impl { #[rustc_inherit_overflow_checks] fn add_assign(&mut self, other: $t) { *self += other } } + + forward_ref_op_assign! { impl AddAssign, add_assign for $t, $t } )+) } @@ -713,6 +715,8 @@ macro_rules! sub_assign_impl { #[rustc_inherit_overflow_checks] fn sub_assign(&mut self, other: $t) { *self -= other } } + + forward_ref_op_assign! { impl SubAssign, sub_assign for $t, $t } )+) } @@ -755,6 +759,8 @@ macro_rules! mul_assign_impl { #[rustc_inherit_overflow_checks] fn mul_assign(&mut self, other: $t) { *self *= other } } + + forward_ref_op_assign! { impl MulAssign, mul_assign for $t, $t } )+) } @@ -796,6 +802,8 @@ macro_rules! div_assign_impl { #[inline] fn div_assign(&mut self, other: $t) { *self /= other } } + + forward_ref_op_assign! { impl DivAssign, div_assign for $t, $t } )+) } @@ -841,6 +849,8 @@ macro_rules! rem_assign_impl { #[inline] fn rem_assign(&mut self, other: $t) { *self %= other } } + + forward_ref_op_assign! { impl RemAssign, rem_assign for $t, $t } )+) } diff --git a/src/libcore/ops/bit.rs b/src/libcore/ops/bit.rs index 0bc5e554cb..7ac5fc4deb 100644 --- a/src/libcore/ops/bit.rs +++ b/src/libcore/ops/bit.rs @@ -593,6 +593,8 @@ macro_rules! bitand_assign_impl { #[inline] fn bitand_assign(&mut self, other: $t) { *self &= other } } + + forward_ref_op_assign! { impl BitAndAssign, bitand_assign for $t, $t } )+) } @@ -638,6 +640,8 @@ macro_rules! bitor_assign_impl { #[inline] fn bitor_assign(&mut self, other: $t) { *self |= other } } + + forward_ref_op_assign! { impl BitOrAssign, bitor_assign for $t, $t } )+) } @@ -683,6 +687,8 @@ macro_rules! bitxor_assign_impl { #[inline] fn bitxor_assign(&mut self, other: $t) { *self ^= other } } + + forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for $t, $t } )+) } @@ -729,6 +735,8 @@ macro_rules! shl_assign_impl { *self <<= other } } + + forward_ref_op_assign! { impl ShlAssign, shl_assign for $t, $f } ) } @@ -793,6 +801,8 @@ macro_rules! shr_assign_impl { *self >>= other } } + + forward_ref_op_assign! { impl ShrAssign, shr_assign for $t, $f } ) } diff --git a/src/libcore/ops/deref.rs b/src/libcore/ops/deref.rs index ea8dd82087..4cb6e8405f 100644 --- a/src/libcore/ops/deref.rs +++ b/src/libcore/ops/deref.rs @@ -40,7 +40,7 @@ /// [book]: ../../book/second-edition/ch15-02-deref.html /// [`DerefMut`]: trait.DerefMut.html /// [more]: #more-on-deref-coercion -/// [ref-deref-op]: ../../reference/expressions.html#the-dereference-operator +/// [ref-deref-op]: ../../reference/expressions/operator-expr.html#the-dereference-operator /// [ref-deref-trait]: ../../reference/the-deref-trait.html /// [type coercions]: ../../reference/type-coercions.html /// @@ -127,7 +127,7 @@ impl<'a, T: ?Sized> Deref for &'a mut T { /// [book]: ../../book/second-edition/ch15-02-deref.html /// [`Deref`]: trait.Deref.html /// [more]: #more-on-deref-coercion -/// [ref-deref-op]: ../../reference/expressions.html#the-dereference-operator +/// [ref-deref-op]: ../../reference/expressions/operator-expr.html#the-dereference-operator /// [ref-deref-trait]: ../../reference/the-deref-trait.html /// [type coercions]: ../../reference/type-coercions.html /// diff --git a/src/libcore/ops/generator.rs b/src/libcore/ops/generator.rs new file mode 100644 index 0000000000..798c182bc6 --- /dev/null +++ b/src/libcore/ops/generator.rs @@ -0,0 +1,131 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/// The result of a generator resumption. +/// +/// This enum is returned from the `Generator::resume` method and indicates the +/// possible return values of a generator. Currently this corresponds to either +/// a suspension point (`Yielded`) or a termination point (`Complete`). +#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)] +#[cfg_attr(not(stage0), lang = "generator_state")] +#[unstable(feature = "generator_trait", issue = "43122")] +pub enum GeneratorState { + /// The generator suspended with a value. + /// + /// This state indicates that a generator has been suspended, and typically + /// corresponds to a `yield` statement. The value provided in this variant + /// corresponds to the expression passed to `yield` and allows generators to + /// provide a value each time they yield. + Yielded(Y), + + /// The generator completed with a return value. + /// + /// This state indicates that a generator has finished execution with the + /// provided value. Once a generator has returned `Complete` it is + /// considered a programmer error to call `resume` again. + Complete(R), +} + +/// The trait implemented by builtin generator types. +/// +/// Generators, also commonly referred to as coroutines, are currently an +/// experimental language feature in Rust. Added in [RFC 2033] generators are +/// currently intended to primarily provide a building block for async/await +/// syntax but will likely extend to also providing an ergonomic definition for +/// iterators and other primitives. +/// +/// The syntax and semantics for generators is unstable and will require a +/// further RFC for stabilization. At this time, though, the syntax is +/// closure-like: +/// +/// ```rust +/// #![feature(generators, generator_trait)] +/// +/// use std::ops::{Generator, GeneratorState}; +/// +/// fn main() { +/// let mut generator = || { +/// yield 1; +/// return "foo" +/// }; +/// +/// match generator.resume() { +/// GeneratorState::Yielded(1) => {} +/// _ => panic!("unexpected return from resume"), +/// } +/// match generator.resume() { +/// GeneratorState::Complete("foo") => {} +/// _ => panic!("unexpected return from resume"), +/// } +/// } +/// ``` +/// +/// More documentation of generators can be found in the unstable book. +/// +/// [RFC 2033]: https://github.com/rust-lang/rfcs/pull/2033 +#[cfg_attr(not(stage0), lang = "generator")] +#[unstable(feature = "generator_trait", issue = "43122")] +#[fundamental] +pub trait Generator { + /// The type of value this generator yields. + /// + /// This associated type corresponds to the `yield` expression and the + /// values which are allowed to be returned each time a generator yields. + /// For example an iterator-as-a-generator would likely have this type as + /// `T`, the type being iterated over. + type Yield; + + /// The type of value this generator returns. + /// + /// This corresponds to the type returned from a generator either with a + /// `return` statement or implicitly as the last expression of a generator + /// literal. For example futures would use this as `Result` as it + /// represents a completed future. + type Return; + + /// Resumes the execution of this generator. + /// + /// This function will resume execution of the generator or start execution + /// if it hasn't already. 
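Because `Generator` is an ordinary trait, the `resume`/`GeneratorState` contract described here can also be satisfied by a hand-written state machine instead of a generator literal. A sketch written against the unstable trait exactly as defined in this patch (later releases reshaped this API, so this only compiles on a nightly of roughly this vintage); the `Counter` type is illustrative only:

```rust
#![feature(generator_trait)]

use std::ops::{Generator, GeneratorState};

// Yields 0, 1, 2 and then completes with a message.
struct Counter {
    n: u32,
}

impl Generator for Counter {
    type Yield = u32;
    type Return = &'static str;

    fn resume(&mut self) -> GeneratorState<u32, &'static str> {
        if self.n < 3 {
            let current = self.n;
            self.n += 1;
            GeneratorState::Yielded(current)
        } else {
            GeneratorState::Complete("done")
        }
    }
}

fn main() {
    let mut counter = Counter { n: 0 };
    loop {
        match counter.resume() {
            GeneratorState::Yielded(v) => println!("yielded {}", v),
            GeneratorState::Complete(msg) => {
                println!("{}", msg);
                break;
            }
        }
    }
}
```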
This call will return back into the generator's + /// last suspension point, resuming execution from the latest `yield`. The + /// generator will continue executing until it either yields or returns, at + /// which point this function will return. + /// + /// # Return value + /// + /// The `GeneratorState` enum returned from this function indicates what + /// state the generator is in upon returning. If the `Yielded` variant is + /// returned then the generator has reached a suspension point and a value + /// has been yielded out. Generators in this state are available for + /// resumption at a later point. + /// + /// If `Complete` is returned then the generator has completely finished + /// with the value provided. It is invalid for the generator to be resumed + /// again. + /// + /// # Panics + /// + /// This function may panic if it is called after the `Complete` variant has + /// been returned previously. While generator literals in the language are + /// guaranteed to panic on resuming after `Complete`, this is not guaranteed + /// for all implementations of the `Generator` trait. + fn resume(&mut self) -> GeneratorState; +} + +#[unstable(feature = "generator_trait", issue = "43122")] +impl<'a, T> Generator for &'a mut T + where T: Generator + ?Sized +{ + type Yield = T::Yield; + type Return = T::Return; + fn resume(&mut self) -> GeneratorState { + (**self).resume() + } +} diff --git a/src/libcore/ops/mod.rs b/src/libcore/ops/mod.rs index b5e6912b10..e0efdbcd4c 100644 --- a/src/libcore/ops/mod.rs +++ b/src/libcore/ops/mod.rs @@ -150,7 +150,7 @@ //! [`Sub`]: trait.Sub.html //! [`Mul`]: trait.Mul.html //! [`clone`]: ../clone/trait.Clone.html#tymethod.clone -//! [operator precedence]: ../../reference/expressions.html#operator-precedence +//! [operator precedence]: ../../reference/expressions/operator-expr.html#operator-precedence #![stable(feature = "rust1", since = "1.0.0")] @@ -159,6 +159,7 @@ mod bit; mod deref; mod drop; mod function; +mod generator; mod index; mod place; mod range; @@ -196,6 +197,9 @@ pub use self::range::{RangeInclusive, RangeToInclusive}; #[unstable(feature = "try_trait", issue = "42327")] pub use self::try::Try; +#[unstable(feature = "generator_trait", issue = "43122")] +pub use self::generator::{Generator, GeneratorState}; + #[unstable(feature = "placement_new_protocol", issue = "27779")] pub use self::place::{Place, Placer, InPlace, Boxed, BoxPlace}; diff --git a/src/libcore/ops/range.rs b/src/libcore/ops/range.rs index 463a50491a..3f573f7c7e 100644 --- a/src/libcore/ops/range.rs +++ b/src/libcore/ops/range.rs @@ -241,9 +241,9 @@ impl> RangeTo { } } -/// An range bounded inclusively below and above (`start...end`). +/// A range bounded inclusively below and above (`start..=end`). /// -/// The `RangeInclusive` `start...end` contains all values with `x >= start` +/// The `RangeInclusive` `start..=end` contains all values with `x >= start` /// and `x <= end`.
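The doc updates above track the change of inclusive-range syntax from `start...end` to `start..=end`. A short sketch of the stabilized form of that syntax (on the toolchain this patch targets it is still gated, so the `inclusive_range_syntax` feature attribute shown in the docs would be needed there):

```rust
fn main() {
    // `1..=5` includes both endpoints.
    let sum: i32 = (1..=5).sum();
    assert_eq!(sum, 15);

    // Inclusive ranges also work as slice indices and match patterns.
    let arr = [0, 1, 2, 3];
    assert_eq!(&arr[1..=2], &[1, 2]);

    match 4 {
        1..=5 => println!("between one and five, inclusive"),
        _ => println!("something else"),
    }
}
```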
/// /// # Examples @@ -251,12 +251,12 @@ impl> RangeTo { /// ``` /// #![feature(inclusive_range,inclusive_range_syntax)] /// -/// assert_eq!((3...5), std::ops::RangeInclusive { start: 3, end: 5 }); -/// assert_eq!(3 + 4 + 5, (3...5).sum()); +/// assert_eq!((3..=5), std::ops::RangeInclusive { start: 3, end: 5 }); +/// assert_eq!(3 + 4 + 5, (3..=5).sum()); /// /// let arr = [0, 1, 2, 3]; -/// assert_eq!(arr[ ...2], [0,1,2 ]); -/// assert_eq!(arr[1...2], [ 1,2 ]); // RangeInclusive +/// assert_eq!(arr[ ..=2], [0,1,2 ]); +/// assert_eq!(arr[1..=2], [ 1,2 ]); // RangeInclusive /// ``` #[derive(Clone, PartialEq, Eq, Hash)] // not Copy -- see #27186 #[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] @@ -276,7 +276,7 @@ pub struct RangeInclusive { #[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] impl fmt::Debug for RangeInclusive { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "{:?}...{:?}", self.start, self.end) + write!(fmt, "{:?}..={:?}", self.start, self.end) } } @@ -289,32 +289,32 @@ impl> RangeInclusive { /// ``` /// #![feature(range_contains,inclusive_range_syntax)] /// - /// assert!(!(3...5).contains(2)); - /// assert!( (3...5).contains(3)); - /// assert!( (3...5).contains(4)); - /// assert!( (3...5).contains(5)); - /// assert!(!(3...5).contains(6)); + /// assert!(!(3..=5).contains(2)); + /// assert!( (3..=5).contains(3)); + /// assert!( (3..=5).contains(4)); + /// assert!( (3..=5).contains(5)); + /// assert!(!(3..=5).contains(6)); /// - /// assert!( (3...3).contains(3)); - /// assert!(!(3...2).contains(3)); + /// assert!( (3..=3).contains(3)); + /// assert!(!(3..=2).contains(3)); /// ``` pub fn contains(&self, item: Idx) -> bool { self.start <= item && item <= self.end } } -/// A range only bounded inclusively above (`...end`). +/// A range only bounded inclusively above (`..=end`). /// -/// The `RangeToInclusive` `...end` contains all values with `x <= end`. +/// The `RangeToInclusive` `..=end` contains all values with `x <= end`. /// It cannot serve as an [`Iterator`] because it doesn't have a starting point. /// /// # Examples /// -/// The `...end` syntax is a `RangeToInclusive`: +/// The `..=end` syntax is a `RangeToInclusive`: /// /// ``` /// #![feature(inclusive_range,inclusive_range_syntax)] -/// assert_eq!((...5), std::ops::RangeToInclusive{ end: 5 }); +/// assert_eq!((..=5), std::ops::RangeToInclusive{ end: 5 }); /// ``` /// /// It does not have an [`IntoIterator`] implementation, so you can't use it in a @@ -325,7 +325,7 @@ impl> RangeInclusive { /// /// // error[E0277]: the trait bound `std::ops::RangeToInclusive<{integer}>: /// // std::iter::Iterator` is not satisfied -/// for i in ...5 { +/// for i in ..=5 { /// // ... 
/// } /// ``` @@ -337,8 +337,8 @@ impl> RangeInclusive { /// #![feature(inclusive_range_syntax)] /// /// let arr = [0, 1, 2, 3]; -/// assert_eq!(arr[ ...2], [0,1,2 ]); // RangeToInclusive -/// assert_eq!(arr[1...2], [ 1,2 ]); +/// assert_eq!(arr[ ..=2], [0,1,2 ]); // RangeToInclusive +/// assert_eq!(arr[1..=2], [ 1,2 ]); /// ``` /// /// [`IntoIterator`]: ../iter/trait.Iterator.html @@ -357,7 +357,7 @@ pub struct RangeToInclusive { #[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] impl fmt::Debug for RangeToInclusive { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "...{:?}", self.end) + write!(fmt, "..={:?}", self.end) } } @@ -370,9 +370,9 @@ impl> RangeToInclusive { /// ``` /// #![feature(range_contains,inclusive_range_syntax)] /// - /// assert!( (...5).contains(-1_000_000_000)); - /// assert!( (...5).contains(5)); - /// assert!(!(...5).contains(6)); + /// assert!( (..=5).contains(-1_000_000_000)); + /// assert!( (..=5).contains(5)); + /// assert!(!(..=5).contains(6)); /// ``` pub fn contains(&self, item: Idx) -> bool { (item <= self.end) diff --git a/src/libcore/ops/try.rs b/src/libcore/ops/try.rs index 78326c3e63..e788b66a1e 100644 --- a/src/libcore/ops/try.rs +++ b/src/libcore/ops/try.rs @@ -15,8 +15,24 @@ /// extracting those success or failure values from an existing instance and /// creating a new instance from a success or failure value. #[unstable(feature = "try_trait", issue = "42327")] -#[rustc_on_unimplemented = "the `?` operator can only be used in a function that returns `Result` \ - (or another type that implements `{Try}`)"] +#[cfg_attr(stage0, + rustc_on_unimplemented = "the `?` operator can only be used in a \ + function that returns `Result` \ + (or another type that implements `{Try}`)")] +#[cfg_attr(not(stage0), + rustc_on_unimplemented( + on(all( + any(from_method="from_error", from_method="from_ok"), + from_desugaring="?"), + message="the `?` operator can only be used in a \ + function that returns `Result` \ + (or another type that implements `{Try}`)", + label="cannot use the `?` operator in a function that returns `{Self}`"), + on(all(from_method="into_result", from_desugaring="?"), + message="the `?` operator can only be applied to values \ + that implement `{Try}`", + label="the `?` operator cannot be applied to type `{Self}`") +))] pub trait Try { /// The type of this value when viewed as successful. #[unstable(feature = "try_trait", issue = "42327")] diff --git a/src/libcore/ops/unsize.rs b/src/libcore/ops/unsize.rs index 58da290cfb..cd896859b1 100644 --- a/src/libcore/ops/unsize.rs +++ b/src/libcore/ops/unsize.rs @@ -42,7 +42,7 @@ use marker::Unsize; /// [unsize]: ../marker/trait.Unsize.html /// [nomicon-coerce]: ../../nomicon/coercions.html #[unstable(feature = "coerce_unsized", issue = "27732")] -#[lang="coerce_unsized"] +#[lang = "coerce_unsized"] pub trait CoerceUnsized { // Empty. } diff --git a/src/libcore/option.rs b/src/libcore/option.rs index aecf2ee932..980ea551f0 100644 --- a/src/libcore/option.rs +++ b/src/libcore/option.rs @@ -146,7 +146,7 @@ #![stable(feature = "rust1", since = "1.0.0")] use iter::{FromIterator, FusedIterator, TrustedLen}; -use mem; +use {mem, ops}; // Note that this is not a lang item per se, but it has a hidden dependency on // `Iterator`, which is one. 
The compiler assumes that the `next` method of @@ -774,6 +774,26 @@ impl<'a, T: Clone> Option<&'a T> { } } +impl<'a, T: Clone> Option<&'a mut T> { + /// Maps an `Option<&mut T>` to an `Option` by cloning the contents of the + /// option. + /// + /// # Examples + /// + /// ``` + /// #![feature(option_ref_mut_cloned)] + /// let mut x = 12; + /// let opt_x = Some(&mut x); + /// assert_eq!(opt_x, Some(&mut 12)); + /// let cloned = opt_x.cloned(); + /// assert_eq!(cloned, Some(12)); + /// ``` + #[unstable(feature = "option_ref_mut_cloned", issue = "43738")] + pub fn cloned(self) -> Option { + self.map(|t| t.clone()) + } +} + impl Option { /// Returns the contained value or a default /// @@ -1103,3 +1123,29 @@ impl> FromIterator> for Option { } } } + +/// The error type that results from applying the try operator (`?`) to a `None` value. If you wish +/// to allow `x?` (where `x` is an `Option`) to be converted into your error type, you can +/// implement `impl From` for `YourErrorType`. In that case, `x?` within a function that +/// returns `Result<_, YourErrorType>` will translate a `None` value into an `Err` result. +#[unstable(feature = "try_trait", issue = "42327")] +#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)] +pub struct NoneError; + +#[unstable(feature = "try_trait", issue = "42327")] +impl ops::Try for Option { + type Ok = T; + type Error = NoneError; + + fn into_result(self) -> Result { + self.ok_or(NoneError) + } + + fn from_ok(v: T) -> Self { + Some(v) + } + + fn from_error(_: NoneError) -> Self { + None + } +} diff --git a/src/libcore/ptr.rs b/src/libcore/ptr.rs index 63e9373e93..3d6abbb7e4 100644 --- a/src/libcore/ptr.rs +++ b/src/libcore/ptr.rs @@ -27,8 +27,6 @@ use nonzero::NonZero; use cmp::Ordering::{self, Less, Equal, Greater}; -// FIXME #19649: intrinsic docs don't render, so these have no docs :( - #[stable(feature = "rust1", since = "1.0.0")] pub use intrinsics::copy_nonoverlapping; @@ -51,12 +49,12 @@ pub use intrinsics::write_bytes; /// as the compiler doesn't need to prove that it's sound to elide the /// copy. /// -/// # Undefined Behavior +/// # Safety /// /// This has all the same safety problems as `ptr::read` with respect to /// invalid pointers, types, and double drops. #[stable(feature = "drop_in_place", since = "1.8.0")] -#[lang="drop_in_place"] +#[lang = "drop_in_place"] #[allow(unconditional_recursion)] pub unsafe fn drop_in_place(to_drop: *mut T) { // Code here does not matter - this is replaced by the @@ -76,6 +74,7 @@ pub unsafe fn drop_in_place(to_drop: *mut T) { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] +#[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_ptr_null"))] pub const fn null() -> *const T { 0 as *const T } /// Creates a null mutable raw pointer. @@ -90,6 +89,7 @@ pub const fn null() -> *const T { 0 as *const T } /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] +#[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_ptr_null_mut"))] pub const fn null_mut() -> *mut T { 0 as *mut T } /// Swaps the values at two mutable locations of the same type, without @@ -525,15 +525,41 @@ impl *const T { } } - /// Calculates the offset from a pointer. `count` is in units of T; e.g. a - /// `count` of 3 represents a pointer offset of `3 * size_of::()` bytes. + /// Calculates the offset from a pointer. + /// + /// `count` is in units of T; e.g. a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. 
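The `Try` implementation for `Option` added above is what lets the `?` operator short-circuit on `None` inside a function that itself returns an `Option`. A minimal sketch, written for a toolchain where this is available (behind `#![feature(try_trait)]` when this patch landed, stable in later releases); the helper function is illustrative only:

```rust
// Returns the first character of the first line, or `None` if either
// the line or the character is missing.
fn first_char_of_first_line(text: &str) -> Option<char> {
    let first_line = text.lines().next()?; // `None` short-circuits here
    first_line.chars().next()
}

fn main() {
    assert_eq!(first_char_of_first_line("hello\nworld"), Some('h'));
    assert_eq!(first_char_of_first_line(""), None);
}
```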
/// /// # Safety /// - /// Both the starting and resulting pointer must be either in bounds or one - /// byte past the end of an allocated object. If either pointer is out of - /// bounds or arithmetic overflow occurs then - /// any further use of the returned value will result in undefined behavior. + /// If any of the following conditions are violated, the result is Undefined + /// Behavior: + /// + /// * Both the starting and resulting pointer must be either in bounds or one + /// byte past the end of an allocated object. + /// + /// * The computed offset, **in bytes**, cannot overflow or underflow an + /// `isize`. + /// + /// * The offset being in bounds cannot rely on "wrapping around" the address + /// space. That is, the infinite-precision sum, **in bytes** must fit in a usize. + /// + /// The compiler and standard library generally tries to ensure allocations + /// never reach a size where an offset is a concern. For instance, `Vec` + /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so + /// `vec.as_ptr().offset(vec.len() as isize)` is always safe. + /// + /// Most platforms fundamentally can't even construct such an allocation. + /// For instance, no known 64-bit platform can ever serve a request + /// for 2^63 bytes due to page-table limitations or splitting the address space. + /// However, some 32-bit and 16-bit platforms may successfully serve a request for + /// more than `isize::MAX` bytes with things like Physical Address + /// Extension. As such, memory acquired directly from allocators or memory + /// mapped files *may* be too large to handle with this function. + /// + /// Consider using `wrapping_offset` instead if these constraints are + /// difficult to satisfy. The only advantage of this method is that it + /// enables more aggressive compiler optimizations. /// /// # Examples /// @@ -555,6 +581,7 @@ impl *const T { } /// Calculates the offset from a pointer using wrapping arithmetic. + /// /// `count` is in units of T; e.g. a `count` of 3 represents a pointer /// offset of `3 * size_of::()` bytes. /// @@ -630,6 +657,448 @@ impl *const T { Some(diff / size as isize) } } + + /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`). + /// + /// `count` is in units of T; e.g. a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is Undefined + /// Behavior: + /// + /// * Both the starting and resulting pointer must be either in bounds or one + /// byte past the end of an allocated object. + /// + /// * The computed offset, **in bytes**, cannot overflow or underflow an + /// `isize`. + /// + /// * The offset being in bounds cannot rely on "wrapping around" the address + /// space. That is, the infinite-precision sum must fit in a `usize`. + /// + /// The compiler and standard library generally tries to ensure allocations + /// never reach a size where an offset is a concern. For instance, `Vec` + /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so + /// `vec.as_ptr().add(vec.len())` is always safe. + /// + /// Most platforms fundamentally can't even construct such an allocation. + /// For instance, no known 64-bit platform can ever serve a request + /// for 2^63 bytes due to page-table limitations or splitting the address space. 
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for + /// more than `isize::MAX` bytes with things like Physical Address + /// Extension. As such, memory acquired directly from allocators or memory + /// mapped files *may* be too large to handle with this function. + /// + /// Consider using `wrapping_offset` instead if these constraints are + /// difficult to satisfy. The only advantage of this method is that it + /// enables more aggressive compiler optimizations. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// let s: &str = "123"; + /// let ptr: *const u8 = s.as_ptr(); + /// + /// unsafe { + /// println!("{}", *ptr.add(1) as char); + /// println!("{}", *ptr.add(2) as char); + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn add(self, count: usize) -> Self + where T: Sized, + { + self.offset(count as isize) + } + + /// Calculates the offset from a pointer (convenience for + /// `.offset((count as isize).wrapping_neg())`). + /// + /// `count` is in units of T; e.g. a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is Undefined + /// Behavior: + /// + /// * Both the starting and resulting pointer must be either in bounds or one + /// byte past the end of an allocated object. + /// + /// * The computed offset cannot exceed `isize::MAX` **bytes**. + /// + /// * The offset being in bounds cannot rely on "wrapping around" the address + /// space. That is, the infinite-precision sum must fit in a usize. + /// + /// The compiler and standard library generally tries to ensure allocations + /// never reach a size where an offset is a concern. For instance, `Vec` + /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so + /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe. + /// + /// Most platforms fundamentally can't even construct such an allocation. + /// For instance, no known 64-bit platform can ever serve a request + /// for 2^63 bytes due to page-table limitations or splitting the address space. + /// However, some 32-bit and 16-bit platforms may successfully serve a request for + /// more than `isize::MAX` bytes with things like Physical Address + /// Extension. As such, memory acquired directly from allocators or memory + /// mapped files *may* be too large to handle with this function. + /// + /// Consider using `wrapping_offset` instead if these constraints are + /// difficult to satisfy. The only advantage of this method is that it + /// enables more aggressive compiler optimizations. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// let s: &str = "123"; + /// + /// unsafe { + /// let end: *const u8 = s.as_ptr().add(3); + /// println!("{}", *end.sub(1) as char); + /// println!("{}", *end.sub(2) as char); + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn sub(self, count: usize) -> Self + where T: Sized, + { + self.offset((count as isize).wrapping_neg()) + } + + /// Calculates the offset from a pointer using wrapping arithmetic. + /// (convenience for `.wrapping_offset(count as isize)`) + /// + /// `count` is in units of T; e.g. a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. 
+ /// + /// # Safety + /// + /// The resulting pointer does not need to be in bounds, but it is + /// potentially hazardous to dereference (which requires `unsafe`). + /// + /// Always use `.add(count)` instead when possible, because `add` + /// allows the compiler to optimize better. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// // Iterate using a raw pointer in increments of two elements + /// let data = [1u8, 2, 3, 4, 5]; + /// let mut ptr: *const u8 = data.as_ptr(); + /// let step = 2; + /// let end_rounded_up = ptr.wrapping_add(6); + /// + /// // This loop prints "1, 3, 5, " + /// while ptr != end_rounded_up { + /// unsafe { + /// print!("{}, ", *ptr); + /// } + /// ptr = ptr.wrapping_add(step); + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub fn wrapping_add(self, count: usize) -> Self + where T: Sized, + { + self.wrapping_offset(count as isize) + } + + /// Calculates the offset from a pointer using wrapping arithmetic. + /// (convenience for `.wrapping_offset((count as isize).wrapping_sub())`) + /// + /// `count` is in units of T; e.g. a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Safety + /// + /// The resulting pointer does not need to be in bounds, but it is + /// potentially hazardous to dereference (which requires `unsafe`). + /// + /// Always use `.sub(count)` instead when possible, because `sub` + /// allows the compiler to optimize better. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// // Iterate using a raw pointer in increments of two elements (backwards) + /// let data = [1u8, 2, 3, 4, 5]; + /// let mut ptr: *const u8 = data.as_ptr(); + /// let start_rounded_down = ptr.wrapping_sub(2); + /// ptr = ptr.wrapping_add(4); + /// let step = 2; + /// // This loop prints "5, 3, 1, " + /// while ptr != start_rounded_down { + /// unsafe { + /// print!("{}, ", *ptr); + /// } + /// ptr = ptr.wrapping_sub(step); + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub fn wrapping_sub(self, count: usize) -> Self + where T: Sized, + { + self.wrapping_offset((count as isize).wrapping_neg()) + } + + /// Reads the value from `self` without moving it. This leaves the + /// memory in `self` unchanged. + /// + /// # Safety + /// + /// Beyond accepting a raw pointer, this is unsafe because it semantically + /// moves the value out of `self` without preventing further usage of `self`. + /// If `T` is not `Copy`, then care must be taken to ensure that the value at + /// `self` is not used before the data is overwritten again (e.g. with `write`, + /// `zero_memory`, or `copy_memory`). Note that `*self = foo` counts as a use + /// because it will attempt to drop the value previously at `*self`. + /// + /// The pointer must be aligned; use `read_unaligned` if that is not the case. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// let x = 12; + /// let y = &x as *const i32; + /// + /// unsafe { + /// assert_eq!(y.read(), 12); + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn read(self) -> T + where T: Sized, + { + read(self) + } + + /// Performs a volatile read of the value from `self` without moving it. This + /// leaves the memory in `self` unchanged. 
+ /// + /// Volatile operations are intended to act on I/O memory, and are guaranteed + /// to not be elided or reordered by the compiler across other volatile + /// operations. + /// + /// # Notes + /// + /// Rust does not currently have a rigorously and formally defined memory model, + /// so the precise semantics of what "volatile" means here is subject to change + /// over time. That being said, the semantics will almost always end up pretty + /// similar to [C11's definition of volatile][c11]. + /// + /// The compiler shouldn't change the relative order or number of volatile + /// memory operations. However, volatile memory operations on zero-sized types + /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops + /// and may be ignored. + /// + /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf + /// + /// # Safety + /// + /// Beyond accepting a raw pointer, this is unsafe because it semantically + /// moves the value out of `self` without preventing further usage of `self`. + /// If `T` is not `Copy`, then care must be taken to ensure that the value at + /// `self` is not used before the data is overwritten again (e.g. with `write`, + /// `zero_memory`, or `copy_memory`). Note that `*self = foo` counts as a use + /// because it will attempt to drop the value previously at `*self`. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// let x = 12; + /// let y = &x as *const i32; + /// + /// unsafe { + /// assert_eq!(y.read_volatile(), 12); + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn read_volatile(self) -> T + where T: Sized, + { + read_volatile(self) + } + + /// Reads the value from `self` without moving it. This leaves the + /// memory in `self` unchanged. + /// + /// Unlike `read`, the pointer may be unaligned. + /// + /// # Safety + /// + /// Beyond accepting a raw pointer, this is unsafe because it semantically + /// moves the value out of `self` without preventing further usage of `self`. + /// If `T` is not `Copy`, then care must be taken to ensure that the value at + /// `self` is not used before the data is overwritten again (e.g. with `write`, + /// `zero_memory`, or `copy_memory`). Note that `*self = foo` counts as a use + /// because it will attempt to drop the value previously at `*self`. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// let x = 12; + /// let y = &x as *const i32; + /// + /// unsafe { + /// assert_eq!(y.read_unaligned(), 12); + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn read_unaligned(self) -> T + where T: Sized, + { + read_unaligned(self) + } + + /// Copies `count * size_of` bytes from `self` to `dest`. The source + /// and destination may overlap. + /// + /// NOTE: this has the *same* argument order as `ptr::copy`. + /// + /// This is semantically equivalent to C's `memmove`. + /// + /// # Safety + /// + /// Care must be taken with the ownership of `self` and `dest`. + /// This method semantically moves the values of `self` into `dest`. + /// However it does not drop the contents of `self`, or prevent the contents + /// of `dest` from being dropped or used. 
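Of the pointer methods documented here, `read_unaligned` is the one whose purpose is easiest to miss from the examples, which all read well-aligned data: it exists for pointers that do not satisfy `T`'s alignment, such as fields of a `#[repr(packed)]` struct. A hedged sketch; `ptr::addr_of!` comes from a later stable release, and on the toolchain this patch targets the field pointer would instead be formed by casting and offsetting manually:

```rust
use std::ptr;

#[repr(C, packed)]
struct Packed {
    tag: u8,
    value: u32, // stored at offset 1, i.e. misaligned for `u32`
}

fn main() {
    let p = Packed { tag: 7, value: 0x1234_5678 };
    assert_eq!(p.tag, 7);

    // A normal reference to `p.value` would be unsound because the field
    // is not aligned; form a raw pointer and read without the alignment
    // requirement instead.
    let value = unsafe {
        let field: *const u32 = ptr::addr_of!(p.value);
        field.read_unaligned()
    };
    assert_eq!(value, 0x1234_5678);
}
```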
+ /// + /// # Examples + /// + /// Efficiently create a Rust vector from an unsafe buffer: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// # #[allow(dead_code)] + /// unsafe fn from_buf_raw(ptr: *const T, elts: usize) -> Vec { + /// let mut dst = Vec::with_capacity(elts); + /// dst.set_len(elts); + /// ptr.copy_to(dst.as_mut_ptr(), elts); + /// dst + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn copy_to(self, dest: *mut T, count: usize) + where T: Sized, + { + copy(self, dest, count) + } + + /// Copies `count * size_of` bytes from `self` to `dest`. The source + /// and destination may *not* overlap. + /// + /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`. + /// + /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`. + /// + /// # Safety + /// + /// Beyond requiring that the program must be allowed to access both regions + /// of memory, it is Undefined Behavior for source and destination to + /// overlap. Care must also be taken with the ownership of `self` and + /// `dest`. This method semantically moves the values of `self` into `dest`. + /// However it does not drop the contents of `dest`, or prevent the contents + /// of `self` from being dropped or used. + /// + /// # Examples + /// + /// Efficiently create a Rust vector from an unsafe buffer: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// # #[allow(dead_code)] + /// unsafe fn from_buf_raw(ptr: *const T, elts: usize) -> Vec { + /// let mut dst = Vec::with_capacity(elts); + /// dst.set_len(elts); + /// ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts); + /// dst + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize) + where T: Sized, + { + copy_nonoverlapping(self, dest, count) + } + + /// Computes the byte offset that needs to be applied in order to + /// make the pointer aligned to `align`. + /// If it is not possible to align the pointer, the implementation returns + /// `usize::max_value()`. + /// + /// There are no guarantees whatsoever that offsetting the pointer will not + /// overflow or go beyond the allocation that the pointer points into. + /// It is up to the caller to ensure that the returned offset is correct + /// in all terms other than alignment. + /// + /// # Examples + /// + /// Accessing adjacent `u8` as `u16` + /// + /// ``` + /// # #![feature(align_offset)] + /// # fn foo(n: usize) { + /// # use std::mem::align_of; + /// # unsafe { + /// let x = [5u8, 6u8, 7u8, 8u8, 9u8]; + /// let ptr = &x[n] as *const u8; + /// let offset = ptr.align_offset(align_of::()); + /// if offset < x.len() - n - 1 { + /// let u16_ptr = ptr.offset(offset as isize) as *const u16; + /// assert_ne!(*u16_ptr, 500); + /// } else { + /// // while the pointer can be aligned via `offset`, it would point + /// // outside the allocation + /// } + /// # } } + /// ``` + #[unstable(feature = "align_offset", issue = "44488")] + pub fn align_offset(self, align: usize) -> usize { + unsafe { + intrinsics::align_offset(self as *const _, align) + } + } } #[lang = "mut_ptr"] @@ -687,14 +1156,41 @@ impl *mut T { } } - /// Calculates the offset from a pointer. `count` is in units of T; e.g. a - /// `count` of 3 represents a pointer offset of `3 * size_of::()` bytes. + /// Calculates the offset from a pointer. + /// + /// `count` is in units of T; e.g. a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. /// /// # Safety /// - /// The offset must be in-bounds of the object, or one-byte-past-the-end. - /// Otherwise `offset` invokes Undefined Behavior, regardless of whether - /// the pointer is used. + /// If any of the following conditions are violated, the result is Undefined + /// Behavior: + /// + /// * Both the starting and resulting pointer must be either in bounds or one + /// byte past the end of an allocated object. + /// + /// * The computed offset, **in bytes**, cannot overflow or underflow an + /// `isize`. + /// + /// * The offset being in bounds cannot rely on "wrapping around" the address + /// space. That is, the infinite-precision sum, **in bytes** must fit in a usize. + /// + /// The compiler and standard library generally tries to ensure allocations + /// never reach a size where an offset is a concern. For instance, `Vec` + /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so + /// `vec.as_ptr().offset(vec.len() as isize)` is always safe. + /// + /// Most platforms fundamentally can't even construct such an allocation. + /// For instance, no known 64-bit platform can ever serve a request + /// for 2^63 bytes due to page-table limitations or splitting the address space. + /// However, some 32-bit and 16-bit platforms may successfully serve a request for + /// more than `isize::MAX` bytes with things like Physical Address + /// Extension. As such, memory acquired directly from allocators or memory + /// mapped files *may* be too large to handle with this function. + /// + /// Consider using `wrapping_offset` instead if these constraints are + /// difficult to satisfy. The only advantage of this method is that it + /// enables more aggressive compiler optimizations. /// /// # Examples /// @@ -821,14 +1317,753 @@ impl *mut T { Some(diff / size as isize) } } -} -// Equality for pointers -#[stable(feature = "rust1", since = "1.0.0")] -impl PartialEq for *const T { - #[inline] - fn eq(&self, other: &*const T) -> bool { *self == *other } -} + /// Computes the byte offset that needs to be applied in order to + /// make the pointer aligned to `align`. + /// If it is not possible to align the pointer, the implementation returns + /// `usize::max_value()`. + /// + /// There are no guarantees whatsoever that offsetting the pointer will not + /// overflow or go beyond the allocation that the pointer points into. + /// It is up to the caller to ensure that the returned offset is correct + /// in all terms other than alignment. + /// + /// # Examples + /// + /// Accessing adjacent `u8` as `u16` + /// + /// ``` + /// # #![feature(align_offset)] + /// # fn foo(n: usize) { + /// # use std::mem::align_of; + /// # unsafe { + /// let x = [5u8, 6u8, 7u8, 8u8, 9u8]; + /// let ptr = &x[n] as *const u8; + /// let offset = ptr.align_offset(align_of::()); + /// if offset < x.len() - n - 1 { + /// let u16_ptr = ptr.offset(offset as isize) as *const u16; + /// assert_ne!(*u16_ptr, 500); + /// } else { + /// // while the pointer can be aligned via `offset`, it would point + /// // outside the allocation + /// } + /// # } } + /// ``` + #[unstable(feature = "align_offset", issue = "44488")] + pub fn align_offset(self, align: usize) -> usize { + unsafe { + intrinsics::align_offset(self as *const _, align) + } + } + + /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`). + /// + /// `count` is in units of T; e.g.
a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is Undefined + /// Behavior: + /// + /// * Both the starting and resulting pointer must be either in bounds or one + /// byte past the end of an allocated object. + /// + /// * The computed offset, **in bytes**, cannot overflow or underflow an + /// `isize`. + /// + /// * The offset being in bounds cannot rely on "wrapping around" the address + /// space. That is, the infinite-precision sum must fit in a `usize`. + /// + /// The compiler and standard library generally tries to ensure allocations + /// never reach a size where an offset is a concern. For instance, `Vec` + /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so + /// `vec.as_ptr().add(vec.len())` is always safe. + /// + /// Most platforms fundamentally can't even construct such an allocation. + /// For instance, no known 64-bit platform can ever serve a request + /// for 2^63 bytes due to page-table limitations or splitting the address space. + /// However, some 32-bit and 16-bit platforms may successfully serve a request for + /// more than `isize::MAX` bytes with things like Physical Address + /// Extension. As such, memory acquired directly from allocators or memory + /// mapped files *may* be too large to handle with this function. + /// + /// Consider using `wrapping_offset` instead if these constraints are + /// difficult to satisfy. The only advantage of this method is that it + /// enables more aggressive compiler optimizations. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// let s: &str = "123"; + /// let ptr: *const u8 = s.as_ptr(); + /// + /// unsafe { + /// println!("{}", *ptr.add(1) as char); + /// println!("{}", *ptr.add(2) as char); + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn add(self, count: usize) -> Self + where T: Sized, + { + self.offset(count as isize) + } + + /// Calculates the offset from a pointer (convenience for + /// `.offset((count as isize).wrapping_neg())`). + /// + /// `count` is in units of T; e.g. a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is Undefined + /// Behavior: + /// + /// * Both the starting and resulting pointer must be either in bounds or one + /// byte past the end of an allocated object. + /// + /// * The computed offset cannot exceed `isize::MAX` **bytes**. + /// + /// * The offset being in bounds cannot rely on "wrapping around" the address + /// space. That is, the infinite-precision sum must fit in a usize. + /// + /// The compiler and standard library generally tries to ensure allocations + /// never reach a size where an offset is a concern. For instance, `Vec` + /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so + /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe. + /// + /// Most platforms fundamentally can't even construct such an allocation. + /// For instance, no known 64-bit platform can ever serve a request + /// for 2^63 bytes due to page-table limitations or splitting the address space. + /// However, some 32-bit and 16-bit platforms may successfully serve a request for + /// more than `isize::MAX` bytes with things like Physical Address + /// Extension. 
As such, memory acquired directly from allocators or memory + /// mapped files *may* be too large to handle with this function. + /// + /// Consider using `wrapping_offset` instead if these constraints are + /// difficult to satisfy. The only advantage of this method is that it + /// enables more aggressive compiler optimizations. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// let s: &str = "123"; + /// + /// unsafe { + /// let end: *const u8 = s.as_ptr().add(3); + /// println!("{}", *end.sub(1) as char); + /// println!("{}", *end.sub(2) as char); + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn sub(self, count: usize) -> Self + where T: Sized, + { + self.offset((count as isize).wrapping_neg()) + } + + /// Calculates the offset from a pointer using wrapping arithmetic. + /// (convenience for `.wrapping_offset(count as isize)`) + /// + /// `count` is in units of T; e.g. a `count` of 3 represents a pointer + /// offset of `3 * size_of::<T>()` bytes. + /// + /// # Safety + /// + /// The resulting pointer does not need to be in bounds, but it is + /// potentially hazardous to dereference (which requires `unsafe`). + /// + /// Always use `.add(count)` instead when possible, because `add` + /// allows the compiler to optimize better. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// // Iterate using a raw pointer in increments of two elements + /// let data = [1u8, 2, 3, 4, 5]; + /// let mut ptr: *const u8 = data.as_ptr(); + /// let step = 2; + /// let end_rounded_up = ptr.wrapping_add(6); + /// + /// // This loop prints "1, 3, 5, " + /// while ptr != end_rounded_up { + /// unsafe { + /// print!("{}, ", *ptr); + /// } + /// ptr = ptr.wrapping_add(step); + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub fn wrapping_add(self, count: usize) -> Self + where T: Sized, + { + self.wrapping_offset(count as isize) + } + + /// Calculates the offset from a pointer using wrapping arithmetic. + /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`) + /// + /// `count` is in units of T; e.g. a `count` of 3 represents a pointer + /// offset of `3 * size_of::<T>()` bytes. + /// + /// # Safety + /// + /// The resulting pointer does not need to be in bounds, but it is + /// potentially hazardous to dereference (which requires `unsafe`). + /// + /// Always use `.sub(count)` instead when possible, because `sub` + /// allows the compiler to optimize better. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// // Iterate using a raw pointer in increments of two elements (backwards) + /// let data = [1u8, 2, 3, 4, 5]; + /// let mut ptr: *const u8 = data.as_ptr(); + /// let start_rounded_down = ptr.wrapping_sub(2); + /// ptr = ptr.wrapping_add(4); + /// let step = 2; + /// // This loop prints "5, 3, 1, " + /// while ptr != start_rounded_down { + /// unsafe { + /// print!("{}, ", *ptr); + /// } + /// ptr = ptr.wrapping_sub(step); + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub fn wrapping_sub(self, count: usize) -> Self + where T: Sized, + { + self.wrapping_offset((count as isize).wrapping_neg()) + } + + /// Reads the value from `self` without moving it. This leaves the + /// memory in `self` unchanged.
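A supplementary sketch (not from the patch) contrasting the wrapping methods documented above with their in-bounds counterparts, under the same `pointer_methods` gate: wrapping arithmetic may leave the allocation as long as the result is never dereferenced, while `add`/`sub` require in-bounds offsets in exchange for better optimization.

```
#![feature(pointer_methods)]

fn main() {
    let data = [10u8, 20, 30];
    let base = data.as_ptr();

    // Wrapping arithmetic may produce an out-of-bounds pointer without
    // undefined behaviour, provided it is never dereferenced.
    let far = base.wrapping_add(1000);
    assert_eq!(far.wrapping_sub(1000), base);

    // `add`/`sub` must stay within the allocation (hence `unsafe`).
    unsafe {
        assert_eq!(*base.add(2), 30);
        assert_eq!(*base.add(2).sub(1), 20);
    }
}
```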
+ /// + /// # Safety + /// + /// Beyond accepting a raw pointer, this is unsafe because it semantically + /// moves the value out of `self` without preventing further usage of `self`. + /// If `T` is not `Copy`, then care must be taken to ensure that the value at + /// `self` is not used before the data is overwritten again (e.g. with `write`, + /// `zero_memory`, or `copy_memory`). Note that `*self = foo` counts as a use + /// because it will attempt to drop the value previously at `*self`. + /// + /// The pointer must be aligned; use `read_unaligned` if that is not the case. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// let x = 12; + /// let y = &x as *const i32; + /// + /// unsafe { + /// assert_eq!(y.read(), 12); + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn read(self) -> T + where T: Sized, + { + read(self) + } + + /// Performs a volatile read of the value from `self` without moving it. This + /// leaves the memory in `self` unchanged. + /// + /// Volatile operations are intended to act on I/O memory, and are guaranteed + /// to not be elided or reordered by the compiler across other volatile + /// operations. + /// + /// # Notes + /// + /// Rust does not currently have a rigorously and formally defined memory model, + /// so the precise semantics of what "volatile" means here is subject to change + /// over time. That being said, the semantics will almost always end up pretty + /// similar to [C11's definition of volatile][c11]. + /// + /// The compiler shouldn't change the relative order or number of volatile + /// memory operations. However, volatile memory operations on zero-sized types + /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops + /// and may be ignored. + /// + /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf + /// + /// # Safety + /// + /// Beyond accepting a raw pointer, this is unsafe because it semantically + /// moves the value out of `self` without preventing further usage of `self`. + /// If `T` is not `Copy`, then care must be taken to ensure that the value at + /// `src` is not used before the data is overwritten again (e.g. with `write`, + /// `zero_memory`, or `copy_memory`). Note that `*self = foo` counts as a use + /// because it will attempt to drop the value previously at `*self`. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// let x = 12; + /// let y = &x as *const i32; + /// + /// unsafe { + /// assert_eq!(y.read_volatile(), 12); + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn read_volatile(self) -> T + where T: Sized, + { + read_volatile(self) + } + + /// Reads the value from `self` without moving it. This leaves the + /// memory in `self` unchanged. + /// + /// Unlike `read`, the pointer may be unaligned. + /// + /// # Safety + /// + /// Beyond accepting a raw pointer, this is unsafe because it semantically + /// moves the value out of `self` without preventing further usage of `self`. + /// If `T` is not `Copy`, then care must be taken to ensure that the value at + /// `self` is not used before the data is overwritten again (e.g. with `write`, + /// `zero_memory`, or `copy_memory`). Note that `*self = foo` counts as a use + /// because it will attempt to drop the value previously at `*self`. 
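A supplementary sketch (not from the patch) of why `read_unaligned` exists, under the same gate: reading a `u32` that starts at an odd byte offset, where a plain `read` would require 4-byte alignment.

```
#![feature(pointer_methods)]

fn main() {
    // A little-endian u32 stored at byte offset 1, so the load is unaligned.
    let bytes = [0u8, 0x78, 0x56, 0x34, 0x12, 0];
    unsafe {
        let p = bytes.as_ptr().add(1) as *const u32;
        assert_eq!(u32::from_le(p.read_unaligned()), 0x1234_5678);
    }
}
```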
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// let x = 12; + /// let y = &x as *const i32; + /// + /// unsafe { + /// assert_eq!(y.read_unaligned(), 12); + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn read_unaligned(self) -> T + where T: Sized, + { + read_unaligned(self) + } + + /// Copies `count * size_of` bytes from `self` to `dest`. The source + /// and destination may overlap. + /// + /// NOTE: this has the *same* argument order as `ptr::copy`. + /// + /// This is semantically equivalent to C's `memmove`. + /// + /// # Safety + /// + /// Care must be taken with the ownership of `self` and `dest`. + /// This method semantically moves the values of `self` into `dest`. + /// However it does not drop the contents of `self`, or prevent the contents + /// of `dest` from being dropped or used. + /// + /// # Examples + /// + /// Efficiently create a Rust vector from an unsafe buffer: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// # #[allow(dead_code)] + /// unsafe fn from_buf_raw(ptr: *const T, elts: usize) -> Vec { + /// let mut dst = Vec::with_capacity(elts); + /// dst.set_len(elts); + /// ptr.copy_to(dst.as_mut_ptr(), elts); + /// dst + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn copy_to(self, dest: *mut T, count: usize) + where T: Sized, + { + copy(self, dest, count) + } + + /// Copies `count * size_of` bytes from `self` to `dest`. The source + /// and destination may *not* overlap. + /// + /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`. + /// + /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`. + /// + /// # Safety + /// + /// Beyond requiring that the program must be allowed to access both regions + /// of memory, it is Undefined Behavior for source and destination to + /// overlap. Care must also be taken with the ownership of `self` and + /// `self`. This method semantically moves the values of `self` into `dest`. + /// However it does not drop the contents of `dest`, or prevent the contents + /// of `self` from being dropped or used. + /// + /// # Examples + /// + /// Efficiently create a Rust vector from an unsafe buffer: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// # #[allow(dead_code)] + /// unsafe fn from_buf_raw(ptr: *const T, elts: usize) -> Vec { + /// let mut dst = Vec::with_capacity(elts); + /// dst.set_len(elts); + /// ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts); + /// dst + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize) + where T: Sized, + { + copy_nonoverlapping(self, dest, count) + } + + /// Copies `count * size_of` bytes from `src` to `self`. The source + /// and destination may overlap. + /// + /// NOTE: this has the *opposite* argument order of `ptr::copy`. + /// + /// This is semantically equivalent to C's `memmove`. + /// + /// # Safety + /// + /// Care must be taken with the ownership of `src` and `self`. + /// This method semantically moves the values of `src` into `self`. + /// However it does not drop the contents of `self`, or prevent the contents + /// of `src` from being dropped or used. 
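A supplementary sketch (not from the patch) of the argument-order note above, under the same gate: in the method form the destination is `self` and the source is the argument, the reverse of the free function `ptr::copy`.

```
#![feature(pointer_methods)]

fn main() {
    let src = [1u8, 2, 3, 4];
    let mut dst = [0u8; 4];

    unsafe {
        // dst_ptr.copy_from(src_ptr, count) reads `count` elements from `src`.
        dst.as_mut_ptr().copy_from(src.as_ptr(), src.len());
    }

    assert_eq!(dst, src);
}
```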
+ /// + /// # Examples + /// + /// Efficiently create a Rust vector from an unsafe buffer: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// # #[allow(dead_code)] + /// unsafe fn from_buf_raw(ptr: *const T, elts: usize) -> Vec { + /// let mut dst = Vec::with_capacity(elts); + /// dst.set_len(elts); + /// dst.as_mut_ptr().copy_from(ptr, elts); + /// dst + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn copy_from(self, src: *const T, count: usize) + where T: Sized, + { + copy(src, self, count) + } + + /// Copies `count * size_of` bytes from `src` to `self`. The source + /// and destination may *not* overlap. + /// + /// NOTE: this has the *opposite* argument order of `ptr::copy_nonoverlapping`. + /// + /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`. + /// + /// # Safety + /// + /// Beyond requiring that the program must be allowed to access both regions + /// of memory, it is Undefined Behavior for source and destination to + /// overlap. Care must also be taken with the ownership of `src` and + /// `self`. This method semantically moves the values of `src` into `self`. + /// However it does not drop the contents of `self`, or prevent the contents + /// of `src` from being dropped or used. + /// + /// # Examples + /// + /// Efficiently create a Rust vector from an unsafe buffer: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// # #[allow(dead_code)] + /// unsafe fn from_buf_raw(ptr: *const T, elts: usize) -> Vec { + /// let mut dst = Vec::with_capacity(elts); + /// dst.set_len(elts); + /// dst.as_mut_ptr().copy_from_nonoverlapping(ptr, elts); + /// dst + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize) + where T: Sized, + { + copy_nonoverlapping(src, self, count) + } + + /// Executes the destructor (if any) of the pointed-to value. + /// + /// This has two use cases: + /// + /// * It is *required* to use `drop_in_place` to drop unsized types like + /// trait objects, because they can't be read out onto the stack and + /// dropped normally. + /// + /// * It is friendlier to the optimizer to do this over `ptr::read` when + /// dropping manually allocated memory (e.g. when writing Box/Rc/Vec), + /// as the compiler doesn't need to prove that it's sound to elide the + /// copy. + /// + /// # Safety + /// + /// This has all the same safety problems as `ptr::read` with respect to + /// invalid pointers, types, and double drops. + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn drop_in_place(self) { + drop_in_place(self) + } + + /// Overwrites a memory location with the given value without reading or + /// dropping the old value. + /// + /// # Safety + /// + /// This operation is marked unsafe because it writes through a raw pointer. + /// + /// It does not drop the contents of `self`. This is safe, but it could leak + /// allocations or resources, so care must be taken not to overwrite an object + /// that should be dropped. + /// + /// Additionally, it does not drop `val`. Semantically, `val` is moved into the + /// location pointed to by `self`. + /// + /// This is appropriate for initializing uninitialized memory, or overwriting + /// memory that has previously been `read` from. + /// + /// The pointer must be aligned; use `write_unaligned` if that is not the case. 
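A supplementary sketch (not from the patch) of the initialize-then-`set_len` pattern that `write` is meant for, under the same gate: the reserved slots are never read or dropped, only written through raw pointers before the length is raised.

```
#![feature(pointer_methods)]

fn main() {
    let mut v: Vec<String> = Vec::with_capacity(2);
    unsafe {
        let p = v.as_mut_ptr();
        // `write` stores a value without reading or dropping the old bytes,
        // which is exactly what uninitialized capacity needs.
        p.write(String::from("hello"));
        p.add(1).write(String::from("world"));
        v.set_len(2);
    }
    assert_eq!(v, ["hello", "world"]);
}
```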
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// let mut x = 0; + /// let y = &mut x as *mut i32; + /// let z = 12; + /// + /// unsafe { + /// y.write(z); + /// assert_eq!(y.read(), 12); + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn write(self, val: T) + where T: Sized, + { + write(self, val) + } + + /// Invokes memset on the specified pointer, setting `count * size_of::()` + /// bytes of memory starting at `self` to `val`. + /// + /// # Examples + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// let mut vec = vec![0; 4]; + /// unsafe { + /// let vec_ptr = vec.as_mut_ptr(); + /// vec_ptr.write_bytes(b'a', 2); + /// } + /// assert_eq!(vec, [b'a', b'a', 0, 0]); + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn write_bytes(self, val: u8, count: usize) + where T: Sized, + { + write_bytes(self, val, count) + } + + /// Performs a volatile write of a memory location with the given value without + /// reading or dropping the old value. + /// + /// Volatile operations are intended to act on I/O memory, and are guaranteed + /// to not be elided or reordered by the compiler across other volatile + /// operations. + /// + /// # Notes + /// + /// Rust does not currently have a rigorously and formally defined memory model, + /// so the precise semantics of what "volatile" means here is subject to change + /// over time. That being said, the semantics will almost always end up pretty + /// similar to [C11's definition of volatile][c11]. + /// + /// The compiler shouldn't change the relative order or number of volatile + /// memory operations. However, volatile memory operations on zero-sized types + /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops + /// and may be ignored. + /// + /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf + /// + /// # Safety + /// + /// This operation is marked unsafe because it accepts a raw pointer. + /// + /// It does not drop the contents of `self`. This is safe, but it could leak + /// allocations or resources, so care must be taken not to overwrite an object + /// that should be dropped. + /// + /// This is appropriate for initializing uninitialized memory, or overwriting + /// memory that has previously been `read` from. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// let mut x = 0; + /// let y = &mut x as *mut i32; + /// let z = 12; + /// + /// unsafe { + /// y.write_volatile(z); + /// assert_eq!(y.read_volatile(), 12); + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn write_volatile(self, val: T) + where T: Sized, + { + write_volatile(self, val) + } + + /// Overwrites a memory location with the given value without reading or + /// dropping the old value. + /// + /// Unlike `write`, the pointer may be unaligned. + /// + /// # Safety + /// + /// This operation is marked unsafe because it writes through a raw pointer. + /// + /// It does not drop the contents of `self`. This is safe, but it could leak + /// allocations or resources, so care must be taken not to overwrite an object + /// that should be dropped. + /// + /// Additionally, it does not drop `src`. Semantically, `src` is moved into the + /// location pointed to by `dst`. 
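A supplementary sketch (not from the patch) mirroring the unaligned read earlier, under the same gate: `write_unaligned` stores through an address that is not suitably aligned for the value's type.

```
#![feature(pointer_methods)]

fn main() {
    let mut bytes = [0u8; 6];
    unsafe {
        // The destination starts at byte offset 1, so it is not 4-byte aligned.
        let p = bytes.as_mut_ptr().add(1) as *mut u32;
        p.write_unaligned(0x1234_5678u32.to_le());
    }
    assert_eq!(&bytes[1..5], &[0x78u8, 0x56, 0x34, 0x12][..]);
}
```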
+ /// + /// This is appropriate for initializing uninitialized memory, or overwriting + /// memory that has previously been `read` from. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(pointer_methods)] + /// + /// let mut x = 0; + /// let y = &mut x as *mut i32; + /// let z = 12; + /// + /// unsafe { + /// y.write_unaligned(z); + /// assert_eq!(y.read_unaligned(), 12); + /// } + /// ``` + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn write_unaligned(self, val: T) + where T: Sized, + { + write_unaligned(self, val) + } + + /// Replaces the value at `self` with `src`, returning the old + /// value, without dropping either. + /// + /// # Safety + /// + /// This is only unsafe because it accepts a raw pointer. + /// Otherwise, this operation is identical to `mem::replace`. + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn replace(self, src: T) -> T + where T: Sized, + { + replace(self, src) + } + + /// Swaps the values at two mutable locations of the same type, without + /// deinitializing either. They may overlap, unlike `mem::swap` which is + /// otherwise equivalent. + /// + /// # Safety + /// + /// This function copies the memory through the raw pointers passed to it + /// as arguments. + /// + /// Ensure that these pointers are valid before calling `swap`. + #[unstable(feature = "pointer_methods", issue = "43941")] + #[inline] + pub unsafe fn swap(self, with: *mut T) + where T: Sized, + { + swap(self, with) + } +} + +// Equality for pointers +#[stable(feature = "rust1", since = "1.0.0")] +impl PartialEq for *const T { + #[inline] + fn eq(&self, other: &*const T) -> bool { *self == *other } +} #[stable(feature = "rust1", since = "1.0.0")] impl Eq for *const T {} @@ -875,36 +2110,9 @@ pub fn eq(a: *const T, b: *const T) -> bool { a == b } -#[stable(feature = "rust1", since = "1.0.0")] -#[cfg(stage0)] -impl Clone for *const T { - #[inline] - fn clone(&self) -> *const T { - *self - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -#[cfg(stage0)] -impl Clone for *mut T { - #[inline] - fn clone(&self) -> *mut T { - *self - } -} - // Impls for function pointers macro_rules! fnptr_impls_safety_abi { ($FnTy: ty, $($Arg: ident),*) => { - #[stable(feature = "rust1", since = "1.0.0")] - #[cfg(stage0)] - impl Clone for $FnTy { - #[inline] - fn clone(&self) -> Self { - *self - } - } - #[stable(feature = "fnptr_impls", since = "1.4.0")] impl PartialEq for $FnTy { #[inline] @@ -1124,6 +2332,8 @@ impl Unique { /// # Safety /// /// `ptr` must be non-null. + #[unstable(feature = "unique", issue = "27730")] + #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_unique_new"))] pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { Unique { pointer: NonZero::new_unchecked(ptr), _marker: PhantomData } } @@ -1257,6 +2467,8 @@ impl Shared { /// # Safety /// /// `ptr` must be non-null. + #[unstable(feature = "shared", issue = "27730")] + #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_shared_new"))] pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { Shared { pointer: NonZero::new_unchecked(ptr), _marker: PhantomData } } diff --git a/src/libcore/slice/mod.rs b/src/libcore/slice/mod.rs index 31d8266510..5039bef631 100644 --- a/src/libcore/slice/mod.rs +++ b/src/libcore/slice/mod.rs @@ -16,6 +16,9 @@ #![stable(feature = "rust1", since = "1.0.0")] +// FIXME: after next stage0, change RangeInclusive { ... 
} back to ..= +use ops::RangeInclusive; + // How this module is organized. // // The library infrastructure for slices is fairly messy. There's @@ -35,7 +38,6 @@ // * The `raw` and `bytes` submodules. // * Boilerplate trait implementations. -use borrow::Borrow; use cmp::Ordering::{self, Less, Equal, Greater}; use cmp; use fmt; @@ -122,19 +124,17 @@ pub trait SliceExt { fn as_ptr(&self) -> *const Self::Item; #[stable(feature = "core", since = "1.6.0")] - fn binary_search(&self, x: &Q) -> Result - where Self::Item: Borrow, - Q: Ord; + fn binary_search(&self, x: &Self::Item) -> Result + where Self::Item: Ord; #[stable(feature = "core", since = "1.6.0")] fn binary_search_by<'a, F>(&'a self, f: F) -> Result where F: FnMut(&'a Self::Item) -> Ordering; #[stable(feature = "slice_binary_search_by_key", since = "1.10.0")] - fn binary_search_by_key<'a, B, F, Q: ?Sized>(&'a self, b: &Q, f: F) -> Result + fn binary_search_by_key<'a, B, F>(&'a self, b: &B, f: F) -> Result where F: FnMut(&'a Self::Item) -> B, - B: Borrow, - Q: Ord; + B: Ord; #[stable(feature = "core", since = "1.6.0")] fn len(&self) -> usize; @@ -635,11 +635,10 @@ impl SliceExt for [T] { m >= n && needle == &self[m-n..] } - fn binary_search(&self, x: &Q) -> Result - where T: Borrow, - Q: Ord + fn binary_search(&self, x: &T) -> Result + where T: Ord { - self.binary_search_by(|p| p.borrow().cmp(x)) + self.binary_search_by(|p| p.cmp(x)) } fn rotate(&mut self, mid: usize) { @@ -687,12 +686,11 @@ impl SliceExt for [T] { } #[inline] - fn binary_search_by_key<'a, B, F, Q: ?Sized>(&'a self, b: &Q, mut f: F) -> Result + fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result where F: FnMut(&'a Self::Item) -> B, - B: Borrow, - Q: Ord + B: Ord { - self.binary_search_by(|k| f(k).borrow().cmp(b)) + self.binary_search_by(|k| f(k).cmp(b)) } #[inline] @@ -1049,32 +1047,32 @@ impl SliceIndex<[T]> for ops::RangeToInclusive { #[inline] fn get(self, slice: &[T]) -> Option<&[T]> { - (0...self.end).get(slice) + (RangeInclusive { start: 0, end: self.end }).get(slice) } #[inline] fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> { - (0...self.end).get_mut(slice) + (RangeInclusive { start: 0, end: self.end }).get_mut(slice) } #[inline] unsafe fn get_unchecked(self, slice: &[T]) -> &[T] { - (0...self.end).get_unchecked(slice) + (RangeInclusive { start: 0, end: self.end }).get_unchecked(slice) } #[inline] unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] { - (0...self.end).get_unchecked_mut(slice) + (RangeInclusive { start: 0, end: self.end }).get_unchecked_mut(slice) } #[inline] fn index(self, slice: &[T]) -> &[T] { - (0...self.end).index(slice) + (RangeInclusive { start: 0, end: self.end }).index(slice) } #[inline] fn index_mut(self, slice: &mut [T]) -> &mut [T] { - (0...self.end).index_mut(slice) + (RangeInclusive { start: 0, end: self.end }).index_mut(slice) } } @@ -1656,7 +1654,7 @@ impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for Split<'a, T, P> where P: FnMut(&T } } -// FIXME(#19839) Remove in favor of `#[derive(Clone)]` +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T, P> Clone for Split<'a, T, P> where P: Clone + FnMut(&T) -> bool { fn clone(&self) -> Split<'a, T, P> { @@ -2095,7 +2093,7 @@ pub struct Windows<'a, T:'a> { size: usize } -// FIXME(#19839) Remove in favor of `#[derive(Clone)]` +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Clone for Windows<'a, T> { fn clone(&self) -> Windows<'a, T> { 
@@ -2197,7 +2195,7 @@ pub struct Chunks<'a, T:'a> { size: usize } -// FIXME(#19839) Remove in favor of `#[derive(Clone)]` +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Clone for Chunks<'a, T> { fn clone(&self) -> Chunks<'a, T> { diff --git a/src/libcore/str/mod.rs b/src/libcore/str/mod.rs index a5f6e49a53..0af9fcf0a3 100644 --- a/src/libcore/str/mod.rs +++ b/src/libcore/str/mod.rs @@ -18,9 +18,9 @@ use self::pattern::Pattern; use self::pattern::{Searcher, ReverseSearcher, DoubleEndedSearcher}; use char; -use convert::TryFrom; use fmt; -use iter::{Map, Cloned, FusedIterator}; +use iter::{Map, Cloned, FusedIterator, TrustedLen}; +use iter_private::TrustedRandomAccess; use slice::{self, SliceIndex}; use mem; @@ -301,6 +301,37 @@ pub fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> { } /// Converts a mutable slice of bytes to a mutable string slice. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::str; +/// +/// // "Hello, Rust!" as a mutable vector +/// let mut hellorust = vec![72, 101, 108, 108, 111, 44, 32, 82, 117, 115, 116, 33]; +/// +/// // As we know these bytes are valid, we can use `unwrap()` +/// let outstr = str::from_utf8_mut(&mut hellorust).unwrap(); +/// +/// assert_eq!("Hello, Rust!", outstr); +/// ``` +/// +/// Incorrect bytes: +/// +/// ``` +/// use std::str; +/// +/// // Some invalid bytes in a mutable vector +/// let mut invalid = vec![128, 223]; +/// +/// assert!(str::from_utf8_mut(&mut invalid).is_err()); +/// ``` +/// See the docs for [`Utf8Error`][error] for more details on the kinds of +/// errors that can be returned. +/// +/// [error]: struct.Utf8Error.html #[stable(feature = "str_mut_extras", since = "1.20.0")] pub fn from_utf8_mut(v: &mut [u8]) -> Result<&mut str, Utf8Error> { run_utf8_validation(v)?; @@ -372,7 +403,7 @@ unsafe fn from_raw_parts_mut<'a>(p: *mut u8, len: usize) -> &'a mut str { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn from_utf8_unchecked(v: &[u8]) -> &str { - mem::transmute(v) + &*(v as *const [u8] as *const str) } /// Converts a slice of bytes to a string slice without checking @@ -381,10 +412,23 @@ pub unsafe fn from_utf8_unchecked(v: &[u8]) -> &str { /// See the immutable version, [`from_utf8_unchecked()`][fromutf8], for more information. /// /// [fromutf8]: fn.from_utf8_unchecked.html +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::str; +/// +/// let mut heart = vec![240, 159, 146, 150]; +/// let heart = unsafe { str::from_utf8_unchecked_mut(&mut heart) }; +/// +/// assert_eq!("💖", heart); +/// ``` #[inline] #[stable(feature = "str_mut_extras", since = "1.20.0")] pub unsafe fn from_utf8_unchecked_mut(v: &mut [u8]) -> &mut str { - mem::transmute(v) + &mut *(v as *mut [u8] as *mut str) } #[stable(feature = "rust1", since = "1.0.0")] @@ -774,6 +818,17 @@ impl<'a> ExactSizeIterator for Bytes<'a> { #[unstable(feature = "fused", issue = "35602")] impl<'a> FusedIterator for Bytes<'a> {} +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a> TrustedLen for Bytes<'a> {} + +#[doc(hidden)] +unsafe impl<'a> TrustedRandomAccess for Bytes<'a> { + unsafe fn get_unchecked(&mut self, i: usize) -> u8 { + self.0.get_unchecked(i) + } + fn may_have_side_effect() -> bool { false } +} + /// This macro generates a Clone impl for string pattern API /// wrapper types of the form X<'a, P> macro_rules! 
derive_pattern_clone { @@ -1355,9 +1410,6 @@ Section: Comparing strings */ /// Bytewise slice equality -/// NOTE: This function is (ab)used in rustc::middle::trans::_match -/// to compare &[u8] byte slices that are not necessarily valid UTF-8. -#[lang = "str_eq"] #[inline] fn eq_slice(a: &str, b: &str) -> bool { a.as_bytes() == b.as_bytes() @@ -1468,7 +1520,10 @@ fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> { // When the pointer is aligned, read 2 words of data per iteration // until we find a word containing a non-ascii byte. let ptr = v.as_ptr(); - let align = (ptr as usize + index) & (usize_bytes - 1); + let align = unsafe { + // the offset is safe, because `index` is guaranteed inbounds + ptr.offset(index as isize).align_offset(usize_bytes) + }; if align == 0 { while index < blocks_end { unsafe { @@ -2142,7 +2197,7 @@ pub trait StrExt { #[stable(feature = "core", since = "1.6.0")] fn is_empty(&self) -> bool; #[stable(feature = "core", since = "1.6.0")] - fn parse<'a, T: TryFrom<&'a str>>(&'a self) -> Result; + fn parse(&self) -> Result; } // truncate `&str` to length at most equal to `max` @@ -2399,12 +2454,12 @@ impl StrExt for str { #[inline] fn as_bytes(&self) -> &[u8] { - unsafe { mem::transmute(self) } + unsafe { &*(self as *const str as *const [u8]) } } #[inline] unsafe fn as_bytes_mut(&mut self) -> &mut [u8] { - mem::transmute(self) + &mut *(self as *mut str as *mut [u8]) } fn find<'a, P: Pattern<'a>>(&'a self, pat: P) -> Option { @@ -2462,9 +2517,7 @@ impl StrExt for str { fn is_empty(&self) -> bool { self.len() == 0 } #[inline] - fn parse<'a, T>(&'a self) -> Result where T: TryFrom<&'a str> { - T::try_from(self) - } + fn parse(&self) -> Result { FromStr::from_str(self) } } #[stable(feature = "rust1", since = "1.0.0")] diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs index 450afbe3fb..524f4508c9 100644 --- a/src/libcore/sync/atomic.rs +++ b/src/libcore/sync/atomic.rs @@ -119,7 +119,9 @@ pub fn hint_core_should_pause() /// A boolean type which can be safely shared between threads. /// -/// This type has the same in-memory representation as a `bool`. +/// This type has the same in-memory representation as a [`bool`]. +/// +/// [`bool`]: ../../../std/primitive.bool.html #[cfg(target_has_atomic = "8")] #[stable(feature = "rust1", since = "1.0.0")] pub struct AtomicBool { @@ -241,15 +243,18 @@ impl AtomicBool { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_atomic_bool_new"))] pub const fn new(v: bool) -> AtomicBool { AtomicBool { v: UnsafeCell::new(v as u8) } } - /// Returns a mutable reference to the underlying `bool`. + /// Returns a mutable reference to the underlying [`bool`]. /// /// This is safe because the mutable reference guarantees that no other threads are /// concurrently accessing the atomic data. /// + /// [`bool`]: ../../../std/primitive.bool.html + /// /// # Examples /// /// ``` @@ -368,7 +373,7 @@ impl AtomicBool { unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 } } - /// Stores a value into the `bool` if the current value is the same as the `current` value. + /// Stores a value into the [`bool`] if the current value is the same as the `current` value. /// /// The return value is always the previous value. If it is equal to `current`, then the value /// was updated. @@ -377,6 +382,7 @@ impl AtomicBool { /// ordering of this operation. 
/// /// [`Ordering`]: enum.Ordering.html + /// [`bool`]: ../../../std/primitive.bool.html /// /// # Examples /// @@ -400,7 +406,7 @@ impl AtomicBool { } } - /// Stores a value into the `bool` if the current value is the same as the `current` value. + /// Stores a value into the [`bool`] if the current value is the same as the `current` value. /// /// The return value is a result indicating whether the new value was written and containing /// the previous value. On success this value is guaranteed to be equal to `current`. @@ -411,6 +417,7 @@ impl AtomicBool { /// operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and must /// be equivalent or weaker than the success ordering. /// + /// [`bool`]: ../../../std/primitive.bool.html /// [`Ordering`]: enum.Ordering.html /// [`Release`]: enum.Ordering.html#variant.Release /// [`AcqRel`]: enum.Ordering.html#variant.Release @@ -451,7 +458,7 @@ impl AtomicBool { } } - /// Stores a value into the `bool` if the current value is the same as the `current` value. + /// Stores a value into the [`bool`] if the current value is the same as the `current` value. /// /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the /// comparison succeeds, which can result in more efficient code on some platforms. The @@ -464,6 +471,7 @@ impl AtomicBool { /// failure ordering can't be [`Release`] or [`AcqRel`] and must be equivalent or /// weaker than the success ordering. /// + /// [`bool`]: ../../../std/primitive.bool.html /// [`compare_exchange`]: #method.compare_exchange /// [`Ordering`]: enum.Ordering.html /// [`Release`]: enum.Ordering.html#variant.Release @@ -649,6 +657,7 @@ impl AtomicPtr { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_atomic_ptr_new"))] pub const fn new(p: *mut T) -> AtomicPtr { AtomicPtr { p: UnsafeCell::new(p) } } @@ -920,14 +929,28 @@ impl AtomicPtr { #[cfg(target_has_atomic = "ptr")] macro_rules! atomic_int { - ($stable:meta, + ($stable:meta, $const_unstable:meta, $stable_cxchg:meta, $stable_debug:meta, $stable_access:meta, + $s_int_type:expr, $int_ref:expr, $int_type:ident $atomic_type:ident $atomic_init:ident) => { /// An integer type which can be safely shared between threads. /// - /// This type has the same in-memory representation as the underlying integer type. + /// This type has the same in-memory representation as the underlying + /// integer type, [` + #[doc = $s_int_type] + /// `]( + #[doc = $int_ref] + /// ). For more about the differences between atomic types and + /// non-atomic types, please see the [module-level documentation]. + /// + /// Please note that examples are shared between atomic variants of + /// primitive integer types, so it's normal that they are all + /// demonstrating [`AtomicIsize`]. + /// + /// [module-level documentation]: index.html + /// [`AtomicIsize`]: struct.AtomicIsize.html #[$stable] pub struct $atomic_type { v: UnsafeCell<$int_type>, @@ -969,6 +992,7 @@ macro_rules! atomic_int { /// ``` #[inline] #[$stable] + #[cfg_attr(not(stage0), $const_unstable)] pub const fn new(v: $int_type) -> Self { $atomic_type {v: UnsafeCell::new(v)} } @@ -1332,81 +1356,101 @@ macro_rules! atomic_int { #[cfg(target_has_atomic = "8")] atomic_int! 
{ unstable(feature = "integer_atomics", issue = "32976"), + rustc_const_unstable(feature = "const_atomic_i8_new"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + "i8", "../../../std/primitive.i8.html", i8 AtomicI8 ATOMIC_I8_INIT } #[cfg(target_has_atomic = "8")] atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), + rustc_const_unstable(feature = "const_atomic_u8_new"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + "u8", "../../../std/primitive.u8.html", u8 AtomicU8 ATOMIC_U8_INIT } #[cfg(target_has_atomic = "16")] atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), + rustc_const_unstable(feature = "const_atomic_i16_new"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + "i16", "../../../std/primitive.i16.html", i16 AtomicI16 ATOMIC_I16_INIT } #[cfg(target_has_atomic = "16")] atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), + rustc_const_unstable(feature = "const_atomic_u16_new"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + "u16", "../../../std/primitive.u16.html", u16 AtomicU16 ATOMIC_U16_INIT } #[cfg(target_has_atomic = "32")] atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), + rustc_const_unstable(feature = "const_atomic_i32_new"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + "i32", "../../../std/primitive.i32.html", i32 AtomicI32 ATOMIC_I32_INIT } #[cfg(target_has_atomic = "32")] atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), + rustc_const_unstable(feature = "const_atomic_u32_new"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + "u32", "../../../std/primitive.u32.html", u32 AtomicU32 ATOMIC_U32_INIT } #[cfg(target_has_atomic = "64")] atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), + rustc_const_unstable(feature = "const_atomic_i64_new"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + "i64", "../../../std/primitive.i64.html", i64 AtomicI64 ATOMIC_I64_INIT } #[cfg(target_has_atomic = "64")] atomic_int! 
{ unstable(feature = "integer_atomics", issue = "32976"), + rustc_const_unstable(feature = "const_atomic_u64_new"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + "u64", "../../../std/primitive.u64.html", u64 AtomicU64 ATOMIC_U64_INIT } #[cfg(target_has_atomic = "ptr")] atomic_int!{ stable(feature = "rust1", since = "1.0.0"), + rustc_const_unstable(feature = "const_atomic_isize_new"), stable(feature = "extended_compare_and_swap", since = "1.10.0"), stable(feature = "atomic_debug", since = "1.3.0"), stable(feature = "atomic_access", since = "1.15.0"), + "isize", "../../../std/primitive.isize.html", isize AtomicIsize ATOMIC_ISIZE_INIT } #[cfg(target_has_atomic = "ptr")] atomic_int!{ stable(feature = "rust1", since = "1.0.0"), + rustc_const_unstable(feature = "const_atomic_usize_new"), stable(feature = "extended_compare_and_swap", since = "1.10.0"), stable(feature = "atomic_debug", since = "1.3.0"), stable(feature = "atomic_access", since = "1.15.0"), + "usize", "../../../std/primitive.usize.html", usize AtomicUsize ATOMIC_USIZE_INIT } diff --git a/src/libcore/tests/char.rs b/src/libcore/tests/char.rs index 7c3b90c815..4e10ceac87 100644 --- a/src/libcore/tests/char.rs +++ b/src/libcore/tests/char.rs @@ -32,7 +32,6 @@ fn test_convert() { #[test] fn test_from_str() { assert_eq!(char::from_str("a").unwrap(), 'a'); - assert_eq!(char::try_from("a").unwrap(), 'a'); assert_eq!(char::from_str("\0").unwrap(), '\0'); assert_eq!(char::from_str("\u{D7FF}").unwrap(), '\u{d7FF}'); assert!(char::from_str("").is_err()); diff --git a/src/libcore/tests/hash/mod.rs b/src/libcore/tests/hash/mod.rs index 53ac17c052..43ba941f13 100644 --- a/src/libcore/tests/hash/mod.rs +++ b/src/libcore/tests/hash/mod.rs @@ -12,6 +12,7 @@ mod sip; use std::hash::{Hash, Hasher}; use std::default::Default; +use std::rc::Rc; struct MyHasher { hash: u64, @@ -64,12 +65,14 @@ fn test_writer_hasher() { assert_eq!(hash(& s), 97 + 0xFF); let s: Box = String::from("a").into_boxed_str(); assert_eq!(hash(& s), 97 + 0xFF); + let s: Rc<&str> = Rc::new("a"); + assert_eq!(hash(&s), 97 + 0xFF); let cs: &[u8] = &[1, 2, 3]; assert_eq!(hash(& cs), 9); let cs: Box<[u8]> = Box::new([1, 2, 3]); assert_eq!(hash(& cs), 9); - - // FIXME (#18248) Add tests for hashing Rc and Rc<[T]> + let cs: Rc<[u8]> = Rc::new([1, 2, 3]); + assert_eq!(hash(& cs), 9); let ptr = 5_usize as *const i32; assert_eq!(hash(&ptr), 5); @@ -109,3 +112,16 @@ fn test_custom_state() { assert_eq!(hash(&Custom { hash: 5 }), 5); } + +// FIXME: Instantiated functions with i128 in the signature is not supported in Emscripten. 
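The `rustc_const_unstable(...)` argument threaded through every `atomic_int!` invocation above marks the constness of each `new` as unstable on the new bootstrap compiler, which is why the libcore test crate below turns on gates such as `const_atomic_usize_new`. A supplementary sketch of what that looks like for a user crate on a nightly of this era (the `ATOMIC_USIZE_INIT` constant remains the gate-free spelling):

```
#![feature(const_atomic_usize_new)]

use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};

// Calling the const fn `new` in a static initializer is what the per-type
// feature gate above unlocks on this nightly.
static COUNTER: AtomicUsize = AtomicUsize::new(0);

// The plain constant initializer needs no gate.
static FALLBACK: AtomicUsize = ATOMIC_USIZE_INIT;

fn main() {
    COUNTER.fetch_add(1, Ordering::SeqCst);
    assert_eq!(COUNTER.load(Ordering::SeqCst), 1);
    assert_eq!(FALLBACK.load(Ordering::SeqCst), 0);
}
```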
+// See https://github.com/kripken/emscripten-fastcomp/issues/169 +#[cfg(not(target_os = "emscripten"))] +#[test] +fn test_indirect_hasher() { + let mut hasher = MyHasher { hash: 0 }; + { + let mut indirect_hasher: &mut Hasher = &mut hasher; + 5u32.hash(&mut indirect_hasher); + } + assert_eq!(hasher.hash, 5); +} diff --git a/src/libcore/tests/iter.rs b/src/libcore/tests/iter.rs index ed6923929d..f8c6fc5c8f 100644 --- a/src/libcore/tests/iter.rs +++ b/src/libcore/tests/iter.rs @@ -248,6 +248,25 @@ fn test_filter_map() { assert_eq!(it.collect::>(), [0*0, 2*2, 4*4, 6*6, 8*8]); } +#[test] +fn test_filter_map_fold() { + let xs = [0, 1, 2, 3, 4, 5, 6, 7, 8]; + let ys = [0*0, 2*2, 4*4, 6*6, 8*8]; + let it = xs.iter().filter_map(|&x| if x % 2 == 0 { Some(x*x) } else { None }); + let i = it.fold(0, |i, x| { + assert_eq!(x, ys[i]); + i + 1 + }); + assert_eq!(i, ys.len()); + + let it = xs.iter().filter_map(|&x| if x % 2 == 0 { Some(x*x) } else { None }); + let i = it.rfold(ys.len(), |i, x| { + assert_eq!(x, ys[i - 1]); + i - 1 + }); + assert_eq!(i, 0); +} + #[test] fn test_iterator_enumerate() { let xs = [0, 1, 2, 3, 4, 5]; @@ -282,7 +301,31 @@ fn test_iterator_enumerate_nth() { #[test] fn test_iterator_enumerate_count() { let xs = [0, 1, 2, 3, 4, 5]; - assert_eq!(xs.iter().count(), 6); + assert_eq!(xs.iter().enumerate().count(), 6); +} + +#[test] +fn test_iterator_enumerate_fold() { + let xs = [0, 1, 2, 3, 4, 5]; + let mut it = xs.iter().enumerate(); + // steal a couple to get an interesting offset + assert_eq!(it.next(), Some((0, &0))); + assert_eq!(it.next(), Some((1, &1))); + let i = it.fold(2, |i, (j, &x)| { + assert_eq!(i, j); + assert_eq!(x, xs[j]); + i + 1 + }); + assert_eq!(i, xs.len()); + + let mut it = xs.iter().enumerate(); + assert_eq!(it.next(), Some((0, &0))); + let i = it.rfold(xs.len() - 1, |i, (j, &x)| { + assert_eq!(i, j); + assert_eq!(x, xs[j]); + i - 1 + }); + assert_eq!(i, 0); } #[test] @@ -291,6 +334,25 @@ fn test_iterator_filter_count() { assert_eq!(xs.iter().filter(|&&x| x % 2 == 0).count(), 5); } +#[test] +fn test_iterator_filter_fold() { + let xs = [0, 1, 2, 3, 4, 5, 6, 7, 8]; + let ys = [0, 2, 4, 6, 8]; + let it = xs.iter().filter(|&&x| x % 2 == 0); + let i = it.fold(0, |i, &x| { + assert_eq!(x, ys[i]); + i + 1 + }); + assert_eq!(i, ys.len()); + + let it = xs.iter().filter(|&&x| x % 2 == 0); + let i = it.rfold(ys.len(), |i, &x| { + assert_eq!(x, ys[i - 1]); + i - 1 + }); + assert_eq!(i, 0); +} + #[test] fn test_iterator_peekable() { let xs = vec![0, 1, 2, 3, 4, 5]; @@ -381,6 +443,18 @@ fn test_iterator_peekable_last() { assert_eq!(it.last(), None); } +#[test] +fn test_iterator_peekable_fold() { + let xs = [0, 1, 2, 3, 4, 5]; + let mut it = xs.iter().peekable(); + assert_eq!(it.peek(), Some(&&0)); + let i = it.fold(0, |i, &x| { + assert_eq!(x, xs[i]); + i + 1 + }); + assert_eq!(i, xs.len()); +} + /// This is an iterator that follows the Iterator contract, /// but it is not fused. After having returned None once, it will start /// producing elements if .next() is called again. 
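The `*_fold` tests added around here all check the same contract: an adapter's specialized `fold`/`rfold` must visit exactly the elements `next` would yield, in the same order. A supplementary sketch of that guarantee from the caller's side:

```
fn main() {
    let xs = [0, 1, 2, 3, 4, 5, 6, 7, 8];

    // Build the result once via `fold` and once via ordinary iteration; the
    // adapter specializations must make these agree element for element.
    let via_fold: Vec<i32> = xs.iter()
        .filter_map(|&x| if x % 2 == 0 { Some(x * x) } else { None })
        .fold(Vec::new(), |mut acc, x| { acc.push(x); acc });

    let via_next: Vec<i32> = xs.iter()
        .filter_map(|&x| if x % 2 == 0 { Some(x * x) } else { None })
        .collect();

    assert_eq!(via_fold, via_next);
    assert_eq!(via_fold, [0, 4, 16, 36, 64]);
}
```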
@@ -470,6 +544,26 @@ fn test_iterator_skip_while() { assert_eq!(i, ys.len()); } +#[test] +fn test_iterator_skip_while_fold() { + let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19]; + let ys = [15, 16, 17, 19]; + let it = xs.iter().skip_while(|&x| *x < 15); + let i = it.fold(0, |i, &x| { + assert_eq!(x, ys[i]); + i + 1 + }); + assert_eq!(i, ys.len()); + + let mut it = xs.iter().skip_while(|&x| *x < 15); + assert_eq!(it.next(), Some(&ys[0])); // process skips before folding + let i = it.fold(1, |i, &x| { + assert_eq!(x, ys[i]); + i + 1 + }); + assert_eq!(i, ys.len()); +} + #[test] fn test_iterator_skip() { let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30]; @@ -566,6 +660,26 @@ fn test_iterator_skip_last() { assert_eq!(it.last(), Some(&30)); } +#[test] +fn test_iterator_skip_fold() { + let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30]; + let ys = [13, 15, 16, 17, 19, 20, 30]; + let it = xs.iter().skip(5); + let i = it.fold(0, |i, &x| { + assert_eq!(x, ys[i]); + i + 1 + }); + assert_eq!(i, ys.len()); + + let mut it = xs.iter().skip(5); + assert_eq!(it.next(), Some(&ys[0])); // process skips before folding + let i = it.fold(1, |i, &x| { + assert_eq!(x, ys[i]); + i + 1 + }); + assert_eq!(i, ys.len()); +} + #[test] fn test_iterator_take() { let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19]; @@ -654,6 +768,31 @@ fn test_iterator_flat_map() { assert_eq!(i, ys.len()); } +/// Test `FlatMap::fold` with items already picked off the front and back, +/// to make sure all parts of the `FlatMap` are folded correctly. +#[test] +fn test_iterator_flat_map_fold() { + let xs = [0, 3, 6]; + let ys = [1, 2, 3, 4, 5, 6, 7]; + let mut it = xs.iter().flat_map(|&x| x..x+3); + assert_eq!(it.next(), Some(0)); + assert_eq!(it.next_back(), Some(8)); + let i = it.fold(0, |i, x| { + assert_eq!(x, ys[i]); + i + 1 + }); + assert_eq!(i, ys.len()); + + let mut it = xs.iter().flat_map(|&x| x..x+3); + assert_eq!(it.next(), Some(0)); + assert_eq!(it.next_back(), Some(8)); + let i = it.rfold(ys.len(), |i, x| { + assert_eq!(x, ys[i - 1]); + i - 1 + }); + assert_eq!(i, 0); +} + #[test] fn test_inspect() { let xs = [1, 2, 3, 4]; @@ -668,6 +807,32 @@ fn test_inspect() { assert_eq!(&xs[..], &ys[..]); } +#[test] +fn test_inspect_fold() { + let xs = [1, 2, 3, 4]; + let mut n = 0; + { + let it = xs.iter().inspect(|_| n += 1); + let i = it.fold(0, |i, &x| { + assert_eq!(x, xs[i]); + i + 1 + }); + assert_eq!(i, xs.len()); + } + assert_eq!(n, xs.len()); + + let mut n = 0; + { + let it = xs.iter().inspect(|_| n += 1); + let i = it.rfold(xs.len(), |i, &x| { + assert_eq!(x, xs[i - 1]); + i - 1 + }); + assert_eq!(i, 0); + } + assert_eq!(n, xs.len()); +} + #[test] fn test_cycle() { let cycle_len = 3; @@ -1078,21 +1243,21 @@ fn test_range() { #[test] fn test_range_inclusive_exhaustion() { - let mut r = 10...10; + let mut r = 10..=10; assert_eq!(r.next(), Some(10)); - assert_eq!(r, 1...0); + assert_eq!(r, 1..=0); - let mut r = 10...10; + let mut r = 10..=10; assert_eq!(r.next_back(), Some(10)); - assert_eq!(r, 1...0); + assert_eq!(r, 1..=0); - let mut r = 10...12; + let mut r = 10..=12; assert_eq!(r.nth(2), Some(12)); - assert_eq!(r, 1...0); + assert_eq!(r, 1..=0); - let mut r = 10...12; + let mut r = 10..=12; assert_eq!(r.nth(5), None); - assert_eq!(r, 1...0); + assert_eq!(r, 1..=0); } @@ -1129,20 +1294,20 @@ fn test_range_from_nth() { #[test] fn test_range_inclusive_nth() { - assert_eq!((10...15).nth(0), Some(10)); - assert_eq!((10...15).nth(1), Some(11)); - assert_eq!((10...15).nth(5), Some(15)); - assert_eq!((10...15).nth(6), None); + 
assert_eq!((10..=15).nth(0), Some(10)); + assert_eq!((10..=15).nth(1), Some(11)); + assert_eq!((10..=15).nth(5), Some(15)); + assert_eq!((10..=15).nth(6), None); - let mut r = 10_u8...20; + let mut r = 10_u8..=20; assert_eq!(r.nth(2), Some(12)); - assert_eq!(r, 13...20); + assert_eq!(r, 13..=20); assert_eq!(r.nth(2), Some(15)); - assert_eq!(r, 16...20); + assert_eq!(r, 16..=20); assert_eq!(r.is_empty(), false); assert_eq!(r.nth(10), None); assert_eq!(r.is_empty(), true); - assert_eq!(r, 1...0); // We may not want to document/promise this detail + assert_eq!(r, 1..=0); // We may not want to document/promise this detail } #[test] @@ -1225,6 +1390,31 @@ fn test_fuse_count() { // Can't check len now because count consumes. } +#[test] +fn test_fuse_fold() { + let xs = [0, 1, 2]; + let it = xs.iter(); // `FusedIterator` + let i = it.fuse().fold(0, |i, &x| { + assert_eq!(x, xs[i]); + i + 1 + }); + assert_eq!(i, xs.len()); + + let it = xs.iter(); // `FusedIterator` + let i = it.fuse().rfold(xs.len(), |i, &x| { + assert_eq!(x, xs[i - 1]); + i - 1 + }); + assert_eq!(i, 0); + + let it = xs.iter().scan((), |_, &x| Some(x)); // `!FusedIterator` + let i = it.fuse().fold(0, |i, x| { + assert_eq!(x, xs[i]); + i + 1 + }); + assert_eq!(i, xs.len()); +} + #[test] fn test_once() { let mut it = once(42); diff --git a/src/libcore/tests/lib.rs b/src/libcore/tests/lib.rs index 1ba9d78f9d..afc5de7b0e 100644 --- a/src/libcore/tests/lib.rs +++ b/src/libcore/tests/lib.rs @@ -11,7 +11,6 @@ #![deny(warnings)] #![feature(box_syntax)] -#![feature(const_fn)] #![feature(core_float)] #![feature(core_private_bignum)] #![feature(core_private_diy_float)] @@ -26,6 +25,7 @@ #![feature(inclusive_range)] #![feature(inclusive_range_syntax)] #![feature(iter_rfind)] +#![feature(iter_rfold)] #![feature(nonzero)] #![feature(rand)] #![feature(raw)] @@ -39,8 +39,13 @@ #![feature(test)] #![feature(trusted_len)] #![feature(try_from)] +#![feature(try_trait)] #![feature(unique)] +#![feature(const_atomic_bool_new)] +#![feature(const_atomic_usize_new)] +#![feature(const_atomic_isize_new)] + extern crate core; extern crate test; extern crate rand; diff --git a/src/libcore/tests/num/mod.rs b/src/libcore/tests/num/mod.rs index 400d53ce51..7eb5ff9885 100644 --- a/src/libcore/tests/num/mod.rs +++ b/src/libcore/tests/num/mod.rs @@ -8,10 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use core::convert::TryFrom; +use core::convert::{TryFrom, TryInto}; use core::cmp::PartialEq; use core::fmt::Debug; use core::marker::Copy; +use core::num::TryFromIntError; use core::ops::{Add, Sub, Mul, Div, Rem}; use core::option::Option; use core::option::Option::{Some, None}; @@ -134,6 +135,13 @@ fn test_empty() { assert_eq!("".parse::().ok(), None); } +#[test] +fn test_infallible_try_from_int_error() { + let func = |x: i8| -> Result { Ok(x.try_into()?) }; + + assert!(func(0).is_ok()); +} + macro_rules! 
test_impl_from { ($fn_name: ident, $Small: ty, $Large: ty) => { #[test] diff --git a/src/libcore/tests/option.rs b/src/libcore/tests/option.rs index 6bac55575f..22109e28ed 100644 --- a/src/libcore/tests/option.rs +++ b/src/libcore/tests/option.rs @@ -270,3 +270,30 @@ fn test_cloned() { assert_eq!(opt_ref_ref.clone().cloned(), Some(&val)); assert_eq!(opt_ref_ref.cloned().cloned(), Some(1)); } + +#[test] +fn test_try() { + fn try_option_some() -> Option { + let val = Some(1)?; + Some(val) + } + assert_eq!(try_option_some(), Some(1)); + + fn try_option_none() -> Option { + let val = None?; + Some(val) + } + assert_eq!(try_option_none(), None); + + fn try_option_ok() -> Result { + let val = Some(1)?; + Ok(val) + } + assert_eq!(try_option_ok(), Ok(1)); + + fn try_option_err() -> Result { + let val = None?; + Ok(val) + } + assert_eq!(try_option_err(), Err(NoneError)); +} diff --git a/src/libcore/tests/result.rs b/src/libcore/tests/result.rs index 4c5f19dee1..ce41bde834 100644 --- a/src/libcore/tests/result.rs +++ b/src/libcore/tests/result.rs @@ -8,6 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use core::option::*; + fn op1() -> Result { Ok(666) } fn op2() -> Result { Err("sadface") } @@ -202,3 +204,30 @@ pub fn test_unwrap_or_default() { assert_eq!(op1().unwrap_or_default(), 666); assert_eq!(op2().unwrap_or_default(), 0); } + +#[test] +fn test_try() { + fn try_result_some() -> Option { + let val = Ok(1)?; + Some(val) + } + assert_eq!(try_result_some(), Some(1)); + + fn try_result_none() -> Option { + let val = Err(NoneError)?; + Some(val) + } + assert_eq!(try_result_none(), None); + + fn try_result_ok() -> Result { + let val = Ok(1)?; + Ok(val) + } + assert_eq!(try_result_ok(), Ok(1)); + + fn try_result_err() -> Result { + let val = Err(1)?; + Ok(val) + } + assert_eq!(try_result_err(), Err(1)); +} diff --git a/src/libcore/tuple.rs b/src/libcore/tuple.rs index 555843dba4..4c5370194f 100644 --- a/src/libcore/tuple.rs +++ b/src/libcore/tuple.rs @@ -21,14 +21,6 @@ macro_rules! 
tuple_impls { } )+) => { $( - #[stable(feature = "rust1", since = "1.0.0")] - #[cfg(stage0)] - impl<$($T:Clone),+> Clone for ($($T,)+) { - fn clone(&self) -> ($($T,)+) { - ($(self.$idx.clone(),)+) - } - } - #[stable(feature = "rust1", since = "1.0.0")] impl<$($T:PartialEq),+> PartialEq for ($($T,)+) where last_type!($($T,)+): ?Sized { #[inline] diff --git a/src/libgetopts/lib.rs b/src/libgetopts/lib.rs index a0eacc817c..81fa0374f5 100644 --- a/src/libgetopts/lib.rs +++ b/src/libgetopts/lib.rs @@ -731,7 +731,9 @@ pub fn usage(brief: &str, opts: &[OptGroup]) -> String { } } - // FIXME: #5516 should be graphemes not codepoints + // FIXME(https://github.com/rust-lang-nursery/getopts/issues/7) + // should be graphemes not codepoints + // // here we just need to indent the start of the description let rowlen = row.chars().count(); if rowlen < 24 { @@ -749,14 +751,17 @@ pub fn usage(brief: &str, opts: &[OptGroup]) -> String { desc_normalized_whitespace.push(' '); } - // FIXME: #5516 should be graphemes not codepoints + // FIXME(https://github.com/rust-lang-nursery/getopts/issues/7) + // should be graphemes not codepoints let mut desc_rows = Vec::new(); each_split_within(&desc_normalized_whitespace[..], 54, |substr| { desc_rows.push(substr.to_owned()); true }); - // FIXME: #5516 should be graphemes not codepoints + // FIXME(https://github.com/rust-lang-nursery/getopts/issues/7) + // should be graphemes not codepoints + // // wrapped description row.push_str(&desc_rows.join(&desc_sep[..])); diff --git a/src/liblibc/Cargo.lock b/src/liblibc/Cargo.lock index c6598e4288..f83faad644 100644 --- a/src/liblibc/Cargo.lock +++ b/src/liblibc/Cargo.lock @@ -3,7 +3,7 @@ name = "libc-test" version = "0.1.0" dependencies = [ "ctest 0.1.4 (git+https://github.com/alexcrichton/ctest?branch=long)", - "libc 0.2.32", + "libc 0.2.31", ] [[package]] @@ -75,12 +75,12 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.31" +version = "0.2.30" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "libc" -version = "0.2.32" +version = "0.2.31" [[package]] name = "log" @@ -119,7 +119,7 @@ name = "rand" version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.30 (registry+https://github.com/rust-lang/crates.io-index)", "magenta 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -205,7 +205,7 @@ name = "syntex_errors" version = "0.59.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.30 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", "syntex_pos 0.59.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -278,7 +278,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum gcc 0.3.54 (registry+https://github.com/rust-lang/crates.io-index)" = "5e33ec290da0d127825013597dbdfc28bee4964690c7ce1166cbc2a7bd08b1bb" "checksum itoa 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8324a32baf01e2ae060e9de58ed0bc2320c9a2833491ee36cd3b4c414de4db8c" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -"checksum libc 0.2.31 
(registry+https://github.com/rust-lang/crates.io-index)" = "d1419b2939a0bc44b77feb34661583c7546b532b192feab36249ab584b86856c" +"checksum libc 0.2.30 (registry+https://github.com/rust-lang/crates.io-index)" = "2370ca07ec338939e356443dac2296f581453c35fe1e3a3ed06023c49435f915" "checksum log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b" "checksum magenta 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4bf0336886480e671965f794bc9b6fce88503563013d1bfb7a502c81fe3ac527" "checksum magenta-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "40d014c7011ac470ae28e2f76a02bfea4a8480f73e701353b49ad7a8d75f4699" diff --git a/src/liblibc/Cargo.toml b/src/liblibc/Cargo.toml index 06782c5c85..2417da3eaf 100644 --- a/src/liblibc/Cargo.toml +++ b/src/liblibc/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "libc" -version = "0.2.32" +version = "0.2.31" authors = ["The Rust Project Developers"] license = "MIT/Apache-2.0" readme = "README.md" diff --git a/src/liblibc/README.md b/src/liblibc/README.md index c3333da147..7b2d778ea1 100644 --- a/src/liblibc/README.md +++ b/src/liblibc/README.md @@ -138,9 +138,6 @@ Tested: * [`x86_64-unknown-linux-musl`](https://doc.rust-lang.org/libc/x86_64-unknown-linux-musl/libc/) (Linux MUSL) * [`aarch64-unknown-linux-gnu`](https://doc.rust-lang.org/libc/aarch64-unknown-linux-gnu/libc/) - (Linux) - * [`aarch64-unknown-linux-musl`](https://doc.rust-lang.org/libc/aarch64-unknown-linux-musl/libc/) - (Linux MUSL) * [`mips-unknown-linux-gnu`](https://doc.rust-lang.org/libc/mips-unknown-linux-gnu/libc/) * [`arm-unknown-linux-gnueabihf`](https://doc.rust-lang.org/libc/arm-unknown-linux-gnueabihf/libc/) * [`arm-linux-androideabi`](https://doc.rust-lang.org/libc/arm-linux-androideabi/libc/) diff --git a/src/liblibc/ci/README.md b/src/liblibc/ci/README.md index aef6ef1db1..13c7c8da52 100644 --- a/src/liblibc/ci/README.md +++ b/src/liblibc/ci/README.md @@ -39,7 +39,7 @@ running tests. The triples tested are: * `{i686,x86_64}-pc-windows-{msvc,gnu}` * Travis * `{i686,x86_64,mips,aarch64}-unknown-linux-gnu` - * `{x86_64,aarch64}-unknown-linux-musl` + * `x86_64-unknown-linux-musl` * `arm-unknown-linux-gnueabihf` * `arm-linux-androideabi` * `{i686,x86_64}-apple-{darwin,ios}` diff --git a/src/liblibc/ci/android-install-sdk.sh b/src/liblibc/ci/android-install-sdk.sh index 7e6147cc0c..d03b7623bf 100644 --- a/src/liblibc/ci/android-install-sdk.sh +++ b/src/liblibc/ci/android-install-sdk.sh @@ -49,7 +49,7 @@ esac; filter="$filter,sys-img-$abi-android-24" -./android-accept-licenses.sh "android - update sdk -a --no-ui --filter $filter --no-https" +./android-accept-licenses.sh "android - update sdk -a --no-ui --filter $filter" echo "no" | android create avd \ --name $1 \ diff --git a/src/liblibc/ci/docker/aarch64-unknown-linux-musl/Dockerfile b/src/liblibc/ci/docker/aarch64-unknown-linux-musl/Dockerfile deleted file mode 100644 index 6aca3b8bee..0000000000 --- a/src/liblibc/ci/docker/aarch64-unknown-linux-musl/Dockerfile +++ /dev/null @@ -1,24 +0,0 @@ -FROM ubuntu:17.10 - -RUN apt-get update && apt-get install -y --no-install-recommends \ - gcc make libc6-dev git curl ca-certificates \ - gcc-aarch64-linux-gnu qemu-user -RUN curl https://www.musl-libc.org/releases/musl-1.1.16.tar.gz | \ - tar xzf - && \ - cd musl-1.1.16 && \ - CC=aarch64-linux-gnu-gcc \ - ./configure --prefix=/musl-aarch64 --enable-wrapper=yes && \ - make install -j4 && \ - cd .. 
&& \ - rm -rf musl-1.1.16 && \ -# Install linux kernel headers sanitized for use with musl - curl -L https://github.com/sabotage-linux/kernel-headers/archive/v3.12.6-5.tar.gz | \ - tar xzf - && \ - cd kernel-headers-3.12.6-5 && \ - make ARCH=arm64 prefix=/musl-aarch64 install -j4 && \ - cd .. && \ - rm -rf kernel-headers-3.12.6-5 -ENV PATH=$PATH:/musl-aarch64/bin:/rust/bin \ - CC_aarch64_unknown_linux_musl=musl-gcc \ - CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_LINKER=musl-gcc \ - CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUNNER="qemu-aarch64 -L /musl-aarch64" diff --git a/src/liblibc/libc-test/build.rs b/src/liblibc/libc-test/build.rs index 95ffebb9e5..a62c7c4446 100644 --- a/src/liblibc/libc-test/build.rs +++ b/src/liblibc/libc-test/build.rs @@ -239,13 +239,6 @@ fn main() { } } - if linux || android { - // DCCP support - if !uclibc && !musl && !emscripten { - cfg.header("linux/dccp.h"); - } - } - if freebsd { cfg.header("pthread_np.h"); cfg.header("sched.h"); @@ -260,9 +253,6 @@ fn main() { cfg.header("ufs/ufs/quota.h"); cfg.header("ufs/ufs/quota1.h"); cfg.header("sys/ioctl_compat.h"); - - // DCCP support - cfg.header("netinet/dccp.h"); } if openbsd { diff --git a/src/liblibc/src/redox/mod.rs b/src/liblibc/src/redox.rs similarity index 55% rename from src/liblibc/src/redox/mod.rs rename to src/liblibc/src/redox.rs index 82b296f965..7ff4ea05fd 100644 --- a/src/liblibc/src/redox/mod.rs +++ b/src/liblibc/src/redox.rs @@ -11,17 +11,40 @@ pub type pid_t = usize; pub type gid_t = usize; pub type uid_t = usize; -pub type suseconds_t = i64; +pub type in_addr_t = u32; +pub type in_port_t = u16; + +pub type socklen_t = u32; +pub type sa_family_t = u16; s! { - pub struct timeval { - pub tv_sec: time_t, - pub tv_usec: suseconds_t, + pub struct in_addr { + pub s_addr: in_addr_t, + } + + pub struct in6_addr { + pub s6_addr: [u8; 16], + __align: [u32; 0], } - pub struct timespec { - pub tv_sec: time_t, - pub tv_nsec: c_long, + pub struct sockaddr { + pub sa_family: sa_family_t, + pub sa_data: [::c_char; 14], + } + + pub struct sockaddr_in { + pub sin_family: sa_family_t, + pub sin_port: ::in_port_t, + pub sin_addr: ::in_addr, + pub sin_zero: [u8; 8], + } + + pub struct sockaddr_in6 { + pub sin6_family: sa_family_t, + pub sin6_port: in_port_t, + pub sin6_flowinfo: u32, + pub sin6_addr: ::in6_addr, + pub sin6_scope_id: u32, } } @@ -60,45 +83,14 @@ pub const S_IXOTH: mode_t = 0x1; pub const S_IWOTH: mode_t = 0x2; pub const S_IROTH: mode_t = 0x4; -pub const F_DUPFD: ::c_int = 0; -pub const F_GETFD: ::c_int = 1; -pub const F_SETFD: ::c_int = 2; -pub const F_GETFL: ::c_int = 3; -pub const F_SETFL: ::c_int = 4; - -pub const O_RDONLY: ::c_int = 0x0001_0000; -pub const O_WRONLY: ::c_int = 0x0002_0000; -pub const O_RDWR: ::c_int = 0x0003_0000; -pub const O_NONBLOCK: ::c_int = 0x0004_0000; -pub const O_APPEND: ::c_int = 0x0008_0000; -pub const O_SHLOCK: ::c_int = 0x0010_0000; -pub const O_EXLOCK: ::c_int = 0x0020_0000; -pub const O_ASYNC: ::c_int = 0x0040_0000; -pub const O_FSYNC: ::c_int = 0x0080_0000; -pub const O_CLOEXEC: ::c_int = 0x0100_0000; -pub const O_CREAT: ::c_int = 0x0200_0000; -pub const O_TRUNC: ::c_int = 0x0400_0000; -pub const O_EXCL: ::c_int = 0x0800_0000; -pub const O_DIRECTORY: ::c_int = 0x1000_0000; -pub const O_STAT: ::c_int = 0x2000_0000; -pub const O_SYMLINK: ::c_int = 0x4000_0000; -pub const O_NOFOLLOW: ::c_int = 0x8000_0000; -pub const O_ACCMODE: ::c_int = O_RDONLY | O_WRONLY | O_RDWR; - extern { pub fn memalign(align: ::size_t, size: ::size_t) -> *mut ::c_void; pub fn read(fd: ::c_int, buf: 
*mut ::c_void, count: ::size_t) -> ::ssize_t; pub fn write(fd: ::c_int, buf: *const ::c_void, count: ::size_t) -> ::ssize_t; - pub fn fcntl(fd: ::c_int, cmd: ::c_int, ...) -> ::c_int; - pub fn close(fd: ::c_int) -> ::c_int; } #[link(name = "c")] #[link(name = "m")] extern {} - -pub use self::net::*; - -mod net; diff --git a/src/liblibc/src/redox/net.rs b/src/liblibc/src/redox/net.rs deleted file mode 100644 index 0916916430..0000000000 --- a/src/liblibc/src/redox/net.rs +++ /dev/null @@ -1,110 +0,0 @@ -pub type in_addr_t = u32; -pub type in_port_t = u16; - -pub type socklen_t = u32; -pub type sa_family_t = u16; - -s! { - pub struct in_addr { - pub s_addr: in_addr_t, - } - - pub struct in6_addr { - pub s6_addr: [u8; 16], - __align: [u32; 0], - } - - pub struct ip_mreq { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct ipv6_mreq { - pub ipv6mr_multiaddr: in6_addr, - pub ipv6mr_interface: ::c_uint, - } - - pub struct linger { - pub l_onoff: ::c_int, - pub l_linger: ::c_int, - } - - pub struct sockaddr { - pub sa_family: sa_family_t, - pub sa_data: [::c_char; 14], - } - - pub struct sockaddr_in { - pub sin_family: sa_family_t, - pub sin_port: ::in_port_t, - pub sin_addr: ::in_addr, - pub sin_zero: [u8; 8], - } - - pub struct sockaddr_in6 { - pub sin6_family: sa_family_t, - pub sin6_port: in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: ::in6_addr, - pub sin6_scope_id: u32, - } - - pub struct sockaddr_storage { - pub ss_family: sa_family_t, - pub __ss_padding: [u8; 26], - } -} - -pub const AF_INET: ::c_int = 2; -pub const AF_INET6: ::c_int = 23; - -pub const SOCK_STREAM: ::c_int = 1; -pub const SOCK_DGRAM: ::c_int = 2; - -pub const IPPROTO_TCP: ::c_int = 6; -pub const IPPROTO_IP: ::c_int = 0; -pub const IPPROTO_IPV6: ::c_int = 41; - -pub const TCP_KEEPIDLE: ::c_int = 4; -pub const TCP_NODELAY: ::c_int = 8193; - -pub const IP_TTL: ::c_int = 8; -pub const IP_MULTICAST_LOOP: ::c_int = 9; -pub const IP_MULTICAST_TTL: ::c_int = 10; -pub const IP_ADD_MEMBERSHIP: ::c_int = 11; -pub const IP_DROP_MEMBERSHIP: ::c_int = 12; - -pub const IPV6_MULTICAST_LOOP: ::c_int = 19; -pub const IPV6_ADD_MEMBERSHIP: ::c_int = 20; -pub const IPV6_DROP_MEMBERSHIP: ::c_int = 21; -pub const IPV6_V6ONLY: ::c_int = 26; - -pub const SOL_SOCKET: ::c_int = 65535; - -pub const SO_REUSEADDR: ::c_int = 4; -pub const SO_BROADCAST: ::c_int = 6; -pub const SO_KEEPALIVE: ::c_int = 8; -pub const SO_RCVTIMEO: ::c_int = 20; -pub const SO_SNDTIMEO: ::c_int = 21; -pub const SO_LINGER: ::c_int = 128; -pub const SO_SNDBUF: ::c_int = 4097; -pub const SO_RCVBUF: ::c_int = 4098; -pub const SO_ERROR: ::c_int = 4105; - -extern { - pub fn socket(domain: ::c_int, ty: ::c_int, protocol: ::c_int) -> ::c_int; - pub fn bind(fd: ::c_int, addr: *const sockaddr, len: socklen_t) -> ::c_int; - pub fn connect(socket: ::c_int, address: *const sockaddr, - len: socklen_t) -> ::c_int; - pub fn listen(socket: ::c_int, backlog: ::c_int) -> ::c_int; - pub fn getsockname(socket: ::c_int, address: *mut sockaddr, - address_len: *mut socklen_t) -> ::c_int; - pub fn getsockopt(sockfd: ::c_int, - level: ::c_int, - optname: ::c_int, - optval: *mut ::c_void, - optlen: *mut ::socklen_t) -> ::c_int; - pub fn setsockopt(socket: ::c_int, level: ::c_int, name: ::c_int, - value: *const ::c_void, - option_len: socklen_t) -> ::c_int; -} diff --git a/src/liblibc/src/unix/bsd/apple/mod.rs b/src/liblibc/src/unix/bsd/apple/mod.rs index 6407e77bf3..0caef9fbdb 100644 --- a/src/liblibc/src/unix/bsd/apple/mod.rs +++ 
b/src/liblibc/src/unix/bsd/apple/mod.rs @@ -14,7 +14,6 @@ pub type rlim_t = u64; pub type mach_timebase_info_data_t = mach_timebase_info; pub type pthread_key_t = c_ulong; pub type sigset_t = u32; -pub type clockid_t = ::c_uint; pub type fsblkcnt_t = ::c_uint; pub type fsfilcnt_t = ::c_uint; pub type speed_t = ::c_ulong; @@ -494,11 +493,6 @@ pub const ABMON_10: ::nl_item = 42; pub const ABMON_11: ::nl_item = 43; pub const ABMON_12: ::nl_item = 44; -pub const CLOCK_REALTIME: ::clockid_t = 0; -pub const CLOCK_MONOTONIC: ::clockid_t = 6; -pub const CLOCK_PROCESS_CPUTIME_ID: ::clockid_t = 12; -pub const CLOCK_THREAD_CPUTIME_ID: ::clockid_t = 16; - pub const ERA: ::nl_item = 45; pub const ERA_D_FMT: ::nl_item = 46; pub const ERA_D_T_FMT: ::nl_item = 47; @@ -1982,8 +1976,6 @@ extern { pub fn aio_suspend(aiocb_list: *const *const aiocb, nitems: ::c_int, timeout: *const ::timespec) -> ::c_int; pub fn aio_cancel(fd: ::c_int, aiocbp: *mut aiocb) -> ::c_int; - pub fn clock_getres(clk_id: ::clockid_t, tp: *mut ::timespec) -> ::c_int; - pub fn clock_gettime(clk_id: ::clockid_t, tp: *mut ::timespec) -> ::c_int; pub fn lio_listio(mode: ::c_int, aiocb_list: *const *mut aiocb, nitems: ::c_int, sevp: *mut sigevent) -> ::c_int; diff --git a/src/liblibc/src/unix/bsd/freebsdlike/freebsd/mod.rs b/src/liblibc/src/unix/bsd/freebsdlike/freebsd/mod.rs index 6fec75eaaf..2fe2214223 100644 --- a/src/liblibc/src/unix/bsd/freebsdlike/freebsd/mod.rs +++ b/src/liblibc/src/unix/bsd/freebsdlike/freebsd/mod.rs @@ -651,8 +651,6 @@ pub const IPPROTO_DIVERT: ::c_int = 258; /// SeND pseudo-protocol pub const IPPROTO_SEND: ::c_int = 259; -pub const IP_BINDANY: ::c_int = 24; - pub const PF_SLOW: ::c_int = AF_SLOW; pub const PF_SCLUSTER: ::c_int = AF_SCLUSTER; pub const PF_ARP: ::c_int = AF_ARP; diff --git a/src/liblibc/src/unix/bsd/mod.rs b/src/liblibc/src/unix/bsd/mod.rs index ab256d1eb8..ef8dafee5f 100644 --- a/src/liblibc/src/unix/bsd/mod.rs +++ b/src/liblibc/src/unix/bsd/mod.rs @@ -299,7 +299,6 @@ pub const MDMBUF: ::tcflag_t = 0x00100000; pub const WNOHANG: ::c_int = 0x00000001; pub const WUNTRACED: ::c_int = 0x00000002; -pub const RTLD_LAZY: ::c_int = 0x1; pub const RTLD_NOW: ::c_int = 0x2; pub const RTLD_NEXT: *mut ::c_void = -1isize as *mut ::c_void; pub const RTLD_DEFAULT: *mut ::c_void = -2isize as *mut ::c_void; @@ -314,12 +313,6 @@ pub const TCP_MAXSEG: ::c_int = 2; pub const PIPE_BUF: usize = 512; -pub const POLLIN: ::c_short = 0x1; -pub const POLLPRI: ::c_short = 0x2; -pub const POLLOUT: ::c_short = 0x4; -pub const POLLERR: ::c_short = 0x8; -pub const POLLHUP: ::c_short = 0x10; -pub const POLLNVAL: ::c_short = 0x20; pub const POLLRDNORM: ::c_short = 0x040; pub const POLLWRNORM: ::c_short = 0x004; pub const POLLRDBAND: ::c_short = 0x080; diff --git a/src/liblibc/src/unix/bsd/netbsdlike/mod.rs b/src/liblibc/src/unix/bsd/netbsdlike/mod.rs index f9caca9e7a..e43f2aba8b 100644 --- a/src/liblibc/src/unix/bsd/netbsdlike/mod.rs +++ b/src/liblibc/src/unix/bsd/netbsdlike/mod.rs @@ -589,8 +589,6 @@ extern { iovcnt: ::c_int, offset: ::off_t) -> ::ssize_t; pub fn futimens(fd: ::c_int, times: *const ::timespec) -> ::c_int; - pub fn utimensat(dirfd: ::c_int, path: *const ::c_char, - times: *const ::timespec, flag: ::c_int) -> ::c_int; pub fn fdatasync(fd: ::c_int) -> ::c_int; pub fn openpty(amaster: *mut ::c_int, aslave: *mut ::c_int, diff --git a/src/liblibc/src/unix/bsd/netbsdlike/netbsd/mod.rs b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/mod.rs index 916ed4f298..df9bbcb9b7 100644 --- 
a/src/liblibc/src/unix/bsd/netbsdlike/netbsd/mod.rs +++ b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/mod.rs @@ -465,65 +465,6 @@ pub const MAP_NORESERVE : ::c_int = 0x40; pub const MAP_HASSEMAPHORE : ::c_int = 0x200; pub const MAP_WIRED: ::c_int = 0x800; -pub const DCCP_TYPE_REQUEST: ::c_int = 0; -pub const DCCP_TYPE_RESPONSE: ::c_int = 1; -pub const DCCP_TYPE_DATA: ::c_int = 2; -pub const DCCP_TYPE_ACK: ::c_int = 3; -pub const DCCP_TYPE_DATAACK: ::c_int = 4; -pub const DCCP_TYPE_CLOSEREQ: ::c_int = 5; -pub const DCCP_TYPE_CLOSE: ::c_int = 6; -pub const DCCP_TYPE_RESET: ::c_int = 7; -pub const DCCP_TYPE_MOVE: ::c_int = 8; - -pub const DCCP_FEATURE_CC: ::c_int = 1; -pub const DCCP_FEATURE_ECN: ::c_int = 2; -pub const DCCP_FEATURE_ACKRATIO: ::c_int = 3; -pub const DCCP_FEATURE_ACKVECTOR: ::c_int = 4; -pub const DCCP_FEATURE_MOBILITY: ::c_int = 5; -pub const DCCP_FEATURE_LOSSWINDOW: ::c_int = 6; -pub const DCCP_FEATURE_CONN_NONCE: ::c_int = 8; -pub const DCCP_FEATURE_IDENTREG: ::c_int = 7; - -pub const DCCP_OPT_PADDING: ::c_int = 0; -pub const DCCP_OPT_DATA_DISCARD: ::c_int = 1; -pub const DCCP_OPT_SLOW_RECV: ::c_int = 2; -pub const DCCP_OPT_BUF_CLOSED: ::c_int = 3; -pub const DCCP_OPT_CHANGE_L: ::c_int = 32; -pub const DCCP_OPT_CONFIRM_L: ::c_int = 33; -pub const DCCP_OPT_CHANGE_R: ::c_int = 34; -pub const DCCP_OPT_CONFIRM_R: ::c_int = 35; -pub const DCCP_OPT_INIT_COOKIE: ::c_int = 36; -pub const DCCP_OPT_NDP_COUNT: ::c_int = 37; -pub const DCCP_OPT_ACK_VECTOR0: ::c_int = 38; -pub const DCCP_OPT_ACK_VECTOR1: ::c_int = 39; -pub const DCCP_OPT_RECV_BUF_DROPS: ::c_int = 40; -pub const DCCP_OPT_TIMESTAMP: ::c_int = 41; -pub const DCCP_OPT_TIMESTAMP_ECHO: ::c_int = 42; -pub const DCCP_OPT_ELAPSEDTIME: ::c_int = 43; -pub const DCCP_OPT_DATACHECKSUM: ::c_int = 44; - -pub const DCCP_REASON_UNSPEC: ::c_int = 0; -pub const DCCP_REASON_CLOSED: ::c_int = 1; -pub const DCCP_REASON_INVALID: ::c_int = 2; -pub const DCCP_REASON_OPTION_ERR: ::c_int = 3; -pub const DCCP_REASON_FEA_ERR: ::c_int = 4; -pub const DCCP_REASON_CONN_REF: ::c_int = 5; -pub const DCCP_REASON_BAD_SNAME: ::c_int = 6; -pub const DCCP_REASON_BAD_COOKIE: ::c_int = 7; -pub const DCCP_REASON_INV_MOVE: ::c_int = 8; -pub const DCCP_REASON_UNANSW_CH: ::c_int = 10; -pub const DCCP_REASON_FRUITLESS_NEG: ::c_int = 11; - -pub const DCCP_CCID: ::c_int = 1; -pub const DCCP_CSLEN: ::c_int = 2; -pub const DCCP_MAXSEG: ::c_int = 4; -pub const DCCP_SERVICE: ::c_int = 8; - -pub const DCCP_NDP_LIMIT: ::c_int = 16; -pub const DCCP_SEQ_NUM_LIMIT: ::c_int = 16777216; -pub const DCCP_MAX_OPTIONS: ::c_int = 32; -pub const DCCP_MAX_PKTS: ::c_int = 100; - pub const _PC_LINK_MAX : ::c_int = 1; pub const _PC_MAX_CANON : ::c_int = 2; pub const _PC_MAX_INPUT : ::c_int = 3; diff --git a/src/liblibc/src/unix/haiku/mod.rs b/src/liblibc/src/unix/haiku/mod.rs index 2b05e8ef87..bce79e7a8b 100644 --- a/src/liblibc/src/unix/haiku/mod.rs +++ b/src/liblibc/src/unix/haiku/mod.rs @@ -169,14 +169,6 @@ s! { pub c_cc: [::cc_t; ::NCCS], } - pub struct flock { - pub l_type: ::c_short, - pub l_whence: ::c_short, - pub l_start: ::off_t, - pub l_len: ::off_t, - pub l_pid: ::pid_t, - } - pub struct stat { pub st_dev: dev_t, pub st_ino: ino_t, @@ -298,14 +290,6 @@ s! 
{ sa_userdata: *mut ::c_void, } - pub struct sigevent { - pub sigev_notify: ::c_int, - pub sigev_signo: ::c_int, - pub sigev_value: ::sigval, - __unused1: *mut ::c_void, // actually a function pointer - pub sigev_notify_attributes: *mut ::pthread_attr_t, - } - pub struct sem_t { pub se_type: i32, pub se_named_id: i32, // this is actually a union @@ -346,27 +330,6 @@ pub const F_GETFD: ::c_int = 0x0002; pub const F_SETFD: ::c_int = 0x0004; pub const F_GETFL: ::c_int = 0x0008; pub const F_SETFL: ::c_int = 0x0010; -pub const F_GETLK: ::c_int = 0x0020; -pub const F_SETLK: ::c_int = 0x0080; -pub const F_SETLKW: ::c_int = 0x0100; -pub const F_DUPFD_CLOEXEC: ::c_int = 0x0200; - -pub const AT_FDCWD: ::c_int = -1; -pub const AT_SYMLINK_NOFOLLOW: ::c_int = 0x01; -pub const AT_SYMLINK_FOLLOW: ::c_int = 0x02; -pub const AT_REMOVEDIR: ::c_int = 0x04; -pub const AT_EACCESS: ::c_int = 0x08; - -pub const POLLIN: ::c_short = 0x0001; -pub const POLLOUT: ::c_short = 0x0002; -pub const POLLRDNORM: ::c_short = POLLIN; -pub const POLLWRNORM: ::c_short = POLLOUT; -pub const POLLRDBAND: ::c_short = 0x0008; -pub const POLLWRBAND: ::c_short = 0x0010; -pub const POLLPRI: ::c_short = 0x0020; -pub const POLLERR: ::c_short = 0x0004; -pub const POLLHUP: ::c_short = 0x0080; -pub const POLLNVAL: ::c_short = 0x1000; pub const PTHREAD_CREATE_JOINABLE: ::c_int = 0; pub const PTHREAD_CREATE_DETACHED: ::c_int = 1; @@ -386,8 +349,6 @@ pub const RLIMIT_NLIMITS: ::c_int = 8; pub const RUSAGE_SELF: ::c_int = 0; -pub const RTLD_LAXY: ::c_int = 0; - pub const NCCS: usize = 11; pub const O_RDONLY: ::c_int = 0x0000; @@ -470,14 +431,6 @@ pub const SIGXCPU: ::c_int = 28; pub const SIGXFSZ: ::c_int = 29; pub const SIGBUS: ::c_int = 30; -pub const SIG_BLOCK: ::c_int = 1; -pub const SIG_UNBLOCK: ::c_int = 2; -pub const SIG_SETMASK: ::c_int = 3; - -pub const SIGEV_NONE: ::c_int = 0; -pub const SIGEV_SIGNAL: ::c_int = 1; -pub const SIGEV_THREAD: ::c_int = 2; - pub const EAI_SYSTEM: ::c_int = 11; pub const PROT_NONE: ::c_int = 0; @@ -497,9 +450,7 @@ pub const LC_MESSAGES: ::c_int = 6; pub const MAP_FILE: ::c_int = 0x00; pub const MAP_SHARED: ::c_int = 0x01; pub const MAP_PRIVATE: ::c_int = 0x02; -pub const MAP_FIXED: ::c_int = 0x04; -pub const MAP_ANONYMOUS: ::c_int = 0x08; -pub const MAP_ANON: ::c_int = MAP_ANONYMOUS; +pub const MAP_FIXED: ::c_int = 0x004; pub const MAP_FAILED: *mut ::c_void = !0 as *mut ::c_void; @@ -591,7 +542,7 @@ pub const EISDIR : ::c_int = -2147459063; pub const ENOTEMPTY : ::c_int = -2147459066; pub const ENOSPC : ::c_int = -2147459065; pub const EROFS : ::c_int = -2147459064; -pub const EMFILE : ::c_int = -2147459062; +pub const EMFILE : ::c_int = -214745962; pub const EXDEV : ::c_int = -2147459061; pub const ELOOP : ::c_int = -2147459060; pub const ENOEXEC : ::c_int = -2147478782; @@ -608,20 +559,9 @@ pub const MADV_DONTNEED: ::c_int = 5; pub const IFF_LOOPBACK: ::c_int = 0x0008; -pub const AF_UNSEC: ::c_int = 0; +pub const AF_UNIX: ::c_int = 9; pub const AF_INET: ::c_int = 1; -pub const AF_APPLETALK: ::c_int = 2; -pub const AF_ROUTE: ::c_int = 3; -pub const AF_LINK: ::c_int = 4; -pub const AF_INET6: ::c_int = 5; -pub const AF_DLI: ::c_int = 6; -pub const AF_IPX: ::c_int = 7; -pub const AF_NOTIFY: ::c_int = 8; -pub const AF_LOCAL: ::c_int = 9; -pub const AF_UNIX: ::c_int = AF_LOCAL; -pub const AF_BLUETOOTH: ::c_int = 10; -pub const AF_MAX: ::c_int = 11; - +pub const AF_INET6: ::c_int = 6; pub const IP_MULTICAST_TTL: ::c_int = 10; pub const IP_MULTICAST_LOOP: ::c_int = 11; pub const IP_TTL: ::c_int = 4; @@ -639,17 
+579,9 @@ pub const IPV6_JOIN_GROUP: ::c_int = 28; pub const IPV6_LEAVE_GROUP: ::c_int = 29; pub const IPV6_V6ONLY: ::c_int = 30; -pub const MSG_OOB: ::c_int = 0x0001; -pub const MSG_PEEK: ::c_int = 0x0002; -pub const MSG_DONTROUTE: ::c_int = 0x0004; -pub const MSG_EOR: ::c_int = 0x0008; -pub const MSG_TRUNC: ::c_int = 0x0010; -pub const MSG_CTRUNC: ::c_int = 0x0020; -pub const MSG_WAITALL: ::c_int = 0x0040; -pub const MSG_DONTWAIT: ::c_int = 0x0080; -pub const MSG_BCAST: ::c_int = 0x0100; -pub const MSG_MCAST: ::c_int = 0x0200; -pub const MSG_EOF: ::c_int = 0x0400; +pub const SO_DEBUG: ::c_int = 0x00000004; + +pub const MSG_PEEK: ::c_int = 0x2; pub const MSG_NOSIGNAL: ::c_int = 0x0800; pub const SHUT_RD: ::c_int = 0; @@ -663,18 +595,10 @@ pub const LOCK_UN: ::c_int = 0x08; pub const SIGSTKSZ: ::size_t = 16384; -pub const PATH_MAX: ::c_int = 1024; - -pub const SA_NOCLDSTOP: ::c_int = 0x01; -pub const SA_NOCLDWAIT: ::c_int = 0x02; -pub const SA_RESETHAND: ::c_int = 0x04; pub const SA_NODEFER: ::c_int = 0x08; +pub const SA_RESETHAND: ::c_int = 0x04; pub const SA_RESTART: ::c_int = 0x10; -pub const SA_ONSTACK: ::c_int = 0x20; -pub const SA_SIGINFO: ::c_int = 0x40; -pub const SA_NOMASK: ::c_int = SA_NODEFER; -pub const SA_STACK: ::c_int = SA_ONSTACK; -pub const SA_ONESHOT: ::c_int = SA_RESETHAND; +pub const SA_NOCLDSTOP: ::c_int = 0x01; pub const FD_SETSIZE: usize = 1024; @@ -686,78 +610,21 @@ pub const FILENAME_MAX: ::c_uint = 256; pub const FOPEN_MAX: ::c_uint = 128; pub const L_tmpnam: ::c_uint = 512; pub const TMP_MAX: ::c_uint = 32768; - -pub const _PC_CHOWN_RESTRICTED: ::c_int = 1; -pub const _PC_MAX_CANON: ::c_int = 2; -pub const _PC_MAX_INPUT: ::c_int = 3; pub const _PC_NAME_MAX: ::c_int = 4; -pub const _PC_NO_TRUNC: ::c_int = 5; -pub const _PC_PATH_MAX: ::c_int = 6; -pub const _PC_PIPE_BUF: ::c_int = 7; -pub const _PC_VDISABLE: ::c_int = 8; -pub const _PC_LINK_MAX: ::c_int = 25; -pub const _PC_SYNC_IO: ::c_int = 26; -pub const _PC_ASYNC_IO: ::c_int = 27; -pub const _PC_PRIO_IO: ::c_int = 28; -pub const _PC_SOCK_MAXBUF: ::c_int = 29; -pub const _PC_FILESIZEBITS: ::c_int = 30; -pub const _PC_REC_INCR_XFER_SIZE: ::c_int = 31; -pub const _PC_REC_MAX_XFER_SIZE: ::c_int = 32; -pub const _PC_REC_MIN_XFER_SIZE: ::c_int = 33; -pub const _PC_REC_XFER_ALIGN: ::c_int = 34; -pub const _PC_ALLOC_SIZE_MIN: ::c_int = 35; -pub const _PC_SYMLINK_MAX: ::c_int = 36; -pub const _PC_2_SYMLINKS: ::c_int = 37; -pub const _PC_XATTR_EXISTS: ::c_int = 38; -pub const _PC_XATTR_ENABLED: ::c_int = 39; pub const FIONBIO: ::c_int = 0xbe000000; -pub const _SC_ARG_MAX : ::c_int = 15; -pub const _SC_CHILD_MAX : ::c_int = 16; -pub const _SC_CLK_TCK : ::c_int = 17; -pub const _SC_JOB_CONTROL : ::c_int = 18; -pub const _SC_NGROUPS_MAX : ::c_int = 19; -pub const _SC_OPEN_MAX : ::c_int = 20; -pub const _SC_SAVED_IDS : ::c_int = 21; -pub const _SC_STREAM_MAX : ::c_int = 22; -pub const _SC_TZNAME_MAX : ::c_int = 23; -pub const _SC_VERSION : ::c_int = 24; +pub const _SC_IOV_MAX : ::c_int = 32; pub const _SC_GETGR_R_SIZE_MAX : ::c_int = 25; pub const _SC_GETPW_R_SIZE_MAX : ::c_int = 26; pub const _SC_PAGESIZE : ::c_int = 27; -pub const _SC_PAGE_SIZE : ::c_int = 27; -pub const _SC_SEM_NSEMS_MAX : ::c_int = 28; -pub const _SC_SEM_VALUE_MAX : ::c_int = 29; -pub const _SC_SEMAPHORES : ::c_int = 30; -pub const _SC_THREADS : ::c_int = 31; -pub const _SC_IOV_MAX : ::c_int = 32; -pub const _SC_UIO_MAXIOV : ::c_int = 32; -pub const _SC_NPROCESSORS_CONF : ::c_int = 34; -pub const _SC_NPROCESSORS_ONLN : ::c_int = 35; -pub const 
_SC_ATEXIT_MAX : ::c_int = 37; -pub const _SC_PASS_MAX : ::c_int = 39; -pub const _SC_PHYS_PAGES : ::c_int = 40; -pub const _SC_AVPHYS_PAGES : ::c_int = 41; -pub const _SC_PIPE : ::c_int = 42; -pub const _SC_SELECT : ::c_int = 43; -pub const _SC_POLL : ::c_int = 44; -pub const _SC_MAPPED_FILES : ::c_int = 45; -pub const _SC_THREAD_PROCESS_SHARED : ::c_int = 46; -pub const _SC_THREAD_STACK_MIN : ::c_int = 47; pub const _SC_THREAD_ATTR_STACKADDR : ::c_int = 48; pub const _SC_THREAD_ATTR_STACKSIZE : ::c_int = 49; pub const _SC_THREAD_PRIORITY_SCHEDULING : ::c_int = 50; -pub const _SC_REALTIME_SIGNALS : ::c_int = 51; -pub const _SC_MEMORY_PROTECTION : ::c_int = 52; -pub const _SC_SIGQUEUE_MAX : ::c_int = 53; -pub const _SC_RTSIG_MAX : ::c_int = 54; -pub const _SC_MONOTONIC_CLOCK : ::c_int = 55; -pub const _SC_DELAYTIMER_MAX : ::c_int = 56; -pub const _SC_TIMER_MAX : ::c_int = 57; -pub const _SC_TIMERS : ::c_int = 58; -pub const _SC_CPUTIME : ::c_int = 59; -pub const _SC_THREAD_CPUTIME : ::c_int = 60; +pub const _SC_THREAD_PROCESS_SHARED : ::c_int = 46; +pub const _SC_THREAD_STACK_MIN : ::c_int = 47; +pub const _SC_THREADS : ::c_int = 31; +pub const _SC_ATEXIT_MAX : ::c_int = 37; pub const PTHREAD_STACK_MIN: ::size_t = 8192; @@ -792,6 +659,12 @@ pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 3; pub const FIOCLEX: c_ulong = 0; // TODO: does not exist on Haiku! +pub const SA_ONSTACK: c_ulong = 0x20; +pub const SA_SIGINFO: c_ulong = 0x40; +pub const SA_NOCLDWAIT: c_ulong = 0x02; + +pub const SIG_SETMASK: ::c_int = 3; + pub const RUSAGE_CHILDREN: ::c_int = -1; pub const SOCK_STREAM: ::c_int = 1; @@ -802,7 +675,6 @@ pub const SOCK_SEQPACKET: ::c_int = 5; pub const SOL_SOCKET: ::c_int = -1; pub const SO_ACCEPTCONN: ::c_int = 0x00000001; pub const SO_BROADCAST: ::c_int = 0x00000002; -pub const SO_DEBUG: ::c_int = 0x00000004; pub const SO_DONTROUTE: ::c_int = 0x00000008; pub const SO_KEEPALIVE: ::c_int = 0x00000010; pub const SO_OOBINLINE: ::c_int = 0x00000020; @@ -822,8 +694,6 @@ pub const SO_NONBLOCK: ::c_int = 0x40000009; pub const SO_BINDTODEVICE: ::c_int = 0x4000000a; pub const SO_PEERCRED: ::c_int = 0x4000000b; -pub const SCM_RIGHTS: ::c_int = 0x01; - pub const NI_MAXHOST: ::size_t = 1025; pub const WNOHANG: ::c_int = 0x01; @@ -850,83 +720,34 @@ pub const VSWTCH: usize = 7; pub const VSTART: usize = 8; pub const VSTOP: usize = 9; pub const VSUSP: usize = 10; - -pub const IGNBRK: ::tcflag_t = 0x01; -pub const BRKINT: ::tcflag_t = 0x02; -pub const IGNPAR: ::tcflag_t = 0x04; -pub const PARMRK: ::tcflag_t = 0x08; -pub const INPCK: ::tcflag_t = 0x10; -pub const ISTRIP: ::tcflag_t = 0x20; -pub const INLCR: ::tcflag_t = 0x40; -pub const IGNCR: ::tcflag_t = 0x80; -pub const ICRNL: ::tcflag_t = 0x100; -pub const IUCLC: ::tcflag_t = 0x200; -pub const IXON: ::tcflag_t = 0x400; -pub const IXANY: ::tcflag_t = 0x800; -pub const IXOFF: ::tcflag_t = 0x1000; - -pub const OPOST: ::tcflag_t = 0x00000001; -pub const OLCUC: ::tcflag_t = 0x00000002; -pub const ONLCR: ::tcflag_t = 0x00000004; -pub const OCRNL: ::tcflag_t = 0x00000008; -pub const ONOCR: ::tcflag_t = 0x00000010; -pub const ONLRET: ::tcflag_t = 0x00000020; -pub const OFILL: ::tcflag_t = 0x00000040; -pub const OFDEL: ::tcflag_t = 0x00000080; -pub const NLDLY: ::tcflag_t = 0x00000100; -pub const NL0: ::tcflag_t = 0x00000000; -pub const NL1: ::tcflag_t = 0x00000100; -pub const CRDLY: ::tcflag_t = 0x00000600; -pub const CR0: ::tcflag_t = 0x00000000; -pub const CR1: ::tcflag_t = 0x00000200; -pub const CR2: ::tcflag_t = 0x00000400; -pub const CR3: ::tcflag_t = 
0x00000600; -pub const TABDLY: ::tcflag_t = 0x00001800; -pub const TAB0: ::tcflag_t = 0x00000000; -pub const TAB1: ::tcflag_t = 0x00000800; -pub const TAB2: ::tcflag_t = 0x00001000; -pub const TAB3: ::tcflag_t = 0x00001800; -pub const BSDLY: ::tcflag_t = 0x00002000; -pub const BS0: ::tcflag_t = 0x00000000; -pub const BS1: ::tcflag_t = 0x00002000; -pub const VTDLY: ::tcflag_t = 0x00004000; -pub const VT0: ::tcflag_t = 0x00000000; -pub const VT1: ::tcflag_t = 0x00004000; -pub const FFDLY: ::tcflag_t = 0x00008000; -pub const FF0: ::tcflag_t = 0x00000000; -pub const FF1: ::tcflag_t = 0x00008000; - -pub const CSIZE: ::tcflag_t = 0x00000020; -pub const CS5: ::tcflag_t = 0x00000000; -pub const CS6: ::tcflag_t = 0x00000000; -pub const CS7: ::tcflag_t = 0x00000000; -pub const CS8: ::tcflag_t = 0x00000020; -pub const CSTOPB: ::tcflag_t = 0x00000040; -pub const CREAD: ::tcflag_t = 0x00000080; -pub const PARENB: ::tcflag_t = 0x00000100; -pub const PARODD: ::tcflag_t = 0x00000200; -pub const HUPCL: ::tcflag_t = 0x00000400; -pub const CLOCAL: ::tcflag_t = 0x00000800; -pub const XLOBLK: ::tcflag_t = 0x00001000; -pub const CTSFLOW: ::tcflag_t = 0x00002000; -pub const RTSFLOW: ::tcflag_t = 0x00004000; -pub const CRTSCTS: ::tcflag_t = RTSFLOW | CTSFLOW; - -pub const ISIG: ::tcflag_t = 0x00000001; -pub const ICANON: ::tcflag_t = 0x00000002; -pub const XCASE: ::tcflag_t = 0x00000004; -pub const ECHO: ::tcflag_t = 0x00000008; -pub const ECHOE: ::tcflag_t = 0x00000010; -pub const ECHOK: ::tcflag_t = 0x00000020; -pub const ECHONL: ::tcflag_t = 0x00000040; -pub const NOFLSH: ::tcflag_t = 0x00000080; -pub const TOSTOP: ::tcflag_t = 0x00000100; -pub const IEXTEN: ::tcflag_t = 0x00000200; -pub const ECHOCTL: ::tcflag_t = 0x00000400; -pub const ECHOPRT: ::tcflag_t = 0x00000800; -pub const ECHOKE: ::tcflag_t = 0x00001000; -pub const FLUSHO: ::tcflag_t = 0x00002000; -pub const PENDIN: ::tcflag_t = 0x00004000; +pub const OLCUC: ::tcflag_t = 0o000002; +pub const OCRNL: ::tcflag_t = 0o000010; +pub const ONOCR: ::tcflag_t = 0o000020; +pub const ONLRET: ::tcflag_t = 0o000040; +pub const OFILL: ::tcflag_t = 0o000100; +pub const OFDEL: ::tcflag_t = 0o000200; +pub const NLDLY: ::tcflag_t = 0o000400; +pub const NL0: ::tcflag_t = 0o000000; +pub const NL1: ::tcflag_t = 0o000400; +pub const CRDLY: ::tcflag_t = 0o003000; +pub const CR0: ::tcflag_t = 0o000000; +pub const CR1: ::tcflag_t = 0o001000; +pub const CR2: ::tcflag_t = 0o002000; +pub const CR3: ::tcflag_t = 0o003000; +pub const TABDLY: ::tcflag_t = 0o014000; +pub const TAB0: ::tcflag_t = 0o000000; +pub const TAB1: ::tcflag_t = 0o004000; +pub const TAB2: ::tcflag_t = 0o010000; +pub const TAB3: ::tcflag_t = 0o014000; +pub const BSDLY: ::tcflag_t = 0o020000; +pub const BS0: ::tcflag_t = 0o000000; +pub const BS1: ::tcflag_t = 0o020000; +pub const FFDLY: ::tcflag_t = 0o100000; +pub const FF0: ::tcflag_t = 0o000000; +pub const FF1: ::tcflag_t = 0o100000; +pub const VTDLY: ::tcflag_t = 0o040000; +pub const VT0: ::tcflag_t = 0o000000; +pub const VT1: ::tcflag_t = 0o040000; pub const TCGB_CTS: ::c_int = 0x01; pub const TCGB_DSR: ::c_int = 0x02; @@ -940,40 +761,6 @@ pub const TIOCM_DSR: ::c_int = TCGB_DSR; pub const TIOCM_DTR: ::c_int = 0x10; pub const TIOCM_RTS: ::c_int = 0x20; -pub const B0: speed_t = 0x00; -pub const B50: speed_t = 0x01; -pub const B75: speed_t = 0x02; -pub const B110: speed_t = 0x03; -pub const B134: speed_t = 0x04; -pub const B150: speed_t = 0x05; -pub const B200: speed_t = 0x06; -pub const B300: speed_t = 0x07; -pub const B600: speed_t = 0x08; -pub const B1200: 
speed_t = 0x09; -pub const B1800: speed_t = 0x0A; -pub const B2400: speed_t = 0x0B; -pub const B4800: speed_t = 0x0C; -pub const B9600: speed_t = 0x0D; -pub const B19200: speed_t = 0x0E; -pub const B38400: speed_t = 0x0F; -pub const B57600: speed_t = 0x10; -pub const B115200: speed_t = 0x11; -pub const B230400: speed_t = 0x12; -pub const B31250: speed_t = 0x13; - -pub const TCSANOW: ::c_int = 0x01; -pub const TCSADRAIN: ::c_int = 0x02; -pub const TCSAFLUSH: ::c_int = 0x04; - -pub const TCOOFF: ::c_int = 0x01; -pub const TCOON: ::c_int = 0x02; -pub const TCIOFF: ::c_int = 0x04; -pub const TCION: ::c_int = 0x08; - -pub const TCIFLUSH: ::c_int = 0x01; -pub const TCOFLUSH: ::c_int = 0x02; -pub const TCIOFLUSH: ::c_int = 0x03; - f! { pub fn FD_CLR(fd: ::c_int, set: *mut fd_set) -> () { let fd = fd as usize; @@ -1002,40 +789,18 @@ f! { } pub fn WIFEXITED(status: ::c_int) -> bool { - (status & !0xff) == 0 + (status >> 8) == 0 } pub fn WEXITSTATUS(status: ::c_int) -> ::c_int { (status & 0xff) } - pub fn WIFSIGNALED(status: ::c_int) -> bool { - ((status >> 8) & 0xff) != 0 - } - pub fn WTERMSIG(status: ::c_int) -> ::c_int { (status >> 8) & 0xff } - - pub fn WIFSTOPPED(status: ::c_int) -> bool { - ((status >> 16) & 0xff) != 0 - } - - pub fn WSTOPSIG(status: ::c_int) -> ::c_int { - (status >> 16) & 0xff - } - - // actually WIFCORED, but this is used everywhere else - pub fn WCOREDUMP(status: ::c_int) -> bool { - (status & 0x10000) != 0 - } - - pub fn WIFCONTINUED(status: ::c_int) -> bool { - (status & 0x20000) != 0 - } } -#[link(name = "bsd")] extern { pub fn clock_gettime(clk_id: ::c_int, tp: *mut ::timespec) -> ::c_int; pub fn clock_settime(clk_id: ::c_int, tp: *const ::timespec) -> ::c_int; @@ -1082,8 +847,6 @@ extern { pub fn posix_madvise(addr: *mut ::c_void, len: ::size_t, advice: ::c_int) -> ::c_int; - pub fn shm_open(name: *const ::c_char, oflag: ::c_int, mode: ::mode_t) - -> ::c_int; pub fn shm_unlink(name: *const ::c_char) -> ::c_int; pub fn seekdir(dirp: *mut ::DIR, loc: ::c_long); @@ -1173,16 +936,6 @@ extern { link_name = "popen$UNIX2003")] pub fn popen(command: *const c_char, mode: *const c_char) -> *mut ::FILE; - pub fn openpty(amaster: *mut ::c_int, - aslave: *mut ::c_int, - name: *mut ::c_char, - termp: *mut termios, - winp: *mut ::winsize) -> ::c_int; - pub fn forkpty(amaster: *mut ::c_int, - name: *mut ::c_char, - termp: *mut termios, - winp: *mut ::winsize) -> ::pid_t; - pub fn sethostname(name: *const ::c_char, len: ::size_t) -> ::c_int; } cfg_if! 
{ diff --git a/src/liblibc/src/unix/mod.rs b/src/liblibc/src/unix/mod.rs index cc2de9136f..f60985c4b2 100644 --- a/src/liblibc/src/unix/mod.rs +++ b/src/liblibc/src/unix/mod.rs @@ -163,8 +163,17 @@ pub const S_ISUID: ::c_int = 0x800; pub const S_ISGID: ::c_int = 0x400; pub const S_ISVTX: ::c_int = 0x200; +pub const POLLIN: ::c_short = 0x1; +pub const POLLPRI: ::c_short = 0x2; +pub const POLLOUT: ::c_short = 0x4; +pub const POLLERR: ::c_short = 0x8; +pub const POLLHUP: ::c_short = 0x10; +pub const POLLNVAL: ::c_short = 0x20; + pub const IF_NAMESIZE: ::size_t = 16; +pub const RTLD_LAZY: ::c_int = 0x1; + pub const LOG_EMERG: ::c_int = 0; pub const LOG_ALERT: ::c_int = 1; pub const LOG_CRIT: ::c_int = 2; diff --git a/src/liblibc/src/unix/newlib/mod.rs b/src/liblibc/src/unix/newlib/mod.rs index d895541124..aec3bca367 100644 --- a/src/liblibc/src/unix/newlib/mod.rs +++ b/src/liblibc/src/unix/newlib/mod.rs @@ -435,15 +435,6 @@ pub const O_NONBLOCK: ::c_int = 16384; pub const O_ACCMODE: ::c_int = 3; pub const O_CLOEXEC: ::c_int = 0x80000; -pub const POLLIN: ::c_short = 0x1; -pub const POLLPRI: ::c_short = 0x2; -pub const POLLOUT: ::c_short = 0x4; -pub const POLLERR: ::c_short = 0x8; -pub const POLLHUP: ::c_short = 0x10; -pub const POLLNVAL: ::c_short = 0x20; - -pub const RTLD_LAZY: ::c_int = 0x1; - pub const STDIN_FILENO: ::c_int = 0; pub const STDOUT_FILENO: ::c_int = 1; pub const STDERR_FILENO: ::c_int = 2; diff --git a/src/liblibc/src/unix/notbsd/android/mod.rs b/src/liblibc/src/unix/notbsd/android/mod.rs index 892532c0ea..a038d66783 100644 --- a/src/liblibc/src/unix/notbsd/android/mod.rs +++ b/src/liblibc/src/unix/notbsd/android/mod.rs @@ -485,8 +485,6 @@ pub const ENOTRECOVERABLE: ::c_int = 131; pub const SOCK_STREAM: ::c_int = 1; pub const SOCK_DGRAM: ::c_int = 2; pub const SOCK_SEQPACKET: ::c_int = 5; -pub const SOCK_DCCP: ::c_int = 6; -pub const SOCK_PACKET: ::c_int = 10; pub const SOL_SOCKET: ::c_int = 1; pub const SOL_SCTP: ::c_int = 132; @@ -501,27 +499,6 @@ pub const AF_MAX: ::c_int = 43; #[doc(hidden)] pub const PF_MAX: ::c_int = AF_MAX; -/* DCCP socket options */ -pub const DCCP_SOCKOPT_PACKET_SIZE: ::c_int = 1; -pub const DCCP_SOCKOPT_SERVICE: ::c_int = 2; -pub const DCCP_SOCKOPT_CHANGE_L: ::c_int = 3; -pub const DCCP_SOCKOPT_CHANGE_R: ::c_int = 4; -pub const DCCP_SOCKOPT_GET_CUR_MPS: ::c_int = 5; -pub const DCCP_SOCKOPT_SERVER_TIMEWAIT: ::c_int = 6; -pub const DCCP_SOCKOPT_SEND_CSCOV: ::c_int = 10; -pub const DCCP_SOCKOPT_RECV_CSCOV: ::c_int = 11; -pub const DCCP_SOCKOPT_AVAILABLE_CCIDS: ::c_int = 12; -pub const DCCP_SOCKOPT_CCID: ::c_int = 13; -pub const DCCP_SOCKOPT_TX_CCID: ::c_int = 14; -pub const DCCP_SOCKOPT_RX_CCID: ::c_int = 15; -pub const DCCP_SOCKOPT_QPOLICY_ID: ::c_int = 16; -pub const DCCP_SOCKOPT_QPOLICY_TXQLEN: ::c_int = 17; -pub const DCCP_SOCKOPT_CCID_RX_INFO: ::c_int = 128; -pub const DCCP_SOCKOPT_CCID_TX_INFO: ::c_int = 192; - -/// maximum number of services provided on the same listening port -pub const DCCP_SERVICE_LIST_MAX_LEN: ::c_int = 32; - pub const SO_REUSEADDR: ::c_int = 2; pub const SO_TYPE: ::c_int = 3; pub const SO_ERROR: ::c_int = 4; diff --git a/src/liblibc/src/unix/notbsd/linux/mips/mod.rs b/src/liblibc/src/unix/notbsd/linux/mips/mod.rs index 7681999cc1..150b607869 100644 --- a/src/liblibc/src/unix/notbsd/linux/mips/mod.rs +++ b/src/liblibc/src/unix/notbsd/linux/mips/mod.rs @@ -294,8 +294,6 @@ pub const MAP_STACK: ::c_int = 0x40000; pub const SOCK_STREAM: ::c_int = 2; pub const SOCK_DGRAM: ::c_int = 1; pub const SOCK_SEQPACKET: ::c_int = 5; -pub 
const SOCK_DCCP: ::c_int = 6; -pub const SOCK_PACKET: ::c_int = 10; pub const SOL_SOCKET: ::c_int = 0xffff; @@ -355,27 +353,6 @@ pub const SO_INCOMING_CPU: ::c_int = 49; pub const SO_ATTACH_BPF: ::c_int = 50; pub const SO_DETACH_BPF: ::c_int = SO_DETACH_FILTER; -/* DCCP socket options */ -pub const DCCP_SOCKOPT_PACKET_SIZE: ::c_int = 1; -pub const DCCP_SOCKOPT_SERVICE: ::c_int = 2; -pub const DCCP_SOCKOPT_CHANGE_L: ::c_int = 3; -pub const DCCP_SOCKOPT_CHANGE_R: ::c_int = 4; -pub const DCCP_SOCKOPT_GET_CUR_MPS: ::c_int = 5; -pub const DCCP_SOCKOPT_SERVER_TIMEWAIT: ::c_int = 6; -pub const DCCP_SOCKOPT_SEND_CSCOV: ::c_int = 10; -pub const DCCP_SOCKOPT_RECV_CSCOV: ::c_int = 11; -pub const DCCP_SOCKOPT_AVAILABLE_CCIDS: ::c_int = 12; -pub const DCCP_SOCKOPT_CCID: ::c_int = 13; -pub const DCCP_SOCKOPT_TX_CCID: ::c_int = 14; -pub const DCCP_SOCKOPT_RX_CCID: ::c_int = 15; -pub const DCCP_SOCKOPT_QPOLICY_ID: ::c_int = 16; -pub const DCCP_SOCKOPT_QPOLICY_TXQLEN: ::c_int = 17; -pub const DCCP_SOCKOPT_CCID_RX_INFO: ::c_int = 128; -pub const DCCP_SOCKOPT_CCID_TX_INFO: ::c_int = 192; - -/// maximum number of services provided on the same listening port -pub const DCCP_SERVICE_LIST_MAX_LEN: ::c_int = 32; - pub const FIOCLEX: ::c_ulong = 0x6601; pub const FIONBIO: ::c_ulong = 0x667e; @@ -646,11 +623,6 @@ pub const EHWPOISON: ::c_int = 168; pub const SIGEV_THREAD_ID: ::c_int = 4; pub const EPOLLWAKEUP: ::c_int = 0x20000000; -#[doc(hidden)] -pub const AF_MAX: ::c_int = 42; -#[doc(hidden)] -pub const PF_MAX: ::c_int = AF_MAX; - #[link(name = "util")] extern { pub fn sysctl(name: *mut ::c_int, diff --git a/src/liblibc/src/unix/notbsd/linux/mod.rs b/src/liblibc/src/unix/notbsd/linux/mod.rs index c656e1dcb1..1e0856b0b1 100644 --- a/src/liblibc/src/unix/notbsd/linux/mod.rs +++ b/src/liblibc/src/unix/notbsd/linux/mod.rs @@ -100,12 +100,8 @@ s! { __align: [::c_int; 0], #[cfg(not(any(target_arch = "x86_64", target_arch = "powerpc64", target_arch = "mips64", target_arch = "s390x", - target_arch = "sparc64", target_arch = "aarch64")))] + target_arch = "sparc64")))] __align: [::c_long; 0], - #[cfg(all(target_arch = "aarch64", target_env = "gnu"))] - __align: [::c_long; 0], - #[cfg(all(target_arch = "aarch64", target_env = "musl"))] - __align: [::c_int; 0], size: [u8; __SIZEOF_PTHREAD_MUTEXATTR_T], } @@ -741,10 +737,14 @@ pub const AF_IB: ::c_int = 27; pub const AF_MPLS: ::c_int = 28; pub const AF_NFC: ::c_int = 39; pub const AF_VSOCK: ::c_int = 40; +#[doc(hidden)] +pub const AF_MAX: ::c_int = 42; pub const PF_IB: ::c_int = AF_IB; pub const PF_MPLS: ::c_int = AF_MPLS; pub const PF_NFC: ::c_int = AF_NFC; pub const PF_VSOCK: ::c_int = AF_VSOCK; +#[doc(hidden)] +pub const PF_MAX: ::c_int = AF_MAX; // System V IPC pub const IPC_PRIVATE: ::key_t = 0; diff --git a/src/liblibc/src/unix/notbsd/linux/musl/b32/mod.rs b/src/liblibc/src/unix/notbsd/linux/musl/b32/mod.rs index f8a62deab6..58c971b043 100644 --- a/src/liblibc/src/unix/notbsd/linux/musl/b32/mod.rs +++ b/src/liblibc/src/unix/notbsd/linux/musl/b32/mod.rs @@ -1,7 +1,6 @@ pub type c_long = i32; pub type c_ulong = u32; pub type nlink_t = u32; -pub type blksize_t = ::c_long; pub type __u64 = ::c_ulonglong; s! { @@ -32,31 +31,11 @@ s! 
{ pub struct sem_t { __val: [::c_int; 4], } - - pub struct ipc_perm { - pub __ipc_perm_key: ::key_t, - pub uid: ::uid_t, - pub gid: ::gid_t, - pub cuid: ::uid_t, - pub cgid: ::gid_t, - pub mode: ::mode_t, - pub __seq: ::c_int, - __unused1: ::c_long, - __unused2: ::c_long - } } -pub const SIGSTKSZ: ::size_t = 8192; -pub const MINSIGSTKSZ: ::size_t = 2048; - pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32; pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24; -#[doc(hidden)] -pub const AF_MAX: ::c_int = 42; -#[doc(hidden)] -pub const PF_MAX: ::c_int = AF_MAX; - cfg_if! { if #[cfg(any(target_arch = "x86"))] { mod x86; diff --git a/src/liblibc/src/unix/notbsd/linux/musl/b64/aarch64.rs b/src/liblibc/src/unix/notbsd/linux/musl/b64/aarch64.rs index fc5863d35c..fa103aacaa 100644 --- a/src/liblibc/src/unix/notbsd/linux/musl/b64/aarch64.rs +++ b/src/liblibc/src/unix/notbsd/linux/musl/b64/aarch64.rs @@ -1,81 +1,7 @@ pub type c_char = u8; pub type __u64 = ::c_ulonglong; -pub type wchar_t = u32; -pub type nlink_t = u32; -pub type blksize_t = ::c_int; - -s! { - pub struct stat { - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - __pad0: ::c_ulong, - pub st_size: ::off_t, - pub st_blksize: ::blksize_t, - __pad1: ::c_int, - pub st_blocks: ::blkcnt_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - __unused: [::c_uint; 2], - } - - pub struct stat64 { - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - __pad0: ::c_ulong, - pub st_size: ::off_t, - pub st_blksize: ::blksize_t, - __pad1: ::c_int, - pub st_blocks: ::blkcnt_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - __unused: [::c_uint; 2], - } - - pub struct ipc_perm { - pub __ipc_perm_key: ::key_t, - pub uid: ::uid_t, - pub gid: ::gid_t, - pub cuid: ::uid_t, - pub cgid: ::gid_t, - pub mode: ::mode_t, - pub __seq: ::c_ushort, - __unused1: ::c_ulong, - __unused2: ::c_ulong, - } -} pub const SYS_pivot_root: ::c_long = 41; pub const SYS_gettid: ::c_long = 178; pub const SYS_perf_event_open: ::c_long = 241; pub const SYS_memfd_create: ::c_long = 279; - -pub const O_DIRECT: ::c_int = 0x10000; -pub const O_DIRECTORY: ::c_int = 0x4000; -pub const O_LARGEFILE: ::c_int = 0x20000; -pub const O_NOFOLLOW: ::c_int = 0x8000; - -pub const MINSIGSTKSZ: ::size_t = 6144; -pub const SIGSTKSZ: ::size_t = 12288; - -#[doc(hidden)] -pub const PF_MAX: ::c_int = 43; -#[doc(hidden)] -pub const AF_MAX: ::c_int = PF_MAX; diff --git a/src/liblibc/src/unix/notbsd/linux/musl/b64/mod.rs b/src/liblibc/src/unix/notbsd/linux/musl/b64/mod.rs index 9a1243a171..cdce288b3b 100644 --- a/src/liblibc/src/unix/notbsd/linux/musl/b64/mod.rs +++ b/src/liblibc/src/unix/notbsd/linux/musl/b64/mod.rs @@ -1,7 +1,51 @@ +pub type wchar_t = i32; pub type c_long = i64; pub type c_ulong = u64; +pub type nlink_t = u64; s! 
{ + pub struct stat { + pub st_dev: ::dev_t, + pub st_ino: ::ino_t, + pub st_nlink: ::nlink_t, + pub st_mode: ::mode_t, + pub st_uid: ::uid_t, + pub st_gid: ::gid_t, + __pad0: ::c_int, + pub st_rdev: ::dev_t, + pub st_size: ::off_t, + pub st_blksize: ::blksize_t, + pub st_blocks: ::blkcnt_t, + pub st_atime: ::time_t, + pub st_atime_nsec: ::c_long, + pub st_mtime: ::time_t, + pub st_mtime_nsec: ::c_long, + pub st_ctime: ::time_t, + pub st_ctime_nsec: ::c_long, + __unused: [::c_long; 3], + } + + pub struct stat64 { + pub st_dev: ::dev_t, + pub st_ino: ::ino64_t, + pub st_nlink: ::nlink_t, + pub st_mode: ::mode_t, + pub st_uid: ::uid_t, + pub st_gid: ::gid_t, + __pad0: ::c_int, + pub st_rdev: ::dev_t, + pub st_size: ::off_t, + pub st_blksize: ::blksize_t, + pub st_blocks: ::blkcnt64_t, + pub st_atime: ::time_t, + pub st_atime_nsec: ::c_long, + pub st_mtime: ::time_t, + pub st_mtime_nsec: ::c_long, + pub st_ctime: ::time_t, + pub st_ctime_nsec: ::c_long, + __reserved: [::c_long; 3], + } + pub struct statfs64 { pub f_type: ::c_ulong, pub f_bsize: ::c_ulong, @@ -123,7 +167,11 @@ s! { pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; +pub const O_DIRECT: ::c_int = 0x4000; +pub const O_DIRECTORY: ::c_int = 0x10000; +pub const O_NOFOLLOW: ::c_int = 0x20000; pub const O_ASYNC: ::c_int = 0x2000; +pub const O_LARGEFILE: ::c_int = 0; pub const FIOCLEX: ::c_int = 0x5451; pub const FIONBIO: ::c_int = 0x5421; @@ -154,6 +202,7 @@ pub const MAP_NORESERVE: ::c_int = 0x04000; pub const MAP_POPULATE: ::c_int = 0x08000; pub const MAP_NONBLOCK: ::c_int = 0x010000; pub const MAP_STACK: ::c_int = 0x020000; +pub const MAP_32BIT: ::c_int = 0x0040; pub const SOCK_STREAM: ::c_int = 1; pub const SOCK_DGRAM: ::c_int = 2; diff --git a/src/liblibc/src/unix/notbsd/linux/musl/b64/powerpc64.rs b/src/liblibc/src/unix/notbsd/linux/musl/b64/powerpc64.rs index 5f31ab8962..4e8b9adbf7 100644 --- a/src/liblibc/src/unix/notbsd/linux/musl/b64/powerpc64.rs +++ b/src/liblibc/src/unix/notbsd/linux/musl/b64/powerpc64.rs @@ -1,80 +1,7 @@ pub type c_char = u8; -pub type wchar_t = i32; pub type __u64 = ::c_ulong; -pub type nlink_t = u64; -pub type blksize_t = ::c_long; - -s! 
{ - pub struct stat { - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_nlink: ::nlink_t, - pub st_mode: ::mode_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - __pad0: ::c_int, - pub st_rdev: ::dev_t, - pub st_size: ::off_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - __unused: [::c_long; 3], - } - - pub struct stat64 { - pub st_dev: ::dev_t, - pub st_ino: ::ino64_t, - pub st_nlink: ::nlink_t, - pub st_mode: ::mode_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - __pad0: ::c_int, - pub st_rdev: ::dev_t, - pub st_size: ::off_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt64_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - __reserved: [::c_long; 3], - } - - pub struct ipc_perm { - pub __ipc_perm_key: ::key_t, - pub uid: ::uid_t, - pub gid: ::gid_t, - pub cuid: ::uid_t, - pub cgid: ::gid_t, - pub mode: ::mode_t, - pub __seq: ::c_int, - __unused1: ::c_long, - __unused2: ::c_long - } -} pub const SYS_pivot_root: ::c_long = 203; pub const SYS_gettid: ::c_long = 207; pub const SYS_perf_event_open: ::c_long = 319; pub const SYS_memfd_create: ::c_long = 360; - -pub const MAP_32BIT: ::c_int = 0x0040; -pub const O_DIRECT: ::c_int = 0x4000; -pub const O_DIRECTORY: ::c_int = 0x10000; -pub const O_LARGEFILE: ::c_int = 0; -pub const O_NOFOLLOW: ::c_int = 0x20000; - -pub const SIGSTKSZ: ::size_t = 8192; -pub const MINSIGSTKSZ: ::size_t = 2048; - -#[doc(hidden)] -pub const AF_MAX: ::c_int = 42; -#[doc(hidden)] -pub const PF_MAX: ::c_int = AF_MAX; diff --git a/src/liblibc/src/unix/notbsd/linux/musl/b64/x86_64.rs b/src/liblibc/src/unix/notbsd/linux/musl/b64/x86_64.rs index 78d38e49e8..f81efdc6d1 100644 --- a/src/liblibc/src/unix/notbsd/linux/musl/b64/x86_64.rs +++ b/src/liblibc/src/unix/notbsd/linux/musl/b64/x86_64.rs @@ -1,52 +1,7 @@ pub type c_char = i8; -pub type wchar_t = i32; -pub type nlink_t = u64; -pub type blksize_t = ::c_long; pub type __u64 = ::c_ulonglong; s! { - pub struct stat { - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_nlink: ::nlink_t, - pub st_mode: ::mode_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - __pad0: ::c_int, - pub st_rdev: ::dev_t, - pub st_size: ::off_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - __unused: [::c_long; 3], - } - - pub struct stat64 { - pub st_dev: ::dev_t, - pub st_ino: ::ino64_t, - pub st_nlink: ::nlink_t, - pub st_mode: ::mode_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - __pad0: ::c_int, - pub st_rdev: ::dev_t, - pub st_size: ::off_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt64_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - __reserved: [::c_long; 3], - } - pub struct mcontext_t { __private: [u64; 32], } @@ -59,18 +14,6 @@ s! 
{ pub uc_sigmask: ::sigset_t, __private: [u8; 512], } - - pub struct ipc_perm { - pub __ipc_perm_key: ::key_t, - pub uid: ::uid_t, - pub gid: ::gid_t, - pub cuid: ::uid_t, - pub cgid: ::gid_t, - pub mode: ::mode_t, - pub __seq: ::c_int, - __unused1: ::c_long, - __unused2: ::c_long - } } // Syscall table @@ -435,17 +378,3 @@ pub const DS: ::c_int = 23; pub const ES: ::c_int = 24; pub const FS: ::c_int = 25; pub const GS: ::c_int = 26; - -pub const MAP_32BIT: ::c_int = 0x0040; -pub const O_DIRECT: ::c_int = 0x4000; -pub const O_DIRECTORY: ::c_int = 0x10000; -pub const O_LARGEFILE: ::c_int = 0; -pub const O_NOFOLLOW: ::c_int = 0x20000; - -pub const SIGSTKSZ: ::size_t = 8192; -pub const MINSIGSTKSZ: ::size_t = 2048; - -#[doc(hidden)] -pub const AF_MAX: ::c_int = 42; -#[doc(hidden)] -pub const PF_MAX: ::c_int = AF_MAX; diff --git a/src/liblibc/src/unix/notbsd/linux/musl/mod.rs b/src/liblibc/src/unix/notbsd/linux/musl/mod.rs index ab20d4c749..5156d2f249 100644 --- a/src/liblibc/src/unix/notbsd/linux/musl/mod.rs +++ b/src/liblibc/src/unix/notbsd/linux/musl/mod.rs @@ -5,6 +5,7 @@ pub type ino_t = u64; pub type off_t = i64; pub type blkcnt_t = i64; +pub type blksize_t = c_long; pub type fsblkcnt_t = ::c_ulonglong; pub type fsfilcnt_t = ::c_ulonglong; pub type rlim_t = ::c_ulonglong; @@ -37,6 +38,18 @@ s! { pub sa_restorer: ::dox::Option, } + pub struct ipc_perm { + pub __ipc_perm_key: ::key_t, + pub uid: ::uid_t, + pub gid: ::gid_t, + pub cuid: ::uid_t, + pub cgid: ::gid_t, + pub mode: ::mode_t, + pub __seq: ::c_int, + __unused1: ::c_long, + __unused2: ::c_long + } + pub struct termios { pub c_iflag: ::tcflag_t, pub c_oflag: ::tcflag_t, @@ -127,9 +140,6 @@ pub const RLIMIT_NLIMITS: ::c_int = 16; pub const MAP_ANONYMOUS: ::c_int = MAP_ANON; -pub const SOCK_DCCP: ::c_int = 6; -pub const SOCK_PACKET: ::c_int = 10; - pub const TCP_COOKIE_TRANSACTIONS: ::c_int = 15; pub const TCP_THIN_LINEAR_TIMEOUTS: ::c_int = 16; pub const TCP_THIN_DUPACK: ::c_int = 17; @@ -202,6 +212,8 @@ pub const CLOCK_TAI: ::clockid_t = 11; pub const MCL_CURRENT: ::c_int = 0x0001; pub const MCL_FUTURE: ::c_int = 0x0002; +pub const SIGSTKSZ: ::size_t = 8192; +pub const MINSIGSTKSZ: ::size_t = 2048; pub const CBAUD: ::tcflag_t = 0o0010017; pub const TAB1: ::c_int = 0x00000800; pub const TAB2: ::c_int = 0x00001000; diff --git a/src/liblibc/src/unix/notbsd/linux/other/mod.rs b/src/liblibc/src/unix/notbsd/linux/other/mod.rs index 7a58c181b1..7363ca61cb 100644 --- a/src/liblibc/src/unix/notbsd/linux/other/mod.rs +++ b/src/liblibc/src/unix/notbsd/linux/other/mod.rs @@ -247,8 +247,6 @@ pub const EREMOTEIO: ::c_int = 121; pub const SOCK_STREAM: ::c_int = 1; pub const SOCK_DGRAM: ::c_int = 2; pub const SOCK_SEQPACKET: ::c_int = 5; -pub const SOCK_DCCP: ::c_int = 6; -pub const SOCK_PACKET: ::c_int = 10; pub const TCP_COOKIE_TRANSACTIONS: ::c_int = 15; pub const TCP_THIN_LINEAR_TIMEOUTS: ::c_int = 16; @@ -261,27 +259,6 @@ pub const TCP_REPAIR_OPTIONS: ::c_int = 22; pub const TCP_FASTOPEN: ::c_int = 23; pub const TCP_TIMESTAMP: ::c_int = 24; -/* DCCP socket options */ -pub const DCCP_SOCKOPT_PACKET_SIZE: ::c_int = 1; -pub const DCCP_SOCKOPT_SERVICE: ::c_int = 2; -pub const DCCP_SOCKOPT_CHANGE_L: ::c_int = 3; -pub const DCCP_SOCKOPT_CHANGE_R: ::c_int = 4; -pub const DCCP_SOCKOPT_GET_CUR_MPS: ::c_int = 5; -pub const DCCP_SOCKOPT_SERVER_TIMEWAIT: ::c_int = 6; -pub const DCCP_SOCKOPT_SEND_CSCOV: ::c_int = 10; -pub const DCCP_SOCKOPT_RECV_CSCOV: ::c_int = 11; -pub const DCCP_SOCKOPT_AVAILABLE_CCIDS: ::c_int = 12; -pub const DCCP_SOCKOPT_CCID: 
::c_int = 13; -pub const DCCP_SOCKOPT_TX_CCID: ::c_int = 14; -pub const DCCP_SOCKOPT_RX_CCID: ::c_int = 15; -pub const DCCP_SOCKOPT_QPOLICY_ID: ::c_int = 16; -pub const DCCP_SOCKOPT_QPOLICY_TXQLEN: ::c_int = 17; -pub const DCCP_SOCKOPT_CCID_RX_INFO: ::c_int = 128; -pub const DCCP_SOCKOPT_CCID_TX_INFO: ::c_int = 192; - -/// maximum number of services provided on the same listening port -pub const DCCP_SERVICE_LIST_MAX_LEN: ::c_int = 32; - pub const SIGTTIN: ::c_int = 21; pub const SIGTTOU: ::c_int = 22; pub const SIGXCPU: ::c_int = 24; @@ -539,11 +516,6 @@ pub const TIOCM_DSR: ::c_int = 0x100; pub const TIOCM_CD: ::c_int = TIOCM_CAR; pub const TIOCM_RI: ::c_int = TIOCM_RNG; -#[doc(hidden)] -pub const AF_MAX: ::c_int = 42; -#[doc(hidden)] -pub const PF_MAX: ::c_int = AF_MAX; - cfg_if! { if #[cfg(any(target_arch = "arm", target_arch = "x86", target_arch = "x86_64"))] { diff --git a/src/liblibc/src/unix/notbsd/mod.rs b/src/liblibc/src/unix/notbsd/mod.rs index 07a6043032..859312e0ad 100644 --- a/src/liblibc/src/unix/notbsd/mod.rs +++ b/src/liblibc/src/unix/notbsd/mod.rs @@ -760,7 +760,6 @@ pub const SPLICE_F_MORE: ::c_uint = 0x04; pub const SPLICE_F_GIFT: ::c_uint = 0x08; pub const RTLD_LOCAL: ::c_int = 0; -pub const RTLD_LAZY: ::c_int = 1; pub const POSIX_FADV_NORMAL: ::c_int = 0; pub const POSIX_FADV_RANDOM: ::c_int = 1; @@ -794,12 +793,6 @@ pub const P_PGID: idtype_t = 2; pub const UTIME_OMIT: c_long = 1073741822; pub const UTIME_NOW: c_long = 1073741823; -pub const POLLIN: ::c_short = 0x1; -pub const POLLPRI: ::c_short = 0x2; -pub const POLLOUT: ::c_short = 0x4; -pub const POLLERR: ::c_short = 0x8; -pub const POLLHUP: ::c_short = 0x10; -pub const POLLNVAL: ::c_short = 0x20; pub const POLLRDNORM: ::c_short = 0x040; pub const POLLRDBAND: ::c_short = 0x080; @@ -908,8 +901,6 @@ extern { pub fn uselocale(loc: ::locale_t) -> ::locale_t; pub fn creat64(path: *const c_char, mode: mode_t) -> ::c_int; pub fn fstat64(fildes: ::c_int, buf: *mut stat64) -> ::c_int; - pub fn fstatat64(dirfd: ::c_int, pathname: *const c_char, - buf: *mut stat64, flags: ::c_int) -> ::c_int; pub fn ftruncate64(fd: ::c_int, length: off64_t) -> ::c_int; pub fn getrlimit64(resource: ::c_int, rlim: *mut rlimit64) -> ::c_int; pub fn lseek64(fd: ::c_int, offset: off64_t, whence: ::c_int) -> off64_t; @@ -927,16 +918,8 @@ extern { oflag: ::c_int, ...) -> ::c_int; pub fn pread64(fd: ::c_int, buf: *mut ::c_void, count: ::size_t, offset: off64_t) -> ::ssize_t; - pub fn preadv64(fd: ::c_int, - iov: *const ::iovec, - iovcnt: ::c_int, - offset: ::off64_t) -> ::ssize_t; pub fn pwrite64(fd: ::c_int, buf: *const ::c_void, count: ::size_t, offset: off64_t) -> ::ssize_t; - pub fn pwritev64(fd: ::c_int, - iov: *const ::iovec, - iovcnt: ::c_int, - offset: ::off64_t) -> ::ssize_t; pub fn readdir64(dirp: *mut ::DIR) -> *mut ::dirent64; pub fn readdir64_r(dirp: *mut ::DIR, entry: *mut ::dirent64, result: *mut *mut ::dirent64) -> ::c_int; diff --git a/src/liblibc/src/unix/solaris/mod.rs b/src/liblibc/src/unix/solaris/mod.rs index a6c2d4bf97..5edf4178ec 100644 --- a/src/liblibc/src/unix/solaris/mod.rs +++ b/src/liblibc/src/unix/solaris/mod.rs @@ -591,9 +591,6 @@ pub const WSTOPPED: ::c_int = WUNTRACED; pub const WCONTINUED: ::c_int = 0x08; pub const WNOWAIT: ::c_int = 0x80; -pub const AT_FDCWD: ::c_int = 0xffd19553; -pub const AT_SYMLINK_NOFOLLOW: ::c_int = 0x1000; - // Solaris defines a great many more of these; we only expose the // standardized ones. 
pub const P_PID: idtype_t = 0; @@ -769,13 +766,6 @@ pub const GLOB_NOSPACE : ::c_int = -2; pub const GLOB_ABORTED : ::c_int = -1; pub const GLOB_NOMATCH : ::c_int = -3; -pub const POLLIN: ::c_short = 0x1; -pub const POLLPRI: ::c_short = 0x2; -pub const POLLOUT: ::c_short = 0x4; -pub const POLLERR: ::c_short = 0x8; -pub const POLLHUP: ::c_short = 0x10; -pub const POLLNVAL: ::c_short = 0x20; - pub const POSIX_MADV_NORMAL: ::c_int = 0; pub const POSIX_MADV_RANDOM: ::c_int = 1; pub const POSIX_MADV_SEQUENTIAL: ::c_int = 2; @@ -1110,7 +1100,6 @@ pub const RTLD_DEFAULT: *mut ::c_void = -2isize as *mut ::c_void; pub const RTLD_SELF: *mut ::c_void = -3isize as *mut ::c_void; pub const RTLD_PROBE: *mut ::c_void = -4isize as *mut ::c_void; -pub const RTLD_LAZY: ::c_int = 0x1; pub const RTLD_NOW: ::c_int = 0x2; pub const RTLD_NOLOAD: ::c_int = 0x4; pub const RTLD_GLOBAL: ::c_int = 0x100; @@ -1267,8 +1256,6 @@ extern { addrlen: *mut ::socklen_t) -> ::ssize_t; pub fn mkstemps(template: *mut ::c_char, suffixlen: ::c_int) -> ::c_int; pub fn futimes(fd: ::c_int, times: *const ::timeval) -> ::c_int; - pub fn utimensat(dirfd: ::c_int, path: *const ::c_char, - times: *const ::timespec, flag: ::c_int) -> ::c_int; pub fn nl_langinfo(item: ::nl_item) -> *mut ::c_char; pub fn bind(socket: ::c_int, address: *const ::sockaddr, diff --git a/src/liblibc/src/unix/uclibc/mod.rs b/src/liblibc/src/unix/uclibc/mod.rs index 87521b2a81..28b4136350 100644 --- a/src/liblibc/src/unix/uclibc/mod.rs +++ b/src/liblibc/src/unix/uclibc/mod.rs @@ -928,7 +928,6 @@ pub const SPLICE_F_MORE: ::c_uint = 0x04; pub const SPLICE_F_GIFT: ::c_uint = 0x08; pub const RTLD_LOCAL: ::c_int = 0; -pub const RTLD_LAZY: ::c_int = 1; pub const POSIX_FADV_NORMAL: ::c_int = 0; pub const POSIX_FADV_RANDOM: ::c_int = 1; @@ -945,13 +944,6 @@ pub const LOG_AUTHPRIV: ::c_int = 10 << 3; pub const LOG_FTP: ::c_int = 11 << 3; pub const LOG_PERROR: ::c_int = 0x20; -pub const POLLIN: ::c_short = 0x1; -pub const POLLPRI: ::c_short = 0x2; -pub const POLLOUT: ::c_short = 0x4; -pub const POLLERR: ::c_short = 0x8; -pub const POLLHUP: ::c_short = 0x10; -pub const POLLNVAL: ::c_short = 0x20; - pub const PIPE_BUF: usize = 4096; pub const SI_LOAD_SHIFT: ::c_uint = 16; diff --git a/src/libproc_macro/Cargo.toml b/src/libproc_macro/Cargo.toml index 1b5141773a..cfd83e348a 100644 --- a/src/libproc_macro/Cargo.toml +++ b/src/libproc_macro/Cargo.toml @@ -10,3 +10,4 @@ crate-type = ["dylib"] [dependencies] syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } +rustc_errors = { path = "../librustc_errors" } diff --git a/src/libproc_macro/diagnostic.rs b/src/libproc_macro/diagnostic.rs new file mode 100644 index 0000000000..c39aec896e --- /dev/null +++ b/src/libproc_macro/diagnostic.rs @@ -0,0 +1,134 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use Span; + +use rustc_errors as rustc; + +/// An enum representing a diagnostic level. +#[unstable(feature = "proc_macro", issue = "38356")] +#[derive(Copy, Clone, Debug)] +pub enum Level { + /// An error. + Error, + /// A warning. + Warning, + /// A note. + Note, + /// A help message. 
+    Help,
+    #[doc(hidden)]
+    __Nonexhaustive,
+}
+
+/// A structure representing a diagnostic message and associated children
+/// messages.
+#[unstable(feature = "proc_macro", issue = "38356")]
+#[derive(Clone, Debug)]
+pub struct Diagnostic {
+    level: Level,
+    message: String,
+    span: Option<Span>,
+    children: Vec<Diagnostic>
+}
+
+macro_rules! diagnostic_child_methods {
+    ($spanned:ident, $regular:ident, $level:expr) => (
+        /// Add a new child diagnostic message to `self` with the level
+        /// identified by this method's name with the given `span` and `message`.
+        #[unstable(feature = "proc_macro", issue = "38356")]
+        pub fn $spanned<T: Into<String>>(mut self, span: Span, message: T) -> Diagnostic {
+            self.children.push(Diagnostic::spanned(span, $level, message));
+            self
+        }
+
+        /// Add a new child diagnostic message to `self` with the level
+        /// identified by this method's name with the given `message`.
+        #[unstable(feature = "proc_macro", issue = "38356")]
+        pub fn $regular<T: Into<String>>(mut self, message: T) -> Diagnostic {
+            self.children.push(Diagnostic::new($level, message));
+            self
+        }
+    )
+}
+
+impl Diagnostic {
+    /// Create a new diagnostic with the given `level` and `message`.
+    #[unstable(feature = "proc_macro", issue = "38356")]
+    pub fn new<T: Into<String>>(level: Level, message: T) -> Diagnostic {
+        Diagnostic {
+            level: level,
+            message: message.into(),
+            span: None,
+            children: vec![]
+        }
+    }
+
+    /// Create a new diagnostic with the given `level` and `message` pointing to
+    /// the given `span`.
+    #[unstable(feature = "proc_macro", issue = "38356")]
+    pub fn spanned<T: Into<String>>(span: Span, level: Level, message: T) -> Diagnostic {
+        Diagnostic {
+            level: level,
+            message: message.into(),
+            span: Some(span),
+            children: vec![]
+        }
+    }
+
+    diagnostic_child_methods!(span_error, error, Level::Error);
+    diagnostic_child_methods!(span_warning, warning, Level::Warning);
+    diagnostic_child_methods!(span_note, note, Level::Note);
+    diagnostic_child_methods!(span_help, help, Level::Help);
+
+    /// Returns the diagnostic `level` for `self`.
+    #[unstable(feature = "proc_macro", issue = "38356")]
+    pub fn level(&self) -> Level {
+        self.level
+    }
+
+    /// Emit the diagnostic.
+ #[unstable(feature = "proc_macro", issue = "38356")] + pub fn emit(self) { + ::__internal::with_sess(move |(sess, _)| { + let handler = &sess.span_diagnostic; + let level = __internal::level_to_internal_level(self.level); + let mut diag = rustc::DiagnosticBuilder::new(handler, level, &*self.message); + + if let Some(span) = self.span { + diag.set_span(span.0); + } + + for child in self.children { + let span = child.span.map(|s| s.0); + let level = __internal::level_to_internal_level(child.level); + diag.sub(level, &*child.message, span); + } + + diag.emit(); + }); + } +} + +#[unstable(feature = "proc_macro_internals", issue = "27812")] +#[doc(hidden)] +pub mod __internal { + use super::{Level, rustc}; + + pub fn level_to_internal_level(level: Level) -> rustc::Level { + match level { + Level::Error => rustc::Level::Error, + Level::Warning => rustc::Level::Warning, + Level::Note => rustc::Level::Note, + Level::Help => rustc::Level::Help, + Level::__Nonexhaustive => unreachable!("Level::__Nonexhaustive") + } + } +} diff --git a/src/libproc_macro/lib.rs b/src/libproc_macro/lib.rs index 3f425c24a9..e6307f10c1 100644 --- a/src/libproc_macro/lib.rs +++ b/src/libproc_macro/lib.rs @@ -42,18 +42,24 @@ #[macro_use] extern crate syntax; extern crate syntax_pos; +extern crate rustc_errors; + +mod diagnostic; + +#[unstable(feature = "proc_macro", issue = "38356")] +pub use diagnostic::{Diagnostic, Level}; use std::{ascii, fmt, iter}; +use std::rc::Rc; use std::str::FromStr; use syntax::ast; use syntax::errors::DiagnosticBuilder; -use syntax::parse::{self, token, parse_stream_from_source_str}; -use syntax::print::pprust; +use syntax::parse::{self, token}; use syntax::symbol::Symbol; use syntax::tokenstream; use syntax_pos::DUMMY_SP; -use syntax_pos::SyntaxContext; +use syntax_pos::{FileMap, Pos, SyntaxContext}; use syntax_pos::hygiene::Mark; /// The main type provided by this crate, representing an abstract stream of @@ -89,10 +95,7 @@ impl FromStr for TokenStream { // notify the expansion info that it is unhygienic let mark = Mark::fresh(mark); mark.set_expn_info(expn_info); - let span = syntax_pos::Span { - ctxt: SyntaxContext::empty().apply_mark(mark), - ..call_site - }; + let span = call_site.with_ctxt(SyntaxContext::empty().apply_mark(mark)); let stream = parse::parse_stream_from_source_str(name, src, sess, Some(span)); Ok(__internal::token_stream_wrap(stream)) }) @@ -171,16 +174,16 @@ impl TokenStream { /// A region of source code, along with macro expansion information. #[unstable(feature = "proc_macro", issue = "38356")] -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct Span(syntax_pos::Span); #[unstable(feature = "proc_macro", issue = "38356")] impl Default for Span { fn default() -> Span { - ::__internal::with_sess(|(_, mark)| Span(syntax_pos::Span { - ctxt: SyntaxContext::empty().apply_mark(mark), - ..mark.expn_info().unwrap().call_site - })) + ::__internal::with_sess(|(_, mark)| { + let call_site = mark.expn_info().unwrap().call_site; + Span(call_site.with_ctxt(SyntaxContext::empty().apply_mark(mark))) + }) } } @@ -191,12 +194,148 @@ pub fn quote_span(span: Span) -> TokenStream { TokenStream(quote::Quote::quote(&span.0)) } +macro_rules! diagnostic_method { + ($name:ident, $level:expr) => ( + /// Create a new `Diagnostic` with the given `message` at the span + /// `self`. 
+ #[unstable(feature = "proc_macro", issue = "38356")] + pub fn $name>(self, message: T) -> Diagnostic { + Diagnostic::spanned(self, $level, message) + } + ) +} + impl Span { /// The span of the invocation of the current procedural macro. #[unstable(feature = "proc_macro", issue = "38356")] pub fn call_site() -> Span { ::__internal::with_sess(|(_, mark)| Span(mark.expn_info().unwrap().call_site)) } + + /// The original source file into which this span points. + #[unstable(feature = "proc_macro", issue = "38356")] + pub fn source_file(&self) -> SourceFile { + SourceFile { + filemap: __internal::lookup_char_pos(self.0.lo()).file, + } + } + + /// Get the starting line/column in the source file for this span. + #[unstable(feature = "proc_macro", issue = "38356")] + pub fn start(&self) -> LineColumn { + let loc = __internal::lookup_char_pos(self.0.lo()); + LineColumn { + line: loc.line, + column: loc.col.to_usize() + } + } + + /// Get the ending line/column in the source file for this span. + #[unstable(feature = "proc_macro", issue = "38356")] + pub fn end(&self) -> LineColumn { + let loc = __internal::lookup_char_pos(self.0.hi()); + LineColumn { + line: loc.line, + column: loc.col.to_usize() + } + } + + /// Create a new span encompassing `self` and `other`. + /// + /// Returns `None` if `self` and `other` are from different files. + #[unstable(feature = "proc_macro", issue = "38356")] + pub fn join(&self, other: Span) -> Option { + let self_loc = __internal::lookup_char_pos(self.0.lo()); + let other_loc = __internal::lookup_char_pos(self.0.lo()); + + if self_loc.file.name != other_loc.file.name { return None } + + Some(Span(self.0.to(other.0))) + } + + diagnostic_method!(error, Level::Error); + diagnostic_method!(warning, Level::Warning); + diagnostic_method!(note, Level::Note); + diagnostic_method!(help, Level::Help); +} + +/// A line-column pair representing the start or end of a `Span`. +#[unstable(feature = "proc_macro", issue = "38356")] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct LineColumn { + /// The 1-indexed line in the source file on which the span starts or ends (inclusive). + line: usize, + /// The 0-indexed column (in UTF-8 characters) in the source file on which + /// the span starts or ends (inclusive). + column: usize +} + +/// The source file of a given `Span`. +#[unstable(feature = "proc_macro", issue = "38356")] +#[derive(Clone)] +pub struct SourceFile { + filemap: Rc, +} + +impl SourceFile { + /// Get the path to this source file as a string. + /// + /// ### Note + /// If the code span associated with this `SourceFile` was generated by an external macro, this + /// may not be an actual path on the filesystem. Use [`is_real`] to check. + /// + /// Also note that even if `is_real` returns `true`, if `-Z remap-path-prefix-*` was passed on + /// the command line, the path as given may not actually be valid. + /// + /// [`is_real`]: #method.is_real + # [unstable(feature = "proc_macro", issue = "38356")] + pub fn as_str(&self) -> &str { + &self.filemap.name + } + + /// Returns `true` if this source file is a real source file, and not generated by an external + /// macro's expansion. + # [unstable(feature = "proc_macro", issue = "38356")] + pub fn is_real(&self) -> bool { + // This is a hack until intercrate spans are implemented and we can have real source files + // for spans generated in external macros. 
+ // https://github.com/rust-lang/rust/pull/43604#issuecomment-333334368 + self.filemap.is_real_file() + } +} + +#[unstable(feature = "proc_macro", issue = "38356")] +impl AsRef for SourceFile { + fn as_ref(&self) -> &str { + self.as_str() + } +} + +#[unstable(feature = "proc_macro", issue = "38356")] +impl fmt::Debug for SourceFile { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SourceFile") + .field("path", &self.as_str()) + .field("is_real", &self.is_real()) + .finish() + } +} + +#[unstable(feature = "proc_macro", issue = "38356")] +impl PartialEq for SourceFile { + fn eq(&self, other: &Self) -> bool { + Rc::ptr_eq(&self.filemap, &other.filemap) + } +} + +#[unstable(feature = "proc_macro", issue = "38356")] +impl Eq for SourceFile {} + +#[unstable(feature = "proc_macro", issue = "38356")] +impl PartialEq for SourceFile { + fn eq(&self, other: &str) -> bool { + self.as_ref() == other + } } /// A single token or a delimited sequence of token trees (e.g. `[1, (), ..]`). @@ -491,6 +630,7 @@ impl TokenTree { Dot => op!('.'), DotDot => joint!('.', Dot), DotDotDot => joint!('.', DotDot), + DotDotEq => joint!('.', DotEq), Comma => op!(','), Semi => op!(';'), Colon => op!(':'), @@ -506,50 +646,14 @@ impl TokenTree { Ident(ident) | Lifetime(ident) => TokenNode::Term(Term(ident.name)), Literal(..) | DocComment(..) => TokenNode::Literal(self::Literal(token)), - Interpolated(ref nt) => { - // An `Interpolated` token means that we have a `Nonterminal` - // which is often a parsed AST item. At this point we now need - // to convert the parsed AST to an actual token stream, e.g. - // un-parse it basically. - // - // Unfortunately there's not really a great way to do that in a - // guaranteed lossless fashion right now. The fallback here is - // to just stringify the AST node and reparse it, but this loses - // all span information. - // - // As a result, some AST nodes are annotated with the token - // stream they came from. Attempt to extract these lossless - // token streams before we fall back to the stringification. - let mut tokens = None; - - match nt.0 { - Nonterminal::NtItem(ref item) => { - tokens = prepend_attrs(&item.attrs, item.tokens.as_ref(), span); - } - Nonterminal::NtTraitItem(ref item) => { - tokens = prepend_attrs(&item.attrs, item.tokens.as_ref(), span); - } - Nonterminal::NtImplItem(ref item) => { - tokens = prepend_attrs(&item.attrs, item.tokens.as_ref(), span); - } - _ => {} - } - - tokens.map(|tokens| { - TokenNode::Group(Delimiter::None, - TokenStream(tokens.clone())) - }).unwrap_or_else(|| { - __internal::with_sess(|(sess, _)| { - TokenNode::Group(Delimiter::None, TokenStream(nt.1.force(|| { - // FIXME(jseyfried): Avoid this pretty-print + reparse hack - let name = "".to_owned(); - let source = pprust::token_to_string(&token); - parse_stream_from_source_str(name, source, sess, Some(span)) - }))) - }) + Interpolated(_) => { + __internal::with_sess(|(sess, _)| { + let tts = token.interpolated_to_tokenstream(sess, span); + TokenNode::Group(Delimiter::None, TokenStream(tts)) }) } + DotEq => unreachable!(), OpenDelim(..) | CloseDelim(..) => unreachable!(), Whitespace | Comment | Shebang(..) 
| Eof => unreachable!(), }; @@ -570,7 +674,7 @@ impl TokenTree { }).into(); }, TokenNode::Term(symbol) => { - let ident = ast::Ident { name: symbol.0, ctxt: self.span.0.ctxt }; + let ident = ast::Ident { name: symbol.0, ctxt: self.span.0.ctxt() }; let token = if symbol.0.as_str().starts_with("'") { Lifetime(ident) } else { Ident(ident) }; return TokenTree::Token(self.span.0, token).into(); @@ -612,34 +716,6 @@ impl TokenTree { } } -fn prepend_attrs(attrs: &[ast::Attribute], - tokens: Option<&tokenstream::TokenStream>, - span: syntax_pos::Span) - -> Option -{ - let tokens = match tokens { - Some(tokens) => tokens, - None => return None, - }; - if attrs.len() == 0 { - return Some(tokens.clone()) - } - let mut builder = tokenstream::TokenStreamBuilder::new(); - for attr in attrs { - assert_eq!(attr.style, ast::AttrStyle::Outer, - "inner attributes should prevent cached tokens from existing"); - let stream = __internal::with_sess(|(sess, _)| { - // FIXME: Avoid this pretty-print + reparse hack as bove - let name = "".to_owned(); - let source = pprust::attr_to_string(attr); - parse_stream_from_source_str(name, source, sess, Some(span)) - }); - builder.push(stream); - } - builder.push(tokens.clone()); - Some(builder.build()) -} - /// Permanently unstable internal implementation details of this crate. This /// should not be used. /// @@ -663,10 +739,14 @@ pub mod __internal { use syntax::parse::{self, ParseSess}; use syntax::parse::token::{self, Token}; use syntax::tokenstream; - use syntax_pos::DUMMY_SP; + use syntax_pos::{BytePos, Loc, DUMMY_SP}; use super::{TokenStream, LexError}; + pub fn lookup_char_pos(pos: BytePos) -> Loc { + with_sess(|(sess, _)| sess.codemap().lookup_char_pos(pos)) + } + pub fn new_token_stream(item: P) -> TokenStream { let token = Token::interpolated(token::NtItem(item)); TokenStream(tokenstream::TokenTree::Token(DUMMY_SP, token).into()) diff --git a/src/libproc_macro/quote.rs b/src/libproc_macro/quote.rs index 0db2b86b15..8c1f6bfc11 100644 --- a/src/libproc_macro/quote.rs +++ b/src/libproc_macro/quote.rs @@ -202,8 +202,8 @@ impl Quote for Token { gen_match! { Eq, Lt, Le, EqEq, Ne, Ge, Gt, AndAnd, OrOr, Not, Tilde, At, Dot, DotDot, DotDotDot, - Comma, Semi, Colon, ModSep, RArrow, LArrow, FatArrow, Pound, Dollar, Question, - Underscore; + DotDotEq, Comma, Semi, Colon, ModSep, RArrow, LArrow, FatArrow, Pound, Dollar, + Question, Underscore; Token::OpenDelim(delim) => quote!(rt::token::OpenDelim((quote delim))), Token::CloseDelim(delim) => quote!(rt::token::CloseDelim((quote delim))), diff --git a/src/libprofiler_builtins/Cargo.toml b/src/libprofiler_builtins/Cargo.toml index a60db31367..eb31f5730d 100644 --- a/src/libprofiler_builtins/Cargo.toml +++ b/src/libprofiler_builtins/Cargo.toml @@ -15,4 +15,4 @@ doc = false core = { path = "../libcore" } [build-dependencies] -gcc = "0.3.50" +cc = "1.0" diff --git a/src/libprofiler_builtins/build.rs b/src/libprofiler_builtins/build.rs index 55df14ea21..8508b2dae2 100644 --- a/src/libprofiler_builtins/build.rs +++ b/src/libprofiler_builtins/build.rs @@ -12,14 +12,14 @@ //! //! See the build.rs for libcompiler_builtins crate for details. 
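The hunks above and below switch `libprofiler_builtins` from the deprecated `gcc` build-dependency to `cc = "1.0"` and update its build script accordingly. As a rough, self-contained sketch of what a build script using the `cc` crate looks like (the source file and library names here are placeholders, not the actual profiler sources):

```rust
// Hypothetical build.rs using the `cc` crate; declare `cc = "1.0"` under
// [build-dependencies] in Cargo.toml. File and library names are illustrative only.
extern crate cc;

fn main() {
    cc::Build::new()              // replaces the old gcc::Config::new()
        .file("native/probe.c")   // C source to compile
        .include("native")        // extra include path
        .compile("libprobe.a");   // builds the static library and emits the
                                  // cargo link directives for it
}
```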
-extern crate gcc; +extern crate cc; use std::env; use std::path::Path; fn main() { let target = env::var("TARGET").expect("TARGET was not set"); - let cfg = &mut gcc::Config::new(); + let cfg = &mut cc::Build::new(); let mut profile_sources = vec!["GCDAProfiling.c", "InstrProfiling.c", diff --git a/src/librand/isaac.rs b/src/librand/isaac.rs index 96ce0905e3..17ecf79dae 100644 --- a/src/librand/isaac.rs +++ b/src/librand/isaac.rs @@ -543,7 +543,8 @@ impl Clone for Isaac64Rng { } impl Rng for Isaac64Rng { - // FIXME #7771: having next_u32 like this should be unnecessary + // FIXME(https://github.com/rust-lang/rfcs/issues/628) + // having next_u32 like this should be unnecessary #[inline] fn next_u32(&mut self) -> u32 { self.next_u64() as u32 diff --git a/src/librand/lib.rs b/src/librand/lib.rs index 90b3020fff..82344f0ec0 100644 --- a/src/librand/lib.rs +++ b/src/librand/lib.rs @@ -115,7 +115,8 @@ pub trait Rng: Sized { /// /// This rarely needs to be called directly, prefer `r.gen()` to /// `r.next_u32()`. - // FIXME #7771: Should be implemented in terms of next_u64 + // FIXME(https://github.com/rust-lang/rfcs/issues/628) + // Should be implemented in terms of next_u64 fn next_u32(&mut self) -> u32; /// Return the next random u64. diff --git a/src/librustc/Cargo.toml b/src/librustc/Cargo.toml index 89169548bb..0b62e1bd5a 100644 --- a/src/librustc/Cargo.toml +++ b/src/librustc/Cargo.toml @@ -10,13 +10,13 @@ crate-type = ["dylib"] [dependencies] arena = { path = "../libarena" } +bitflags = "1.0" fmt_macros = { path = "../libfmt_macros" } graphviz = { path = "../libgraphviz" } jobserver = "0.1" log = "0.3" owning_ref = "0.3.3" rustc_back = { path = "../librustc_back" } -rustc_bitflags = { path = "../librustc_bitflags" } rustc_const_math = { path = "../librustc_const_math" } rustc_data_structures = { path = "../librustc_data_structures" } rustc_errors = { path = "../librustc_errors" } diff --git a/src/librustc/README.md b/src/librustc/README.md index c24d3d82b2..3ac2949b83 100644 --- a/src/librustc/README.md +++ b/src/librustc/README.md @@ -13,162 +13,191 @@ https://github.com/rust-lang/rust/issues Your concerns are probably the same as someone else's. +You may also be interested in the +[Rust Forge](https://forge.rust-lang.org/), which includes a number of +interesting bits of information. + +Finally, at the end of this file is a GLOSSARY defining a number of +common (and not necessarily obvious!) names that are used in the Rust +compiler code. If you see some funky name and you'd like to know what +it stands for, check there! + The crates of rustc =================== -Rustc consists of a number of crates, including `libsyntax`, -`librustc`, `librustc_back`, `librustc_trans`, and `librustc_driver` -(the names and divisions are not set in stone and may change; -in general, a finer-grained division of crates is preferable): - -- [`libsyntax`][libsyntax] contains those things concerned purely with syntax – - that is, the AST, parser, pretty-printer, lexer, macro expander, and - utilities for traversing ASTs – are in a separate crate called - "syntax", whose files are in `./../libsyntax`, where `.` is the - current directory (that is, the parent directory of front/, middle/, - back/, and so on). - -- `librustc` (the current directory) contains the high-level analysis - passes, such as the type checker, borrow checker, and so forth. - It is the heart of the compiler. - -- [`librustc_back`][back] contains some very low-level details that are - specific to different LLVM targets and so forth. 
- -- [`librustc_trans`][trans] contains the code to convert from Rust IR into LLVM - IR, and then from LLVM IR into machine code, as well as the main - driver that orchestrates all the other passes and various other bits - of miscellany. In general it contains code that runs towards the - end of the compilation process. - -- [`librustc_driver`][driver] invokes the compiler from - [`libsyntax`][libsyntax], then the analysis phases from `librustc`, and - finally the lowering and codegen passes from [`librustc_trans`][trans]. - -Roughly speaking the "order" of the three crates is as follows: - - librustc_driver - | - +-----------------+-------------------+ - | | - libsyntax -> librustc -> librustc_trans - - -The compiler process: -===================== - -The Rust compiler is comprised of six main compilation phases. - -1. Parsing input -2. Configuration & expanding (cfg rules & syntax extension expansion) -3. Running analysis passes -4. Translation to LLVM -5. LLVM passes -6. Linking - -Phase one is responsible for parsing & lexing the input to the compiler. The -output of this phase is an abstract syntax tree (AST). The AST at this point -includes all macro uses & attributes. This means code which will be later -expanded and/or removed due to `cfg` attributes is still present in this -version of the AST. Parsing abstracts away details about individual files which -have been read into the AST. - -Phase two handles configuration and macro expansion. You can think of this -phase as a function acting on the AST from the previous phase. The input for -this phase is the unexpanded AST from phase one, and the output is an expanded -version of the same AST. This phase will expand all macros & syntax -extensions and will evaluate all `cfg` attributes, potentially removing some -code. The resulting AST will not contain any macros or `macro_use` statements. - -The code for these first two phases is in [`libsyntax`][libsyntax]. - -After this phase, the compiler allocates IDs to each node in the AST -(technically not every node, but most of them). If we are writing out -dependencies, that happens now. - -The third phase is analysis. This is the most complex phase in the compiler, -and makes up much of the code. This phase included name resolution, type -checking, borrow checking, type & lifetime inference, trait selection, method -selection, linting and so on. Most of the error detection in the compiler comes -from this phase (with the exception of parse errors which arise during -parsing). The "output" of this phase is a set of side tables containing -semantic information about the source program. The analysis code is in -[`librustc`][rustc] and some other crates with the `librustc_` prefix. - -The fourth phase is translation. This phase translates the AST (and the side -tables from the previous phase) into LLVM IR (intermediate representation). -This is achieved by calling into the LLVM libraries. The code for this is in -[`librustc_trans`][trans]. - -Phase five runs the LLVM backend. This runs LLVM's optimization passes on the -generated IR and generates machine code resulting in object files. This phase -is not really part of the Rust compiler, as LLVM carries out all the work. -The interface between LLVM and Rust is in [`librustc_llvm`][llvm]. - -The final phase, phase six, links the object files into an executable. This is -again outsourced to other tools and not performed by the Rust compiler -directly. 
The interface is in [`librustc_back`][back] (which also contains some -things used primarily during translation). - -A module called the driver coordinates all these phases. It handles all the -highest level coordination of compilation from parsing command line arguments -all the way to invoking the linker to produce an executable. - -Modules in the librustc crate -============================= - -The librustc crate itself consists of the following submodules -(mostly, but not entirely, in their own directories): - -- session: options and data that pertain to the compilation session as - a whole -- middle: middle-end: name resolution, typechecking, LLVM code - generation -- metadata: encoder and decoder for data required by separate - compilation -- plugin: infrastructure for compiler plugins -- lint: infrastructure for compiler warnings -- util: ubiquitous types and helper functions -- lib: bindings to LLVM - -The entry-point for the compiler is main() in the [`librustc_driver`][driver] -crate. - -The 3 central data structures: ------------------------------- - -1. `./../libsyntax/ast.rs` defines the AST. The AST is treated as - immutable after parsing, but it depends on mutable context data - structures (mainly hash maps) to give it meaning. - - - Many – though not all – nodes within this data structure are - wrapped in the type `spanned`, meaning that the front-end has - marked the input coordinates of that node. The member `node` is - the data itself, the member `span` is the input location (file, - line, column; both low and high). - - - Many other nodes within this data structure carry a - `def_id`. These nodes represent the 'target' of some name - reference elsewhere in the tree. When the AST is resolved, by - `middle/resolve.rs`, all names wind up acquiring a def that they - point to. So anything that can be pointed-to by a name winds - up with a `def_id`. - -2. `middle/ty.rs` defines the datatype `sty`. This is the type that - represents types after they have been resolved and normalized by - the middle-end. The typeck phase converts every ast type to a - `ty::sty`, and the latter is used to drive later phases of - compilation. Most variants in the `ast::ty` tag have a - corresponding variant in the `ty::sty` tag. - -3. `./../librustc_llvm/lib.rs` defines the exported types - `ValueRef`, `TypeRef`, `BasicBlockRef`, and several others. - Each of these is an opaque pointer to an LLVM type, - manipulated through the `lib::llvm` interface. - -[libsyntax]: https://github.com/rust-lang/rust/tree/master/src/libsyntax/ -[trans]: https://github.com/rust-lang/rust/tree/master/src/librustc_trans/ -[llvm]: https://github.com/rust-lang/rust/tree/master/src/librustc_llvm/ -[back]: https://github.com/rust-lang/rust/tree/master/src/librustc_back/ -[rustc]: https://github.com/rust-lang/rust/tree/master/src/librustc/ -[driver]: https://github.com/rust-lang/rust/tree/master/src/librustc_driver +Rustc consists of a number of crates, including `syntax`, +`rustc`, `rustc_back`, `rustc_trans`, `rustc_driver`, and +many more. The source for each crate can be found in a directory +like `src/libXXX`, where `XXX` is the crate name. + +(NB. The names and divisions of these crates are not set in +stone and may change over time -- for the time being, we tend towards +a finer-grained division to help with compilation time, though as +incremental improves that may change.) 
+
+The dependency structure of these crates is roughly a diamond:
+
+```
+                     rustc_driver
+                   /      |       \
+                  /       |        \
+                 /        |         \
+                /         v          \
+rustc_trans   rustc_borrowck   ...  rustc_metadata
+                \          |         /
+                 \         |        /
+                  \        |       /
+                   \       v      /
+                        rustc
+                          |
+                          v
+                       syntax
+                      /      \
+                     /        \
+              syntax_pos  syntax_ext
+```
+
+The `rustc_driver` crate, at the top of this lattice, is effectively
+the "main" function for the rust compiler. It doesn't have much "real
+code", but instead ties together all of the code defined in the other
+crates and defines the overall flow of execution. (As we transition
+more and more to the [query model](ty/maps/README.md), however, the
+"flow" of compilation is becoming less centrally defined.)
+
+At the other extreme, the `rustc` crate defines the common and
+pervasive data structures that all the rest of the compiler uses
+(e.g., how to represent types, traits, and the program itself). It
+also contains some amount of the compiler itself, although that is
+relatively limited.
+
+Finally, all the crates in the bulge in the middle define the bulk of
+the compiler -- they all depend on `rustc`, so that they can make use
+of the various types defined there, and they export public routines
+that `rustc_driver` will invoke as needed (more and more, what these
+crates export are "query definitions", but those are covered later
+on).
+
+Below `rustc` lie various crates that make up the parser and error
+reporting mechanism. For historical reasons, these crates do not have
+the `rustc_` prefix, but they are really just as much an internal part
+of the compiler and not intended to be stable (though they do wind up
+getting used by some crates in the wild; a practice we hope to
+gradually phase out).
+
+Each crate has a `README.md` file that describes, at a high-level,
+what it contains, and tries to give some kind of explanation (some
+better than others).
+
+The compiler process
+====================
+
+The Rust compiler is in a bit of transition right now. It used to be a
+purely "pass-based" compiler, where we ran a number of passes over the
+entire program, and each did a particular check or transformation.
+
+We are gradually replacing this pass-based code with an alternative
+setup based on on-demand **queries**. In the query-model, we work
+backwards, executing a *query* that expresses our ultimate goal (e.g.,
+"compile this crate"). This query in turn may make other queries
+(e.g., "get me a list of all modules in the crate"). Those queries
+make other queries that ultimately bottom out in the base operations,
+like parsing the input, running the type-checker, and so forth. This
+on-demand model permits us to do exciting things like only do the
+minimal amount of work needed to type-check a single function. It also
+helps with incremental compilation. (For details on defining queries,
+check out `src/librustc/ty/maps/README.md`.)
+
+Regardless of the general setup, the basic operations that the
+compiler must perform are the same. The only thing that changes is
+whether these operations are invoked front-to-back, or on demand. In
+order to compile a Rust crate, these are the general steps that we
+take:
+
+1. **Parsing input**
+   - this processes the `.rs` files and produces the AST ("abstract syntax tree")
+   - the AST is defined in `syntax/ast.rs`. It is intended to match the lexical
+     syntax of the Rust language quite closely.
+2. **Name resolution, macro expansion, and configuration**
+   - once parsing is complete, we process the AST recursively, resolving paths
+     and expanding macros. This same process also processes `#[cfg]` nodes, and hence
+     may strip things out of the AST as well.
+3. **Lowering to HIR**
+   - Once name resolution completes, we convert the AST into the HIR,
+     or "high-level IR". The HIR is defined in `src/librustc/hir/`; that module also includes
+     the lowering code.
+   - The HIR is a lightly desugared variant of the AST. It is more processed than the
+     AST and more suitable for the analyses that follow. It is **not** required to match
+     the syntax of the Rust language.
+   - As a simple example, in the **AST**, we preserve the parentheses
+     that the user wrote, so `((1 + 2) + 3)` and `1 + 2 + 3` parse
+     into distinct trees, even though they are equivalent. In the
+     HIR, however, parentheses nodes are removed, and those two
+     expressions are represented in the same way.
+4. **Type-checking and subsequent analyses**
+   - An important step in processing the HIR is to perform type
+     checking. This process assigns types to every HIR expression,
+     for example, and also is responsible for resolving some
+     "type-dependent" paths, such as field accesses (`x.f` -- we
+     can't know what field `f` is being accessed until we know the
+     type of `x`) and associated type references (`T::Item` -- we
+     can't know what type `Item` is until we know what `T` is).
+   - Type checking creates "side-tables" (`TypeckTables`) that include
+     the types of expressions, the way to resolve methods, and so forth.
+   - After type-checking, we can do other analyses, such as privacy checking.
+5. **Lowering to MIR and post-processing**
+   - Once type-checking is done, we can lower the HIR into MIR ("middle IR"), which
+     is a **very** desugared version of Rust, well suited to the borrowck but also
+     certain high-level optimizations.
+6. **Translation to LLVM and LLVM optimizations**
+   - From MIR, we can produce LLVM IR.
+   - LLVM then runs its various optimizations, which produces a number of `.o` files
+     (one for each "codegen unit").
+7. **Linking**
+   - Finally, those `.o` files are linked together.
+
+Glossary
+========
+
+The compiler uses a number of...idiosyncratic abbreviations and
+things. This glossary attempts to list them and give you a few
+pointers for understanding them better.
+
+- AST -- the **abstract syntax tree** produced by the `syntax` crate; reflects user syntax
+  very closely.
+- codegen unit -- when we produce LLVM IR, we group the Rust code into a number of codegen
+  units. Each of these units is processed by LLVM independently from one another,
+  enabling parallelism. They are also the unit of incremental re-use.
+- cx -- we tend to use "cx" as an abbreviation for context. See also tcx, infcx, etc.
+- `DefId` -- an index identifying a **definition** (see `librustc/hir/def_id.rs`). Uniquely
+  identifies a `DefPath`.
+- HIR -- the **High-level IR**, created by lowering and desugaring the AST. See `librustc/hir`.
+- `HirId` -- identifies a particular node in the HIR by combining a
+  def-id with an "intra-definition offset".
+- `'gcx` -- the lifetime of the global arena (see `librustc/ty`).
+- generics -- the set of generic type parameters defined on a type or item
+- ICE -- internal compiler error. When the compiler crashes.
+- infcx -- the inference context (see `librustc/infer`)
+- MIR -- the **Mid-level IR** that is created after type-checking for use by borrowck and trans.
+  Defined in the `src/librustc/mir/` module, but much of the code that manipulates it is
+  found in `src/librustc_mir`.
+- obligation -- something that must be proven by the trait system; see `librustc/traits`. +- local crate -- the crate currently being compiled. +- node-id or `NodeId` -- an index identifying a particular node in the + AST or HIR; gradually being phased out and replaced with `HirId`. +- query -- perhaps some sub-computation during compilation; see `librustc/maps`. +- provider -- the function that executes a query; see `librustc/maps`. +- sess -- the **compiler session**, which stores global data used throughout compilation +- side tables -- because the AST and HIR are immutable once created, we often carry extra + information about them in the form of hashtables, indexed by the id of a particular node. +- span -- a location in the user's source code, used for error + reporting primarily. These are like a file-name/line-number/column + tuple on steroids: they carry a start/end point, and also track + macro expansions and compiler desugaring. All while being packed + into a few bytes (really, it's an index into a table). See the + `Span` datatype for more. +- substs -- the **substitutions** for a given generic type or item + (e.g., the `i32, u32` in `HashMap`) +- tcx -- the "typing context", main data structure of the compiler (see `librustc/ty`). +- trans -- the code to **translate** MIR into LLVM IR. +- trait reference -- a trait and values for its type parameters (see `librustc/ty`). +- ty -- the internal representation of a **type** (see `librustc/ty`). diff --git a/src/libcore/benches/mem.rs b/src/librustc/benches/dispatch.rs similarity index 65% rename from src/libcore/benches/mem.rs rename to src/librustc/benches/dispatch.rs index 8e541d92a7..63e74778fb 100644 --- a/src/libcore/benches/mem.rs +++ b/src/librustc/benches/dispatch.rs @@ -10,8 +10,6 @@ use test::Bencher; -// FIXME #13642 (these benchmarks should be in another place) -// Completely miscellaneous language-construct benchmarks. // Static/dynamic method dispatch struct Struct { @@ -44,27 +42,3 @@ fn trait_static_method_call(b: &mut Bencher) { s.method() }); } - -// Overhead of various match forms - -#[bench] -fn match_option_some(b: &mut Bencher) { - let x = Some(10); - b.iter(|| { - match x { - Some(y) => y, - None => 11 - } - }); -} - -#[bench] -fn match_vec_pattern(b: &mut Bencher) { - let x = [1,2,3,4,5,6]; - b.iter(|| { - match x { - [1,2,3,..] => 10, - _ => 11, - } - }); -} diff --git a/src/librustc/benches/lib.rs b/src/librustc/benches/lib.rs new file mode 100644 index 0000000000..24294ec49c --- /dev/null +++ b/src/librustc/benches/lib.rs @@ -0,0 +1,19 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![deny(warnings)] + +#![feature(slice_patterns)] +#![feature(test)] + +extern crate test; + +mod dispatch; +mod pattern; diff --git a/src/librustc/benches/pattern.rs b/src/librustc/benches/pattern.rs new file mode 100644 index 0000000000..638b1ce3f7 --- /dev/null +++ b/src/librustc/benches/pattern.rs @@ -0,0 +1,35 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +use test::Bencher; + +// Overhead of various match forms + +#[bench] +fn option_some(b: &mut Bencher) { + let x = Some(10); + b.iter(|| { + match x { + Some(y) => y, + None => 11 + } + }); +} + +#[bench] +fn vec_pattern(b: &mut Bencher) { + let x = [1,2,3,4,5,6]; + b.iter(|| { + match x { + [1,2,3,..] => 10, + _ => 11, + } + }); +} diff --git a/src/librustc/cfg/construct.rs b/src/librustc/cfg/construct.rs index fa6b78045f..ff2c36416b 100644 --- a/src/librustc/cfg/construct.rs +++ b/src/librustc/cfg/construct.rs @@ -10,9 +10,8 @@ use rustc_data_structures::graph; use cfg::*; -use middle::region::CodeExtent; +use middle::region; use ty::{self, TyCtxt}; -use syntax::ast; use syntax::ptr::P; use hir::{self, PatKind}; @@ -30,13 +29,13 @@ struct CFGBuilder<'a, 'tcx: 'a> { #[derive(Copy, Clone)] struct BlockScope { - block_expr_id: ast::NodeId, // id of breakable block expr node + block_expr_id: hir::ItemLocalId, // id of breakable block expr node break_index: CFGIndex, // where to go on `break` } #[derive(Copy, Clone)] struct LoopScope { - loop_id: ast::NodeId, // id of loop/while node + loop_id: hir::ItemLocalId, // id of loop/while node continue_index: CFGIndex, // where to go on a `loop` break_index: CFGIndex, // where to go on a `break` } @@ -70,6 +69,7 @@ pub fn construct<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, cfg_builder.add_contained_edge(body_exit, fn_exit); let CFGBuilder { graph, .. } = cfg_builder; CFG { + owner_def_id, graph, entry, exit: fn_exit, @@ -79,10 +79,10 @@ pub fn construct<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { fn block(&mut self, blk: &hir::Block, pred: CFGIndex) -> CFGIndex { if blk.targeted_by_break { - let expr_exit = self.add_ast_node(blk.id, &[]); + let expr_exit = self.add_ast_node(blk.hir_id.local_id, &[]); self.breakable_block_scopes.push(BlockScope { - block_expr_id: blk.id, + block_expr_id: blk.hir_id.local_id, break_index: expr_exit, }); @@ -104,21 +104,22 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { let expr_exit = self.opt_expr(&blk.expr, stmts_exit); - self.add_ast_node(blk.id, &[expr_exit]) + self.add_ast_node(blk.hir_id.local_id, &[expr_exit]) } } fn stmt(&mut self, stmt: &hir::Stmt, pred: CFGIndex) -> CFGIndex { + let hir_id = self.tcx.hir.node_to_hir_id(stmt.node.id()); match stmt.node { - hir::StmtDecl(ref decl, id) => { + hir::StmtDecl(ref decl, _) => { let exit = self.decl(&decl, pred); - self.add_ast_node(id, &[exit]) + self.add_ast_node(hir_id.local_id, &[exit]) } - hir::StmtExpr(ref expr, id) | - hir::StmtSemi(ref expr, id) => { + hir::StmtExpr(ref expr, _) | + hir::StmtSemi(ref expr, _) => { let exit = self.expr(&expr, pred); - self.add_ast_node(id, &[exit]) + self.add_ast_node(hir_id.local_id, &[exit]) } } } @@ -140,31 +141,31 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { PatKind::Path(_) | PatKind::Lit(..) | PatKind::Range(..) 
| - PatKind::Wild => self.add_ast_node(pat.id, &[pred]), + PatKind::Wild => self.add_ast_node(pat.hir_id.local_id, &[pred]), PatKind::Box(ref subpat) | PatKind::Ref(ref subpat, _) | PatKind::Binding(.., Some(ref subpat)) => { let subpat_exit = self.pat(&subpat, pred); - self.add_ast_node(pat.id, &[subpat_exit]) + self.add_ast_node(pat.hir_id.local_id, &[subpat_exit]) } PatKind::TupleStruct(_, ref subpats, _) | PatKind::Tuple(ref subpats, _) => { let pats_exit = self.pats_all(subpats.iter(), pred); - self.add_ast_node(pat.id, &[pats_exit]) + self.add_ast_node(pat.hir_id.local_id, &[pats_exit]) } PatKind::Struct(_, ref subpats, _) => { let pats_exit = self.pats_all(subpats.iter().map(|f| &f.node.pat), pred); - self.add_ast_node(pat.id, &[pats_exit]) + self.add_ast_node(pat.hir_id.local_id, &[pats_exit]) } PatKind::Slice(ref pre, ref vec, ref post) => { let pre_exit = self.pats_all(pre.iter(), pred); let vec_exit = self.pats_all(vec.iter(), pre_exit); let post_exit = self.pats_all(post.iter(), vec_exit); - self.add_ast_node(pat.id, &[post_exit]) + self.add_ast_node(pat.hir_id.local_id, &[post_exit]) } } } @@ -180,7 +181,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { match expr.node { hir::ExprBlock(ref blk) => { let blk_exit = self.block(&blk, pred); - self.add_ast_node(expr.id, &[blk_exit]) + self.add_ast_node(expr.hir_id.local_id, &[blk_exit]) } hir::ExprIf(ref cond, ref then, None) => { @@ -200,7 +201,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { // let cond_exit = self.expr(&cond, pred); // 1 let then_exit = self.expr(&then, cond_exit); // 2 - self.add_ast_node(expr.id, &[cond_exit, then_exit]) // 3,4 + self.add_ast_node(expr.hir_id.local_id, &[cond_exit, then_exit]) // 3,4 } hir::ExprIf(ref cond, ref then, Some(ref otherwise)) => { @@ -221,7 +222,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { let cond_exit = self.expr(&cond, pred); // 1 let then_exit = self.expr(&then, cond_exit); // 2 let else_exit = self.expr(&otherwise, cond_exit); // 3 - self.add_ast_node(expr.id, &[then_exit, else_exit]) // 4, 5 + self.add_ast_node(expr.hir_id.local_id, &[then_exit, else_exit]) // 4, 5 } hir::ExprWhile(ref cond, ref body, _) => { @@ -245,12 +246,12 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { let loopback = self.add_dummy_node(&[pred]); // 1 // Create expr_exit without pred (cond_exit) - let expr_exit = self.add_ast_node(expr.id, &[]); // 3 + let expr_exit = self.add_ast_node(expr.hir_id.local_id, &[]); // 3 // The LoopScope needs to be on the loop_scopes stack while evaluating the // condition and the body of the loop (both can break out of the loop) self.loop_scopes.push(LoopScope { - loop_id: expr.id, + loop_id: expr.hir_id.local_id, continue_index: loopback, break_index: expr_exit }); @@ -282,9 +283,9 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { // may cause additional edges. 
let loopback = self.add_dummy_node(&[pred]); // 1 - let expr_exit = self.add_ast_node(expr.id, &[]); // 2 + let expr_exit = self.add_ast_node(expr.hir_id.local_id, &[]); // 2 self.loop_scopes.push(LoopScope { - loop_id: expr.id, + loop_id: expr.hir_id.local_id, continue_index: loopback, break_index: expr_exit, }); @@ -295,7 +296,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { } hir::ExprMatch(ref discr, ref arms, _) => { - self.match_(expr.id, &discr, &arms, pred) + self.match_(expr.hir_id.local_id, &discr, &arms, pred) } hir::ExprBinary(op, ref l, ref r) if op.node.is_lazy() => { @@ -315,30 +316,30 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { // let l_exit = self.expr(&l, pred); // 1 let r_exit = self.expr(&r, l_exit); // 2 - self.add_ast_node(expr.id, &[l_exit, r_exit]) // 3,4 + self.add_ast_node(expr.hir_id.local_id, &[l_exit, r_exit]) // 3,4 } hir::ExprRet(ref v) => { let v_exit = self.opt_expr(v, pred); - let b = self.add_ast_node(expr.id, &[v_exit]); + let b = self.add_ast_node(expr.hir_id.local_id, &[v_exit]); self.add_returning_edge(expr, b); self.add_unreachable_node() } hir::ExprBreak(destination, ref opt_expr) => { let v = self.opt_expr(opt_expr, pred); - let (scope_id, break_dest) = + let (target_scope, break_dest) = self.find_scope_edge(expr, destination, ScopeCfKind::Break); - let b = self.add_ast_node(expr.id, &[v]); - self.add_exiting_edge(expr, b, scope_id, break_dest); + let b = self.add_ast_node(expr.hir_id.local_id, &[v]); + self.add_exiting_edge(expr, b, target_scope, break_dest); self.add_unreachable_node() } hir::ExprAgain(destination) => { - let (scope_id, cont_dest) = + let (target_scope, cont_dest) = self.find_scope_edge(expr, destination, ScopeCfKind::Continue); - let a = self.add_ast_node(expr.id, &[pred]); - self.add_exiting_edge(expr, a, scope_id, cont_dest); + let a = self.add_ast_node(expr.hir_id.local_id, &[pred]); + self.add_exiting_edge(expr, a, target_scope, cont_dest); self.add_unreachable_node() } @@ -389,6 +390,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { hir::ExprUnary(_, ref e) | hir::ExprField(ref e, _) | hir::ExprTupField(ref e, _) | + hir::ExprYield(ref e) | hir::ExprRepeat(ref e, _) => { self.straightline(expr, pred, Some(&**e).into_iter()) } @@ -396,7 +398,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { hir::ExprInlineAsm(_, ref outputs, ref inputs) => { let post_outputs = self.exprs(outputs.iter().map(|e| &*e), pred); let post_inputs = self.exprs(inputs.iter().map(|e| &*e), post_outputs); - self.add_ast_node(expr.id, &[post_inputs]) + self.add_ast_node(expr.hir_id.local_id, &[post_inputs]) } hir::ExprClosure(..) | @@ -443,10 +445,10 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { //! Handles case of an expression that evaluates `subexprs` in order let subexprs_exit = self.exprs(subexprs, pred); - self.add_ast_node(expr.id, &[subexprs_exit]) + self.add_ast_node(expr.hir_id.local_id, &[subexprs_exit]) } - fn match_(&mut self, id: ast::NodeId, discr: &hir::Expr, + fn match_(&mut self, id: hir::ItemLocalId, discr: &hir::Expr, arms: &[hir::Arm], pred: CFGIndex) -> CFGIndex { // The CFG for match expression is quite complex, so no ASCII // art for it (yet). 
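The `construct.rs` changes above re-key CFG nodes from `ast::NodeId` to `hir::ItemLocalId`, but the graph-building pattern itself is unchanged: each sub-expression becomes a node whose predecessor list encodes the control-flow edges into it. Below is a minimal, self-contained sketch of that pattern for an `if` expression without an `else`; `Graph` and its node indices are simplified stand-ins, not rustc's real `CFGIndex`/graph types.

```rust
// Simplified illustration of the predecessor-list style used by CFGBuilder.
struct Graph {
    // preds[n] lists the nodes with an edge into n, like the `&[CFGIndex]`
    // slices passed to `add_ast_node` above.
    preds: Vec<Vec<usize>>,
}

impl Graph {
    fn new() -> Graph {
        Graph { preds: Vec::new() }
    }

    // Mirrors add_ast_node/add_dummy_node: create a node wired to its predecessors.
    fn add_node(&mut self, preds: &[usize]) -> usize {
        self.preds.push(preds.to_vec());
        self.preds.len() - 1
    }
}

fn main() {
    let mut g = Graph::new();
    let entry = g.add_node(&[]);

    // `if cond { then }`, following the scheme in CFGBuilder::expr: the exit
    // node joins the "condition was false" edge and the then-branch exit.
    let cond_exit = g.add_node(&[entry]);               // 1: evaluate the condition
    let then_exit = g.add_node(&[cond_exit]);           // 2: then-branch body
    let if_exit = g.add_node(&[cond_exit, then_exit]);  // 3,4: join node for the whole `if`

    println!("`if` exit node {} has predecessors {:?}", if_exit, g.preds[if_exit]);
}
```

The numbered comments mirror the `// 1`, `// 2`, `// 3,4` annotations used in `CFGBuilder::expr` above.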
@@ -551,8 +553,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { self.add_node(CFGNodeData::Dummy, preds) } - fn add_ast_node(&mut self, id: ast::NodeId, preds: &[CFGIndex]) -> CFGIndex { - assert!(id != ast::DUMMY_NODE_ID); + fn add_ast_node(&mut self, id: hir::ItemLocalId, preds: &[CFGIndex]) -> CFGIndex { self.add_node(CFGNodeData::AST(id), preds) } @@ -578,15 +579,14 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { fn add_exiting_edge(&mut self, from_expr: &hir::Expr, from_index: CFGIndex, - scope_id: ast::NodeId, + target_scope: region::Scope, to_index: CFGIndex) { let mut data = CFGEdgeData { exiting_scopes: vec![] }; - let mut scope = CodeExtent::Misc(from_expr.id); - let target_scope = CodeExtent::Misc(scope_id); - let region_maps = self.tcx.region_maps(self.owner_def_id); + let mut scope = region::Scope::Node(from_expr.hir_id.local_id); + let region_scope_tree = self.tcx.region_scope_tree(self.owner_def_id); while scope != target_scope { - data.exiting_scopes.push(scope.node_id()); - scope = region_maps.encl_scope(scope); + data.exiting_scopes.push(scope.item_local_id()); + scope = region_scope_tree.encl_scope(scope); } self.graph.add_edge(from_index, to_index, data); } @@ -606,13 +606,14 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { fn find_scope_edge(&self, expr: &hir::Expr, destination: hir::Destination, - scope_cf_kind: ScopeCfKind) -> (ast::NodeId, CFGIndex) { + scope_cf_kind: ScopeCfKind) -> (region::Scope, CFGIndex) { match destination.target_id { hir::ScopeTarget::Block(block_expr_id) => { for b in &self.breakable_block_scopes { - if b.block_expr_id == block_expr_id { - return (block_expr_id, match scope_cf_kind { + if b.block_expr_id == self.tcx.hir.node_to_hir_id(block_expr_id).local_id { + let scope_id = self.tcx.hir.node_to_hir_id(block_expr_id).local_id; + return (region::Scope::Node(scope_id), match scope_cf_kind { ScopeCfKind::Break => b.break_index, ScopeCfKind::Continue => bug!("can't continue to block"), }); @@ -622,8 +623,9 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { } hir::ScopeTarget::Loop(hir::LoopIdResult::Ok(loop_id)) => { for l in &self.loop_scopes { - if l.loop_id == loop_id { - return (loop_id, match scope_cf_kind { + if l.loop_id == self.tcx.hir.node_to_hir_id(loop_id).local_id { + let scope_id = self.tcx.hir.node_to_hir_id(loop_id).local_id; + return (region::Scope::Node(scope_id), match scope_cf_kind { ScopeCfKind::Break => l.break_index, ScopeCfKind::Continue => l.continue_index, }); diff --git a/src/librustc/cfg/graphviz.rs b/src/librustc/cfg/graphviz.rs index 944b77dbf0..9241240caf 100644 --- a/src/librustc/cfg/graphviz.rs +++ b/src/librustc/cfg/graphviz.rs @@ -15,40 +15,48 @@ use graphviz as dot; use graphviz::IntoCow; -use syntax::ast; - -use hir::map as hir_map; use cfg; +use hir; +use ty::TyCtxt; pub type Node<'a> = (cfg::CFGIndex, &'a cfg::CFGNode); pub type Edge<'a> = &'a cfg::CFGEdge; -pub struct LabelledCFG<'a, 'hir: 'a> { - pub hir_map: &'a hir_map::Map<'hir>, +pub struct LabelledCFG<'a, 'tcx: 'a> { + pub tcx: TyCtxt<'a, 'tcx, 'tcx>, pub cfg: &'a cfg::CFG, pub name: String, /// `labelled_edges` controls whether we emit labels on the edges pub labelled_edges: bool, } -fn replace_newline_with_backslash_l(s: String) -> String { - // Replacing newlines with \\l causes each line to be left-aligned, - // improving presentation of (long) pretty-printed expressions. 
- if s.contains("\n") { - let mut s = s.replace("\n", "\\l"); - // Apparently left-alignment applies to the line that precedes - // \l, not the line that follows; so, add \l at end of string - // if not already present, ensuring last line gets left-aligned - // as well. - let mut last_two: Vec<_> = - s.chars().rev().take(2).collect(); - last_two.reverse(); - if last_two != ['\\', 'l'] { - s.push_str("\\l"); +impl<'a, 'tcx> LabelledCFG<'a, 'tcx> { + fn local_id_to_string(&self, local_id: hir::ItemLocalId) -> String { + assert!(self.cfg.owner_def_id.is_local()); + let node_id = self.tcx.hir.hir_to_node_id(hir::HirId { + owner: self.tcx.hir.def_index_to_hir_id(self.cfg.owner_def_id.index).owner, + local_id + }); + let s = self.tcx.hir.node_to_string(node_id); + + // Replacing newlines with \\l causes each line to be left-aligned, + // improving presentation of (long) pretty-printed expressions. + if s.contains("\n") { + let mut s = s.replace("\n", "\\l"); + // Apparently left-alignment applies to the line that precedes + // \l, not the line that follows; so, add \l at end of string + // if not already present, ensuring last line gets left-aligned + // as well. + let mut last_two: Vec<_> = + s.chars().rev().take(2).collect(); + last_two.reverse(); + if last_two != ['\\', 'l'] { + s.push_str("\\l"); + } + s + } else { + s } - s - } else { - s } } @@ -66,12 +74,10 @@ impl<'a, 'hir> dot::Labeller<'a> for LabelledCFG<'a, 'hir> { dot::LabelText::LabelStr("entry".into_cow()) } else if i == self.cfg.exit { dot::LabelText::LabelStr("exit".into_cow()) - } else if n.data.id() == ast::DUMMY_NODE_ID { + } else if n.data.id() == hir::DUMMY_ITEM_LOCAL_ID { dot::LabelText::LabelStr("(dummy_node)".into_cow()) } else { - let s = self.hir_map.node_to_string(n.data.id()); - // left-aligns the lines - let s = replace_newline_with_backslash_l(s); + let s = self.local_id_to_string(n.data.id()); dot::LabelText::EscStr(s.into_cow()) } } @@ -82,15 +88,13 @@ impl<'a, 'hir> dot::Labeller<'a> for LabelledCFG<'a, 'hir> { return dot::LabelText::EscStr(label.into_cow()); } let mut put_one = false; - for (i, &node_id) in e.data.exiting_scopes.iter().enumerate() { + for (i, &id) in e.data.exiting_scopes.iter().enumerate() { if put_one { label.push_str(",\\l"); } else { put_one = true; } - let s = self.hir_map.node_to_string(node_id); - // left-aligns the lines - let s = replace_newline_with_backslash_l(s); + let s = self.local_id_to_string(id); label.push_str(&format!("exiting scope_{} {}", i, &s[..])); diff --git a/src/librustc/cfg/mod.rs b/src/librustc/cfg/mod.rs index 1473dbb167..b379d3956e 100644 --- a/src/librustc/cfg/mod.rs +++ b/src/librustc/cfg/mod.rs @@ -13,13 +13,14 @@ use rustc_data_structures::graph; use ty::TyCtxt; -use syntax::ast; use hir; +use hir::def_id::DefId; mod construct; pub mod graphviz; pub struct CFG { + pub owner_def_id: DefId, pub graph: CFGGraph, pub entry: CFGIndex, pub exit: CFGIndex, @@ -27,7 +28,7 @@ pub struct CFG { #[derive(Copy, Clone, Debug, PartialEq)] pub enum CFGNodeData { - AST(ast::NodeId), + AST(hir::ItemLocalId), Entry, Exit, Dummy, @@ -35,18 +36,18 @@ pub enum CFGNodeData { } impl CFGNodeData { - pub fn id(&self) -> ast::NodeId { + pub fn id(&self) -> hir::ItemLocalId { if let CFGNodeData::AST(id) = *self { id } else { - ast::DUMMY_NODE_ID + hir::DUMMY_ITEM_LOCAL_ID } } } #[derive(Debug)] pub struct CFGEdgeData { - pub exiting_scopes: Vec + pub exiting_scopes: Vec } pub type CFGIndex = graph::NodeIndex; @@ -63,7 +64,7 @@ impl CFG { construct::construct(tcx, body) } - pub fn 
node_is_reachable(&self, id: ast::NodeId) -> bool { + pub fn node_is_reachable(&self, id: hir::ItemLocalId) -> bool { self.graph.depth_traverse(self.entry, graph::OUTGOING) .any(|idx| self.graph.node_data(idx).id() == id) } diff --git a/src/librustc/dep_graph/dep_node.rs b/src/librustc/dep_graph/dep_node.rs index 01fff60528..f6e8fd03cf 100644 --- a/src/librustc/dep_graph/dep_node.rs +++ b/src/librustc/dep_graph/dep_node.rs @@ -60,16 +60,18 @@ //! user of the `DepNode` API of having to know how to compute the expected //! fingerprint for a given set of node parameters. -use hir::def_id::{CrateNum, DefId}; +use hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX}; use hir::map::DefPathHash; +use hir::{HirId, ItemLocalId}; use ich::Fingerprint; -use ty::{TyCtxt, Instance, InstanceDef}; -use ty::fast_reject::SimplifiedType; +use ty::{TyCtxt, Instance, InstanceDef, ParamEnvAnd, Ty}; +use ty::subst::Substs; use rustc_data_structures::stable_hasher::{StableHasher, HashStable}; use ich::StableHashingContext; use std::fmt; use std::hash::Hash; +use syntax_pos::symbol::InternedString; // erase!() just makes tokens go away. It's used to specify which macro argument // is repeated (i.e. which sub-expression of the macro we are in) but don't need @@ -78,14 +80,28 @@ macro_rules! erase { ($x:tt) => ({}) } -macro_rules! anon_attr_to_bool { - (anon) => (true) +macro_rules! is_anon_attr { + (anon) => (true); + ($attr:ident) => (false); +} + +macro_rules! is_input_attr { + (input) => (true); + ($attr:ident) => (false); +} + +macro_rules! contains_anon_attr { + ($($attr:ident),*) => ({$(is_anon_attr!($attr) | )* false}); +} + +macro_rules! contains_input_attr { + ($($attr:ident),*) => ({$(is_input_attr!($attr) | )* false}); } macro_rules! define_dep_nodes { (<$tcx:tt> $( - [$($anon:ident)*] + [$($attr:ident),* ] $variant:ident $(( $($tuple_arg:tt),* ))* $({ $($struct_arg_name:ident : $struct_arg_ty:ty),* })* ,)* @@ -103,7 +119,9 @@ macro_rules! define_dep_nodes { match *self { $( DepKind :: $variant => { - $(return !anon_attr_to_bool!($anon);)* + if contains_anon_attr!($($attr),*) { + return false; + } // tuple args $({ @@ -124,15 +142,20 @@ macro_rules! define_dep_nodes { } } - #[allow(unreachable_code)] #[inline] - pub fn is_anon<$tcx>(&self) -> bool { + pub fn is_anon(&self) -> bool { match *self { $( - DepKind :: $variant => { - $(return anon_attr_to_bool!($anon);)* - false - } + DepKind :: $variant => { contains_anon_attr!($($attr),*) } + )* + } + } + + #[inline] + pub fn is_input(&self) -> bool { + match *self { + $( + DepKind :: $variant => { contains_input_attr!($($attr),*) } )* } } @@ -316,6 +339,25 @@ macro_rules! define_dep_nodes { Ok(DepNode::new_no_params(kind)) } } + + /// Used in testing + pub fn has_label_string(label: &str) -> bool { + match label { + $( + stringify!($variant) => true, + )* + _ => false, + } + } + } + + /// Contains variant => str representations for constructing + /// DepNode groups for tests. 
+ #[allow(dead_code, non_upper_case_globals)] + pub mod label_strs { + $( + pub const $variant: &'static str = stringify!($variant); + )* } ); } @@ -324,7 +366,7 @@ impl fmt::Debug for DepNode { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", self.kind)?; - if !self.kind.has_params() { + if !self.kind.has_params() && !self.kind.is_anon() { return Ok(()); } @@ -333,14 +375,14 @@ impl fmt::Debug for DepNode { ::ty::tls::with_opt(|opt_tcx| { if let Some(tcx) = opt_tcx { if let Some(def_id) = self.extract_def_id(tcx) { - write!(f, "{}", tcx.item_path_str(def_id))?; + write!(f, "{}", tcx.def_path_debug_str(def_id))?; } else if let Some(ref s) = tcx.dep_graph.dep_node_debug_str(*self) { write!(f, "{}", s)?; } else { - write!(f, "{:?}", self.hash)?; + write!(f, "{}", self.hash)?; } } else { - write!(f, "{:?}", self.hash)?; + write!(f, "{}", self.hash)?; } Ok(()) })?; @@ -364,6 +406,17 @@ impl DefId { } } +impl DepKind { + #[inline] + pub fn fingerprint_needed_for_crate_hash(self) -> bool { + match self { + DepKind::HirBody | + DepKind::Krate => true, + _ => false, + } + } +} + define_dep_nodes!( <'tcx> // Represents the `Krate` as a whole (the `hir::Krate` value) (as // distinct from the krate module). This is basically a hash of @@ -376,34 +429,33 @@ define_dep_nodes!( <'tcx> // suitable wrapper, you can use `tcx.dep_graph.ignore()` to gain // access to the krate, but you must remember to add suitable // edges yourself for the individual items that you read. - [] Krate, - - // Represents the HIR node with the given node-id - [] Hir(DefId), + [input] Krate, // Represents the body of a function or method. The def-id is that of the // function/method. - [] HirBody(DefId), + [input] HirBody(DefId), - // Represents the metadata for a given HIR node, typically found - // in an extern crate. - [] MetaData(DefId), + // Represents the HIR node with the given node-id + [input] Hir(DefId), + + // Represents metadata from an extern crate. + [input] CrateMetadata(CrateNum), // Represents some artifact that we save to disk. Note that these // do not have a def-id as part of their identifier. [] WorkProduct(WorkProductId), // Represents different phases in the compiler. - [] RegionMaps(DefId), + [] RegionScopeTree(DefId), [] Coherence, [] CoherenceInherentImplOverlapCheck, - [] Resolve, [] CoherenceCheckTrait(DefId), [] PrivacyAccessLevels(CrateNum), // Represents the MIR for a fn; also used as the task node for // things read/modify that MIR. [] MirConstQualif(DefId), + [] MirBuilt(DefId), [] MirConst(DefId), [] MirValidated(DefId), [] MirOptimized(DefId), @@ -412,11 +464,10 @@ define_dep_nodes!( <'tcx> [] BorrowCheckKrate, [] BorrowCheck(DefId), [] MirBorrowCheck(DefId), + [] UnsafetyCheckResult(DefId), - [] RvalueCheck(DefId), [] Reachability, [] MirKeys, - [] TransWriteMetadata, [] CrateVariances, // Nodes representing bits of computed IR in the tcx. 
Each shared @@ -434,6 +485,7 @@ define_dep_nodes!( <'tcx> [] ImplPolarity(DefId), [] ClosureKind(DefId), [] FnSignature(DefId), + [] GenSignature(DefId), [] CoerceUnsizedInfo(DefId), [] ItemVarianceConstraints(DefId), @@ -449,32 +501,23 @@ define_dep_nodes!( <'tcx> [] TypeckBodiesKrate, [] TypeckTables(DefId), [] HasTypeckTables(DefId), - [anon] ConstEval, + [] ConstEval { param_env: ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)> }, [] SymbolName(DefId), [] InstanceSymbolName { instance: Instance<'tcx> }, [] SpecializationGraph(DefId), [] ObjectSafety(DefId), - [anon] IsCopy, - [anon] IsSized, - [anon] IsFreeze, - [anon] NeedsDrop, - [anon] Layout, + [] IsCopy { param_env: ParamEnvAnd<'tcx, Ty<'tcx>> }, + [] IsSized { param_env: ParamEnvAnd<'tcx, Ty<'tcx>> }, + [] IsFreeze { param_env: ParamEnvAnd<'tcx, Ty<'tcx>> }, + [] NeedsDrop { param_env: ParamEnvAnd<'tcx, Ty<'tcx>> }, + [] Layout { param_env: ParamEnvAnd<'tcx, Ty<'tcx>> }, // The set of impls for a given trait. [] TraitImpls(DefId), - [] RelevantTraitImpls(DefId, SimplifiedType), [] AllLocalTraitImpls, - // Nodes representing caches. To properly handle a true cache, we - // don't use a DepTrackingMap, but rather we push a task node. - // Otherwise the write into the map would be incorrectly - // attributed to the first task that happened to fill the cache, - // which would yield an overly conservative dep-graph. - [] TraitItems(DefId), - [] ReprHints(DefId), - // Trait selection cache is a little funny. Given a trait // reference like `Foo: SomeTrait`, there could be // arbitrarily many def-ids to map on in there (e.g., `Foo`, @@ -502,15 +545,11 @@ define_dep_nodes!( <'tcx> // trait-select node. [anon] TraitSelect, - // For proj. cache, we just keep a list of all def-ids, since it is - // not a hotspot. 
- [] ProjectionCache { def_ids: DefIdList }, - [] ParamEnv(DefId), [] DescribeDef(DefId), [] DefSpan(DefId), - [] Stability(DefId), - [] Deprecation(DefId), + [] LookupStability(DefId), + [] LookupDeprecationEntry(DefId), [] ItemBodyNestedBodies(DefId), [] ConstIsRvaluePromotableToStatic(DefId), [] ImplParent(DefId), @@ -519,13 +558,72 @@ define_dep_nodes!( <'tcx> [] IsMirAvailable(DefId), [] ItemAttrs(DefId), [] FnArgNames(DefId), - [] DylibDepFormats(DefId), - [] IsAllocator(DefId), - [] IsPanicRuntime(DefId), - [] IsCompilerBuiltins(DefId), - [] HasGlobalAllocator(DefId), + [] DylibDepFormats(CrateNum), + [] IsPanicRuntime(CrateNum), + [] IsCompilerBuiltins(CrateNum), + [] HasGlobalAllocator(CrateNum), [] ExternCrate(DefId), [] LintLevels, + [] Specializes { impl1: DefId, impl2: DefId }, + [input] InScopeTraits(DefIndex), + [] ModuleExports(DefId), + [] IsSanitizerRuntime(CrateNum), + [] IsProfilerRuntime(CrateNum), + [] GetPanicStrategy(CrateNum), + [] IsNoBuiltins(CrateNum), + [] ImplDefaultness(DefId), + [] ExportedSymbolIds(CrateNum), + [] NativeLibraries(CrateNum), + [] PluginRegistrarFn(CrateNum), + [] DeriveRegistrarFn(CrateNum), + [] CrateDisambiguator(CrateNum), + [] CrateHash(CrateNum), + [] OriginalCrateName(CrateNum), + + [] ImplementationsOfTrait { krate: CrateNum, trait_id: DefId }, + [] AllTraitImplementations(CrateNum), + + [] IsDllimportForeignItem(DefId), + [] IsStaticallyIncludedForeignItem(DefId), + [] NativeLibraryKind(DefId), + [] LinkArgs, + + [] NamedRegion(DefIndex), + [] IsLateBound(DefIndex), + [] ObjectLifetimeDefaults(DefIndex), + + [] Visibility(DefId), + [] DepKind(CrateNum), + [] CrateName(CrateNum), + [] ItemChildren(DefId), + [] ExternModStmtCnum(DefId), + [] GetLangItems, + [] DefinedLangItems(CrateNum), + [] MissingLangItems(CrateNum), + [] ExternConstBody(DefId), + [] VisibleParentMap, + [] MissingExternCrateItem(CrateNum), + [] UsedCrateSource(CrateNum), + [] PostorderCnums, + [] HasCloneClosures(CrateNum), + [] HasCopyClosures(CrateNum), + + [] Freevars(DefId), + [] MaybeUnusedTraitImport(DefId), + [] MaybeUnusedExternCrates, + [] StabilityIndex, + [] AllCrateNums, + [] ExportedSymbols(CrateNum), + [] CollectAndPartitionTranslationItems, + [] ExportName(DefId), + [] ContainsExternIndicator(DefId), + [] IsTranslatedFunction(DefId), + [] CodegenUnit(InternedString), + [] CompileCodegenUnit(InternedString), + [] OutputFilenames, + + // We use this for most things when incr. comp. is turned off. 
+ [] Null, ); trait DepNodeParams<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> : fmt::Debug { @@ -545,12 +643,12 @@ trait DepNodeParams<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> : fmt::Debug { } impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a, T> DepNodeParams<'a, 'gcx, 'tcx> for T - where T: HashStable> + fmt::Debug + where T: HashStable> + fmt::Debug { default const CAN_RECONSTRUCT_QUERY_KEY: bool = false; default fn to_fingerprint(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Fingerprint { - let mut hcx = StableHashingContext::new(tcx); + let mut hcx = tcx.create_stable_hashing_context(); let mut hasher = StableHasher::new(); self.hash_stable(&mut hcx, &mut hasher); @@ -575,6 +673,34 @@ impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for (DefId,) { } } +impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for (DefIndex,) { + const CAN_RECONSTRUCT_QUERY_KEY: bool = true; + + fn to_fingerprint(&self, tcx: TyCtxt) -> Fingerprint { + tcx.hir.definitions().def_path_hash(self.0).0 + } + + fn to_debug_str(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> String { + tcx.item_path_str(DefId::local(self.0)) + } +} + +impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for (CrateNum,) { + const CAN_RECONSTRUCT_QUERY_KEY: bool = true; + + fn to_fingerprint(&self, tcx: TyCtxt) -> Fingerprint { + let def_id = DefId { + krate: self.0, + index: CRATE_DEF_INDEX, + }; + tcx.def_path_hash(def_id).0 + } + + fn to_debug_str(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> String { + tcx.crate_name(self.0).as_str().to_string() + } +} + impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for (DefId, DefId) { const CAN_RECONSTRUCT_QUERY_KEY: bool = false; @@ -594,42 +720,27 @@ impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for (DefId, De let (def_id_0, def_id_1) = *self; format!("({}, {})", - tcx.def_path(def_id_0).to_string(tcx), - tcx.def_path(def_id_1).to_string(tcx)) + tcx.def_path_debug_str(def_id_0), + tcx.def_path_debug_str(def_id_1)) } } - -impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for (DefIdList,) { +impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for (HirId,) { const CAN_RECONSTRUCT_QUERY_KEY: bool = false; // We actually would not need to specialize the implementation of this // method but it's faster to combine the hashes than to instantiate a full // hashing context and stable-hashing state. 
fn to_fingerprint(&self, tcx: TyCtxt) -> Fingerprint { - let mut fingerprint = Fingerprint::zero(); - - for &def_id in self.0.iter() { - let def_path_hash = tcx.def_path_hash(def_id); - fingerprint = fingerprint.combine(def_path_hash.0); - } + let (HirId { + owner, + local_id: ItemLocalId(local_id), + },) = *self; - fingerprint - } - - fn to_debug_str(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> String { - use std::fmt::Write; - - let mut s = String::new(); - write!(&mut s, "[").unwrap(); + let def_path_hash = tcx.def_path_hash(DefId::local(owner)); + let local_id = Fingerprint::from_smaller_hash(local_id as u64); - for &def_id in self.0.iter() { - write!(&mut s, "{}", tcx.def_path(def_id).to_string(tcx)).unwrap(); - } - - write!(&mut s, "]").unwrap(); - - s + def_path_hash.0.combine(local_id) } } @@ -671,5 +782,3 @@ impl WorkProductId { impl_stable_hash_for!(struct ::dep_graph::WorkProductId { hash }); - -type DefIdList = Vec; diff --git a/src/librustc/dep_graph/edges.rs b/src/librustc/dep_graph/edges.rs deleted file mode 100644 index 809d1dfcf6..0000000000 --- a/src/librustc/dep_graph/edges.rs +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use ich::Fingerprint; -use rustc_data_structures::fx::{FxHashMap, FxHashSet}; -use rustc_data_structures::stable_hasher::StableHasher; -use std::env; -use std::hash::Hash; -use std::mem; -use super::{DepGraphQuery, DepKind, DepNode}; -use super::debug::EdgeFilter; - -pub struct DepGraphEdges { - nodes: Vec, - indices: FxHashMap, - edges: FxHashSet<(DepNodeIndex, DepNodeIndex)>, - task_stack: Vec, - forbidden_edge: Option, - - // A set to help assert that no two tasks use the same DepNode. This is a - // temporary measure. Once we load the previous dep-graph as readonly, this - // check will fall out of the graph implementation naturally. 
- opened_once: FxHashSet, -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub struct DepNodeIndex { - index: u32 -} - -impl DepNodeIndex { - - pub const INVALID: DepNodeIndex = DepNodeIndex { index: ::std::u32::MAX }; - - fn new(v: usize) -> DepNodeIndex { - assert!((v & 0xFFFF_FFFF) == v); - DepNodeIndex { index: v as u32 } - } - - fn index(self) -> usize { - self.index as usize - } -} - -#[derive(Clone, Debug, PartialEq)] -enum OpenTask { - Regular { - node: DepNode, - reads: Vec, - read_set: FxHashSet, - }, - Anon { - reads: Vec, - read_set: FxHashSet, - }, - Ignore, -} - -impl DepGraphEdges { - pub fn new() -> DepGraphEdges { - let forbidden_edge = if cfg!(debug_assertions) { - match env::var("RUST_FORBID_DEP_GRAPH_EDGE") { - Ok(s) => { - match EdgeFilter::new(&s) { - Ok(f) => Some(f), - Err(err) => bug!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err), - } - } - Err(_) => None, - } - } else { - None - }; - - DepGraphEdges { - nodes: vec![], - indices: FxHashMap(), - edges: FxHashSet(), - task_stack: Vec::new(), - forbidden_edge, - opened_once: FxHashSet(), - } - } - - fn id(&self, index: DepNodeIndex) -> DepNode { - self.nodes[index.index()] - } - - pub fn push_ignore(&mut self) { - self.task_stack.push(OpenTask::Ignore); - } - - pub fn pop_ignore(&mut self) { - let popped_node = self.task_stack.pop().unwrap(); - debug_assert_eq!(popped_node, OpenTask::Ignore); - } - - pub fn push_task(&mut self, key: DepNode) { - if !self.opened_once.insert(key) { - bug!("Re-opened node {:?}", key) - } - - self.task_stack.push(OpenTask::Regular { - node: key, - reads: Vec::new(), - read_set: FxHashSet(), - }); - } - - pub fn pop_task(&mut self, key: DepNode) -> DepNodeIndex { - let popped_node = self.task_stack.pop().unwrap(); - - if let OpenTask::Regular { - node, - read_set: _, - reads - } = popped_node { - debug_assert_eq!(node, key); - - let target_id = self.get_or_create_node(node); - - for read in reads.into_iter() { - let source_id = self.get_or_create_node(read); - self.edges.insert((source_id, target_id)); - } - - target_id - } else { - bug!("pop_task() - Expected regular task to be popped") - } - } - - pub fn push_anon_task(&mut self) { - self.task_stack.push(OpenTask::Anon { - reads: Vec::new(), - read_set: FxHashSet(), - }); - } - - pub fn pop_anon_task(&mut self, kind: DepKind) -> DepNodeIndex { - let popped_node = self.task_stack.pop().unwrap(); - - if let OpenTask::Anon { - read_set: _, - reads - } = popped_node { - let mut fingerprint = Fingerprint::zero(); - let mut hasher = StableHasher::new(); - - for read in reads.iter() { - mem::discriminant(&read.kind).hash(&mut hasher); - - // Fingerprint::combine() is faster than sending Fingerprint - // through the StableHasher (at least as long as StableHasher - // is so slow). - fingerprint = fingerprint.combine(read.hash); - } - - fingerprint = fingerprint.combine(hasher.finish()); - - let target_dep_node = DepNode { - kind, - hash: fingerprint, - }; - - if let Some(&index) = self.indices.get(&target_dep_node) { - return index; - } - - let target_id = self.get_or_create_node(target_dep_node); - - for read in reads.into_iter() { - let source_id = self.get_or_create_node(read); - self.edges.insert((source_id, target_id)); - } - - target_id - } else { - bug!("pop_anon_task() - Expected anonymous task to be popped") - } - } - - /// Indicates that the current task `C` reads `v` by adding an - /// edge from `v` to `C`. If there is no current task, has no - /// effect. 
Note that *reading* from tracked state is harmless if - /// you are not in a task; what is bad is *writing* to tracked - /// state (and leaking data that you read into a tracked task). - pub fn read(&mut self, source: DepNode) { - match self.task_stack.last_mut() { - Some(&mut OpenTask::Regular { - node: target, - ref mut reads, - ref mut read_set, - }) => { - if read_set.insert(source) { - reads.push(source); - - if cfg!(debug_assertions) { - if let Some(ref forbidden_edge) = self.forbidden_edge { - if forbidden_edge.test(&source, &target) { - bug!("forbidden edge {:?} -> {:?} created", source, target) - } - } - } - } - } - Some(&mut OpenTask::Anon { - ref mut reads, - ref mut read_set, - }) => { - if read_set.insert(source) { - reads.push(source); - } - } - Some(&mut OpenTask::Ignore) | None => { - // ignore - } - } - } - - pub fn read_index(&mut self, source: DepNodeIndex) { - let dep_node = self.nodes[source.index()]; - self.read(dep_node); - } - - pub fn query(&self) -> DepGraphQuery { - let edges: Vec<_> = self.edges.iter() - .map(|&(i, j)| (self.id(i), self.id(j))) - .collect(); - DepGraphQuery::new(&self.nodes, &edges) - } - - #[inline] - pub fn add_edge(&mut self, source: DepNode, target: DepNode) { - let source = self.get_or_create_node(source); - let target = self.get_or_create_node(target); - self.edges.insert((source, target)); - } - - pub fn add_node(&mut self, node: DepNode) -> DepNodeIndex { - self.get_or_create_node(node) - } - - #[inline] - fn get_or_create_node(&mut self, dep_node: DepNode) -> DepNodeIndex { - let DepGraphEdges { - ref mut indices, - ref mut nodes, - .. - } = *self; - - *indices.entry(dep_node).or_insert_with(|| { - let next_id = nodes.len(); - nodes.push(dep_node); - DepNodeIndex::new(next_id) - }) - } -} diff --git a/src/librustc/dep_graph/graph.rs b/src/librustc/dep_graph/graph.rs index c487fc963c..8aff042955 100644 --- a/src/librustc/dep_graph/graph.rs +++ b/src/librustc/dep_graph/graph.rs @@ -8,26 +8,91 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::stable_hasher::{HashStable, StableHasher, + StableHashingContextProvider}; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use session::config::OutputType; use std::cell::{Ref, RefCell}; +use std::env; +use std::hash::Hash; use std::rc::Rc; +use ty::TyCtxt; use util::common::{ProfileQueriesMsg, profq_msg}; +use ich::Fingerprint; + +use super::debug::EdgeFilter; use super::dep_node::{DepNode, DepKind, WorkProductId}; use super::query::DepGraphQuery; use super::raii; use super::safe::DepGraphSafe; -use super::edges::{DepGraphEdges, DepNodeIndex}; +use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex}; +use super::prev::PreviousDepGraph; #[derive(Clone)] pub struct DepGraph { - data: Option> + data: Option>, + + // At the moment we are using DepNode as key here. In the future it might + // be possible to use an IndexVec here. At the moment there + // are a few problems with that: + // - Some fingerprints are needed even if incr. comp. is disabled -- yet + // we need to have a dep-graph to generate DepNodeIndices. + // - The architecture is still in flux and it's not clear what how to best + // implement things. 
+ fingerprints: Rc>> +} + + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub struct DepNodeIndex { + index: u32, +} + +impl Idx for DepNodeIndex { + fn new(idx: usize) -> Self { + debug_assert!((idx & 0xFFFF_FFFF) == idx); + DepNodeIndex { index: idx as u32 } + } + fn index(self) -> usize { + self.index as usize + } +} + +impl DepNodeIndex { + const INVALID: DepNodeIndex = DepNodeIndex { + index: ::std::u32::MAX, + }; +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub enum DepNodeColor { + Red, + Green(DepNodeIndex) +} + +impl DepNodeColor { + pub fn is_green(self) -> bool { + match self { + DepNodeColor::Red => false, + DepNodeColor::Green(_) => true, + } + } } struct DepGraphData { - /// The actual graph data. - edges: RefCell, + /// The new encoding of the dependency graph, optimized for red/green + /// tracking. The `current` field is the dependency graph of only the + /// current compilation session: We don't merge the previous dep-graph into + /// current one anymore. + current: RefCell, + + /// The dep-graph from the previous compilation session. It contains all + /// nodes and edges as well as all fingerprints of nodes that have them. + previous: PreviousDepGraph, + + colors: RefCell>, /// When we load, there may be `.o` files, cached mir, or other such /// things available to us. If we find that they are not dirty, we @@ -39,21 +104,32 @@ struct DepGraphData { work_products: RefCell>, dep_node_debug: RefCell>, + + // Used for testing, only populated when -Zquery-dep-graph is specified. + loaded_from_cache: RefCell>, } impl DepGraph { - pub fn new(enabled: bool) -> DepGraph { + + pub fn new(prev_graph: PreviousDepGraph) -> DepGraph { DepGraph { - data: if enabled { - Some(Rc::new(DepGraphData { - previous_work_products: RefCell::new(FxHashMap()), - work_products: RefCell::new(FxHashMap()), - edges: RefCell::new(DepGraphEdges::new()), - dep_node_debug: RefCell::new(FxHashMap()), - })) - } else { - None - } + data: Some(Rc::new(DepGraphData { + previous_work_products: RefCell::new(FxHashMap()), + work_products: RefCell::new(FxHashMap()), + dep_node_debug: RefCell::new(FxHashMap()), + current: RefCell::new(CurrentDepGraph::new()), + previous: prev_graph, + colors: RefCell::new(FxHashMap()), + loaded_from_cache: RefCell::new(FxHashMap()), + })), + fingerprints: Rc::new(RefCell::new(FxHashMap())), + } + } + + pub fn new_disabled() -> DepGraph { + DepGraph { + data: None, + fingerprints: Rc::new(RefCell::new(FxHashMap())), } } @@ -64,15 +140,22 @@ impl DepGraph { } pub fn query(&self) -> DepGraphQuery { - self.data.as_ref().unwrap().edges.borrow().query() - } + let current_dep_graph = self.data.as_ref().unwrap().current.borrow(); + let nodes: Vec<_> = current_dep_graph.nodes.iter().cloned().collect(); + let mut edges = Vec::new(); + for (index, edge_targets) in current_dep_graph.edges.iter_enumerated() { + let from = current_dep_graph.nodes[index]; + for &edge_target in edge_targets { + let to = current_dep_graph.nodes[edge_target]; + edges.push((from, to)); + } + } - pub fn in_ignore<'graph>(&'graph self) -> Option> { - self.data.as_ref().map(|data| raii::IgnoreTask::new(&data.edges)) + DepGraphQuery::new(&nodes[..], &edges[..]) } - pub fn in_task<'graph>(&'graph self, key: DepNode) -> Option> { - self.data.as_ref().map(|data| raii::DepTask::new(&data.edges, key)) + pub fn in_ignore<'graph>(&'graph self) -> Option> { + self.data.as_ref().map(|data| raii::IgnoreTask::new(&data.current)) } pub fn with_ignore(&self, op: OP) -> R @@ -109,27 +192,85 @@ impl DepGraph { /// 
`arg` parameter. /// /// [README]: README.md - pub fn with_task(&self, - key: DepNode, - cx: C, - arg: A, - task: fn(C, A) -> R) - -> (R, DepNodeIndex) - where C: DepGraphSafe + pub fn with_task(&self, + key: DepNode, + cx: C, + arg: A, + task: fn(C, A) -> R) + -> (R, DepNodeIndex) + where C: DepGraphSafe + StableHashingContextProvider, + R: HashStable, { if let Some(ref data) = self.data { - data.edges.borrow_mut().push_task(key); + debug_assert!(!data.colors.borrow().contains_key(&key)); + + data.current.borrow_mut().push_task(key); if cfg!(debug_assertions) { profq_msg(ProfileQueriesMsg::TaskBegin(key.clone())) }; + + // In incremental mode, hash the result of the task. We don't + // do anything with the hash yet, but we are computing it + // anyway so that + // - we make sure that the infrastructure works and + // - we can get an idea of the runtime cost. + let mut hcx = cx.create_stable_hashing_context(); + let result = task(cx, arg); if cfg!(debug_assertions) { profq_msg(ProfileQueriesMsg::TaskEnd) }; - let dep_node_index = data.edges.borrow_mut().pop_task(key); + + let dep_node_index = data.current.borrow_mut().pop_task(key); + + let mut stable_hasher = StableHasher::new(); + result.hash_stable(&mut hcx, &mut stable_hasher); + + let current_fingerprint = stable_hasher.finish(); + + // Store the current fingerprint + { + let old_value = self.fingerprints + .borrow_mut() + .insert(key, current_fingerprint); + debug_assert!(old_value.is_none(), + "DepGraph::with_task() - Duplicate fingerprint \ + insertion for {:?}", key); + } + + // Determine the color of the new DepNode. + { + let prev_fingerprint = data.previous.fingerprint_of(&key); + + let color = if Some(current_fingerprint) == prev_fingerprint { + DepNodeColor::Green(dep_node_index) + } else { + DepNodeColor::Red + }; + + let old_value = data.colors.borrow_mut().insert(key, color); + debug_assert!(old_value.is_none(), + "DepGraph::with_task() - Duplicate DepNodeColor \ + insertion for {:?}", key); + } + (result, dep_node_index) } else { - (task(cx, arg), DepNodeIndex::INVALID) + if key.kind.fingerprint_needed_for_crate_hash() { + let mut hcx = cx.create_stable_hashing_context(); + let result = task(cx, arg); + let mut stable_hasher = StableHasher::new(); + result.hash_stable(&mut hcx, &mut stable_hasher); + let old_value = self.fingerprints + .borrow_mut() + .insert(key, stable_hasher.finish()); + debug_assert!(old_value.is_none(), + "DepGraph::with_task() - Duplicate fingerprint \ + insertion for {:?}", key); + (result, DepNodeIndex::INVALID) + } else { + (task(cx, arg), DepNodeIndex::INVALID) + } } } @@ -139,10 +280,12 @@ impl DepGraph { where OP: FnOnce() -> R { if let Some(ref data) = self.data { - data.edges.borrow_mut().push_anon_task(); + data.current.borrow_mut().push_anon_task(); let result = op(); - let dep_node = data.edges.borrow_mut().pop_anon_task(dep_kind); - (result, dep_node) + let dep_node_index = data.current + .borrow_mut() + .pop_anon_task(dep_kind); + (result, dep_node_index) } else { (op(), DepNodeIndex::INVALID) } @@ -151,34 +294,28 @@ impl DepGraph { #[inline] pub fn read(&self, v: DepNode) { if let Some(ref data) = self.data { - data.edges.borrow_mut().read(v); + let mut current = data.current.borrow_mut(); + if let Some(&dep_node_index) = current.node_to_node_index.get(&v) { + current.read_index(dep_node_index); + } else { + bug!("DepKind {:?} should be pre-allocated but isn't.", v.kind) + } } } #[inline] - pub fn read_index(&self, v: DepNodeIndex) { + pub fn read_index(&self, dep_node_index: 
DepNodeIndex) { if let Some(ref data) = self.data { - data.edges.borrow_mut().read_index(v); + data.current.borrow_mut().read_index(dep_node_index); } } - /// Only to be used during graph loading - #[inline] - pub fn add_edge_directly(&self, source: DepNode, target: DepNode) { - self.data.as_ref().unwrap().edges.borrow_mut().add_edge(source, target); + pub fn fingerprint_of(&self, dep_node: &DepNode) -> Fingerprint { + self.fingerprints.borrow()[dep_node] } - /// Only to be used during graph loading - pub fn add_node_directly(&self, node: DepNode) { - self.data.as_ref().unwrap().edges.borrow_mut().add_node(node); - } - - pub fn alloc_input_node(&self, node: DepNode) -> DepNodeIndex { - if let Some(ref data) = self.data { - data.edges.borrow_mut().add_node(node) - } else { - DepNodeIndex::INVALID - } + pub fn prev_fingerprint_of(&self, dep_node: &DepNode) -> Option { + self.data.as_ref().unwrap().previous.fingerprint_of(dep_node) } /// Indicates that a previous work product exists for `v`. This is @@ -235,15 +372,244 @@ impl DepGraph { debug_str_gen: F) where F: FnOnce() -> String { - let mut dep_node_debug = self.data.as_ref().unwrap().dep_node_debug.borrow_mut(); + let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug; - dep_node_debug.entry(dep_node) - .or_insert_with(debug_str_gen); + if dep_node_debug.borrow().contains_key(&dep_node) { + return + } + let debug_str = debug_str_gen(); + dep_node_debug.borrow_mut().insert(dep_node, debug_str); } pub(super) fn dep_node_debug_str(&self, dep_node: DepNode) -> Option { self.data.as_ref().and_then(|t| t.dep_node_debug.borrow().get(&dep_node).cloned()) } + + pub fn serialize(&self) -> SerializedDepGraph { + let fingerprints = self.fingerprints.borrow(); + let current_dep_graph = self.data.as_ref().unwrap().current.borrow(); + + let nodes: IndexVec<_, _> = current_dep_graph.nodes.iter().map(|dep_node| { + let fingerprint = fingerprints.get(dep_node) + .cloned() + .unwrap_or(Fingerprint::zero()); + (*dep_node, fingerprint) + }).collect(); + + let total_edge_count: usize = current_dep_graph.edges.iter() + .map(|v| v.len()) + .sum(); + + let mut edge_list_indices = IndexVec::with_capacity(nodes.len()); + let mut edge_list_data = Vec::with_capacity(total_edge_count); + + for (current_dep_node_index, edges) in current_dep_graph.edges.iter_enumerated() { + let start = edge_list_data.len() as u32; + // This should really just be a memcpy :/ + edge_list_data.extend(edges.iter().map(|i| SerializedDepNodeIndex::new(i.index()))); + let end = edge_list_data.len() as u32; + + debug_assert_eq!(current_dep_node_index.index(), edge_list_indices.len()); + edge_list_indices.push((start, end)); + } + + debug_assert!(edge_list_data.len() <= ::std::u32::MAX as usize); + debug_assert_eq!(edge_list_data.len(), total_edge_count); + + SerializedDepGraph { + nodes, + edge_list_indices, + edge_list_data, + } + } + + pub fn node_color(&self, dep_node: &DepNode) -> Option { + self.data.as_ref().and_then(|data| data.colors.borrow().get(dep_node).cloned()) + } + + pub fn try_mark_green(&self, + tcx: TyCtxt, + dep_node: &DepNode) + -> Option { + debug!("try_mark_green({:?}) - BEGIN", dep_node); + let data = self.data.as_ref().unwrap(); + + debug_assert!(!data.colors.borrow().contains_key(dep_node)); + debug_assert!(!data.current.borrow().node_to_node_index.contains_key(dep_node)); + + if dep_node.kind.is_input() { + // We should only hit try_mark_green() for inputs that do not exist + // anymore in the current compilation session. 
Existing inputs are + // eagerly marked as either red/green before any queries are + // executed. + debug_assert!(dep_node.extract_def_id(tcx).is_none()); + debug!("try_mark_green({:?}) - END - DepNode is deleted input", dep_node); + return None; + } + + let (prev_deps, prev_dep_node_index) = match data.previous.edges_from(dep_node) { + Some(prev) => { + // This DepNode and the corresponding query invocation existed + // in the previous compilation session too, so we can try to + // mark it as green by recursively marking all of its + // dependencies green. + prev + } + None => { + // This DepNode did not exist in the previous compilation session, + // so we cannot mark it as green. + debug!("try_mark_green({:?}) - END - DepNode does not exist in \ + current compilation session anymore", dep_node); + return None + } + }; + + let mut current_deps = Vec::new(); + + for &dep_dep_node_index in prev_deps { + let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index); + + let dep_dep_node_color = data.colors.borrow().get(dep_dep_node).cloned(); + match dep_dep_node_color { + Some(DepNodeColor::Green(node_index)) => { + // This dependency has been marked as green before, we are + // still fine and can continue with checking the other + // dependencies. + debug!("try_mark_green({:?}) --- found dependency {:?} to \ + be immediately green", dep_node, dep_dep_node); + current_deps.push(node_index); + } + Some(DepNodeColor::Red) => { + // We found a dependency the value of which has changed + // compared to the previous compilation session. We cannot + // mark the DepNode as green and also don't need to bother + // with checking any of the other dependencies. + debug!("try_mark_green({:?}) - END - dependency {:?} was \ + immediately red", dep_node, dep_dep_node); + return None + } + None => { + if dep_dep_node.kind.is_input() { + // This input does not exist anymore. + debug_assert!(dep_dep_node.extract_def_id(tcx).is_none(), + "Encountered input {:?} without color", + dep_dep_node); + debug!("try_mark_green({:?}) - END - dependency {:?} \ + was deleted input", dep_node, dep_dep_node); + return None; + } + + debug!("try_mark_green({:?}) --- state of dependency {:?} \ + is unknown, trying to mark it green", dep_node, + dep_dep_node); + + // We don't know the state of this dependency. Let's try to + // mark it green. + if let Some(node_index) = self.try_mark_green(tcx, dep_dep_node) { + debug!("try_mark_green({:?}) --- managed to MARK \ + dependency {:?} as green", dep_node, dep_dep_node); + current_deps.push(node_index); + } else { + // We failed to mark it green, so we try to force the query. + debug!("try_mark_green({:?}) --- trying to force \ + dependency {:?}", dep_node, dep_dep_node); + if ::ty::maps::force_from_dep_node(tcx, dep_dep_node) { + let dep_dep_node_color = data.colors + .borrow() + .get(dep_dep_node) + .cloned(); + match dep_dep_node_color { + Some(DepNodeColor::Green(node_index)) => { + debug!("try_mark_green({:?}) --- managed to \ + FORCE dependency {:?} to green", + dep_node, dep_dep_node); + current_deps.push(node_index); + } + Some(DepNodeColor::Red) => { + debug!("try_mark_green({:?}) - END - \ + dependency {:?} was red after forcing", + dep_node, + dep_dep_node); + return None + } + None => { + bug!("try_mark_green() - Forcing the DepNode \ + should have set its color") + } + } + } else { + // The DepNode could not be forced. 
+ debug!("try_mark_green({:?}) - END - dependency {:?} \ + could not be forced", dep_node, dep_dep_node); + return None + } + } + } + } + } + + + // If we got here without hitting a `return` that means that all + // dependencies of this DepNode could be marked as green. Therefore we + // can also mark this DepNode as green. We do so by... + + // ... allocating an entry for it in the current dependency graph and + // adding all the appropriate edges imported from the previous graph ... + let dep_node_index = data.current + .borrow_mut() + .alloc_node(*dep_node, current_deps); + + // ... copying the fingerprint from the previous graph too, so we don't + // have to recompute it ... + let fingerprint = data.previous.fingerprint_by_index(prev_dep_node_index); + let old_fingerprint = self.fingerprints + .borrow_mut() + .insert(*dep_node, fingerprint); + debug_assert!(old_fingerprint.is_none(), + "DepGraph::try_mark_green() - Duplicate fingerprint \ + insertion for {:?}", dep_node); + + // ... and finally storing a "Green" entry in the color map. + let old_color = data.colors + .borrow_mut() + .insert(*dep_node, DepNodeColor::Green(dep_node_index)); + debug_assert!(old_color.is_none(), + "DepGraph::try_mark_green() - Duplicate DepNodeColor \ + insertion for {:?}", dep_node); + + debug!("try_mark_green({:?}) - END - successfully marked as green", dep_node); + Some(dep_node_index) + } + + // Used in various assertions + pub fn is_green(&self, dep_node_index: DepNodeIndex) -> bool { + let dep_node = self.data.as_ref().unwrap().current.borrow().nodes[dep_node_index]; + self.data.as_ref().unwrap().colors.borrow().get(&dep_node).map(|&color| { + match color { + DepNodeColor::Red => false, + DepNodeColor::Green(_) => true, + } + }).unwrap_or(false) + } + + pub fn mark_loaded_from_cache(&self, dep_node_index: DepNodeIndex, state: bool) { + debug!("mark_loaded_from_cache({:?}, {})", + self.data.as_ref().unwrap().current.borrow().nodes[dep_node_index], + state); + + self.data + .as_ref() + .unwrap() + .loaded_from_cache + .borrow_mut() + .insert(dep_node_index, state); + } + + pub fn was_loaded_from_cache(&self, dep_node: &DepNode) -> Option { + let data = self.data.as_ref().unwrap(); + let dep_node_index = data.current.borrow().node_to_node_index[dep_node]; + data.loaded_from_cache.borrow().get(&dep_node_index).cloned() + } } /// A "work product" is an intermediate result that we save into the @@ -280,11 +646,203 @@ impl DepGraph { #[derive(Clone, Debug, RustcEncodable, RustcDecodable)] pub struct WorkProduct { pub cgu_name: String, - /// Extra hash used to decide if work-product is still suitable; - /// note that this is *not* a hash of the work-product itself. - /// See documentation on `WorkProduct` type for an example. - pub input_hash: u64, - /// Saved files associated with this CGU pub saved_files: Vec<(OutputType, String)>, } + +pub(super) struct CurrentDepGraph { + nodes: IndexVec, + edges: IndexVec>, + node_to_node_index: FxHashMap, + task_stack: Vec, + forbidden_edge: Option, + + // Anonymous DepNodes are nodes the ID of which we compute from the list of + // their edges. This has the beneficial side-effect that multiple anonymous + // nodes can be coalesced into one without changing the semantics of the + // dependency graph. However, the merging of nodes can lead to a subtle + // problem during red-green marking: The color of an anonymous node from + // the current session might "shadow" the color of the node with the same + // ID from the previous session. 
In order to side-step this problem, we make + // sure that anon-node IDs allocated in different sessions don't overlap. + // This is implemented by mixing a session-key into the ID fingerprint of + // each anon node. The session-key is just a random number generated when + // the DepGraph is created. + anon_id_seed: Fingerprint, +} + +impl CurrentDepGraph { + fn new() -> CurrentDepGraph { + use std::time::{SystemTime, UNIX_EPOCH}; + + let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); + let nanos = duration.as_secs() * 1_000_000_000 + + duration.subsec_nanos() as u64; + let mut stable_hasher = StableHasher::new(); + nanos.hash(&mut stable_hasher); + + let forbidden_edge = if cfg!(debug_assertions) { + match env::var("RUST_FORBID_DEP_GRAPH_EDGE") { + Ok(s) => { + match EdgeFilter::new(&s) { + Ok(f) => Some(f), + Err(err) => bug!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err), + } + } + Err(_) => None, + } + } else { + None + }; + + CurrentDepGraph { + nodes: IndexVec::new(), + edges: IndexVec::new(), + node_to_node_index: FxHashMap(), + anon_id_seed: stable_hasher.finish(), + task_stack: Vec::new(), + forbidden_edge, + } + } + + pub(super) fn push_ignore(&mut self) { + self.task_stack.push(OpenTask::Ignore); + } + + pub(super) fn pop_ignore(&mut self) { + let popped_node = self.task_stack.pop().unwrap(); + debug_assert_eq!(popped_node, OpenTask::Ignore); + } + + pub(super) fn push_task(&mut self, key: DepNode) { + self.task_stack.push(OpenTask::Regular { + node: key, + reads: Vec::new(), + read_set: FxHashSet(), + }); + } + + pub(super) fn pop_task(&mut self, key: DepNode) -> DepNodeIndex { + let popped_node = self.task_stack.pop().unwrap(); + + if let OpenTask::Regular { + node, + read_set: _, + reads + } = popped_node { + debug_assert_eq!(node, key); + self.alloc_node(node, reads) + } else { + bug!("pop_task() - Expected regular task to be popped") + } + } + + fn push_anon_task(&mut self) { + self.task_stack.push(OpenTask::Anon { + reads: Vec::new(), + read_set: FxHashSet(), + }); + } + + fn pop_anon_task(&mut self, kind: DepKind) -> DepNodeIndex { + let popped_node = self.task_stack.pop().unwrap(); + + if let OpenTask::Anon { + read_set: _, + reads + } = popped_node { + let mut fingerprint = self.anon_id_seed; + let mut hasher = StableHasher::new(); + + for &read in reads.iter() { + let read_dep_node = self.nodes[read]; + + ::std::mem::discriminant(&read_dep_node.kind).hash(&mut hasher); + + // Fingerprint::combine() is faster than sending Fingerprint + // through the StableHasher (at least as long as StableHasher + // is so slow). 
+ fingerprint = fingerprint.combine(read_dep_node.hash); + } + + fingerprint = fingerprint.combine(hasher.finish()); + + let target_dep_node = DepNode { + kind, + hash: fingerprint, + }; + + if let Some(&index) = self.node_to_node_index.get(&target_dep_node) { + index + } else { + self.alloc_node(target_dep_node, reads) + } + } else { + bug!("pop_anon_task() - Expected anonymous task to be popped") + } + } + + fn read_index(&mut self, source: DepNodeIndex) { + match self.task_stack.last_mut() { + Some(&mut OpenTask::Regular { + ref mut reads, + ref mut read_set, + node: ref target, + }) => { + if read_set.insert(source) { + reads.push(source); + + if cfg!(debug_assertions) { + if let Some(ref forbidden_edge) = self.forbidden_edge { + let source = self.nodes[source]; + if forbidden_edge.test(&source, &target) { + bug!("forbidden edge {:?} -> {:?} created", + source, + target) + } + } + } + } + } + Some(&mut OpenTask::Anon { + ref mut reads, + ref mut read_set, + }) => { + if read_set.insert(source) { + reads.push(source); + } + } + Some(&mut OpenTask::Ignore) | None => { + // ignore + } + } + } + + fn alloc_node(&mut self, + dep_node: DepNode, + edges: Vec) + -> DepNodeIndex { + debug_assert_eq!(self.edges.len(), self.nodes.len()); + debug_assert_eq!(self.node_to_node_index.len(), self.nodes.len()); + debug_assert!(!self.node_to_node_index.contains_key(&dep_node)); + let dep_node_index = DepNodeIndex::new(self.nodes.len()); + self.nodes.push(dep_node); + self.node_to_node_index.insert(dep_node, dep_node_index); + self.edges.push(edges); + dep_node_index + } +} + +#[derive(Clone, Debug, PartialEq)] +enum OpenTask { + Regular { + node: DepNode, + reads: Vec, + read_set: FxHashSet, + }, + Anon { + reads: Vec, + read_set: FxHashSet, + }, + Ignore, +} diff --git a/src/librustc/dep_graph/mod.rs b/src/librustc/dep_graph/mod.rs index ac0c88ced9..8d2cf67684 100644 --- a/src/librustc/dep_graph/mod.rs +++ b/src/librustc/dep_graph/mod.rs @@ -11,21 +11,18 @@ pub mod debug; mod dep_node; mod dep_tracking_map; -mod edges; mod graph; +mod prev; mod query; mod raii; mod safe; +mod serialized; pub use self::dep_tracking_map::{DepTrackingMap, DepTrackingMapConfig}; -pub use self::dep_node::DepNode; -pub use self::dep_node::WorkProductId; -pub use self::graph::DepGraph; -pub use self::graph::WorkProduct; -pub use self::edges::DepNodeIndex; +pub use self::dep_node::{DepNode, DepKind, DepConstructor, WorkProductId}; +pub use self::graph::{DepGraph, WorkProduct, DepNodeIndex, DepNodeColor}; +pub use self::prev::PreviousDepGraph; pub use self::query::DepGraphQuery; pub use self::safe::AssertDepGraphSafe; pub use self::safe::DepGraphSafe; -pub use self::raii::DepTask; - -pub use self::dep_node::{DepKind, DepConstructor}; +pub use self::serialized::SerializedDepGraph; diff --git a/src/librustc/dep_graph/prev.rs b/src/librustc/dep_graph/prev.rs new file mode 100644 index 0000000000..17001bbb0c --- /dev/null +++ b/src/librustc/dep_graph/prev.rs @@ -0,0 +1,60 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use ich::Fingerprint; +use rustc_data_structures::fx::FxHashMap; +use super::dep_node::DepNode; +use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex}; + +#[derive(Debug, RustcEncodable, RustcDecodable)] +pub struct PreviousDepGraph { + data: SerializedDepGraph, + index: FxHashMap, +} + +impl PreviousDepGraph { + pub fn new(data: SerializedDepGraph) -> PreviousDepGraph { + let index: FxHashMap<_, _> = data.nodes + .iter_enumerated() + .map(|(idx, &(dep_node, _))| (dep_node, idx)) + .collect(); + PreviousDepGraph { data, index } + } + + #[inline] + pub fn edges_from(&self, + dep_node: &DepNode) + -> Option<(&[SerializedDepNodeIndex], SerializedDepNodeIndex)> { + self.index + .get(dep_node) + .map(|&node_index| { + (self.data.edge_targets_from(node_index), node_index) + }) + } + + #[inline] + pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode { + self.data.nodes[dep_node_index].0 + } + + #[inline] + pub fn fingerprint_of(&self, dep_node: &DepNode) -> Option { + self.index + .get(dep_node) + .map(|&node_index| self.data.nodes[node_index].1) + } + + #[inline] + pub fn fingerprint_by_index(&self, + dep_node_index: SerializedDepNodeIndex) + -> Fingerprint { + self.data.nodes[dep_node_index].1 + } +} diff --git a/src/librustc/dep_graph/query.rs b/src/librustc/dep_graph/query.rs index 283da1050a..ea83a4f8b3 100644 --- a/src/librustc/dep_graph/query.rs +++ b/src/librustc/dep_graph/query.rs @@ -22,11 +22,10 @@ impl DepGraphQuery { pub fn new(nodes: &[DepNode], edges: &[(DepNode, DepNode)]) -> DepGraphQuery { - let mut graph = Graph::new(); + let mut graph = Graph::with_capacity(nodes.len(), edges.len()); let mut indices = FxHashMap(); for node in nodes { - indices.insert(node.clone(), graph.next_node_index()); - graph.add_node(node.clone()); + indices.insert(node.clone(), graph.add_node(node.clone())); } for &(ref source, ref target) in edges { diff --git a/src/librustc/dep_graph/raii.rs b/src/librustc/dep_graph/raii.rs index ce261ca68e..5728bcc7d2 100644 --- a/src/librustc/dep_graph/raii.rs +++ b/src/librustc/dep_graph/raii.rs @@ -8,43 +8,19 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use super::DepNode; -use super::edges::DepGraphEdges; +use super::graph::CurrentDepGraph; use std::cell::RefCell; -pub struct DepTask<'graph> { - graph: &'graph RefCell, - key: DepNode, -} - -impl<'graph> DepTask<'graph> { - pub fn new(graph: &'graph RefCell, - key: DepNode) - -> DepTask<'graph> { - graph.borrow_mut().push_task(key); - DepTask { - graph, - key, - } - } -} - -impl<'graph> Drop for DepTask<'graph> { - fn drop(&mut self) { - self.graph.borrow_mut().pop_task(self.key); - } -} - pub struct IgnoreTask<'graph> { - graph: &'graph RefCell, + graph: &'graph RefCell, } impl<'graph> IgnoreTask<'graph> { - pub fn new(graph: &'graph RefCell) -> IgnoreTask<'graph> { + pub(super) fn new(graph: &'graph RefCell) -> IgnoreTask<'graph> { graph.borrow_mut().push_ignore(); IgnoreTask { - graph + graph, } } } diff --git a/src/librustc/dep_graph/safe.rs b/src/librustc/dep_graph/safe.rs index bf224f89f0..f82bf9be03 100644 --- a/src/librustc/dep_graph/safe.rs +++ b/src/librustc/dep_graph/safe.rs @@ -58,6 +58,13 @@ impl<'a, A> DepGraphSafe for &'a A { } +/// Mut ref to dep-graph-safe stuff should still be dep-graph-safe. +impl<'a, A> DepGraphSafe for &'a mut A + where A: DepGraphSafe, +{ +} + + /// No data here! 
:) impl DepGraphSafe for () { } diff --git a/src/librustc/dep_graph/serialized.rs b/src/librustc/dep_graph/serialized.rs new file mode 100644 index 0000000000..c96040ab9b --- /dev/null +++ b/src/librustc/dep_graph/serialized.rs @@ -0,0 +1,50 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The data that we will serialize and deserialize. + +use dep_graph::DepNode; +use ich::Fingerprint; +use rustc_data_structures::indexed_vec::{IndexVec, Idx}; + +newtype_index!(SerializedDepNodeIndex); + +/// Data for use when recompiling the **current crate**. +#[derive(Debug, RustcEncodable, RustcDecodable)] +pub struct SerializedDepGraph { + /// The set of all DepNodes in the graph + pub nodes: IndexVec, + /// For each DepNode, stores the list of edges originating from that + /// DepNode. Encoded as a [start, end) pair indexing into edge_list_data, + /// which holds the actual DepNodeIndices of the target nodes. + pub edge_list_indices: IndexVec, + /// A flattened list of all edge targets in the graph. Edge sources are + /// implicit in edge_list_indices. + pub edge_list_data: Vec, +} + +impl SerializedDepGraph { + + pub fn new() -> SerializedDepGraph { + SerializedDepGraph { + nodes: IndexVec::new(), + edge_list_indices: IndexVec::new(), + edge_list_data: Vec::new(), + } + } + + #[inline] + pub fn edge_targets_from(&self, + source: SerializedDepNodeIndex) + -> &[SerializedDepNodeIndex] { + let targets = self.edge_list_indices[source]; + &self.edge_list_data[targets.0 as usize..targets.1 as usize] + } +} diff --git a/src/librustc/diagnostics.rs b/src/librustc/diagnostics.rs index 412759e114..26f56ffaca 100644 --- a/src/librustc/diagnostics.rs +++ b/src/librustc/diagnostics.rs @@ -479,40 +479,6 @@ fn main() { ``` "##, -E0133: r##" -Unsafe code was used outside of an unsafe function or block. - -Erroneous code example: - -```compile_fail,E0133 -unsafe fn f() { return; } // This is the unsafe code - -fn main() { - f(); // error: call to unsafe function requires unsafe function or block -} -``` - -Using unsafe functionality is potentially dangerous and disallowed by safety -checks. Examples: - -* Dereferencing raw pointers -* Calling functions via FFI -* Calling functions marked unsafe - -These safety checks can be relaxed for a section of the code by wrapping the -unsafe instructions with an `unsafe` block. For instance: - -``` -unsafe fn f() { return; } - -fn main() { - unsafe { f(); } // ok! -} -``` - -See also https://doc.rust-lang.org/book/first-edition/unsafe.html -"##, - // This shouldn't really ever trigger since the repeated value error comes first E0136: r##" A binary can only have one entry point, and by default that entry point is the @@ -688,8 +654,8 @@ See also https://doc.rust-lang.org/book/first-edition/no-stdlib.html "##, E0214: r##" -A generic type was described using parentheses rather than angle brackets. For -example: +A generic type was described using parentheses rather than angle brackets. +For example: ```compile_fail,E0214 fn main() { @@ -702,6 +668,93 @@ Parentheses are currently only used with generic types when defining parameters for `Fn`-family traits. 
"##, +E0230: r##" +The `#[rustc_on_unimplemented]` attribute lets you specify a custom error +message for when a particular trait isn't implemented on a type placed in a +position that needs that trait. For example, when the following code is +compiled: + +```compile_fail +#![feature(on_unimplemented)] + +fn foo>(x: T){} + +#[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"] +trait Index { /* ... */ } + +foo(true); // `bool` does not implement `Index` +``` + +There will be an error about `bool` not implementing `Index`, followed by a +note saying "the type `bool` cannot be indexed by `u8`". + +As you can see, you can specify type parameters in curly braces for +substitution with the actual types (using the regular format string syntax) in +a given situation. Furthermore, `{Self}` will substitute to the type (in this +case, `bool`) that we tried to use. + +This error appears when the curly braces contain an identifier which doesn't +match with any of the type parameters or the string `Self`. This might happen +if you misspelled a type parameter, or if you intended to use literal curly +braces. If it is the latter, escape the curly braces with a second curly brace +of the same type; e.g. a literal `{` is `{{`. +"##, + +E0231: r##" +The `#[rustc_on_unimplemented]` attribute lets you specify a custom error +message for when a particular trait isn't implemented on a type placed in a +position that needs that trait. For example, when the following code is +compiled: + +```compile_fail +#![feature(on_unimplemented)] + +fn foo>(x: T){} + +#[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"] +trait Index { /* ... */ } + +foo(true); // `bool` does not implement `Index` +``` + +there will be an error about `bool` not implementing `Index`, followed by a +note saying "the type `bool` cannot be indexed by `u8`". + +As you can see, you can specify type parameters in curly braces for +substitution with the actual types (using the regular format string syntax) in +a given situation. Furthermore, `{Self}` will substitute to the type (in this +case, `bool`) that we tried to use. + +This error appears when the curly braces do not contain an identifier. Please +add one of the same name as a type parameter. If you intended to use literal +braces, use `{{` and `}}` to escape them. +"##, + +E0232: r##" +The `#[rustc_on_unimplemented]` attribute lets you specify a custom error +message for when a particular trait isn't implemented on a type placed in a +position that needs that trait. For example, when the following code is +compiled: + +```compile_fail +#![feature(on_unimplemented)] + +fn foo>(x: T){} + +#[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"] +trait Index { /* ... */ } + +foo(true); // `bool` does not implement `Index` +``` + +there will be an error about `bool` not implementing `Index`, followed by a +note saying "the type `bool` cannot be indexed by `u8`". + +For this to work, some note must be specified. An empty attribute will not do +anything, please remove the attribute or add some helpful note for users of the +trait. +"##, + E0261: r##" When using a lifetime like `'a` in a type, it must be declared before being used. @@ -917,92 +970,6 @@ for v in &vs { ``` "##, -E0272: r##" -The `#[rustc_on_unimplemented]` attribute lets you specify a custom error -message for when a particular trait isn't implemented on a type placed in a -position that needs that trait. 
For example, when the following code is -compiled: - -```compile_fail -#![feature(on_unimplemented)] - -fn foo>(x: T){} - -#[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"] -trait Index { /* ... */ } - -foo(true); // `bool` does not implement `Index` -``` - -There will be an error about `bool` not implementing `Index`, followed by a -note saying "the type `bool` cannot be indexed by `u8`". - -As you can see, you can specify type parameters in curly braces for -substitution with the actual types (using the regular format string syntax) in -a given situation. Furthermore, `{Self}` will substitute to the type (in this -case, `bool`) that we tried to use. - -This error appears when the curly braces contain an identifier which doesn't -match with any of the type parameters or the string `Self`. This might happen -if you misspelled a type parameter, or if you intended to use literal curly -braces. If it is the latter, escape the curly braces with a second curly brace -of the same type; e.g. a literal `{` is `{{`. -"##, - -E0273: r##" -The `#[rustc_on_unimplemented]` attribute lets you specify a custom error -message for when a particular trait isn't implemented on a type placed in a -position that needs that trait. For example, when the following code is -compiled: - -```compile_fail -#![feature(on_unimplemented)] - -fn foo>(x: T){} - -#[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"] -trait Index { /* ... */ } - -foo(true); // `bool` does not implement `Index` -``` - -there will be an error about `bool` not implementing `Index`, followed by a -note saying "the type `bool` cannot be indexed by `u8`". - -As you can see, you can specify type parameters in curly braces for -substitution with the actual types (using the regular format string syntax) in -a given situation. Furthermore, `{Self}` will substitute to the type (in this -case, `bool`) that we tried to use. - -This error appears when the curly braces do not contain an identifier. Please -add one of the same name as a type parameter. If you intended to use literal -braces, use `{{` and `}}` to escape them. -"##, - -E0274: r##" -The `#[rustc_on_unimplemented]` attribute lets you specify a custom error -message for when a particular trait isn't implemented on a type placed in a -position that needs that trait. For example, when the following code is -compiled: - -```compile_fail -#![feature(on_unimplemented)] - -fn foo>(x: T){} - -#[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"] -trait Index { /* ... */ } - -foo(true); // `bool` does not implement `Index` -``` - -there will be an error about `bool` not implementing `Index`, followed by a -note saying "the type `bool` cannot be indexed by `u8`". - -For this to work, some note must be specified. An empty attribute will not do -anything, please remove the attribute or add some helpful note for users of the -trait. -"##, E0275: r##" This error occurs when there was a recursive trait requirement that overflowed @@ -1138,11 +1105,13 @@ already specify all requirements that will be used for every type parameter. "##, E0281: r##" +#### Note: this error code is no longer emitted by the compiler. + You tried to supply a type which doesn't implement some trait in a location which expected that trait. This error typically occurs when working with `Fn`-based types. 
Erroneous code example: -```compile_fail,E0281 +```compile-fail fn foo(x: F) { } fn main() { @@ -1382,38 +1351,6 @@ struct Foo { ``` "##, -E0312: r##" -A lifetime of reference outlives lifetime of borrowed content. - -Erroneous code example: - -```compile_fail,E0312 -fn make_child<'human, 'elve>(x: &mut &'human isize, y: &mut &'elve isize) { - *x = *y; - // error: lifetime of reference outlives lifetime of borrowed content -} -``` - -The compiler cannot determine if the `human` lifetime will live long enough -to keep up on the elve one. To solve this error, you have to give an -explicit lifetime hierarchy: - -``` -fn make_child<'human, 'elve: 'human>(x: &mut &'human isize, - y: &mut &'elve isize) { - *x = *y; // ok! -} -``` - -Or use the same lifetime for every variable: - -``` -fn make_child<'elve>(x: &mut &'elve isize, y: &mut &'elve isize) { - *x = *y; // ok! -} -``` -"##, - E0317: r##" This error occurs when an `if` expression without an `else` block is used in a context where a type other than `()` is expected, for example a `let` @@ -2011,6 +1948,9 @@ register_diagnostics! { // E0102, // replaced with E0282 // E0134, // E0135, +// E0272, // on_unimplemented #0 +// E0273, // on_unimplemented #1 +// E0274, // on_unimplemented #2 E0278, // requirement is not satisfied E0279, // requirement is not satisfied E0280, // requirement is not satisfied @@ -2020,6 +1960,7 @@ register_diagnostics! { // E0304, // expected signed integer constant // E0305, // expected constant E0311, // thing may not live long enough + E0312, // lifetime of reference outlives lifetime of borrowed content E0313, // lifetime of borrowed pointer outlives lifetime of captured variable E0314, // closure outlives stack frame E0315, // cannot invoke closure outside of its lifetime @@ -2045,4 +1986,7 @@ register_diagnostics! { E0495, // cannot infer an appropriate lifetime due to conflicting requirements E0566, // conflicting representation hints E0623, // lifetime mismatch where both parameters are anonymous regions + E0628, // generators cannot have explicit arguments + E0631, // type mismatch in closure arguments + E0637, // "'_" is not a valid lifetime bound } diff --git a/src/librustc/hir/README.md b/src/librustc/hir/README.md new file mode 100644 index 0000000000..c832a897de --- /dev/null +++ b/src/librustc/hir/README.md @@ -0,0 +1,119 @@ +# Introduction to the HIR + +The HIR -- "High-level IR" -- is the primary IR used in most of +rustc. It is a desugared version of the "abstract syntax tree" (AST) +that is generated after parsing, macro expansion, and name resolution +have completed. Many parts of HIR resemble Rust surface syntax quite +closely, with the exception that some of Rust's expression forms have +been desugared away (as an example, `for` loops are converted into a +`loop` and do not appear in the HIR). + +This README covers the main concepts of the HIR. + +### Out-of-band storage and the `Crate` type + +The top-level data-structure in the HIR is the `Crate`, which stores +the contents of the crate currently being compiled (we only ever +construct HIR for the current crate). Whereas in the AST the crate +data structure basically just contains the root module, the HIR +`Crate` structure contains a number of maps and other things that +serve to organize the content of the crate for easier access. + +For example, the contents of individual items (e.g., modules, +functions, traits, impls, etc) in the HIR are not immediately +accessible in the parents. 
So, for example, if we had a module item `foo` +containing a function `bar()`: + +``` +mod foo { + fn bar() { } +} +``` + +Then in the HIR the representation of module `foo` (the `Mod` +struct) would have only the **`ItemId`** `I` of `bar()`. To get the +details of the function `bar()`, we would look up `I` in the +`items` map. + +One nice result from this representation is that one can iterate +over all items in the crate by iterating over the key-value pairs +in these maps (without the need to trawl through the IR in total). +There are similar maps for things like trait items and impl items, +as well as "bodies" (explained below). + +The other reason to set up the representation this way is for better +integration with incremental compilation. This way, if you gain access +to a `&hir::Item` (e.g. for the mod `foo`), you do not immediately +gain access to the contents of the function `bar()`. Instead, you only +gain access to the **id** for `bar()`, and you must invoke some +function to look up the contents of `bar()` given its id; this gives us +a chance to observe that you accessed the data for `bar()` and record +the dependency. + +### Identifiers in the HIR + +Most of the code that has to deal with things in HIR tends not to +carry around references into the HIR, but rather to carry around +*identifier numbers* (or just "ids"). Right now, you will find four +sorts of identifiers in active use: + +- `DefId`, which primarily names "definitions" or top-level items. + - You can think of a `DefId` as being shorthand for a very explicit + and complete path, like `std::collections::HashMap`. However, + these paths are able to name things that are not nameable in + normal Rust (e.g., impls), and they also include extra information + about the crate (such as its version number, as two versions of + the same crate can co-exist). + - A `DefId` really consists of two parts, a `CrateNum` (which + identifies the crate) and a `DefIndex` (which indexes into a list + of items that is maintained per crate). +- `HirId`, which combines the index of a particular item with an + offset within that item. + - the key point of a `HirId` is that it is *relative* to some item (which is named + via a `DefId`). +- `BodyId`, which is an absolute identifier that refers to a specific + body (definition of a function or constant) in the crate. It is currently + effectively a "newtype'd" `NodeId`. +- `NodeId`, which is an absolute id that identifies a single node in the HIR tree. + - While these are still in common use, **they are being slowly phased out**. + - Since they are absolute within the crate, adding a new node + anywhere in the tree causes the node-ids of all subsequent code in + the crate to change. This is terrible for incremental compilation, + as you can perhaps imagine. + +### HIR Map + +Most of the time when you are working with the HIR, you will do so via +the **HIR Map**, accessible in the tcx via `tcx.hir` (and defined in +the `hir::map` module). The HIR map contains a number of methods to +convert between ids of various kinds and to look up data associated +with a HIR node. + +For example, if you have a `DefId`, and you would like to convert it +to a `NodeId`, you can use `tcx.hir.as_local_node_id(def_id)`. This +returns an `Option<NodeId>` -- this will be `None` if the def-id +refers to something outside of the current crate (since then it has no +HIR node), but otherwise returns `Some(n)` where `n` is the node-id of +the definition. + +Similarly, you can use `tcx.hir.find(n)` to look up the node for a +`NodeId`.
This returns an `Option<Node<'hir>>`, where `Node` is an enum +defined in the map; by matching on this you can find out what sort of +node the node-id referred to and also get a pointer to the data +itself. Often, you know what sort of node `n` is -- e.g., if you know +that `n` must be some HIR expression, you can do +`tcx.hir.expect_expr(n)`, which will extract and return the +`&hir::Expr`, panicking if `n` is not in fact an expression. + +Finally, you can use the HIR map to find the parents of nodes, via +calls like `tcx.hir.get_parent_node(n)`. + +### HIR Bodies + +A **body** represents some kind of executable code, such as the body +of a function/closure or the definition of a constant. Bodies are +associated with an **owner**, which is typically some kind of item +(e.g., a `fn()` or `const`), but could also be a closure expression +(e.g., `|x, y| x + y`). You can use the HIR map to find the body +associated with a given def-id (`maybe_body_owned_by()`) or to find +the owner of a body (`body_owner_def_id()`). diff --git a/src/librustc/hir/def.rs b/src/librustc/hir/def.rs index c500d770ce..4e0c6479ab 100644 --- a/src/librustc/hir/def.rs +++ b/src/librustc/hir/def.rs @@ -9,7 +9,7 @@ // except according to those terms. use hir::def_id::DefId; -use util::nodemap::NodeMap; +use util::nodemap::{NodeMap, DefIdMap}; use syntax::ast; use syntax::ext::base::MacroKind; use syntax_pos::Span; @@ -48,8 +48,9 @@ pub enum Def { VariantCtor(DefId, CtorKind), Method(DefId), AssociatedConst(DefId), - Local(DefId), - Upvar(DefId, // def id of closed over local + + Local(ast::NodeId), + Upvar(ast::NodeId, // node id of closed over local usize, // index in the freevars list of the closure ast::NodeId), // expr node that creates the closure Label(ast::NodeId), @@ -114,7 +115,7 @@ pub type DefMap = NodeMap; /// This is the replacement export map. It maps a module to all of the exports /// within. -pub type ExportMap = NodeMap>; +pub type ExportMap = DefIdMap>; #[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)] pub struct Export { @@ -150,11 +151,13 @@ impl Def { Def::Variant(id) | Def::VariantCtor(id, ..) | Def::Enum(id) | Def::TyAlias(id) | Def::AssociatedTy(id) | Def::TyParam(id) | Def::Struct(id) | Def::StructCtor(id, ..) | Def::Union(id) | Def::Trait(id) | Def::Method(id) | Def::Const(id) | - Def::AssociatedConst(id) | Def::Local(id) | Def::Upvar(id, ..) | Def::Macro(id, ..) | + Def::AssociatedConst(id) | Def::Macro(id, ..) | Def::GlobalAsm(id) => { id } + Def::Local(..) | + Def::Upvar(..) | Def::Label(..) | Def::PrimTy(..) | Def::SelfTy(..) | diff --git a/src/librustc/hir/def_id.rs b/src/librustc/hir/def_id.rs index 7f76e1bf77..69d23504cd 100644 --- a/src/librustc/hir/def_id.rs +++ b/src/librustc/hir/def_id.rs @@ -93,10 +93,30 @@ impl serialize::UseSpecializedDecodable for CrateNum { /// /// Since the DefIndex is mostly treated as an opaque ID, you probably /// don't have to care about these ranges.
-#[derive(Clone, Debug, Eq, Ord, PartialOrd, PartialEq, RustcEncodable, +#[derive(Clone, Eq, Ord, PartialOrd, PartialEq, RustcEncodable, RustcDecodable, Hash, Copy)] pub struct DefIndex(u32); +impl Idx for DefIndex { + fn new(value: usize) -> Self { + assert!(value < (u32::MAX) as usize); + DefIndex(value as u32) + } + + fn index(self) -> usize { + self.0 as usize + } +} + +impl fmt::Debug for DefIndex { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, + "DefIndex({}:{})", + self.address_space().index(), + self.as_array_index()) + } +} + impl DefIndex { #[inline] pub fn new(x: usize) -> DefIndex { @@ -177,12 +197,12 @@ pub struct DefId { impl fmt::Debug for DefId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "DefId {{ krate: {:?}, node: {:?}", + write!(f, "DefId {{ krate: {:?}, index: {:?}", self.krate, self.index)?; ty::tls::with_opt(|opt_tcx| { if let Some(tcx) = opt_tcx { - write!(f, " => {}", tcx.def_path(*self).to_string(tcx))?; + write!(f, " => {}", tcx.def_path_debug_str(*self))?; } Ok(()) })?; diff --git a/src/librustc/hir/intravisit.rs b/src/librustc/hir/intravisit.rs index d52cc26eae..1755b3bca0 100644 --- a/src/librustc/hir/intravisit.rs +++ b/src/librustc/hir/intravisit.rs @@ -27,11 +27,19 @@ //! for more details. //! //! Note: it is an important invariant that the default visitor walks -//! the body of a function in "execution order" (more concretely, -//! reverse post-order with respect to the CFG implied by the AST), -//! meaning that if AST node A may execute before AST node B, then A -//! is visited first. The borrow checker in particular relies on this -//! property. +//! the body of a function in "execution order" - more concretely, if +//! we consider the reverse post-order (RPO) of the CFG implied by the HIR, +//! then a pre-order traversal of the HIR is consistent with the CFG RPO +//! on the *initial CFG point* of each HIR node, while a post-order traversal +//! of the HIR is consistent with the CFG RPO on each *final CFG point* of +//! each CFG node. +//! +//! One thing that follows is that if HIR node A always starts/ends executing +//! before HIR node B, then A appears in traversal pre/postorder before B, +//! respectively. (This follows from RPO respecting CFG domination). +//! +//! This order consistency is required in a few places in rustc, for +//! example generator inference, and possibly also HIR borrowck. use syntax::abi::Abi; use syntax::ast::{NodeId, CRATE_NODE_ID, Name, Attribute}; @@ -403,15 +411,23 @@ pub fn walk_body<'v, V: Visitor<'v>>(visitor: &mut V, body: &'v Body) { } pub fn walk_local<'v, V: Visitor<'v>>(visitor: &mut V, local: &'v Local) { + // Intentionally visiting the expr first - the initialization expr + // dominates the local's definition. 
+ walk_list!(visitor, visit_expr, &local.init); + visitor.visit_id(local.id); visitor.visit_pat(&local.pat); walk_list!(visitor, visit_ty, &local.ty); - walk_list!(visitor, visit_expr, &local.init); } pub fn walk_lifetime<'v, V: Visitor<'v>>(visitor: &mut V, lifetime: &'v Lifetime) { visitor.visit_id(lifetime.id); - visitor.visit_name(lifetime.span, lifetime.name); + match lifetime.name { + LifetimeName::Name(name) => { + visitor.visit_name(lifetime.span, name); + } + LifetimeName::Static | LifetimeName::Implicit | LifetimeName::Underscore => {} + } } pub fn walk_lifetime_def<'v, V: Visitor<'v>>(visitor: &mut V, lifetime_def: &'v LifetimeDef) { @@ -611,7 +627,9 @@ pub fn walk_path_segment<'v, V: Visitor<'v>>(visitor: &mut V, path_span: Span, segment: &'v PathSegment) { visitor.visit_name(path_span, segment.name); - visitor.visit_path_parameters(path_span, &segment.parameters); + if let Some(ref parameters) = segment.parameters { + visitor.visit_path_parameters(path_span, parameters); + } } pub fn walk_path_parameters<'v, V: Visitor<'v>>(visitor: &mut V, @@ -653,8 +671,8 @@ pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v Pat) { PatKind::Ref(ref subpattern, _) => { visitor.visit_pat(subpattern) } - PatKind::Binding(_, def_id, ref pth1, ref optional_subpattern) => { - visitor.visit_def_mention(Def::Local(def_id)); + PatKind::Binding(_, canonical_id, ref pth1, ref optional_subpattern) => { + visitor.visit_def_mention(Def::Local(canonical_id)); visitor.visit_name(pth1.span, pth1.node); walk_list!(visitor, visit_pat, optional_subpattern); } @@ -979,7 +997,7 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr) { visitor.visit_expr(subexpression); walk_list!(visitor, visit_arm, arms); } - ExprClosure(_, ref function_declaration, body, _fn_decl_span) => { + ExprClosure(_, ref function_declaration, body, _fn_decl_span, _gen) => { visitor.visit_fn(FnKind::Closure(&expression.attrs), function_declaration, body, @@ -1043,6 +1061,9 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr) { visitor.visit_expr(input) } } + ExprYield(ref subexpression) => { + visitor.visit_expr(subexpression); + } } } diff --git a/src/librustc/hir/lowering.rs b/src/librustc/hir/lowering.rs index ac26cbc833..64a2ba1fa6 100644 --- a/src/librustc/hir/lowering.rs +++ b/src/librustc/hir/lowering.rs @@ -40,12 +40,13 @@ //! get confused if the spans from leaf AST nodes occur in multiple places //! in the HIR, especially for multiple identifiers. 
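The `walk_local` change above visits the initializer before the pattern, mirroring how a `let` statement actually executes: the initializer runs while only the enclosing bindings are in scope, and the new binding only comes into existence afterwards. As a minimal surface-level illustration (ordinary Rust, not compiler code):

```rust
fn main() {
    let x = 1;
    // In `let <pat> = <init>;` the initializer is evaluated first,
    // before the new binding exists, so the `x` on the right-hand
    // side below still refers to the outer `x`, whose value is 1.
    let x = x + 1;
    assert_eq!(x, 2);
}
```

Keeping the visitor in this order preserves the execution-order consistency that, as the intravisit comment notes, generator inference and possibly HIR borrowck rely on.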
+use dep_graph::DepGraph; use hir; -use hir::map::{Definitions, DefKey, REGULAR_SPACE}; -use hir::map::definitions::DefPathData; +use hir::map::{Definitions, DefKey}; use hir::def_id::{DefIndex, DefId, CRATE_DEF_INDEX}; use hir::def::{Def, PathResolution}; use lint::builtin::PARENTHESIZED_PARAMS_IN_TYPES_AND_MODULES; +use middle::cstore::CrateStore; use rustc_data_structures::indexed_vec::IndexVec; use session::Session; use util::common::FN_OUTPUT_NAME; @@ -63,6 +64,8 @@ use syntax::ptr::P; use syntax::codemap::{self, respan, Spanned, CompilerDesugaringKind}; use syntax::std_inject; use syntax::symbol::{Symbol, keywords}; +use syntax::tokenstream::{TokenStream, TokenTree, Delimited}; +use syntax::parse::token::Token; use syntax::util::small_vector::SmallVector; use syntax::visit::{self, Visitor}; use syntax_pos::Span; @@ -75,6 +78,8 @@ pub struct LoweringContext<'a> { // Use to assign ids to hir nodes that do not directly correspond to an ast node sess: &'a Session, + cstore: &'a CrateStore, + // As we walk the AST we must keep track of the current 'parent' def id (in // the form of a DefIndex) so that if we create a new node which introduces // a definition, then we can properly create the def id. @@ -93,6 +98,8 @@ pub struct LoweringContext<'a> { trait_impls: BTreeMap>, trait_default_impl: BTreeMap, + is_generator: bool, + catch_scopes: Vec, loop_scopes: Vec, is_in_loop_condition: bool, @@ -117,17 +124,20 @@ pub trait Resolver { } pub fn lower_crate(sess: &Session, + cstore: &CrateStore, + dep_graph: &DepGraph, krate: &Crate, resolver: &mut Resolver) -> hir::Crate { // We're constructing the HIR here; we don't care what we will // read, since we haven't even constructed the *input* to // incr. comp. yet. - let _ignore = sess.dep_graph.in_ignore(); + let _ignore = dep_graph.in_ignore(); LoweringContext { crate_root: std_inject::injected_crate_name(krate), sess, + cstore, parent_def: None, resolver, name_map: FxHashMap(), @@ -145,6 +155,7 @@ pub fn lower_crate(sess: &Session, current_hir_id_owner: vec![(CRATE_DEF_INDEX, 0)], item_local_id_counters: NodeMap(), node_id_to_hir_id: IndexVec::new(), + is_generator: false, }.lower_crate(krate) } @@ -393,6 +404,7 @@ impl<'a> LoweringContext<'a> { arguments: decl.map_or(hir_vec![], |decl| { decl.inputs.iter().map(|x| self.lower_arg(x)).collect() }), + is_generator: self.is_generator, value, }; let id = body.id(); @@ -421,8 +433,7 @@ impl<'a> LoweringContext<'a> { Symbol::gensym(s) } - fn allow_internal_unstable(&self, reason: CompilerDesugaringKind, mut span: Span) - -> Span + fn allow_internal_unstable(&self, reason: CompilerDesugaringKind, span: Span) -> Span { let mark = Mark::fresh(Mark::root()); mark.set_expn_info(codemap::ExpnInfo { @@ -434,8 +445,7 @@ impl<'a> LoweringContext<'a> { allow_internal_unsafe: false, }, }); - span.ctxt = SyntaxContext::empty().apply_mark(mark); - span + span.with_ctxt(SyntaxContext::empty().apply_mark(mark)) } fn with_catch_scope(&mut self, catch_id: NodeId, f: F) -> T @@ -453,6 +463,16 @@ impl<'a> LoweringContext<'a> { result } + fn lower_body(&mut self, decl: Option<&FnDecl>, f: F) -> hir::BodyId + where F: FnOnce(&mut LoweringContext) -> hir::Expr + { + let prev = mem::replace(&mut self.is_generator, false); + let result = f(self); + let r = self.record_body(result, decl); + self.is_generator = prev; + return r + } + fn with_loop_scope(&mut self, loop_id: NodeId, f: F) -> T where F: FnOnce(&mut LoweringContext) -> T { @@ -523,7 +543,7 @@ impl<'a> LoweringContext<'a> { if id.is_local() { 
self.resolver.definitions().def_key(id.index) } else { - self.sess.cstore.def_key(id) + self.cstore.def_key(id) } } @@ -571,7 +591,48 @@ impl<'a> LoweringContext<'a> { } fn lower_attrs(&mut self, attrs: &Vec) -> hir::HirVec { - attrs.clone().into() + attrs.iter().map(|a| self.lower_attr(a)).collect::>().into() + } + + fn lower_attr(&mut self, attr: &Attribute) -> Attribute { + Attribute { + id: attr.id, + style: attr.style, + path: attr.path.clone(), + tokens: self.lower_token_stream(attr.tokens.clone()), + is_sugared_doc: attr.is_sugared_doc, + span: attr.span, + } + } + + fn lower_token_stream(&mut self, tokens: TokenStream) -> TokenStream { + tokens.into_trees() + .flat_map(|tree| self.lower_token_tree(tree).into_trees()) + .collect() + } + + fn lower_token_tree(&mut self, tree: TokenTree) -> TokenStream { + match tree { + TokenTree::Token(span, token) => { + self.lower_token(token, span) + } + TokenTree::Delimited(span, delimited) => { + TokenTree::Delimited(span, Delimited { + delim: delimited.delim, + tts: self.lower_token_stream(delimited.tts.into()).into(), + }).into() + } + } + } + + fn lower_token(&mut self, token: Token, span: Span) -> TokenStream { + match token { + Token::Interpolated(_) => {} + other => return TokenTree::Token(span, other).into(), + } + + let tts = token.interpolated_to_tokenstream(&self.sess.parse_sess, span); + self.lower_token_stream(tts) } fn lower_arm(&mut self, arm: &Arm) -> hir::Arm { @@ -599,7 +660,7 @@ impl<'a> LoweringContext<'a> { TyKind::Slice(ref ty) => hir::TySlice(self.lower_ty(ty)), TyKind::Ptr(ref mt) => hir::TyPtr(self.lower_mt(mt)), TyKind::Rptr(ref region, ref mt) => { - let span = Span { hi: t.span.lo, ..t.span }; + let span = t.span.with_hi(t.span.lo()); let lifetime = match *region { Some(ref lt) => self.lower_lifetime(lt), None => self.elided_lifetime(span) @@ -612,6 +673,7 @@ impl<'a> LoweringContext<'a> { unsafety: self.lower_unsafety(f.unsafety), abi: f.abi, decl: self.lower_fn_decl(&f.decl), + arg_names: self.lower_fn_args_to_names(&f.decl), })) } TyKind::Never => hir::TyNever, @@ -622,28 +684,26 @@ impl<'a> LoweringContext<'a> { return self.lower_ty(ty); } TyKind::Path(ref qself, ref path) => { - let id = self.lower_node_id(t.id).node_id; + let id = self.lower_node_id(t.id); let qpath = self.lower_qpath(t.id, qself, path, ParamMode::Explicit); return self.ty_path(id, t.span, qpath); } TyKind::ImplicitSelf => { hir::TyPath(hir::QPath::Resolved(None, P(hir::Path { def: self.expect_full_def(t.id), - segments: hir_vec![hir::PathSegment { - name: keywords::SelfType.name(), - parameters: hir::PathParameters::none() - }], + segments: hir_vec![ + hir::PathSegment::from_name(keywords::SelfType.name()) + ], span: t.span, }))) } TyKind::Array(ref ty, ref length) => { - let length = self.lower_expr(length); - hir::TyArray(self.lower_ty(ty), - self.record_body(length, None)) + let length = self.lower_body(None, |this| this.lower_expr(length)); + hir::TyArray(self.lower_ty(ty), length) } TyKind::Typeof(ref expr) => { - let expr = self.lower_expr(expr); - hir::TyTypeof(self.record_body(expr, None)) + let expr = self.lower_body(None, |this| this.lower_expr(expr)); + hir::TyTypeof(expr) } TyKind::TraitObject(ref bounds) => { let mut lifetime_bound = None; @@ -672,10 +732,12 @@ impl<'a> LoweringContext<'a> { TyKind::Mac(_) => panic!("TyMac should have been expanded by now."), }; + let LoweredNodeId { node_id, hir_id } = self.lower_node_id(t.id); P(hir::Ty { - id: self.lower_node_id(t.id).node_id, + id: node_id, node: kind, span: t.span, + hir_id, 
}) } @@ -700,8 +762,7 @@ impl<'a> LoweringContext<'a> { attrs: self.lower_attrs(&v.node.attrs), data: self.lower_variant_data(&v.node.data), disr_expr: v.node.disr_expr.as_ref().map(|e| { - let e = self.lower_expr(e); - self.record_body(e, None) + self.lower_body(None, |this| this.lower_expr(e)) }), }, span: v.span, @@ -777,7 +838,7 @@ impl<'a> LoweringContext<'a> { return n; } assert!(!def_id.is_local()); - let n = self.sess.cstore.item_generics_cloned(def_id).regions.len(); + let n = self.cstore.item_generics_cloned_untracked(def_id).regions.len(); self.type_def_lifetime_params.insert(def_id, n); n }); @@ -802,7 +863,7 @@ impl<'a> LoweringContext<'a> { // Otherwise, the base path is an implicit `Self` type path, // e.g. `Vec` in `Vec::new` or `::Item` in // `::Item::default`. - let new_id = self.next_id().node_id; + let new_id = self.next_id(); self.ty_path(new_id, p.span, hir::QPath::Resolved(qself, path)) }; @@ -827,7 +888,7 @@ impl<'a> LoweringContext<'a> { } // Wrap the associated extension in another type node. - let new_id = self.next_id().node_id; + let new_id = self.next_id(); ty = self.ty_path(new_id, p.span, qpath); } @@ -853,12 +914,8 @@ impl<'a> LoweringContext<'a> { segments: segments.map(|segment| { self.lower_path_segment(p.span, segment, param_mode, 0, ParenthesizedGenericArgs::Err) - }).chain(name.map(|name| { - hir::PathSegment { - name, - parameters: hir::PathParameters::none() - } - })).collect(), + }).chain(name.map(|name| hir::PathSegment::from_name(name))) + .collect(), span: p.span, } } @@ -879,7 +936,7 @@ impl<'a> LoweringContext<'a> { expected_lifetimes: usize, parenthesized_generic_args: ParenthesizedGenericArgs) -> hir::PathSegment { - let mut parameters = if let Some(ref parameters) = segment.parameters { + let (mut parameters, infer_types) = if let Some(ref parameters) = segment.parameters { let msg = "parenthesized parameters may only be used with a trait"; match **parameters { PathParameters::AngleBracketed(ref data) => { @@ -890,12 +947,12 @@ impl<'a> LoweringContext<'a> { ParenthesizedGenericArgs::Warn => { self.sess.buffer_lint(PARENTHESIZED_PARAMS_IN_TYPES_AND_MODULES, CRATE_NODE_ID, data.span, msg.into()); - hir::PathParameters::none() + (hir::PathParameters::none(), true) } ParenthesizedGenericArgs::Err => { struct_span_err!(self.sess, data.span, E0214, "{}", msg) .span_label(data.span, "only traits may use parentheses").emit(); - hir::PathParameters::none() + (hir::PathParameters::none(), true) } } } @@ -909,39 +966,39 @@ impl<'a> LoweringContext<'a> { }).collect(); } - hir::PathSegment { - name: self.lower_ident(segment.identifier), + hir::PathSegment::new( + self.lower_ident(segment.identifier), parameters, - } + infer_types + ) } fn lower_angle_bracketed_parameter_data(&mut self, data: &AngleBracketedParameterData, param_mode: ParamMode) - -> hir::PathParameters { + -> (hir::PathParameters, bool) { let &AngleBracketedParameterData { ref lifetimes, ref types, ref bindings, .. 
} = data; - hir::PathParameters { + (hir::PathParameters { lifetimes: self.lower_lifetimes(lifetimes), types: types.iter().map(|ty| self.lower_ty(ty)).collect(), - infer_types: types.is_empty() && param_mode == ParamMode::Optional, bindings: bindings.iter().map(|b| self.lower_ty_binding(b)).collect(), parenthesized: false, - } + }, types.is_empty() && param_mode == ParamMode::Optional) } fn lower_parenthesized_parameter_data(&mut self, data: &ParenthesizedParameterData) - -> hir::PathParameters { + -> (hir::PathParameters, bool) { let &ParenthesizedParameterData { ref inputs, ref output, span } = data; let inputs = inputs.iter().map(|ty| self.lower_ty(ty)).collect(); let mk_tup = |this: &mut Self, tys, span| { - P(hir::Ty { node: hir::TyTup(tys), id: this.next_id().node_id, span }) + let LoweredNodeId { node_id, hir_id } = this.next_id(); + P(hir::Ty { node: hir::TyTup(tys), id: node_id, hir_id, span }) }; - hir::PathParameters { + (hir::PathParameters { lifetimes: hir::HirVec::new(), types: hir_vec![mk_tup(self, inputs, span)], - infer_types: false, bindings: hir_vec![hir::TypeBinding { id: self.next_id().node_id, name: Symbol::intern(FN_OUTPUT_NAME), @@ -950,7 +1007,7 @@ impl<'a> LoweringContext<'a> { span: output.as_ref().map_or(span, |ty| ty.span), }], parenthesized: true, - } + }, false) } fn lower_local(&mut self, l: &Local) -> P { @@ -1047,6 +1104,10 @@ impl<'a> LoweringContext<'a> { default: tp.default.as_ref().map(|x| self.lower_ty(x)), span: tp.span, pure_wrt_drop: tp.attrs.iter().any(|attr| attr.check_name("may_dangle")), + synthetic: tp.attrs.iter() + .filter(|attr| attr.check_name("rustc_synthetic")) + .map(|_| hir::SyntheticTyParamKind::ImplTrait) + .nth(0), } } @@ -1060,7 +1121,11 @@ impl<'a> LoweringContext<'a> { fn lower_lifetime(&mut self, l: &Lifetime) -> hir::Lifetime { hir::Lifetime { id: self.lower_node_id(l.id).node_id, - name: self.lower_ident(l.ident), + name: match self.lower_ident(l.ident) { + x if x == "'_" => hir::LifetimeName::Underscore, + x if x == "'static" => hir::LifetimeName::Static, + name => hir::LifetimeName::Name(name), + }, span: l.span, } } @@ -1225,7 +1290,7 @@ impl<'a> LoweringContext<'a> { name: self.lower_ident(match f.ident { Some(ident) => ident, // FIXME(jseyfried) positional field hygiene - None => Ident { name: Symbol::intern(&index.to_string()), ctxt: f.span.ctxt }, + None => Ident { name: Symbol::intern(&index.to_string()), ctxt: f.span.ctxt() }, }), vis: self.lower_visibility(&f.vis, None), ty: self.lower_ty(&f.ty), @@ -1368,21 +1433,21 @@ impl<'a> LoweringContext<'a> { hir::ItemUse(path, kind) } ItemKind::Static(ref t, m, ref e) => { - let value = self.lower_expr(e); + let value = self.lower_body(None, |this| this.lower_expr(e)); hir::ItemStatic(self.lower_ty(t), self.lower_mutability(m), - self.record_body(value, None)) + value) } ItemKind::Const(ref t, ref e) => { - let value = self.lower_expr(e); - hir::ItemConst(self.lower_ty(t), - self.record_body(value, None)) + let value = self.lower_body(None, |this| this.lower_expr(e)); + hir::ItemConst(self.lower_ty(t), value) } ItemKind::Fn(ref decl, unsafety, constness, abi, ref generics, ref body) => { self.with_new_scopes(|this| { - let body = this.lower_block(body, false); - let body = this.expr_block(body, ThinVec::new()); - let body_id = this.record_body(body, Some(decl)); + let body_id = this.lower_body(Some(decl), |this| { + let body = this.lower_block(body, false); + this.expr_block(body, ThinVec::new()) + }); hir::ItemFn(this.lower_fn_decl(decl), this.lower_unsafety(unsafety), 
this.lower_constness(constness), @@ -1478,8 +1543,7 @@ impl<'a> LoweringContext<'a> { TraitItemKind::Const(ref ty, ref default) => { hir::TraitItemKind::Const(this.lower_ty(ty), default.as_ref().map(|x| { - let value = this.lower_expr(x); - this.record_body(value, None) + this.lower_body(None, |this| this.lower_expr(x)) })) } TraitItemKind::Method(ref sig, None) => { @@ -1488,9 +1552,10 @@ impl<'a> LoweringContext<'a> { hir::TraitMethod::Required(names)) } TraitItemKind::Method(ref sig, Some(ref body)) => { - let body = this.lower_block(body, false); - let expr = this.expr_block(body, ThinVec::new()); - let body_id = this.record_body(expr, Some(&sig.decl)); + let body_id = this.lower_body(Some(&sig.decl), |this| { + let body = this.lower_block(body, false); + this.expr_block(body, ThinVec::new()) + }); hir::TraitItemKind::Method(this.lower_method_sig(sig), hir::TraitMethod::Provided(body_id)) } @@ -1542,14 +1607,14 @@ impl<'a> LoweringContext<'a> { defaultness: this.lower_defaultness(i.defaultness, true /* [1] */), node: match i.node { ImplItemKind::Const(ref ty, ref expr) => { - let value = this.lower_expr(expr); - let body_id = this.record_body(value, None); + let body_id = this.lower_body(None, |this| this.lower_expr(expr)); hir::ImplItemKind::Const(this.lower_ty(ty), body_id) } ImplItemKind::Method(ref sig, ref body) => { - let body = this.lower_block(body, false); - let expr = this.expr_block(body, ThinVec::new()); - let body_id = this.record_body(expr, Some(&sig.decl)); + let body_id = this.lower_body(Some(&sig.decl), |this| { + let body = this.lower_block(body, false); + this.expr_block(body, ThinVec::new()) + }); hir::ImplItemKind::Method(this.lower_method_sig(sig), body_id) } ImplItemKind::Type(ref ty) => hir::ImplItemKind::Type(this.lower_ty(ty)), @@ -1609,13 +1674,14 @@ impl<'a> LoweringContext<'a> { let attrs = self.lower_attrs(&i.attrs); if let ItemKind::MacroDef(ref def) = i.node { if !def.legacy || i.attrs.iter().any(|attr| attr.path == "macro_export") { + let body = self.lower_token_stream(def.stream()); self.exported_macros.push(hir::MacroDef { name, vis, attrs, id: i.id, span: i.span, - body: def.stream(), + body, legacy: def.legacy, }); } @@ -1728,29 +1794,28 @@ impl<'a> LoweringContext<'a> { node: match p.node { PatKind::Wild => hir::PatKind::Wild, PatKind::Ident(ref binding_mode, pth1, ref sub) => { - self.with_parent_def(p.id, |this| { - match this.resolver.get_resolution(p.id).map(|d| d.base_def()) { - // `None` can occur in body-less function signatures - def @ None | def @ Some(Def::Local(_)) => { - let def_id = def.map(|d| d.def_id()).unwrap_or_else(|| { - this.resolver.definitions().local_def_id(p.id) - }); - hir::PatKind::Binding(this.lower_binding_mode(binding_mode), - def_id, - respan(pth1.span, pth1.node.name), - sub.as_ref().map(|x| this.lower_pat(x))) - } - Some(def) => { - hir::PatKind::Path(hir::QPath::Resolved(None, P(hir::Path { - span: pth1.span, - def, - segments: hir_vec![ - hir::PathSegment::from_name(pth1.node.name) - ], - }))) - } + match self.resolver.get_resolution(p.id).map(|d| d.base_def()) { + // `None` can occur in body-less function signatures + def @ None | def @ Some(Def::Local(_)) => { + let canonical_id = match def { + Some(Def::Local(id)) => id, + _ => p.id + }; + hir::PatKind::Binding(self.lower_binding_mode(binding_mode), + canonical_id, + respan(pth1.span, pth1.node.name), + sub.as_ref().map(|x| self.lower_pat(x))) } - }) + Some(def) => { + hir::PatKind::Path(hir::QPath::Resolved(None, P(hir::Path { + span: pth1.span, + def, + 
segments: hir_vec![ + hir::PathSegment::from_name(pth1.node.name) + ], + }))) + } + } } PatKind::Lit(ref e) => hir::PatKind::Lit(P(self.lower_expr(e))), PatKind::TupleStruct(ref path, ref pats, ddpos) => { @@ -1804,7 +1869,7 @@ impl<'a> LoweringContext<'a> { fn lower_range_end(&mut self, e: &RangeEnd) -> hir::RangeEnd { match *e { - RangeEnd::Included => hir::RangeEnd::Included, + RangeEnd::Included(_) => hir::RangeEnd::Included, RangeEnd::Excluded => hir::RangeEnd::Excluded, } } @@ -1928,8 +1993,8 @@ impl<'a> LoweringContext<'a> { } ExprKind::Repeat(ref expr, ref count) => { let expr = P(self.lower_expr(expr)); - let count = self.lower_expr(count); - hir::ExprRepeat(expr, self.record_body(count, None)) + let count = self.lower_body(None, |this| this.lower_expr(count)); + hir::ExprRepeat(expr, count) } ExprKind::Tup(ref elts) => { hir::ExprTup(elts.iter().map(|x| self.lower_expr(x)).collect()) @@ -2027,11 +2092,22 @@ impl<'a> LoweringContext<'a> { ExprKind::Closure(capture_clause, ref decl, ref body, fn_decl_span) => { self.with_new_scopes(|this| { this.with_parent_def(e.id, |this| { - let expr = this.lower_expr(body); + let mut is_generator = false; + let body_id = this.lower_body(Some(decl), |this| { + let e = this.lower_expr(body); + is_generator = this.is_generator; + e + }); + if is_generator && !decl.inputs.is_empty() { + span_err!(this.sess, fn_decl_span, E0628, + "generators cannot have explicit arguments"); + this.sess.abort_if_errors(); + } hir::ExprClosure(this.lower_capture_clause(capture_clause), this.lower_fn_decl(decl), - this.record_body(expr, Some(decl)), - fn_decl_span) + body_id, + fn_decl_span, + is_generator) }) }) } @@ -2172,6 +2248,14 @@ impl<'a> LoweringContext<'a> { return ex; } + ExprKind::Yield(ref opt_expr) => { + self.is_generator = true; + let expr = opt_expr.as_ref().map(|x| self.lower_expr(x)).unwrap_or_else(|| { + self.expr(e.span, hir::ExprTup(hir_vec![]), ThinVec::new()) + }); + hir::ExprYield(P(expr)) + } + // Desugar ExprIfLet // From: `if let = []` ExprKind::IfLet(ref pat, ref sub_expr, ref body, ref else_opt) => { @@ -2686,14 +2770,9 @@ impl<'a> LoweringContext<'a> { id: Name, binding: NodeId, attrs: ThinVec) -> hir::Expr { - let def = { - let defs = self.resolver.definitions(); - Def::Local(defs.local_def_id(binding)) - }; - let expr_path = hir::ExprPath(hir::QPath::Resolved(None, P(hir::Path { span, - def, + def: Def::Local(binding), segments: hir_vec![hir::PathSegment::from_name(id)], }))); @@ -2831,23 +2910,12 @@ impl<'a> LoweringContext<'a> { fn pat_ident_binding_mode(&mut self, span: Span, name: Name, bm: hir::BindingAnnotation) -> P { let LoweredNodeId { node_id, hir_id } = self.next_id(); - let parent_def = self.parent_def.unwrap(); - let def_id = { - let defs = self.resolver.definitions(); - let def_path_data = DefPathData::Binding(name); - let def_index = defs.create_def_with_parent(parent_def, - node_id, - def_path_data, - REGULAR_SPACE, - Mark::root()); - DefId::local(def_index) - }; P(hir::Pat { id: node_id, hir_id, node: hir::PatKind::Binding(bm, - def_id, + node_id, Spanned { span, node: name, @@ -2908,7 +2976,7 @@ impl<'a> LoweringContext<'a> { self.expr_block(block, attrs) } - fn ty_path(&mut self, id: NodeId, span: Span, qpath: hir::QPath) -> P { + fn ty_path(&mut self, id: LoweredNodeId, span: Span, qpath: hir::QPath) -> P { let mut id = id; let node = match qpath { hir::QPath::Resolved(None, path) => { @@ -2918,14 +2986,14 @@ impl<'a> LoweringContext<'a> { bound_lifetimes: hir_vec![], trait_ref: hir::TraitRef { path: 
path.and_then(|path| path), - ref_id: id, + ref_id: id.node_id, }, span, }; // The original ID is taken by the `PolyTraitRef`, // so the `Ty` itself needs a different one. - id = self.next_id().node_id; + id = self.next_id(); hir::TyTraitObject(hir_vec![principal], self.elided_lifetime(span)) } else { @@ -2934,14 +3002,14 @@ impl<'a> LoweringContext<'a> { } _ => hir::TyPath(qpath) }; - P(hir::Ty { id, node, span }) + P(hir::Ty { id: id.node_id, hir_id: id.hir_id, node, span }) } fn elided_lifetime(&mut self, span: Span) -> hir::Lifetime { hir::Lifetime { id: self.next_id().node_id, span, - name: keywords::Invalid.name() + name: hir::LifetimeName::Implicit, } } } diff --git a/src/librustc/hir/map/README.md b/src/librustc/hir/map/README.md new file mode 100644 index 0000000000..34ed325705 --- /dev/null +++ b/src/librustc/hir/map/README.md @@ -0,0 +1,4 @@ +The HIR map, accessible via `tcx.hir`, allows you to quickly navigate the +HIR and convert between various forms of identifiers. See [the HIR README] for more information. + +[the HIR README]: ../README.md diff --git a/src/librustc/hir/map/blocks.rs b/src/librustc/hir/map/blocks.rs index 1b7eb15856..d2888dcf6a 100644 --- a/src/librustc/hir/map/blocks.rs +++ b/src/librustc/hir/map/blocks.rs @@ -264,7 +264,7 @@ impl<'a> FnLikeNode<'a> { } }, map::NodeExpr(e) => match e.node { - ast::ExprClosure(_, ref decl, block, _fn_decl_span) => + ast::ExprClosure(_, ref decl, block, _fn_decl_span, _gen) => closure(ClosureParts::new(&decl, block, e.id, e.span, &e.attrs)), _ => bug!("expr FnLikeNode that is not fn-like"), }, diff --git a/src/librustc/hir/map/collector.rs b/src/librustc/hir/map/collector.rs index a54068c648..80fadcda27 100644 --- a/src/librustc/hir/map/collector.rs +++ b/src/librustc/hir/map/collector.rs @@ -16,6 +16,9 @@ use std::iter::repeat; use syntax::ast::{NodeId, CRATE_NODE_ID}; use syntax_pos::Span; +use ich::StableHashingContext; +use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult}; + /// A Visitor that walks over the HIR and collects Nodes into a HIR map pub(super) struct NodeCollector<'a, 'hir> { /// The crate @@ -25,37 +28,113 @@ pub(super) struct NodeCollector<'a, 'hir> { /// The parent of this node parent_node: NodeId, + // These fields keep track of the currently relevant DepNodes during + // the visitor's traversal. current_dep_node_owner: DefIndex, - current_dep_node_index: DepNodeIndex, + current_signature_dep_index: DepNodeIndex, + current_full_dep_index: DepNodeIndex, + currently_in_body: bool, dep_graph: &'a DepGraph, definitions: &'a definitions::Definitions, + + hcx: StableHashingContext<'a>, + + // We are collecting DepNode::HirBody hashes here so we can compute the + // crate hash from then later on. + hir_body_nodes: Vec, } impl<'a, 'hir> NodeCollector<'a, 'hir> { pub(super) fn root(krate: &'hir Crate, - dep_graph: &'a DepGraph, - definitions: &'a definitions::Definitions) + dep_graph: &'a DepGraph, + definitions: &'a definitions::Definitions, + hcx: StableHashingContext<'a>) -> NodeCollector<'a, 'hir> { let root_mod_def_path_hash = definitions.def_path_hash(CRATE_DEF_INDEX); - let root_mod_dep_node = root_mod_def_path_hash.to_dep_node(DepKind::Hir); - let root_mod_dep_node_index = dep_graph.alloc_input_node(root_mod_dep_node); + + // Allocate DepNodes for the root module + let (root_mod_sig_dep_index, root_mod_full_dep_index); + { + let Crate { + ref module, + // Crate attributes are not copied over to the root `Mod`, so hash + // them explicitly here. 
+ ref attrs, + span, + + // These fields are handled separately: + exported_macros: _, + items: _, + trait_items: _, + impl_items: _, + bodies: _, + trait_impls: _, + trait_default_impl: _, + body_ids: _, + } = *krate; + + root_mod_sig_dep_index = dep_graph.with_task( + root_mod_def_path_hash.to_dep_node(DepKind::Hir), + &hcx, + HirItemLike { item_like: (module, attrs, span), hash_bodies: false }, + identity_fn + ).1; + root_mod_full_dep_index = dep_graph.with_task( + root_mod_def_path_hash.to_dep_node(DepKind::HirBody), + &hcx, + HirItemLike { item_like: (module, attrs, span), hash_bodies: true }, + identity_fn + ).1; + } + + { + dep_graph.with_task( + DepNode::new_no_params(DepKind::AllLocalTraitImpls), + &hcx, + &krate.trait_impls, + identity_fn + ); + } + + let hir_body_nodes = vec![root_mod_def_path_hash]; let mut collector = NodeCollector { krate, map: vec![], parent_node: CRATE_NODE_ID, - current_dep_node_index: root_mod_dep_node_index, + current_signature_dep_index: root_mod_sig_dep_index, + current_full_dep_index: root_mod_full_dep_index, current_dep_node_owner: CRATE_DEF_INDEX, + currently_in_body: false, dep_graph, definitions, + hcx, + hir_body_nodes, }; - collector.insert_entry(CRATE_NODE_ID, RootCrate(root_mod_dep_node_index)); + collector.insert_entry(CRATE_NODE_ID, RootCrate(root_mod_sig_dep_index)); collector } - pub(super) fn into_map(self) -> Vec> { + pub(super) fn finalize_and_compute_crate_hash(self, + crate_disambiguator: &str) + -> Vec> { + let mut node_hashes: Vec<_> = self + .hir_body_nodes + .iter() + .map(|&def_path_hash| { + let dep_node = def_path_hash.to_dep_node(DepKind::HirBody); + (def_path_hash, self.dep_graph.fingerprint_of(&dep_node)) + }) + .collect(); + + node_hashes.sort_unstable_by(|&(ref d1, _), &(ref d2, _)| d1.cmp(d2)); + + self.dep_graph.with_task(DepNode::new_no_params(DepKind::Krate), + &self.hcx, + (node_hashes, crate_disambiguator), + identity_fn); self.map } @@ -70,7 +149,11 @@ impl<'a, 'hir> NodeCollector<'a, 'hir> { fn insert(&mut self, id: NodeId, node: Node<'hir>) { let parent = self.parent_node; - let dep_node_index = self.current_dep_node_index; + let dep_node_index = if self.currently_in_body { + self.current_full_dep_index + } else { + self.current_signature_dep_index + }; let entry = match node { NodeItem(n) => EntryItem(parent, dep_node_index, n), @@ -91,6 +174,7 @@ impl<'a, 'hir> NodeCollector<'a, 'hir> { NodeTyParam(n) => EntryTyParam(parent, dep_node_index, n), NodeVisibility(n) => EntryVisibility(parent, dep_node_index, n), NodeLocal(n) => EntryLocal(parent, dep_node_index, n), + NodeMacroDef(n) => EntryMacroDef(dep_node_index, n), }; // Make sure that the DepNode of some node coincides with the HirId @@ -127,22 +211,41 @@ impl<'a, 'hir> NodeCollector<'a, 'hir> { self.parent_node = parent_node; } - fn with_dep_node_owner(&mut self, + fn with_dep_node_owner>, + F: FnOnce(&mut Self)>(&mut self, dep_node_owner: DefIndex, + item_like: &T, f: F) { let prev_owner = self.current_dep_node_owner; - let prev_index = self.current_dep_node_index; - - // When we enter a new owner (item, impl item, or trait item), we always - // start out again with DepKind::Hir. 
- let new_dep_node = self.definitions - .def_path_hash(dep_node_owner) - .to_dep_node(DepKind::Hir); - self.current_dep_node_index = self.dep_graph.alloc_input_node(new_dep_node); + let prev_signature_dep_index = self.current_signature_dep_index; + let prev_full_dep_index = self.current_signature_dep_index; + let prev_in_body = self.currently_in_body; + + let def_path_hash = self.definitions.def_path_hash(dep_node_owner); + + self.current_signature_dep_index = self.dep_graph.with_task( + def_path_hash.to_dep_node(DepKind::Hir), + &self.hcx, + HirItemLike { item_like, hash_bodies: false }, + identity_fn + ).1; + + self.current_full_dep_index = self.dep_graph.with_task( + def_path_hash.to_dep_node(DepKind::HirBody), + &self.hcx, + HirItemLike { item_like, hash_bodies: true }, + identity_fn + ).1; + + self.hir_body_nodes.push(def_path_hash); + self.current_dep_node_owner = dep_node_owner; + self.currently_in_body = false; f(self); - self.current_dep_node_index = prev_index; + self.currently_in_body = prev_in_body; self.current_dep_node_owner = prev_owner; + self.current_full_dep_index = prev_full_dep_index; + self.current_signature_dep_index = prev_signature_dep_index; } } @@ -169,24 +272,17 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> { } fn visit_nested_body(&mut self, id: BodyId) { - // When we enter a body, we switch to DepKind::HirBody. - // Note that current_dep_node_index might already be DepKind::HirBody, - // e.g. when entering the body of a closure that is already part of a - // surrounding body. That's expected and not a problem. - let prev_index = self.current_dep_node_index; - let new_dep_node = self.definitions - .def_path_hash(self.current_dep_node_owner) - .to_dep_node(DepKind::HirBody); - self.current_dep_node_index = self.dep_graph.alloc_input_node(new_dep_node); + let prev_in_body = self.currently_in_body; + self.currently_in_body = true; self.visit_body(self.krate.body(id)); - self.current_dep_node_index = prev_index; + self.currently_in_body = prev_in_body; } fn visit_item(&mut self, i: &'hir Item) { debug!("visit_item: {:?}", i); debug_assert_eq!(i.hir_id.owner, self.definitions.opt_def_index(i.id).unwrap()); - self.with_dep_node_owner(i.hir_id.owner, |this| { + self.with_dep_node_owner(i.hir_id.owner, i, |this| { this.insert(i.id, NodeItem(i)); this.with_parent(i.id, |this| { match i.node { @@ -222,7 +318,7 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> { fn visit_trait_item(&mut self, ti: &'hir TraitItem) { debug_assert_eq!(ti.hir_id.owner, self.definitions.opt_def_index(ti.id).unwrap()); - self.with_dep_node_owner(ti.hir_id.owner, |this| { + self.with_dep_node_owner(ti.hir_id.owner, ti, |this| { this.insert(ti.id, NodeTraitItem(ti)); this.with_parent(ti.id, |this| { @@ -234,7 +330,7 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> { fn visit_impl_item(&mut self, ii: &'hir ImplItem) { debug_assert_eq!(ii.hir_id.owner, self.definitions.opt_def_index(ii.id).unwrap()); - self.with_dep_node_owner(ii.hir_id.owner, |this| { + self.with_dep_node_owner(ii.hir_id.owner, ii, |this| { this.insert(ii.id, NodeImplItem(ii)); this.with_parent(ii.id, |this| { @@ -328,7 +424,11 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> { } fn visit_macro_def(&mut self, macro_def: &'hir MacroDef) { - self.insert_entry(macro_def.id, NotPresent); + let def_index = self.definitions.opt_def_index(macro_def.id).unwrap(); + + self.with_dep_node_owner(def_index, macro_def, |this| { + this.insert(macro_def.id, NodeMacroDef(macro_def)); + }); } fn 
visit_variant(&mut self, v: &'hir Variant, g: &'hir Generics, item_id: NodeId) { @@ -375,3 +475,28 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> { self.visit_nested_impl_item(id); } } + +// We use this with DepGraph::with_task(). Since we are handling only input +// values here, the "task" computing them just passes them through. +fn identity_fn(_: &StableHashingContext, item_like: T) -> T { + item_like +} + +// This is a wrapper structure that allows determining if span values within +// the wrapped item should be hashed or not. +struct HirItemLike { + item_like: T, + hash_bodies: bool, +} + +impl<'hir, T> HashStable> for HirItemLike + where T: HashStable> +{ + fn hash_stable(&self, + hcx: &mut StableHashingContext<'hir>, + hasher: &mut StableHasher) { + hcx.while_hashing_hir_bodies(self.hash_bodies, |hcx| { + self.item_like.hash_stable(hcx, hasher); + }); + } +} diff --git a/src/librustc/hir/map/def_collector.rs b/src/librustc/hir/map/def_collector.rs index d348a5db05..673e6d3bbf 100644 --- a/src/librustc/hir/map/def_collector.rs +++ b/src/librustc/hir/map/def_collector.rs @@ -104,14 +104,14 @@ impl<'a> visit::Visitor<'a> for DefCollector<'a> { DefPathData::Impl, ItemKind::Enum(..) | ItemKind::Struct(..) | ItemKind::Union(..) | ItemKind::Trait(..) | ItemKind::ExternCrate(..) | ItemKind::ForeignMod(..) | ItemKind::Ty(..) => - DefPathData::TypeNs(i.ident.name), + DefPathData::TypeNs(i.ident.name.as_str()), ItemKind::Mod(..) if i.ident == keywords::Invalid.ident() => { return visit::walk_item(self, i); } - ItemKind::Mod(..) => DefPathData::Module(i.ident.name), + ItemKind::Mod(..) => DefPathData::Module(i.ident.name.as_str()), ItemKind::Static(..) | ItemKind::Const(..) | ItemKind::Fn(..) => - DefPathData::ValueNs(i.ident.name), - ItemKind::MacroDef(..) => DefPathData::MacroDef(i.ident.name), + DefPathData::ValueNs(i.ident.name.as_str()), + ItemKind::MacroDef(..) => DefPathData::MacroDef(i.ident.name.as_str()), ItemKind::Mac(..) => return self.visit_macro_invoc(i.id, false), ItemKind::GlobalAsm(..) 
=> DefPathData::Misc, ItemKind::Use(ref view_path) => { @@ -139,13 +139,15 @@ impl<'a> visit::Visitor<'a> for DefCollector<'a> { for v in &enum_definition.variants { let variant_def_index = this.create_def(v.node.data.id(), - DefPathData::EnumVariant(v.node.name.name), + DefPathData::EnumVariant(v.node.name.name.as_str()), REGULAR_SPACE); this.with_parent(variant_def_index, |this| { for (index, field) in v.node.data.fields().iter().enumerate() { let name = field.ident.map(|ident| ident.name) .unwrap_or_else(|| Symbol::intern(&index.to_string())); - this.create_def(field.id, DefPathData::Field(name), REGULAR_SPACE); + this.create_def(field.id, + DefPathData::Field(name.as_str()), + REGULAR_SPACE); } if let Some(ref expr) = v.node.disr_expr { @@ -165,7 +167,7 @@ impl<'a> visit::Visitor<'a> for DefCollector<'a> { for (index, field) in struct_def.fields().iter().enumerate() { let name = field.ident.map(|ident| ident.name) .unwrap_or_else(|| Symbol::intern(&index.to_string())); - this.create_def(field.id, DefPathData::Field(name), REGULAR_SPACE); + this.create_def(field.id, DefPathData::Field(name.as_str()), REGULAR_SPACE); } } _ => {} @@ -176,7 +178,7 @@ impl<'a> visit::Visitor<'a> for DefCollector<'a> { fn visit_foreign_item(&mut self, foreign_item: &'a ForeignItem) { let def = self.create_def(foreign_item.id, - DefPathData::ValueNs(foreign_item.ident.name), + DefPathData::ValueNs(foreign_item.ident.name.as_str()), REGULAR_SPACE); self.with_parent(def, |this| { @@ -187,7 +189,7 @@ impl<'a> visit::Visitor<'a> for DefCollector<'a> { fn visit_generics(&mut self, generics: &'a Generics) { for ty_param in generics.ty_params.iter() { self.create_def(ty_param.id, - DefPathData::TypeParam(ty_param.ident.name), + DefPathData::TypeParam(ty_param.ident.name.as_str()), REGULAR_SPACE); } @@ -197,8 +199,8 @@ impl<'a> visit::Visitor<'a> for DefCollector<'a> { fn visit_trait_item(&mut self, ti: &'a TraitItem) { let def_data = match ti.node { TraitItemKind::Method(..) | TraitItemKind::Const(..) => - DefPathData::ValueNs(ti.ident.name), - TraitItemKind::Type(..) => DefPathData::TypeNs(ti.ident.name), + DefPathData::ValueNs(ti.ident.name.as_str()), + TraitItemKind::Type(..) => DefPathData::TypeNs(ti.ident.name.as_str()), TraitItemKind::Macro(..) => return self.visit_macro_invoc(ti.id, false), }; @@ -215,8 +217,8 @@ impl<'a> visit::Visitor<'a> for DefCollector<'a> { fn visit_impl_item(&mut self, ii: &'a ImplItem) { let def_data = match ii.node { ImplItemKind::Method(..) | ImplItemKind::Const(..) => - DefPathData::ValueNs(ii.ident.name), - ImplItemKind::Type(..) => DefPathData::TypeNs(ii.ident.name), + DefPathData::ValueNs(ii.ident.name.as_str()), + ImplItemKind::Type(..) => DefPathData::TypeNs(ii.ident.name.as_str()), ImplItemKind::Macro(..) => return self.visit_macro_invoc(ii.id, false), }; @@ -231,21 +233,10 @@ impl<'a> visit::Visitor<'a> for DefCollector<'a> { } fn visit_pat(&mut self, pat: &'a Pat) { - let parent_def = self.parent_def; - match pat.node { PatKind::Mac(..) 
=> return self.visit_macro_invoc(pat.id, false), - PatKind::Ident(_, id, _) => { - let def = self.create_def(pat.id, - DefPathData::Binding(id.node.name), - REGULAR_SPACE); - self.parent_def = Some(def); - } - _ => {} + _ => visit::walk_pat(self, pat), } - - visit::walk_pat(self, pat); - self.parent_def = parent_def; } fn visit_expr(&mut self, expr: &'a Expr) { @@ -282,7 +273,7 @@ impl<'a> visit::Visitor<'a> for DefCollector<'a> { fn visit_lifetime_def(&mut self, def: &'a LifetimeDef) { self.create_def(def.lifetime.id, - DefPathData::LifetimeDef(def.lifetime.ident.name), + DefPathData::LifetimeDef(def.lifetime.ident.name.as_str()), REGULAR_SPACE); } diff --git a/src/librustc/hir/map/definitions.rs b/src/librustc/hir/map/definitions.rs index b371366bc5..8bc7cf2fab 100644 --- a/src/librustc/hir/map/definitions.rs +++ b/src/librustc/hir/map/definitions.rs @@ -27,7 +27,6 @@ use std::hash::Hash; use syntax::ast; use syntax::ext::hygiene::Mark; use syntax::symbol::{Symbol, InternedString}; -use ty::TyCtxt; use util::nodemap::NodeMap; /// The DefPathTable maps DefIndexes to DefKeys and vice versa. @@ -80,8 +79,10 @@ impl DefPathTable { #[inline(always)] pub fn def_path_hash(&self, index: DefIndex) -> DefPathHash { - self.def_path_hashes[index.address_space().index()] - [index.as_array_index()] + let ret = self.def_path_hashes[index.address_space().index()] + [index.as_array_index()]; + debug!("def_path_hash({:?}) = {:?}", index, ret); + return ret } pub fn add_def_path_hashes_to(&self, @@ -210,10 +211,9 @@ impl DefKey { DefPathData::TypeParam(name) | DefPathData::LifetimeDef(name) | DefPathData::EnumVariant(name) | - DefPathData::Binding(name) | DefPathData::Field(name) | DefPathData::GlobalMetaData(name) => { - (*name.as_str()).hash(&mut hasher); + name.hash(&mut hasher); } DefPathData::Impl | @@ -295,26 +295,6 @@ impl DefPath { DefPath { data: data, krate: krate } } - pub fn to_string(&self, tcx: TyCtxt) -> String { - let mut s = String::with_capacity(self.data.len() * 16); - - s.push_str(&tcx.original_crate_name(self.krate).as_str()); - s.push_str("/"); - // Don't print the whole crate disambiguator. That's just annoying in - // debug output. - s.push_str(&tcx.crate_disambiguator(self.krate).as_str()[..7]); - - for component in &self.data { - write!(s, - "::{}[{}]", - component.data.as_interned_str(), - component.disambiguator) - .unwrap(); - } - - s - } - /// Returns a string representation of the DefPath without /// the crate-prefix. This method is useful if you don't have /// a TyCtxt available. @@ -347,31 +327,29 @@ pub enum DefPathData { /// An impl Impl, /// Something in the type NS - TypeNs(Symbol), + TypeNs(InternedString), /// Something in the value NS - ValueNs(Symbol), + ValueNs(InternedString), /// A module declaration - Module(Symbol), + Module(InternedString), /// A macro rule - MacroDef(Symbol), + MacroDef(InternedString), /// A closure expression ClosureExpr, // Subportions of items /// A type parameter (generic parameter) - TypeParam(Symbol), + TypeParam(InternedString), /// A lifetime definition - LifetimeDef(Symbol), + LifetimeDef(InternedString), /// A variant of a enum - EnumVariant(Symbol), + EnumVariant(InternedString), /// A struct field - Field(Symbol), + Field(InternedString), /// Implicit ctor for a tuple-like struct StructCtor, /// Initializer for a const Initializer, - /// Pattern binding - Binding(Symbol), /// An `impl Trait` type node. ImplTrait, /// A `typeof` type node. 
@@ -380,7 +358,7 @@ pub enum DefPathData { /// GlobalMetaData identifies a piece of crate metadata that is global to /// a whole crate (as opposed to just one item). GlobalMetaData components /// are only supposed to show up right below the crate root. - GlobalMetaData(Symbol) + GlobalMetaData(InternedString) } #[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Debug, @@ -601,7 +579,7 @@ impl Definitions { } impl DefPathData { - pub fn get_opt_name(&self) -> Option { + pub fn get_opt_name(&self) -> Option { use self::DefPathData::*; match *self { TypeNs(name) | @@ -611,7 +589,6 @@ impl DefPathData { TypeParam(name) | LifetimeDef(name) | EnumVariant(name) | - Binding(name) | Field(name) | GlobalMetaData(name) => Some(name), @@ -636,10 +613,9 @@ impl DefPathData { TypeParam(name) | LifetimeDef(name) | EnumVariant(name) | - Binding(name) | Field(name) | GlobalMetaData(name) => { - return name.as_str(); + return name } // note that this does not show up in user printouts @@ -684,7 +660,7 @@ macro_rules! define_global_metadata_kind { definitions.create_def_with_parent( CRATE_DEF_INDEX, ast::DUMMY_NODE_ID, - DefPathData::GlobalMetaData(instance.name()), + DefPathData::GlobalMetaData(instance.name().as_str()), GLOBAL_MD_ADDRESS_SPACE, Mark::root() ); @@ -698,7 +674,7 @@ macro_rules! define_global_metadata_kind { let def_key = DefKey { parent: Some(CRATE_DEF_INDEX), disambiguated_data: DisambiguatedDefPathData { - data: DefPathData::GlobalMetaData(self.name()), + data: DefPathData::GlobalMetaData(self.name().as_str()), disambiguator: 0, } }; diff --git a/src/librustc/hir/map/mod.rs b/src/librustc/hir/map/mod.rs index 63a5b70cb8..8ce2feab06 100644 --- a/src/librustc/hir/map/mod.rs +++ b/src/librustc/hir/map/mod.rs @@ -26,7 +26,7 @@ use syntax_pos::Span; use hir::*; use hir::print::Nested; -use util::nodemap::DefIdMap; +use util::nodemap::{DefIdMap, FxHashMap}; use arena::TypedArena; use std::cell::RefCell; @@ -57,6 +57,7 @@ pub enum Node<'hir> { NodePat(&'hir Pat), NodeBlock(&'hir Block), NodeLocal(&'hir Local), + NodeMacroDef(&'hir MacroDef), /// NodeStructCtor represents a tuple struct. NodeStructCtor(&'hir VariantData), @@ -93,6 +94,8 @@ enum MapEntry<'hir> { EntryVisibility(NodeId, DepNodeIndex, &'hir Visibility), EntryLocal(NodeId, DepNodeIndex, &'hir Local), + EntryMacroDef(DepNodeIndex, &'hir MacroDef), + /// Roots for node trees. The DepNodeIndex is the dependency node of the /// crate's root module. RootCrate(DepNodeIndex), @@ -127,6 +130,7 @@ impl<'hir> MapEntry<'hir> { EntryLocal(id, _, _) => id, NotPresent | + EntryMacroDef(..) | RootCrate(_) => return None, }) } @@ -151,7 +155,10 @@ impl<'hir> MapEntry<'hir> { EntryTyParam(_, _, n) => NodeTyParam(n), EntryVisibility(_, _, n) => NodeVisibility(n), EntryLocal(_, _, n) => NodeLocal(n), - _ => return None + EntryMacroDef(_, n) => NodeMacroDef(n), + + NotPresent | + RootCrate(_) => return None }) } @@ -184,7 +191,7 @@ impl<'hir> MapEntry<'hir> { EntryExpr(_, _, expr) => { match expr.node { - ExprClosure(.., body, _) => Some(body), + ExprClosure(.., body, _, _) => Some(body), _ => None, } } @@ -245,10 +252,13 @@ pub struct Map<'hir> { /// plain old integers. map: Vec>, - definitions: Definitions, + definitions: &'hir Definitions, /// Bodies inlined from other crates are cached here. inlined_bodies: RefCell>, + + /// The reverse mapping of `node_to_hir_id`. 
+ hir_to_node_id: FxHashMap, } impl<'hir> Map<'hir> { @@ -280,27 +290,19 @@ impl<'hir> Map<'hir> { EntryVisibility(_, dep_node_index, _) | EntryExpr(_, dep_node_index, _) | EntryLocal(_, dep_node_index, _) | + EntryMacroDef(dep_node_index, _) | RootCrate(dep_node_index) => { self.dep_graph.read_index(dep_node_index); } NotPresent => { - // Some nodes, notably macro definitions, are not - // present in the map for whatever reason, but - // they *do* have def-ids. So if we encounter an - // empty hole, check for that case. - if let Some(def_index) = self.definitions.opt_def_index(id) { - let def_path_hash = self.definitions.def_path_hash(def_index); - self.dep_graph.read(def_path_hash.to_dep_node(DepKind::Hir)); - } else { - bug!("called HirMap::read() with invalid NodeId") - } + bug!("called HirMap::read() with invalid NodeId") } } } #[inline] - pub fn definitions(&self) -> &Definitions { - &self.definitions + pub fn definitions(&self) -> &'hir Definitions { + self.definitions } pub fn def_key(&self, def_id: DefId) -> DefKey { @@ -337,6 +339,11 @@ impl<'hir> Map<'hir> { self.definitions.as_local_node_id(def_id) } + #[inline] + pub fn hir_to_node_id(&self, hir_id: HirId) -> NodeId { + self.hir_to_node_id[&hir_id] + } + #[inline] pub fn node_to_hir_id(&self, node_id: NodeId) -> HirId { self.definitions.node_to_hir_id(node_id) @@ -795,7 +802,7 @@ impl<'hir> Map<'hir> { NodeTraitItem(ti) => ti.name, NodeVariant(v) => v.node.name, NodeField(f) => f.name, - NodeLifetime(lt) => lt.name, + NodeLifetime(lt) => lt.name.name(), NodeTyParam(tp) => tp.name, NodeBinding(&Pat { node: PatKind::Binding(_,_,l,_), .. }) => l.node, NodeStructCtor(_) => self.name(self.get_parent(id)), @@ -865,6 +872,7 @@ impl<'hir> Map<'hir> { Some(EntryVisibility(_, _, &Visibility::Restricted { ref path, .. })) => path.span, Some(EntryVisibility(_, _, v)) => bug!("unexpected Visibility {:?}", v), Some(EntryLocal(_, _, local)) => local.span, + Some(EntryMacroDef(_, macro_def)) => macro_def.span, Some(RootCrate(_)) => self.forest.krate.span, Some(NotPresent) | None => { @@ -992,15 +1000,22 @@ impl Named for StructField { fn name(&self) -> Name { self.name } } impl Named for TraitItem { fn name(&self) -> Name { self.name } } impl Named for ImplItem { fn name(&self) -> Name { self.name } } -pub fn map_crate<'hir>(forest: &'hir mut Forest, - definitions: Definitions) +pub fn map_crate<'hir>(sess: &::session::Session, + cstore: &::middle::cstore::CrateStore, + forest: &'hir mut Forest, + definitions: &'hir Definitions) -> Map<'hir> { let map = { + let hcx = ::ich::StableHashingContext::new(sess, &forest.krate, definitions, cstore); + let mut collector = NodeCollector::root(&forest.krate, &forest.dep_graph, - &definitions); + &definitions, + hcx); intravisit::walk_crate(&mut collector, &forest.krate); - collector.into_map() + + let crate_disambiguator = sess.local_crate_disambiguator().as_str(); + collector.finalize_and_compute_crate_hash(&crate_disambiguator) }; if log_enabled!(::log::LogLevel::Debug) { @@ -1019,10 +1034,15 @@ pub fn map_crate<'hir>(forest: &'hir mut Forest, entries, vector_length, (entries as f64 / vector_length as f64) * 100.); } + // Build the reverse mapping of `node_to_hir_id`. + let hir_to_node_id = definitions.node_to_hir_id.iter_enumerated() + .map(|(node_id, &hir_id)| (hir_id, node_id)).collect(); + let map = Map { forest, dep_graph: forest.dep_graph.clone(), map, + hir_to_node_id, definitions, inlined_bodies: RefCell::new(DefIdMap()), }; @@ -1078,6 +1098,7 @@ impl<'a> print::State<'a> { // printing. 
NodeStructCtor(_) => bug!("cannot print isolated StructCtor"), NodeLocal(a) => self.print_local_decl(&a), + NodeMacroDef(_) => bug!("cannot print MacroDef"), } } } @@ -1194,6 +1215,9 @@ fn node_id_to_string(map: &Map, id: NodeId, include_id: bool) -> String { Some(NodeVisibility(ref vis)) => { format!("visibility {:?}{}", vis, id_str) } + Some(NodeMacroDef(_)) => { + format!("macro {}{}", path_str(), id_str) + } None => { format!("unknown node{}", id_str) } diff --git a/src/librustc/hir/mod.rs b/src/librustc/hir/mod.rs index df67f84162..5ad0ff04c1 100644 --- a/src/librustc/hir/mod.rs +++ b/src/librustc/hir/mod.rs @@ -145,7 +145,27 @@ pub struct Lifetime { /// HIR lowering inserts these placeholders in type paths that /// refer to type definitions needing lifetime parameters, /// `&T` and `&mut T`, and trait objects without `... + 'a`. - pub name: Name, + pub name: LifetimeName, +} + +#[derive(Debug, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy)] +pub enum LifetimeName { + Implicit, + Underscore, + Static, + Name(Name), +} + +impl LifetimeName { + pub fn name(&self) -> Name { + use self::LifetimeName::*; + match *self { + Implicit => keywords::Invalid.name(), + Underscore => Symbol::intern("'_"), + Static => keywords::StaticLifetime.name(), + Name(name) => name, + } + } } impl fmt::Debug for Lifetime { @@ -159,11 +179,15 @@ impl fmt::Debug for Lifetime { impl Lifetime { pub fn is_elided(&self) -> bool { - self.name == keywords::Invalid.name() + use self::LifetimeName::*; + match self.name { + Implicit | Underscore => true, + Static | Name(_) => false, + } } pub fn is_static(&self) -> bool { - self.name == "'static" + self.name == LifetimeName::Static } } @@ -212,7 +236,13 @@ pub struct PathSegment { /// this is more than just simple syntactic sugar; the use of /// parens affects the region binding rules, so we preserve the /// distinction. - pub parameters: PathParameters, + pub parameters: Option>, + + /// Whether to infer remaining type parameters, if any. + /// This only applies to expression and pattern paths, and + /// out of those only the segments with no type parameters + /// to begin with, e.g. `Vec::new` is `>::new::<..>`. + pub infer_types: bool, } impl PathSegment { @@ -220,9 +250,35 @@ impl PathSegment { pub fn from_name(name: Name) -> PathSegment { PathSegment { name, - parameters: PathParameters::none() + infer_types: true, + parameters: None } } + + pub fn new(name: Name, parameters: PathParameters, infer_types: bool) -> Self { + PathSegment { + name, + infer_types, + parameters: if parameters.is_empty() { + None + } else { + Some(P(parameters)) + } + } + } + + // FIXME: hack required because you can't create a static + // PathParameters, so you can't just return a &PathParameters. + pub fn with_parameters(&self, f: F) -> R + where F: FnOnce(&PathParameters) -> R + { + let dummy = PathParameters::none(); + f(if let Some(ref params) = self.parameters { + ¶ms + } else { + &dummy + }) + } } #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] @@ -231,11 +287,6 @@ pub struct PathParameters { pub lifetimes: HirVec, /// The type parameters for this path segment, if present. pub types: HirVec>, - /// Whether to infer remaining type parameters, if any. - /// This only applies to expression and pattern paths, and - /// out of those only the segments with no type parameters - /// to begin with, e.g. `Vec::new` is `>::new::<..>`. - pub infer_types: bool, /// Bindings (equality constraints) on associated types, if present. /// E.g., `Foo`. 
pub bindings: HirVec,
@@ -250,12 +301,16 @@ impl PathParameters {
Self {
lifetimes: HirVec::new(),
types: HirVec::new(),
- infer_types: true,
bindings: HirVec::new(),
parenthesized: false,
}
}
+ pub fn is_empty(&self) -> bool {
+ self.lifetimes.is_empty() && self.types.is_empty() &&
+ self.bindings.is_empty() && !self.parenthesized
+ }
+
pub fn inputs(&self) -> &[P] {
if self.parenthesized {
if let Some(ref ty) = self.types.get(0) {
@@ -296,6 +351,7 @@ pub struct TyParam {
pub default: Option>,
pub span: Span,
pub pure_wrt_drop: bool,
+ pub synthetic: Option,
}

/// Represents lifetimes and type parameters attached to a declaration
@@ -364,6 +420,13 @@ impl Generics {
}
}

+/// Synthetic Type Parameters are converted to another form during lowering; this allows
+/// us to track the original form they had. Useful for error messages.
+#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum SyntheticTyParamKind {
+ ImplTrait
+}
+
/// A `where` clause in a definition
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub struct WhereClause {
@@ -413,6 +476,10 @@ pub struct WhereEqPredicate {

pub type CrateConfig = HirVec>;

+/// The top-level data structure that stores the entire contents of
+/// the crate currently being compiled.
+///
+/// For more details, see [the module-level README](README.md).
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Debug)]
pub struct Crate {
pub module: Mod,
@@ -611,7 +678,7 @@ pub enum BindingAnnotation {
RefMut,
}

-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub enum RangeEnd {
Included,
Excluded,
}
@@ -623,8 +690,10 @@ pub enum PatKind {
Wild,

/// A fresh binding `ref mut binding @ OPT_SUBPATTERN`.
- /// The `DefId` is for the definition of the variable being bound.
- Binding(BindingAnnotation, DefId, Spanned, Option>),
+ /// The `NodeId` is the canonical ID for the variable being bound,
+ /// e.g. in `Ok(x) | Err(x)`, both `x` use the same canonical ID,
+ /// which is the pattern ID of the first `x`.
+ Binding(BindingAnnotation, NodeId, Spanned, Option>),

/// A struct or struct variant pattern, e.g. `Variant {x, y, ..}`.
/// The `bool` is `true` in the presence of a `..`.
@@ -847,8 +916,6 @@ impl Stmt_ {
}
}

-// FIXME (pending discussion of #1697, #2178...): local should really be
-// a refinement on pat.
/// Local represents a `let` statement, e.g., `let : = ;`
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub struct Local {
@@ -925,11 +992,32 @@ pub struct BodyId {
pub node_id: NodeId,
}

-/// The body of a function or constant value.
+/// The body of a function, closure, or constant value. In the case of
+/// a function, the body contains not only the function body itself
+/// (which is an expression), but also the argument patterns, since
+/// those are something that the caller doesn't really care about.
+///
+/// # Examples
+///
+/// ```
+/// fn foo((x, y): (u32, u32)) -> u32 {
+/// x + y
+/// }
+/// ```
+///
+/// Here, the `Body` associated with `foo()` would contain:
+///
+/// - an `arguments` array containing the `(x, y)` pattern
+/// - a `value` containing the `x + y` expression (maybe wrapped in a block)
+/// - `is_generator` would be false
+///
+/// All bodies have an **owner**, which can be accessed via the HIR
+/// map using `body_owner_def_id()`.
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub struct Body {
pub arguments: HirVec,
- pub value: Expr
+ pub value: Expr,
+ pub is_generator: bool,
}

impl Body {
@@ -1007,7 +1095,10 @@ pub enum Expr_ {
/// A closure (for example, `move |a, b, c| {a + b + c}`).
///
/// The final span is the span of the argument block `|...|`
- ExprClosure(CaptureClause, P, BodyId, Span),
+ ///
+ /// This may also be a generator literal, indicated by the final boolean;
+ /// in that case there is a GeneratorClause.
+ ExprClosure(CaptureClause, P, BodyId, Span, bool),

/// A block (`{ ... }`)
ExprBlock(P),
@@ -1052,6 +1143,9 @@ pub enum Expr_ {
/// For example, `[1; 5]`. The first expression is the element
/// to be repeated; the second is the number of times to repeat it.
ExprRepeat(P, BodyId),
+
+ /// A suspension point for generators. This is `yield ` in Rust.
+ ExprYield(P),
}

/// Optionally `Self`-qualified value/type path or associated extension.
@@ -1297,6 +1391,7 @@ pub struct Ty {
pub id: NodeId,
pub node: Ty_,
pub span: Span,
+ pub hir_id: HirId,
}

impl fmt::Debug for Ty {
@@ -1323,6 +1418,7 @@ pub struct BareFnTy {
pub abi: Abi,
pub lifetimes: HirVec,
pub decl: P,
+ pub arg_names: HirVec>,
}

#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
@@ -1836,6 +1932,15 @@ pub struct Freevar {
pub span: Span
}

+impl Freevar {
+ pub fn var_id(&self) -> NodeId {
+ match self.def {
+ Def::Local(id) | Def::Upvar(id, ..) => id,
+ _ => bug!("Freevar::var_id: bad def ({:?})", self.def)
+ }
+ }
+}
+
pub type FreevarMap = NodeMap>;

pub type CaptureModeMap = NodeMap;
diff --git a/src/librustc/hir/pat_util.rs b/src/librustc/hir/pat_util.rs
index 144cb34ee3..2bec224362 100644
--- a/src/librustc/hir/pat_util.rs
+++ b/src/librustc/hir/pat_util.rs
@@ -160,11 +160,13 @@ impl hir::Pat {
variants
}

- /// Checks if the pattern contains any `ref` or `ref mut` bindings,
- /// and if yes whether it contains mutable or just immutables ones.
+ /// Checks if the pattern contains any `ref` or `ref mut` bindings, and if
+ /// yes whether it contains mutable or just immutable ones.
///
- /// FIXME(tschottdorf): this is problematic as the HIR is being scraped,
- /// but ref bindings may be implicit after #42640.
+ /// FIXME(tschottdorf): this is problematic as the HIR is being scraped, but
+ /// ref bindings may be implicit after #42640 (default match binding modes).
+ ///
+ /// See #44848.
pub fn contains_explicit_ref_binding(&self) -> Option {
let mut result = None;
self.each_binding(|annotation, _, _, _| {
@@ -188,7 +190,9 @@ impl hir::Arm {
/// bindings, and if yes whether its containing mutable ones or just immutables ones.
pub fn contains_explicit_ref_binding(&self) -> Option {
// FIXME(tschottdorf): contains_explicit_ref_binding() must be removed
- // for #42640.
+ // for #42640 (default match binding modes).
+ //
+ // See #44848.
self.pats.iter() .filter_map(|pat| pat.contains_explicit_ref_binding()) .max_by_key(|m| match *m { diff --git a/src/librustc/hir/print.rs b/src/librustc/hir/print.rs index 876875bce4..7287e599b2 100644 --- a/src/librustc/hir/print.rs +++ b/src/librustc/hir/print.rs @@ -20,6 +20,7 @@ use syntax::print::pp::Breaks::{Consistent, Inconsistent}; use syntax::print::pprust::PrintState; use syntax::ptr::P; use syntax::symbol::keywords; +use syntax::util::parser::{self, AssocOp, Fixity}; use syntax_pos::{self, BytePos}; use hir; @@ -210,18 +211,6 @@ pub fn visibility_qualified(vis: &hir::Visibility, w: &str) -> String { }) } -fn needs_parentheses(expr: &hir::Expr) -> bool { - match expr.node { - hir::ExprAssign(..) | - hir::ExprBinary(..) | - hir::ExprClosure(..) | - hir::ExprAssignOp(..) | - hir::ExprCast(..) | - hir::ExprType(..) => true, - _ => false, - } -} - impl<'a> State<'a> { pub fn cbox(&mut self, u: usize) -> io::Result<()> { self.boxes.push(pp::Breaks::Consistent); @@ -262,7 +251,7 @@ impl<'a> State<'a> { indented: usize, close_box: bool) -> io::Result<()> { - self.maybe_print_comment(span.hi)?; + self.maybe_print_comment(span.hi())?; self.break_offset_if_not_bol(1, -(indented as isize))?; self.s.word("}")?; if close_box { @@ -324,12 +313,12 @@ impl<'a> State<'a> { let len = elts.len(); let mut i = 0; for elt in elts { - self.maybe_print_comment(get_span(elt).hi)?; + self.maybe_print_comment(get_span(elt).hi())?; op(self, elt)?; i += 1; if i < len { self.s.word(",")?; - self.maybe_print_trailing_comment(get_span(elt), Some(get_span(&elts[i]).hi))?; + self.maybe_print_trailing_comment(get_span(elt), Some(get_span(&elts[i]).hi()))?; self.space_if_not_bol()?; } } @@ -368,7 +357,7 @@ impl<'a> State<'a> { } pub fn print_type(&mut self, ty: &hir::Ty) -> io::Result<()> { - self.maybe_print_comment(ty.span.lo)?; + self.maybe_print_comment(ty.span.lo())?; self.ibox(0)?; match ty.node { hir::TySlice(ref ty) => { @@ -410,7 +399,8 @@ impl<'a> State<'a> { }, span: syntax_pos::DUMMY_SP, }; - self.print_ty_fn(f.abi, f.unsafety, &f.decl, None, &generics)?; + self.print_ty_fn(f.abi, f.unsafety, &f.decl, None, &generics, + &f.arg_names[..])?; } hir::TyPath(ref qpath) => { self.print_qpath(qpath, false)? 
@@ -458,7 +448,7 @@ impl<'a> State<'a> { pub fn print_foreign_item(&mut self, item: &hir::ForeignItem) -> io::Result<()> { self.hardbreak_if_not_bol()?; - self.maybe_print_comment(item.span.lo)?; + self.maybe_print_comment(item.span.lo())?; self.print_outer_attributes(&item.attrs)?; match item.node { hir::ForeignItemFn(ref decl, ref arg_names, ref generics) => { @@ -531,7 +521,7 @@ impl<'a> State<'a> { /// Pretty-print an item pub fn print_item(&mut self, item: &hir::Item) -> io::Result<()> { self.hardbreak_if_not_bol()?; - self.maybe_print_comment(item.span.lo)?; + self.maybe_print_comment(item.span.lo())?; self.print_outer_attributes(&item.attrs)?; self.ann.pre(self, NodeItem(item))?; match item.node { @@ -797,7 +787,7 @@ impl<'a> State<'a> { self.bopen()?; for v in variants { self.space_if_not_bol()?; - self.maybe_print_comment(v.span.lo)?; + self.maybe_print_comment(v.span.lo())?; self.print_outer_attributes(&v.node.attrs)?; self.ibox(indent_unit)?; self.print_variant(v)?; @@ -842,7 +832,7 @@ impl<'a> State<'a> { if struct_def.is_tuple() { self.popen()?; self.commasep(Inconsistent, struct_def.fields(), |s, field| { - s.maybe_print_comment(field.span.lo)?; + s.maybe_print_comment(field.span.lo())?; s.print_outer_attributes(&field.attrs)?; s.print_visibility(&field.vis)?; s.print_type(&field.ty) @@ -863,7 +853,7 @@ impl<'a> State<'a> { for field in struct_def.fields() { self.hardbreak_if_not_bol()?; - self.maybe_print_comment(field.span.lo)?; + self.maybe_print_comment(field.span.lo())?; self.print_outer_attributes(&field.attrs)?; self.print_visibility(&field.vis)?; self.print_name(field.name)?; @@ -908,7 +898,7 @@ impl<'a> State<'a> { pub fn print_trait_item(&mut self, ti: &hir::TraitItem) -> io::Result<()> { self.ann.pre(self, NodeSubItem(ti.id))?; self.hardbreak_if_not_bol()?; - self.maybe_print_comment(ti.span.lo)?; + self.maybe_print_comment(ti.span.lo())?; self.print_outer_attributes(&ti.attrs)?; match ti.node { hir::TraitItemKind::Const(ref ty, default) => { @@ -938,7 +928,7 @@ impl<'a> State<'a> { pub fn print_impl_item(&mut self, ii: &hir::ImplItem) -> io::Result<()> { self.ann.pre(self, NodeSubItem(ii.id))?; self.hardbreak_if_not_bol()?; - self.maybe_print_comment(ii.span.lo)?; + self.maybe_print_comment(ii.span.lo())?; self.print_outer_attributes(&ii.attrs)?; self.print_defaultness(ii.defaultness)?; @@ -962,7 +952,7 @@ impl<'a> State<'a> { } pub fn print_stmt(&mut self, st: &hir::Stmt) -> io::Result<()> { - self.maybe_print_comment(st.span.lo)?; + self.maybe_print_comment(st.span.lo())?; match st.node { hir::StmtDecl(ref decl, _) => { self.print_decl(&decl)?; @@ -1017,7 +1007,7 @@ impl<'a> State<'a> { hir::PopUnsafeBlock(..) 
=> self.word_space("pop_unsafe")?, hir::DefaultBlock => (), } - self.maybe_print_comment(blk.span.lo)?; + self.maybe_print_comment(blk.span.lo())?; self.ann.pre(self, NodeBlock(blk))?; self.bopen()?; @@ -1030,7 +1020,7 @@ impl<'a> State<'a> { Some(ref expr) => { self.space_if_not_bol()?; self.print_expr(&expr)?; - self.maybe_print_trailing_comment(expr.span, Some(blk.span.hi))?; + self.maybe_print_trailing_comment(expr.span, Some(blk.span.hi()))?; } _ => (), } @@ -1047,7 +1037,7 @@ impl<'a> State<'a> { self.cbox(indent_unit - 1)?; self.ibox(0)?; self.s.word(" else if ")?; - self.print_expr(&i)?; + self.print_expr_as_cond(&i)?; self.s.space()?; self.print_expr(&then)?; self.print_else(e.as_ref().map(|e| &**e)) @@ -1075,7 +1065,7 @@ impl<'a> State<'a> { elseopt: Option<&hir::Expr>) -> io::Result<()> { self.head("if")?; - self.print_expr(test)?; + self.print_expr_as_cond(test)?; self.s.space()?; self.print_expr(blk)?; self.print_else(elseopt) @@ -1091,7 +1081,7 @@ impl<'a> State<'a> { self.print_pat(pat)?; self.s.space()?; self.word_space("=")?; - self.print_expr(expr)?; + self.print_expr_as_cond(expr)?; self.s.space()?; self.print_block(blk)?; self.print_else(elseopt) @@ -1104,8 +1094,31 @@ impl<'a> State<'a> { self.pclose() } - pub fn print_expr_maybe_paren(&mut self, expr: &hir::Expr) -> io::Result<()> { - let needs_par = needs_parentheses(expr); + pub fn print_expr_maybe_paren(&mut self, expr: &hir::Expr, prec: i8) -> io::Result<()> { + let needs_par = expr_precedence(expr) < prec; + if needs_par { + self.popen()?; + } + self.print_expr(expr)?; + if needs_par { + self.pclose()?; + } + Ok(()) + } + + /// Print an expr using syntax that's acceptable in a condition position, such as the `cond` in + /// `if cond { ... }`. + pub fn print_expr_as_cond(&mut self, expr: &hir::Expr) -> io::Result<()> { + let needs_par = match expr.node { + // These cases need parens due to the parse error observed in #26461: `if return {}` + // parses as the erroneous construct `if (return {})`, not `if (return) {}`. + hir::ExprClosure(..) | + hir::ExprRet(..) | + hir::ExprBreak(..) => true, + + _ => contains_exterior_struct_lit(expr), + }; + if needs_par { self.popen()?; } @@ -1182,7 +1195,14 @@ impl<'a> State<'a> { } fn print_expr_call(&mut self, func: &hir::Expr, args: &[hir::Expr]) -> io::Result<()> { - self.print_expr_maybe_paren(func)?; + let prec = + match func.node { + hir::ExprField(..) | + hir::ExprTupField(..) 
=> parser::PREC_FORCE_PAREN, + _ => parser::PREC_POSTFIX, + }; + + self.print_expr_maybe_paren(func, prec)?; self.print_call_post(args) } @@ -1191,14 +1211,20 @@ impl<'a> State<'a> { args: &[hir::Expr]) -> io::Result<()> { let base_args = &args[1..]; - self.print_expr(&args[0])?; + self.print_expr_maybe_paren(&args[0], parser::PREC_POSTFIX)?; self.s.word(".")?; self.print_name(segment.name)?; - if !segment.parameters.lifetimes.is_empty() || - !segment.parameters.types.is_empty() || - !segment.parameters.bindings.is_empty() { - self.print_path_parameters(&segment.parameters, true)?; - } + + segment.with_parameters(|parameters| { + if !parameters.lifetimes.is_empty() || + !parameters.types.is_empty() || + !parameters.bindings.is_empty() + { + self.print_path_parameters(¶meters, segment.infer_types, true) + } else { + Ok(()) + } + })?; self.print_call_post(base_args) } @@ -1207,15 +1233,25 @@ impl<'a> State<'a> { lhs: &hir::Expr, rhs: &hir::Expr) -> io::Result<()> { - self.print_expr(lhs)?; + let assoc_op = bin_op_to_assoc_op(op.node); + let prec = assoc_op.precedence() as i8; + let fixity = assoc_op.fixity(); + + let (left_prec, right_prec) = match fixity { + Fixity::Left => (prec, prec + 1), + Fixity::Right => (prec + 1, prec), + Fixity::None => (prec + 1, prec + 1), + }; + + self.print_expr_maybe_paren(lhs, left_prec)?; self.s.space()?; self.word_space(op.node.as_str())?; - self.print_expr(rhs) + self.print_expr_maybe_paren(rhs, right_prec) } fn print_expr_unary(&mut self, op: hir::UnOp, expr: &hir::Expr) -> io::Result<()> { self.s.word(op.as_str())?; - self.print_expr_maybe_paren(expr) + self.print_expr_maybe_paren(expr, parser::PREC_PREFIX) } fn print_expr_addr_of(&mut self, @@ -1224,18 +1260,18 @@ impl<'a> State<'a> { -> io::Result<()> { self.s.word("&")?; self.print_mutability(mutability)?; - self.print_expr_maybe_paren(expr) + self.print_expr_maybe_paren(expr, parser::PREC_PREFIX) } pub fn print_expr(&mut self, expr: &hir::Expr) -> io::Result<()> { - self.maybe_print_comment(expr.span.lo)?; + self.maybe_print_comment(expr.span.lo())?; self.print_outer_attributes(&expr.attrs)?; self.ibox(indent_unit)?; self.ann.pre(self, NodeExpr(expr))?; match expr.node { hir::ExprBox(ref expr) => { self.word_space("box")?; - self.print_expr(expr)?; + self.print_expr_maybe_paren(expr, parser::PREC_PREFIX)?; } hir::ExprArray(ref exprs) => { self.print_expr_vec(exprs)?; @@ -1268,13 +1304,15 @@ impl<'a> State<'a> { self.print_literal(&lit)?; } hir::ExprCast(ref expr, ref ty) => { - self.print_expr(&expr)?; + let prec = AssocOp::As.precedence() as i8; + self.print_expr_maybe_paren(&expr, prec)?; self.s.space()?; self.word_space("as")?; self.print_type(&ty)?; } hir::ExprType(ref expr, ref ty) => { - self.print_expr(&expr)?; + let prec = AssocOp::Colon.precedence() as i8; + self.print_expr_maybe_paren(&expr, prec)?; self.word_space(":")?; self.print_type(&ty)?; } @@ -1287,7 +1325,7 @@ impl<'a> State<'a> { self.word_space(":")?; } self.head("while")?; - self.print_expr(&test)?; + self.print_expr_as_cond(&test)?; self.s.space()?; self.print_block(&blk)?; } @@ -1304,7 +1342,7 @@ impl<'a> State<'a> { self.cbox(indent_unit)?; self.ibox(4)?; self.word_nbsp("match")?; - self.print_expr(&expr)?; + self.print_expr_as_cond(&expr)?; self.s.space()?; self.bopen()?; for arm in arms { @@ -1312,7 +1350,7 @@ impl<'a> State<'a> { } self.bclose_(expr.span, indent_unit)?; } - hir::ExprClosure(capture_clause, ref decl, body, _fn_decl_span) => { + hir::ExprClosure(capture_clause, ref decl, body, _fn_decl_span, _gen) => { 
self.print_capture_clause(capture_clause)?; self.print_closure_args(&decl, body)?; @@ -1335,30 +1373,32 @@ impl<'a> State<'a> { self.print_block(&blk)?; } hir::ExprAssign(ref lhs, ref rhs) => { - self.print_expr(&lhs)?; + let prec = AssocOp::Assign.precedence() as i8; + self.print_expr_maybe_paren(&lhs, prec + 1)?; self.s.space()?; self.word_space("=")?; - self.print_expr(&rhs)?; + self.print_expr_maybe_paren(&rhs, prec)?; } hir::ExprAssignOp(op, ref lhs, ref rhs) => { - self.print_expr(&lhs)?; + let prec = AssocOp::Assign.precedence() as i8; + self.print_expr_maybe_paren(&lhs, prec + 1)?; self.s.space()?; self.s.word(op.node.as_str())?; self.word_space("=")?; - self.print_expr(&rhs)?; + self.print_expr_maybe_paren(&rhs, prec)?; } hir::ExprField(ref expr, name) => { - self.print_expr(&expr)?; + self.print_expr_maybe_paren(expr, parser::PREC_POSTFIX)?; self.s.word(".")?; self.print_name(name.node)?; } hir::ExprTupField(ref expr, id) => { - self.print_expr(&expr)?; + self.print_expr_maybe_paren(&expr, parser::PREC_POSTFIX)?; self.s.word(".")?; self.print_usize(id.node)?; } hir::ExprIndex(ref expr, ref index) => { - self.print_expr(&expr)?; + self.print_expr_maybe_paren(&expr, parser::PREC_POSTFIX)?; self.s.word("[")?; self.print_expr(&index)?; self.s.word("]")?; @@ -1374,7 +1414,7 @@ impl<'a> State<'a> { self.s.space()?; } if let Some(ref expr) = *opt_expr { - self.print_expr(expr)?; + self.print_expr_maybe_paren(expr, parser::PREC_JUMP)?; self.s.space()?; } } @@ -1391,7 +1431,7 @@ impl<'a> State<'a> { match *result { Some(ref expr) => { self.s.word(" ")?; - self.print_expr(&expr)?; + self.print_expr_maybe_paren(&expr, parser::PREC_JUMP)?; } _ => (), } @@ -1461,6 +1501,10 @@ impl<'a> State<'a> { self.pclose()?; } + hir::ExprYield(ref expr) => { + self.word_space("yield")?; + self.print_expr_maybe_paren(&expr, parser::PREC_JUMP)?; + } } self.ann.post(self, NodeExpr(expr))?; self.end() @@ -1476,7 +1520,7 @@ impl<'a> State<'a> { } pub fn print_decl(&mut self, decl: &hir::Decl) -> io::Result<()> { - self.maybe_print_comment(decl.span.lo)?; + self.maybe_print_comment(decl.span.lo())?; match decl.node { hir::DeclLocal(ref loc) => { self.space_if_not_bol()?; @@ -1519,7 +1563,7 @@ impl<'a> State<'a> { path: &hir::Path, colons_before_params: bool) -> io::Result<()> { - self.maybe_print_comment(path.span.lo)?; + self.maybe_print_comment(path.span.lo())?; for (i, segment) in path.segments.iter().enumerate() { if i > 0 { @@ -1527,8 +1571,12 @@ impl<'a> State<'a> { } if segment.name != keywords::CrateRoot.name() && segment.name != keywords::DollarCrate.name() { - self.print_name(segment.name)?; - self.print_path_parameters(&segment.parameters, colons_before_params)?; + self.print_name(segment.name)?; + segment.with_parameters(|parameters| { + self.print_path_parameters(parameters, + segment.infer_types, + colons_before_params) + })?; } } @@ -1556,7 +1604,11 @@ impl<'a> State<'a> { if segment.name != keywords::CrateRoot.name() && segment.name != keywords::DollarCrate.name() { self.print_name(segment.name)?; - self.print_path_parameters(&segment.parameters, colons_before_params)?; + segment.with_parameters(|parameters| { + self.print_path_parameters(parameters, + segment.infer_types, + colons_before_params) + })?; } } @@ -1564,7 +1616,11 @@ impl<'a> State<'a> { self.s.word("::")?; let item_segment = path.segments.last().unwrap(); self.print_name(item_segment.name)?; - self.print_path_parameters(&item_segment.parameters, colons_before_params) + item_segment.with_parameters(|parameters| { + 
self.print_path_parameters(parameters, + item_segment.infer_types, + colons_before_params) + }) } hir::QPath::TypeRelative(ref qself, ref item_segment) => { self.s.word("<")?; @@ -1572,13 +1628,18 @@ impl<'a> State<'a> { self.s.word(">")?; self.s.word("::")?; self.print_name(item_segment.name)?; - self.print_path_parameters(&item_segment.parameters, colons_before_params) + item_segment.with_parameters(|parameters| { + self.print_path_parameters(parameters, + item_segment.infer_types, + colons_before_params) + }) } } } fn print_path_parameters(&mut self, parameters: &hir::PathParameters, + infer_types: bool, colons_before_params: bool) -> io::Result<()> { if parameters.parenthesized { @@ -1615,7 +1676,7 @@ impl<'a> State<'a> { // FIXME(eddyb) This would leak into error messages, e.g.: // "non-exhaustive patterns: `Some::<..>(_)` not covered". - if parameters.infer_types && false { + if infer_types && false { start_or_comma(self)?; self.s.word("..")?; } @@ -1637,7 +1698,7 @@ impl<'a> State<'a> { } pub fn print_pat(&mut self, pat: &hir::Pat) -> io::Result<()> { - self.maybe_print_comment(pat.span.lo)?; + self.maybe_print_comment(pat.span.lo())?; self.ann.pre(self, NodePat(pat))?; // Pat isn't normalized, but the beauty of it // is that it doesn't matter @@ -1893,7 +1954,7 @@ impl<'a> State<'a> { match decl.output { hir::Return(ref ty) => { self.print_type(&ty)?; - self.maybe_print_comment(ty.span.lo) + self.maybe_print_comment(ty.span.lo()) } hir::DefaultReturn(..) => unreachable!(), } @@ -1938,7 +1999,7 @@ impl<'a> State<'a> { } pub fn print_lifetime(&mut self, lifetime: &hir::Lifetime) -> io::Result<()> { - self.print_name(lifetime.name) + self.print_name(lifetime.name.name()) } pub fn print_lifetime_def(&mut self, lifetime: &hir::LifetimeDef) -> io::Result<()> { @@ -2070,7 +2131,7 @@ impl<'a> State<'a> { self.end()?; match decl.output { - hir::Return(ref output) => self.maybe_print_comment(output.span.lo), + hir::Return(ref output) => self.maybe_print_comment(output.span.lo()), _ => Ok(()), } } @@ -2080,7 +2141,8 @@ impl<'a> State<'a> { unsafety: hir::Unsafety, decl: &hir::FnDecl, name: Option, - generics: &hir::Generics) + generics: &hir::Generics, + arg_names: &[Spanned]) -> io::Result<()> { self.ibox(indent_unit)?; if !generics.lifetimes.is_empty() || !generics.ty_params.is_empty() { @@ -2103,7 +2165,7 @@ impl<'a> State<'a> { name, &generics, &hir::Inherited, - &[], + arg_names, None)?; self.end() } @@ -2120,13 +2182,13 @@ impl<'a> State<'a> { if (*cmnt).style != comments::Trailing { return Ok(()); } - let span_line = cm.lookup_char_pos(span.hi); + let span_line = cm.lookup_char_pos(span.hi()); let comment_line = cm.lookup_char_pos((*cmnt).pos); let mut next = (*cmnt).pos + BytePos(1); if let Some(p) = next_pos { next = p; } - if span.hi < (*cmnt).pos && (*cmnt).pos < next && + if span.hi() < (*cmnt).pos && (*cmnt).pos < next && span_line.line == comment_line.line { self.print_comment(cmnt)?; } @@ -2242,3 +2304,111 @@ fn stmt_ends_with_semi(stmt: &hir::Stmt_) -> bool { } } } + + +fn expr_precedence(expr: &hir::Expr) -> i8 { + use syntax::util::parser::*; + + match expr.node { + hir::ExprClosure(..) => PREC_CLOSURE, + + hir::ExprBreak(..) | + hir::ExprAgain(..) | + hir::ExprRet(..) | + hir::ExprYield(..) => PREC_JUMP, + + // Binop-like expr kinds, handled by `AssocOp`. + hir::ExprBinary(op, _, _) => bin_op_to_assoc_op(op.node).precedence() as i8, + + hir::ExprCast(..) => AssocOp::As.precedence() as i8, + hir::ExprType(..) => AssocOp::Colon.precedence() as i8, + + hir::ExprAssign(..) 
| + hir::ExprAssignOp(..) => AssocOp::Assign.precedence() as i8, + + // Unary, prefix + hir::ExprBox(..) | + hir::ExprAddrOf(..) | + hir::ExprUnary(..) => PREC_PREFIX, + + // Unary, postfix + hir::ExprCall(..) | + hir::ExprMethodCall(..) | + hir::ExprField(..) | + hir::ExprTupField(..) | + hir::ExprIndex(..) | + hir::ExprInlineAsm(..) => PREC_POSTFIX, + + // Never need parens + hir::ExprArray(..) | + hir::ExprRepeat(..) | + hir::ExprTup(..) | + hir::ExprLit(..) | + hir::ExprPath(..) | + hir::ExprIf(..) | + hir::ExprWhile(..) | + hir::ExprLoop(..) | + hir::ExprMatch(..) | + hir::ExprBlock(..) | + hir::ExprStruct(..) => PREC_PAREN, + } +} + +fn bin_op_to_assoc_op(op: hir::BinOp_) -> AssocOp { + use hir::BinOp_::*; + match op { + BiAdd => AssocOp::Add, + BiSub => AssocOp::Subtract, + BiMul => AssocOp::Multiply, + BiDiv => AssocOp::Divide, + BiRem => AssocOp::Modulus, + + BiAnd => AssocOp::LAnd, + BiOr => AssocOp::LOr, + + BiBitXor => AssocOp::BitXor, + BiBitAnd => AssocOp::BitAnd, + BiBitOr => AssocOp::BitOr, + BiShl => AssocOp::ShiftLeft, + BiShr => AssocOp::ShiftRight, + + BiEq => AssocOp::Equal, + BiLt => AssocOp::Less, + BiLe => AssocOp::LessEqual, + BiNe => AssocOp::NotEqual, + BiGe => AssocOp::GreaterEqual, + BiGt => AssocOp::Greater, + } +} + +/// Expressions that syntactically contain an "exterior" struct literal i.e. not surrounded by any +/// parens or other delimiters, e.g. `X { y: 1 }`, `X { y: 1 }.method()`, `foo == X { y: 1 }` and +/// `X { y: 1 } == foo` all do, but `(X { y: 1 }) == foo` does not. +fn contains_exterior_struct_lit(value: &hir::Expr) -> bool { + match value.node { + hir::ExprStruct(..) => true, + + hir::ExprAssign(ref lhs, ref rhs) | + hir::ExprAssignOp(_, ref lhs, ref rhs) | + hir::ExprBinary(_, ref lhs, ref rhs) => { + // X { y: 1 } + X { y: 2 } + contains_exterior_struct_lit(&lhs) || contains_exterior_struct_lit(&rhs) + } + hir::ExprUnary(_, ref x) | + hir::ExprCast(ref x, _) | + hir::ExprType(ref x, _) | + hir::ExprField(ref x, _) | + hir::ExprTupField(ref x, _) | + hir::ExprIndex(ref x, _) => { + // &X { y: 1 }, X { y: 1 }.y + contains_exterior_struct_lit(&x) + } + + hir::ExprMethodCall(.., ref exprs) => { + // X { y: 1 }.bar(...) + contains_exterior_struct_lit(&exprs[0]) + } + + _ => false, + } +} diff --git a/src/librustc/ich/caching_codemap_view.rs b/src/librustc/ich/caching_codemap_view.rs index 49e18f100c..e393459027 100644 --- a/src/librustc/ich/caching_codemap_view.rs +++ b/src/librustc/ich/caching_codemap_view.rs @@ -11,7 +11,6 @@ use std::rc::Rc; use syntax::codemap::CodeMap; use syntax_pos::{BytePos, FileMap}; -use ty::TyCtxt; #[derive(Clone)] struct CacheEntry { @@ -23,15 +22,15 @@ struct CacheEntry { file_index: usize, } -pub struct CachingCodemapView<'tcx> { - codemap: &'tcx CodeMap, +#[derive(Clone)] +pub struct CachingCodemapView<'cm> { + codemap: &'cm CodeMap, line_cache: [CacheEntry; 3], time_stamp: usize, } -impl<'gcx> CachingCodemapView<'gcx> { - pub fn new<'a, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> CachingCodemapView<'gcx> { - let codemap = tcx.sess.codemap(); +impl<'cm> CachingCodemapView<'cm> { + pub fn new(codemap: &'cm CodeMap) -> CachingCodemapView<'cm> { let files = codemap.files(); let first_file = files[0].clone(); let entry = CacheEntry { diff --git a/src/librustc/ich/hcx.rs b/src/librustc/ich/hcx.rs index 218483232d..e7a26e14db 100644 --- a/src/librustc/ich/hcx.rs +++ b/src/librustc/ich/hcx.rs @@ -9,72 +9,117 @@ // except according to those terms. 
use hir; -use hir::def_id::DefId; +use hir::def_id::{DefId, DefIndex}; use hir::map::DefPathHash; +use hir::map::definitions::Definitions; use ich::{self, CachingCodemapView}; +use middle::cstore::CrateStore; use session::config::DebugInfoLevel::NoDebugInfo; -use ty; -use util::nodemap::{NodeMap, ItemLocalMap}; +use ty::{TyCtxt, fast_reject}; +use session::Session; +use std::cmp::Ord; use std::hash as std_hash; -use std::collections::{HashMap, HashSet, BTreeMap}; +use std::cell::RefCell; +use std::collections::HashMap; use syntax::ast; use syntax::attr; +use syntax::codemap::CodeMap; use syntax::ext::hygiene::SyntaxContext; use syntax::symbol::Symbol; use syntax_pos::Span; -use rustc_data_structures::stable_hasher::{HashStable, StableHasher, - StableHasherResult}; +use rustc_data_structures::stable_hasher::{HashStable, StableHashingContextProvider, + StableHasher, StableHasherResult, + ToStableHashKey}; use rustc_data_structures::accumulate_vec::AccumulateVec; +use rustc_data_structures::fx::FxHashSet; + +thread_local!(static IGNORED_ATTR_NAMES: RefCell> = + RefCell::new(FxHashSet())); /// This is the context state available during incr. comp. hashing. It contains /// enough information to transform DefIds and HirIds into stable DefPaths (i.e. /// a reference to the TyCtxt) and it holds a few caches for speeding up various /// things (e.g. each DefId/DefPath is only hashed once). -pub struct StableHashingContext<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { - tcx: ty::TyCtxt<'a, 'gcx, 'tcx>, - codemap: CachingCodemapView<'gcx>, +#[derive(Clone)] +pub struct StableHashingContext<'gcx> { + sess: &'gcx Session, + definitions: &'gcx Definitions, + cstore: &'gcx CrateStore, + body_resolver: BodyResolver<'gcx>, hash_spans: bool, hash_bodies: bool, overflow_checks_enabled: bool, node_id_hashing_mode: NodeIdHashingMode, - // A sorted array of symbol keys for fast lookup. - ignored_attr_names: Vec, + + // Very often, we are hashing something that does not need the + // CachingCodemapView, so we initialize it lazily. + raw_codemap: &'gcx CodeMap, + caching_codemap: Option>, } #[derive(PartialEq, Eq, Clone, Copy)] pub enum NodeIdHashingMode { Ignore, HashDefPath, - HashTraitsInScope, } -impl<'a, 'gcx, 'tcx> StableHashingContext<'a, 'gcx, 'tcx> { - - pub fn new(tcx: ty::TyCtxt<'a, 'gcx, 'tcx>) -> Self { - let hash_spans_initial = tcx.sess.opts.debuginfo != NoDebugInfo; - let check_overflow_initial = tcx.sess.overflow_checks(); - - let mut ignored_attr_names: Vec<_> = ich::IGNORED_ATTRIBUTES - .iter() - .map(|&s| Symbol::intern(s)) - .collect(); +/// The BodyResolver allows to map a BodyId to the corresponding hir::Body. +/// We could also just store a plain reference to the hir::Crate but we want +/// to avoid that the crate is used to get untracked access to all of the HIR. +#[derive(Clone, Copy)] +struct BodyResolver<'gcx>(&'gcx hir::Crate); + +impl<'gcx> BodyResolver<'gcx> { + // Return a reference to the hir::Body with the given BodyId. + // DOES NOT DO ANY TRACKING, use carefully. + fn body(self, id: hir::BodyId) -> &'gcx hir::Body { + self.0.body(id) + } +} - ignored_attr_names.sort(); +impl<'gcx> StableHashingContext<'gcx> { + // The `krate` here is only used for mapping BodyIds to Bodies. + // Don't use it for anything else or you'll run the risk of + // leaking data out of the tracking system. 
+ pub fn new(sess: &'gcx Session, + krate: &'gcx hir::Crate, + definitions: &'gcx Definitions, + cstore: &'gcx CrateStore) + -> Self { + let hash_spans_initial = sess.opts.debuginfo != NoDebugInfo; + let check_overflow_initial = sess.overflow_checks(); + + debug_assert!(ich::IGNORED_ATTRIBUTES.len() > 0); + IGNORED_ATTR_NAMES.with(|names| { + let mut names = names.borrow_mut(); + if names.is_empty() { + names.extend(ich::IGNORED_ATTRIBUTES.iter() + .map(|&s| Symbol::intern(s))); + } + }); StableHashingContext { - tcx, - codemap: CachingCodemapView::new(tcx), + sess, + body_resolver: BodyResolver(krate), + definitions, + cstore, + caching_codemap: None, + raw_codemap: sess.codemap(), hash_spans: hash_spans_initial, hash_bodies: true, overflow_checks_enabled: check_overflow_initial, node_id_hashing_mode: NodeIdHashingMode::HashDefPath, - ignored_attr_names, } } + #[inline] + pub fn sess(&self) -> &'gcx Session { + self.sess + } + pub fn force_span_hashing(mut self) -> Self { self.hash_spans = true; self @@ -111,13 +156,22 @@ impl<'a, 'gcx, 'tcx> StableHashingContext<'a, 'gcx, 'tcx> { } #[inline] - pub fn tcx(&self) -> ty::TyCtxt<'a, 'gcx, 'tcx> { - self.tcx + pub fn def_path_hash(&self, def_id: DefId) -> DefPathHash { + if def_id.is_local() { + self.definitions.def_path_hash(def_id.index) + } else { + self.cstore.def_path_hash(def_id) + } + } + + #[inline] + pub fn local_def_path_hash(&self, def_index: DefIndex) -> DefPathHash { + self.definitions.def_path_hash(def_index) } #[inline] - pub fn def_path_hash(&mut self, def_id: DefId) -> DefPathHash { - self.tcx.def_path_hash(def_id) + pub fn node_to_hir_id(&self, node_id: ast::NodeId) -> hir::HirId { + self.definitions.node_to_hir_id(node_id) } #[inline] @@ -132,12 +186,22 @@ impl<'a, 'gcx, 'tcx> StableHashingContext<'a, 'gcx, 'tcx> { #[inline] pub fn codemap(&mut self) -> &mut CachingCodemapView<'gcx> { - &mut self.codemap + match self.caching_codemap { + Some(ref mut cm) => { + cm + } + ref mut none => { + *none = Some(CachingCodemapView::new(self.raw_codemap)); + none.as_mut().unwrap() + } + } } #[inline] pub fn is_ignored_attr(&self, name: Symbol) -> bool { - self.ignored_attr_names.binary_search(&name).is_ok() + IGNORED_ATTR_NAMES.with(|names| { + names.borrow().contains(&name) + }) } pub fn hash_hir_item_like(&mut self, @@ -194,43 +258,96 @@ impl<'a, 'gcx, 'tcx> StableHashingContext<'a, 'gcx, 'tcx> { } } +impl<'a, 'gcx, 'lcx> StableHashingContextProvider for TyCtxt<'a, 'gcx, 'lcx> { + type ContextType = StableHashingContext<'gcx>; + fn create_stable_hashing_context(&self) -> Self::ContextType { + (*self).create_stable_hashing_context() + } +} + + +impl<'gcx> StableHashingContextProvider for StableHashingContext<'gcx> { + type ContextType = StableHashingContext<'gcx>; + fn create_stable_hashing_context(&self) -> Self::ContextType { + self.clone() + } +} + +impl<'gcx> ::dep_graph::DepGraphSafe for StableHashingContext<'gcx> { +} + + +impl<'gcx> HashStable> for hir::BodyId { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + if hcx.hash_bodies() { + hcx.body_resolver.body(*self).hash_stable(hcx, hasher); + } + } +} -impl<'a, 'gcx, 'tcx> HashStable> for ast::NodeId { +impl<'gcx> HashStable> for hir::HirId { + #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { match hcx.node_id_hashing_mode { NodeIdHashingMode::Ignore => { - // Most NodeIds in the HIR can be ignored, but if there is a - // 
corresponding entry in the `trait_map` we need to hash that. - // Make sure we don't ignore too much by checking that there is - // no entry in a debug_assert!(). - debug_assert!(hcx.tcx.trait_map.get(self).is_none()); + // Don't do anything. } NodeIdHashingMode::HashDefPath => { - hcx.tcx.hir.definitions().node_to_hir_id(*self).hash_stable(hcx, hasher); + let hir::HirId { + owner, + local_id, + } = *self; + + hcx.local_def_path_hash(owner).hash_stable(hcx, hasher); + local_id.hash_stable(hcx, hasher); } - NodeIdHashingMode::HashTraitsInScope => { - if let Some(traits) = hcx.tcx.trait_map.get(self) { - // The ordering of the candidates is not fixed. So we hash - // the def-ids and then sort them and hash the collection. - let mut candidates: AccumulateVec<[_; 8]> = - traits.iter() - .map(|&hir::TraitCandidate { def_id, import_id: _ }| { - hcx.def_path_hash(def_id) - }) - .collect(); - if traits.len() > 1 { - candidates.sort(); - } - candidates.hash_stable(hcx, hasher); - } + } + } +} + +impl<'gcx> ToStableHashKey> for hir::HirId { + type KeyType = (DefPathHash, hir::ItemLocalId); + + #[inline] + fn to_stable_hash_key(&self, + hcx: &StableHashingContext<'gcx>) + -> (DefPathHash, hir::ItemLocalId) { + let def_path_hash = hcx.local_def_path_hash(self.owner); + (def_path_hash, self.local_id) + } +} + +impl<'gcx> HashStable> for ast::NodeId { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + match hcx.node_id_hashing_mode { + NodeIdHashingMode::Ignore => { + // Don't do anything. + } + NodeIdHashingMode::HashDefPath => { + hcx.definitions.node_to_hir_id(*self).hash_stable(hcx, hasher); } } } } -impl<'a, 'gcx, 'tcx> HashStable> for Span { +impl<'gcx> ToStableHashKey> for ast::NodeId { + type KeyType = (DefPathHash, hir::ItemLocalId); + + #[inline] + fn to_stable_hash_key(&self, + hcx: &StableHashingContext<'gcx>) + -> (DefPathHash, hir::ItemLocalId) { + hcx.definitions.node_to_hir_id(*self).to_stable_hash_key(hcx) + } +} + +impl<'gcx> HashStable> for Span { // Hash a span in a stable way. We can't directly hash the span's BytePos // fields (that would be similar to hashing pointers, since those are just @@ -242,7 +359,7 @@ impl<'a, 'gcx, 'tcx> HashStable> for Span { // Also, hashing filenames is expensive so we avoid doing it twice when the // span starts and ends in the same file, which is almost always the case. fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { use syntax_pos::Pos; @@ -253,17 +370,17 @@ impl<'a, 'gcx, 'tcx> HashStable> for Span { // If this is not an empty or invalid span, we want to hash the last // position that belongs to it, as opposed to hashing the first // position past it. - let span_hi = if self.hi > self.lo { + let span_hi = if self.hi() > self.lo() { // We might end up in the middle of a multibyte character here, // but that's OK, since we are not trying to decode anything at // this position. 
- self.hi - ::syntax_pos::BytePos(1) + self.hi() - ::syntax_pos::BytePos(1) } else { - self.hi + self.hi() }; { - let loc1 = hcx.codemap().byte_pos_to_line_and_col(self.lo); + let loc1 = hcx.codemap().byte_pos_to_line_and_col(self.lo()); let loc1 = loc1.as_ref() .map(|&(ref fm, line, col)| (&fm.name[..], line, col.to_usize())) .unwrap_or(("???", 0, 0)); @@ -296,7 +413,7 @@ impl<'a, 'gcx, 'tcx> HashStable> for Span { } } - if self.ctxt == SyntaxContext::empty() { + if self.ctxt() == SyntaxContext::empty() { 0u8.hash_stable(hcx, hasher); } else { 1u8.hash_stable(hcx, hasher); @@ -305,90 +422,47 @@ impl<'a, 'gcx, 'tcx> HashStable> for Span { } } -pub fn hash_stable_hashmap<'a, 'gcx, 'tcx, K, V, R, SK, F, W>( - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, - hasher: &mut StableHasher, - map: &HashMap, - extract_stable_key: F) - where K: Eq + std_hash::Hash, - V: HashStable>, - R: std_hash::BuildHasher, - SK: HashStable> + Ord + Clone, - F: Fn(&mut StableHashingContext<'a, 'gcx, 'tcx>, &K) -> SK, - W: StableHasherResult, -{ - let mut keys: Vec<_> = map.keys() - .map(|k| (extract_stable_key(hcx, k), k)) - .collect(); - keys.sort_unstable_by_key(|&(ref stable_key, _)| stable_key.clone()); - keys.len().hash_stable(hcx, hasher); - for (stable_key, key) in keys { - stable_key.hash_stable(hcx, hasher); - map[key].hash_stable(hcx, hasher); - } -} - -pub fn hash_stable_hashset<'a, 'tcx, 'gcx, K, R, SK, F, W>( - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, +pub fn hash_stable_trait_impls<'gcx, W, R>( + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher, - set: &HashSet, - extract_stable_key: F) - where K: Eq + std_hash::Hash, + blanket_impls: &Vec, + non_blanket_impls: &HashMap, R>) + where W: StableHasherResult, R: std_hash::BuildHasher, - SK: HashStable> + Ord + Clone, - F: Fn(&mut StableHashingContext<'a, 'gcx, 'tcx>, &K) -> SK, - W: StableHasherResult, { - let mut keys: Vec<_> = set.iter() - .map(|k| extract_stable_key(hcx, k)) - .collect(); - keys.sort_unstable(); - keys.hash_stable(hcx, hasher); -} + { + let mut blanket_impls: AccumulateVec<[_; 8]> = blanket_impls + .iter() + .map(|&def_id| hcx.def_path_hash(def_id)) + .collect(); -pub fn hash_stable_nodemap<'a, 'tcx, 'gcx, V, W>( - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, - hasher: &mut StableHasher, - map: &NodeMap) - where V: HashStable>, - W: StableHasherResult, -{ - hash_stable_hashmap(hcx, hasher, map, |hcx, node_id| { - hcx.tcx.hir.definitions().node_to_hir_id(*node_id).local_id - }); -} + if blanket_impls.len() > 1 { + blanket_impls.sort_unstable(); + } -pub fn hash_stable_itemlocalmap<'a, 'tcx, 'gcx, V, W>( - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, - hasher: &mut StableHasher, - map: &ItemLocalMap) - where V: HashStable>, - W: StableHasherResult, -{ - hash_stable_hashmap(hcx, hasher, map, |_, local_id| { - *local_id - }); -} + blanket_impls.hash_stable(hcx, hasher); + } + { + let mut keys: AccumulateVec<[_; 8]> = + non_blanket_impls.keys() + .map(|k| (k, k.map_def(|d| hcx.def_path_hash(d)))) + .collect(); + keys.sort_unstable_by(|&(_, ref k1), &(_, ref k2)| k1.cmp(k2)); + keys.len().hash_stable(hcx, hasher); + for (key, ref stable_key) in keys { + stable_key.hash_stable(hcx, hasher); + let mut impls : AccumulateVec<[_; 8]> = non_blanket_impls[key] + .iter() + .map(|&impl_id| hcx.def_path_hash(impl_id)) + .collect(); + + if impls.len() > 1 { + impls.sort_unstable(); + } -pub fn hash_stable_btreemap<'a, 'tcx, 'gcx, K, V, SK, F, W>( - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, - hasher: &mut 
StableHasher, - map: &BTreeMap, - extract_stable_key: F) - where K: Eq + Ord, - V: HashStable>, - SK: HashStable> + Ord + Clone, - F: Fn(&mut StableHashingContext<'a, 'gcx, 'tcx>, &K) -> SK, - W: StableHasherResult, -{ - let mut keys: Vec<_> = map.keys() - .map(|k| (extract_stable_key(hcx, k), k)) - .collect(); - keys.sort_unstable_by_key(|&(ref stable_key, _)| stable_key.clone()); - keys.len().hash_stable(hcx, hasher); - for (stable_key, key) in keys { - stable_key.hash_stable(hcx, hasher); - map[key].hash_stable(hcx, hasher); + impls.hash_stable(hcx, hasher); + } } } + diff --git a/src/librustc/ich/impls_cstore.rs b/src/librustc/ich/impls_cstore.rs index e95dbdd15c..18a02ff5c5 100644 --- a/src/librustc/ich/impls_cstore.rs +++ b/src/librustc/ich/impls_cstore.rs @@ -11,6 +11,8 @@ //! This module contains `HashStable` implementations for various data types //! from rustc::middle::cstore in no particular order. +use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult}; + use middle; impl_stable_hash_for!(enum middle::cstore::DepKind { @@ -38,3 +40,42 @@ impl_stable_hash_for!(enum middle::cstore::LinkagePreference { RequireDynamic, RequireStatic }); + +impl_stable_hash_for!(struct middle::cstore::ExternCrate { + def_id, + span, + direct, + path_len +}); + +impl_stable_hash_for!(struct middle::cstore::CrateSource { + dylib, + rlib, + rmeta +}); + +impl HashStable for middle::cstore::ExternBodyNestedBodies { + fn hash_stable(&self, + hcx: &mut HCX, + hasher: &mut StableHasher) { + let middle::cstore::ExternBodyNestedBodies { + nested_bodies: _, + fingerprint, + } = *self; + + fingerprint.hash_stable(hcx, hasher); + } +} + +impl<'a, HCX> HashStable for middle::cstore::ExternConstBody<'a> { + fn hash_stable(&self, + hcx: &mut HCX, + hasher: &mut StableHasher) { + let middle::cstore::ExternConstBody { + body: _, + fingerprint, + } = *self; + + fingerprint.hash_stable(hcx, hasher); + } +} diff --git a/src/librustc/ich/impls_hir.rs b/src/librustc/ich/impls_hir.rs index a791331878..c0fae8bf8b 100644 --- a/src/librustc/ich/impls_hir.rs +++ b/src/librustc/ich/impls_hir.rs @@ -12,45 +12,36 @@ //! types in no particular order. 
use hir; +use hir::map::DefPathHash; use hir::def_id::{DefId, CrateNum, CRATE_DEF_INDEX}; use ich::{StableHashingContext, NodeIdHashingMode}; +use rustc_data_structures::stable_hasher::{HashStable, ToStableHashKey, + StableHasher, StableHasherResult}; use std::mem; - use syntax::ast; -use rustc_data_structures::stable_hasher::{HashStable, StableHasher, - StableHasherResult}; - -impl<'a, 'gcx, 'tcx> HashStable> for DefId { +impl<'gcx> HashStable> for DefId { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { hcx.def_path_hash(*self).hash_stable(hcx, hasher); } } +impl<'gcx> ToStableHashKey> for DefId { + type KeyType = DefPathHash; -impl<'a, 'gcx, 'tcx> HashStable> for hir::HirId { #[inline] - fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, - hasher: &mut StableHasher) { - let hir::HirId { - owner, - local_id, - } = *self; - - hcx.def_path_hash(DefId::local(owner)).hash_stable(hcx, hasher); - local_id.hash_stable(hcx, hasher); + fn to_stable_hash_key(&self, hcx: &StableHashingContext<'gcx>) -> DefPathHash { + hcx.def_path_hash(*self) } } - -impl<'a, 'gcx, 'tcx> HashStable> for CrateNum { +impl<'gcx> HashStable> for CrateNum { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { hcx.def_path_hash(DefId { krate: *self, @@ -59,8 +50,30 @@ impl<'a, 'gcx, 'tcx> HashStable> for CrateN } } +impl<'gcx> ToStableHashKey> for CrateNum { + type KeyType = DefPathHash; + + #[inline] + fn to_stable_hash_key(&self, hcx: &StableHashingContext<'gcx>) -> DefPathHash { + let def_id = DefId { krate: *self, index: CRATE_DEF_INDEX }; + def_id.to_stable_hash_key(hcx) + } +} + impl_stable_hash_for!(tuple_struct hir::ItemLocalId { index }); +impl<'gcx> ToStableHashKey> +for hir::ItemLocalId { + type KeyType = hir::ItemLocalId; + + #[inline] + fn to_stable_hash_key(&self, + _: &StableHashingContext<'gcx>) + -> hir::ItemLocalId { + *self + } +} + // The following implementations of HashStable for ItemId, TraitItemId, and // ImplItemId deserve special attention. Normally we do not hash NodeIds within // the HIR, since they just signify a HIR nodes own path. But ItemId et al @@ -68,9 +81,9 @@ impl_stable_hash_for!(tuple_struct hir::ItemLocalId { index }); // want to pick up on a reference changing its target, so we hash the NodeIds // in "DefPath Mode". 
-impl<'a, 'gcx, 'tcx> HashStable> for hir::ItemId { +impl<'gcx> HashStable> for hir::ItemId { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let hir::ItemId { id @@ -82,9 +95,9 @@ impl<'a, 'gcx, 'tcx> HashStable> for hir::I } } -impl<'a, 'gcx, 'tcx> HashStable> for hir::TraitItemId { +impl<'gcx> HashStable> for hir::TraitItemId { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let hir::TraitItemId { node_id @@ -96,9 +109,9 @@ impl<'a, 'gcx, 'tcx> HashStable> for hir::T } } -impl<'a, 'gcx, 'tcx> HashStable> for hir::ImplItemId { +impl<'gcx> HashStable> for hir::ImplItemId { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let hir::ImplItemId { node_id @@ -110,6 +123,13 @@ impl<'a, 'gcx, 'tcx> HashStable> for hir::I } } +impl_stable_hash_for!(enum hir::LifetimeName { + Implicit, + Underscore, + Static, + Name(name) +}); + impl_stable_hash_for!(struct hir::Lifetime { id, span, @@ -130,13 +150,13 @@ impl_stable_hash_for!(struct hir::Path { impl_stable_hash_for!(struct hir::PathSegment { name, + infer_types, parameters }); impl_stable_hash_for!(struct hir::PathParameters { lifetimes, types, - infer_types, bindings, parenthesized }); @@ -157,7 +177,8 @@ impl_stable_hash_for!(struct hir::TyParam { bounds, default, span, - pure_wrt_drop + pure_wrt_drop, + synthetic }); impl_stable_hash_for!(struct hir::Generics { @@ -167,6 +188,10 @@ impl_stable_hash_for!(struct hir::Generics { span }); +impl_stable_hash_for!(enum hir::SyntheticTyParamKind { + ImplTrait +}); + impl_stable_hash_for!(struct hir::WhereClause { id, predicates @@ -218,40 +243,18 @@ impl_stable_hash_for!(struct hir::TypeBinding { span }); -impl<'a, 'gcx, 'tcx> HashStable> for hir::Ty { +impl<'gcx> HashStable> for hir::Ty { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { - let node_id_hashing_mode = match self.node { - hir::TySlice(..) | - hir::TyArray(..) | - hir::TyPtr(..) | - hir::TyRptr(..) | - hir::TyBareFn(..) | - hir::TyNever | - hir::TyTup(..) | - hir::TyTraitObject(..) | - hir::TyImplTrait(..) | - hir::TyTypeof(..) | - hir::TyErr | - hir::TyInfer => { - NodeIdHashingMode::Ignore - } - hir::TyPath(..) => { - NodeIdHashingMode::HashTraitsInScope - } - }; - hcx.while_hashing_hir_bodies(true, |hcx| { let hir::Ty { - id, + id: _, + hir_id: _, ref node, ref span, } = *self; - hcx.with_node_id_hashing_mode(node_id_hashing_mode, |hcx| { - id.hash_stable(hcx, hasher); - }); node.hash_stable(hcx, hasher); span.hash_stable(hcx, hasher); }) @@ -271,7 +274,8 @@ impl_stable_hash_for!(struct hir::BareFnTy { unsafety, abi, lifetimes, - decl + decl, + arg_names }); impl_stable_hash_for!(enum hir::Ty_ { @@ -302,19 +306,17 @@ impl_stable_hash_for!(enum hir::FunctionRetTy { Return(t) }); -impl<'a, 'gcx, 'tcx> HashStable> for hir::TraitRef { +impl<'gcx> HashStable> for hir::TraitRef { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let hir::TraitRef { ref path, - ref_id, + // Don't hash the ref_id. 
It is tracked via the thing it is used to access + ref_id: _, } = *self; path.hash_stable(hcx, hasher); - hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashTraitsInScope, |hcx| { - ref_id.hash_stable(hcx, hasher); - }); } } @@ -341,14 +343,14 @@ impl_stable_hash_for!(struct hir::MacroDef { }); -impl<'a, 'gcx, 'tcx> HashStable> for hir::Block { +impl<'gcx> HashStable> for hir::Block { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let hir::Block { ref stmts, ref expr, - id, + id: _, hir_id: _, rules, span, @@ -383,45 +385,24 @@ impl<'a, 'gcx, 'tcx> HashStable> for hir::B } expr.hash_stable(hcx, hasher); - id.hash_stable(hcx, hasher); rules.hash_stable(hcx, hasher); span.hash_stable(hcx, hasher); targeted_by_break.hash_stable(hcx, hasher); } } -impl<'a, 'gcx, 'tcx> HashStable> for hir::Pat { +impl<'gcx> HashStable> for hir::Pat { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { - let node_id_hashing_mode = match self.node { - hir::PatKind::Wild | - hir::PatKind::Binding(..) | - hir::PatKind::Tuple(..) | - hir::PatKind::Box(..) | - hir::PatKind::Ref(..) | - hir::PatKind::Lit(..) | - hir::PatKind::Range(..) | - hir::PatKind::Slice(..) => { - NodeIdHashingMode::Ignore - } - hir::PatKind::Path(..) | - hir::PatKind::Struct(..) | - hir::PatKind::TupleStruct(..) => { - NodeIdHashingMode::HashTraitsInScope - } - }; - let hir::Pat { - id, + id: _, hir_id: _, ref node, ref span } = *self; - hcx.with_node_id_hashing_mode(node_id_hashing_mode, |hcx| { - id.hash_stable(hcx, hasher); - }); + node.hash_stable(hcx, hasher); span.hash_stable(hcx, hasher); } @@ -537,20 +518,20 @@ impl_stable_hash_for!(enum hir::UnsafeSource { UserProvided }); -impl<'a, 'gcx, 'tcx> HashStable> for hir::Expr { +impl<'gcx> HashStable> for hir::Expr { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { hcx.while_hashing_hir_bodies(true, |hcx| { let hir::Expr { - id, + id: _, hir_id: _, ref span, ref node, ref attrs } = *self; - let (spans_always_on, node_id_hashing_mode) = match *node { + let spans_always_on = match *node { hir::ExprBox(..) | hir::ExprArray(..) | hir::ExprCall(..) | @@ -569,40 +550,33 @@ impl<'a, 'gcx, 'tcx> HashStable> for hir::E hir::ExprBreak(..) | hir::ExprAgain(..) | hir::ExprRet(..) | + hir::ExprYield(..) | hir::ExprInlineAsm(..) | hir::ExprRepeat(..) | - hir::ExprTup(..) => { + hir::ExprTup(..) | + hir::ExprMethodCall(..) | + hir::ExprPath(..) | + hir::ExprStruct(..) | + hir::ExprField(..) => { // For these we only hash the span when debuginfo is on. - (false, NodeIdHashingMode::Ignore) + false } // For the following, spans might be significant because of // panic messages indicating the source location. hir::ExprBinary(op, ..) => { - (hcx.binop_can_panic_at_runtime(op.node), NodeIdHashingMode::Ignore) + hcx.binop_can_panic_at_runtime(op.node) } hir::ExprUnary(op, _) => { - (hcx.unop_can_panic_at_runtime(op), NodeIdHashingMode::Ignore) + hcx.unop_can_panic_at_runtime(op) } hir::ExprAssignOp(op, ..) => { - (hcx.binop_can_panic_at_runtime(op.node), NodeIdHashingMode::Ignore) + hcx.binop_can_panic_at_runtime(op.node) } hir::ExprIndex(..) => { - (true, NodeIdHashingMode::Ignore) - } - // For these we don't care about the span, but want to hash the - // trait in scope - hir::ExprMethodCall(..) | - hir::ExprPath(..) | - hir::ExprStruct(..) 
| - hir::ExprField(..) => { - (false, NodeIdHashingMode::HashTraitsInScope) + true } }; - hcx.with_node_id_hashing_mode(node_id_hashing_mode, |hcx| { - id.hash_stable(hcx, hasher); - }); - if spans_always_on { hcx.while_hashing_spans(true, |hcx| { span.hash_stable(hcx, hasher); @@ -633,7 +607,7 @@ impl_stable_hash_for!(enum hir::Expr_ { ExprWhile(cond, body, label), ExprLoop(body, label, loop_src), ExprMatch(matchee, arms, match_src), - ExprClosure(capture_clause, decl, body_id, span), + ExprClosure(capture_clause, decl, body_id, span, gen), ExprBlock(blk), ExprAssign(lhs, rhs), ExprAssignOp(op, lhs, rhs), @@ -647,7 +621,8 @@ impl_stable_hash_for!(enum hir::Expr_ { ExprRet(val), ExprInlineAsm(asm, inputs, outputs), ExprStruct(path, fields, base), - ExprRepeat(val, times) + ExprRepeat(val, times), + ExprYield(val) }); impl_stable_hash_for!(enum hir::LocalSource { @@ -661,9 +636,9 @@ impl_stable_hash_for!(enum hir::LoopSource { ForLoop }); -impl<'a, 'gcx, 'tcx> HashStable> for hir::MatchSource { +impl<'gcx> HashStable> for hir::MatchSource { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { use hir::MatchSource; @@ -712,9 +687,9 @@ impl_stable_hash_for!(enum hir::ScopeTarget { Loop(loop_id_result) }); -impl<'a, 'gcx, 'tcx> HashStable> for ast::Ident { +impl<'gcx> HashStable> for ast::Ident { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let ast::Ident { ref name, @@ -725,12 +700,12 @@ impl<'a, 'gcx, 'tcx> HashStable> for ast::I } } -impl<'a, 'gcx, 'tcx> HashStable> for hir::TraitItem { +impl<'gcx> HashStable> for hir::TraitItem { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let hir::TraitItem { - id, + id: _, hir_id: _, name, ref attrs, @@ -739,7 +714,6 @@ impl<'a, 'gcx, 'tcx> HashStable> for hir::T } = *self; hcx.hash_hir_item_like(attrs, |hcx| { - id.hash_stable(hcx, hasher); name.hash_stable(hcx, hasher); attrs.hash_stable(hcx, hasher); node.hash_stable(hcx, hasher); @@ -759,12 +733,12 @@ impl_stable_hash_for!(enum hir::TraitItemKind { Type(bounds, rhs) }); -impl<'a, 'gcx, 'tcx> HashStable> for hir::ImplItem { +impl<'gcx> HashStable> for hir::ImplItem { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let hir::ImplItem { - id, + id: _, hir_id: _, name, ref vis, @@ -775,7 +749,6 @@ impl<'a, 'gcx, 'tcx> HashStable> for hir::I } = *self; hcx.hash_hir_item_like(attrs, |hcx| { - id.hash_stable(hcx, hasher); name.hash_stable(hcx, hasher); vis.hash_stable(hcx, hasher); defaultness.hash_stable(hcx, hasher); @@ -792,9 +765,9 @@ impl_stable_hash_for!(enum hir::ImplItemKind { Type(t) }); -impl<'a, 'gcx, 'tcx> HashStable> for hir::Visibility { +impl<'gcx> HashStable> for hir::Visibility { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -804,7 +777,7 @@ impl<'a, 'gcx, 'tcx> HashStable> for hir::V // No fields to hash. 
} hir::Visibility::Restricted { ref path, id } => { - hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashTraitsInScope, |hcx| { + hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { id.hash_stable(hcx, hasher); }); path.hash_stable(hcx, hasher); @@ -813,9 +786,9 @@ impl<'a, 'gcx, 'tcx> HashStable> for hir::V } } -impl<'a, 'gcx, 'tcx> HashStable> for hir::Defaultness { +impl<'gcx> HashStable> for hir::Defaultness { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -834,9 +807,9 @@ impl_stable_hash_for!(enum hir::ImplPolarity { Negative }); -impl<'a, 'gcx, 'tcx> HashStable> for hir::Mod { +impl<'gcx> HashStable> for hir::Mod { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let hir::Mod { inner, @@ -889,20 +862,17 @@ impl_stable_hash_for!(enum hir::VariantData { Unit(id) }); -impl<'a, 'gcx, 'tcx> HashStable> for hir::Item { +impl<'gcx> HashStable> for hir::Item { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { - let (node_id_hashing_mode, hash_spans) = match self.node { + let hash_spans = match self.node { hir::ItemStatic(..) | hir::ItemConst(..) | hir::ItemFn(..) => { - (NodeIdHashingMode::Ignore, hcx.hash_spans()) - } - hir::ItemUse(..) => { - (NodeIdHashingMode::HashTraitsInScope, false) + hcx.hash_spans() } - + hir::ItemUse(..) | hir::ItemExternCrate(..) | hir::ItemForeignMod(..) | hir::ItemGlobalAsm(..) | @@ -914,14 +884,14 @@ impl<'a, 'gcx, 'tcx> HashStable> for hir::I hir::ItemEnum(..) | hir::ItemStruct(..) | hir::ItemUnion(..) 
=> { - (NodeIdHashingMode::Ignore, false) + false } }; let hir::Item { name, ref attrs, - id, + id: _, hir_id: _, ref node, ref vis, @@ -930,9 +900,6 @@ impl<'a, 'gcx, 'tcx> HashStable> for hir::I hcx.hash_hir_item_like(attrs, |hcx| { hcx.while_hashing_spans(hash_spans, |hcx| { - hcx.with_node_id_hashing_mode(node_id_hashing_mode, |hcx| { - id.hash_stable(hcx, hasher); - }); name.hash_stable(hcx, hasher); attrs.hash_stable(hcx, hasher); node.hash_stable(hcx, hasher); @@ -978,10 +945,10 @@ impl_stable_hash_for!(struct hir::ImplItemRef { defaultness }); -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for hir::AssociatedItemKind { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -1022,18 +989,33 @@ impl_stable_hash_for!(struct hir::Arg { hir_id }); -impl_stable_hash_for!(struct hir::Body { - arguments, - value -}); - -impl<'a, 'gcx, 'tcx> HashStable> for hir::BodyId { +impl<'gcx> HashStable> for hir::Body { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { - if hcx.hash_bodies() { - hcx.tcx().hir.body(*self).hash_stable(hcx, hasher); - } + let hir::Body { + ref arguments, + ref value, + is_generator, + } = *self; + + hcx.with_node_id_hashing_mode(NodeIdHashingMode::Ignore, |hcx| { + arguments.hash_stable(hcx, hasher); + value.hash_stable(hcx, hasher); + is_generator.hash_stable(hcx, hasher); + }); + } +} + +impl<'gcx> ToStableHashKey> for hir::BodyId { + type KeyType = (DefPathHash, hir::ItemLocalId); + + #[inline] + fn to_stable_hash_key(&self, + hcx: &StableHashingContext<'gcx>) + -> (DefPathHash, hir::ItemLocalId) { + let hir::BodyId { node_id } = *self; + node_id.to_stable_hash_key(hcx) } } @@ -1043,9 +1025,9 @@ impl_stable_hash_for!(struct hir::InlineAsmOutput { is_indirect }); -impl<'a, 'gcx, 'tcx> HashStable> for hir::GlobalAsm { +impl<'gcx> HashStable> for hir::GlobalAsm { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let hir::GlobalAsm { asm, @@ -1056,9 +1038,9 @@ impl<'a, 'gcx, 'tcx> HashStable> for hir::G } } -impl<'a, 'gcx, 'tcx> HashStable> for hir::InlineAsm { +impl<'gcx> HashStable> for hir::InlineAsm { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let hir::InlineAsm { asm, @@ -1133,13 +1115,23 @@ impl_stable_hash_for!(enum hir::Constness { NotConst }); -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for hir::def_id::DefIndex { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { - DefId::local(*self).hash_stable(hcx, hasher); + hcx.local_def_path_hash(*self).hash_stable(hcx, hasher); + } +} + +impl<'gcx> ToStableHashKey> +for hir::def_id::DefIndex { + type KeyType = DefPathHash; + + #[inline] + fn to_stable_hash_key(&self, hcx: &StableHashingContext<'gcx>) -> DefPathHash { + hcx.local_def_path_hash(*self) } } @@ -1149,11 +1141,57 @@ impl_stable_hash_for!(struct hir::def::Export { span }); -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for ::middle::lang_items::LangItem { fn hash_stable(&self, - _: &mut StableHashingContext<'a, 'gcx, 'tcx>, + _: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { 
::std::hash::Hash::hash(self, hasher); } } + +impl_stable_hash_for!(struct ::middle::lang_items::LanguageItems { + items, + missing +}); + +impl<'gcx> HashStable> +for hir::TraitCandidate { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { + let hir::TraitCandidate { + def_id, + import_id, + } = *self; + + def_id.hash_stable(hcx, hasher); + import_id.hash_stable(hcx, hasher); + }); + } +} + +impl<'gcx> ToStableHashKey> for hir::TraitCandidate { + type KeyType = (DefPathHash, Option<(DefPathHash, hir::ItemLocalId)>); + + fn to_stable_hash_key(&self, + hcx: &StableHashingContext<'gcx>) + -> Self::KeyType { + let hir::TraitCandidate { + def_id, + import_id, + } = *self; + + let import_id = import_id.map(|node_id| hcx.node_to_hir_id(node_id)) + .map(|hir_id| (hcx.local_def_path_hash(hir_id.owner), + hir_id.local_id)); + (hcx.def_path_hash(def_id), import_id) + } +} + + +impl_stable_hash_for!(struct hir::Freevar { + def, + span +}); diff --git a/src/librustc/ich/impls_mir.rs b/src/librustc/ich/impls_mir.rs index faf579186e..6583f24421 100644 --- a/src/librustc/ich/impls_mir.rs +++ b/src/librustc/ich/impls_mir.rs @@ -17,7 +17,7 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult}; use std::mem; - +impl_stable_hash_for!(struct mir::GeneratorLayout<'tcx> { fields }); impl_stable_hash_for!(struct mir::SourceInfo { span, scope }); impl_stable_hash_for!(enum mir::Mutability { Mut, Not }); impl_stable_hash_for!(enum mir::BorrowKind { Shared, Unique, Mut }); @@ -27,16 +27,20 @@ impl_stable_hash_for!(struct mir::LocalDecl<'tcx> { ty, name, source_info, + internal, + lexical_scope, is_user_variable }); impl_stable_hash_for!(struct mir::UpvarDecl { debug_name, by_ref }); impl_stable_hash_for!(struct mir::BasicBlockData<'tcx> { statements, terminator, is_cleanup }); +impl_stable_hash_for!(struct mir::UnsafetyViolation { source_info, description, lint_node_id }); +impl_stable_hash_for!(struct mir::UnsafetyCheckResult { violations, unsafe_blocks }); -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for mir::Terminator<'gcx> { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let mir::Terminator { ref kind, @@ -54,9 +58,11 @@ for mir::Terminator<'gcx> { mir::TerminatorKind::SwitchInt { .. } | mir::TerminatorKind::Resume | mir::TerminatorKind::Return | + mir::TerminatorKind::GeneratorDrop | mir::TerminatorKind::Unreachable | mir::TerminatorKind::Drop { .. } | mir::TerminatorKind::DropAndReplace { .. } | + mir::TerminatorKind::Yield { .. } | mir::TerminatorKind::Call { .. 
} => false, }; @@ -72,62 +78,78 @@ for mir::Terminator<'gcx> { } } +impl<'gcx, T> HashStable> for mir::ClearOnDecode + where T: HashStable> +{ + #[inline] + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + mem::discriminant(self).hash_stable(hcx, hasher); + match *self { + mir::ClearOnDecode::Clear => {} + mir::ClearOnDecode::Set(ref value) => { + value.hash_stable(hcx, hasher); + } + } + } +} -impl<'a, 'gcx, 'tcx> HashStable> for mir::Local { +impl<'gcx> HashStable> for mir::Local { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { use rustc_data_structures::indexed_vec::Idx; self.index().hash_stable(hcx, hasher); } } -impl<'a, 'gcx, 'tcx> HashStable> for mir::BasicBlock { +impl<'gcx> HashStable> for mir::BasicBlock { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { use rustc_data_structures::indexed_vec::Idx; self.index().hash_stable(hcx, hasher); } } -impl<'a, 'gcx, 'tcx> HashStable> for mir::Field { +impl<'gcx> HashStable> for mir::Field { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { use rustc_data_structures::indexed_vec::Idx; self.index().hash_stable(hcx, hasher); } } -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for mir::VisibilityScope { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { use rustc_data_structures::indexed_vec::Idx; self.index().hash_stable(hcx, hasher); } } -impl<'a, 'gcx, 'tcx> HashStable> for mir::Promoted { +impl<'gcx> HashStable> for mir::Promoted { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { use rustc_data_structures::indexed_vec::Idx; self.index().hash_stable(hcx, hasher); } } -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for mir::TerminatorKind<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); @@ -146,6 +168,7 @@ for mir::TerminatorKind<'gcx> { } mir::TerminatorKind::Resume | mir::TerminatorKind::Return | + mir::TerminatorKind::GeneratorDrop | mir::TerminatorKind::Unreachable => {} mir::TerminatorKind::Drop { ref location, target, unwind } => { location.hash_stable(hcx, hasher); @@ -161,6 +184,13 @@ for mir::TerminatorKind<'gcx> { target.hash_stable(hcx, hasher); unwind.hash_stable(hcx, hasher); } + mir::TerminatorKind::Yield { ref value, + resume, + drop } => { + value.hash_stable(hcx, hasher); + resume.hash_stable(hcx, hasher); + drop.hash_stable(hcx, hasher); + } mir::TerminatorKind::Call { ref func, ref args, ref destination, @@ -185,10 +215,10 @@ for mir::TerminatorKind<'gcx> { } } -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for mir::AssertMessage<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); @@ -200,16 +230,18 @@ for mir::AssertMessage<'gcx> { mir::AssertMessage::Math(ref const_math_err) => { const_math_err.hash_stable(hcx, hasher); } + 
mir::AssertMessage::GeneratorResumedAfterReturn => (), + mir::AssertMessage::GeneratorResumedAfterPanic => (), } } } impl_stable_hash_for!(struct mir::Statement<'tcx> { source_info, kind }); -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for mir::StatementKind<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); @@ -226,8 +258,8 @@ for mir::StatementKind<'gcx> { mir::StatementKind::StorageDead(ref lvalue) => { lvalue.hash_stable(hcx, hasher); } - mir::StatementKind::EndRegion(ref extent) => { - extent.hash_stable(hcx, hasher); + mir::StatementKind::EndRegion(ref region_scope) => { + region_scope.hash_stable(hcx, hasher); } mir::StatementKind::Validate(ref op, ref lvalues) => { op.hash_stable(hcx, hasher); @@ -243,12 +275,12 @@ for mir::StatementKind<'gcx> { } } -impl<'a, 'gcx, 'tcx, T> HashStable> +impl<'gcx, T> HashStable> for mir::ValidationOperand<'gcx, T> - where T: HashStable> + where T: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { self.lval.hash_stable(hcx, hasher); @@ -258,11 +290,11 @@ impl<'a, 'gcx, 'tcx, T> HashStable> } } -impl_stable_hash_for!(enum mir::ValidationOp { Acquire, Release, Suspend(extent) }); +impl_stable_hash_for!(enum mir::ValidationOp { Acquire, Release, Suspend(region_scope) }); -impl<'a, 'gcx, 'tcx> HashStable> for mir::Lvalue<'gcx> { +impl<'gcx> HashStable> for mir::Lvalue<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -279,14 +311,14 @@ impl<'a, 'gcx, 'tcx> HashStable> for mir::L } } -impl<'a, 'gcx, 'tcx, B, V, T> HashStable> +impl<'gcx, B, V, T> HashStable> for mir::Projection<'gcx, B, V, T> - where B: HashStable>, - V: HashStable>, - T: HashStable> + where B: HashStable>, + V: HashStable>, + T: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let mir::Projection { ref base, @@ -298,13 +330,13 @@ for mir::Projection<'gcx, B, V, T> } } -impl<'a, 'gcx, 'tcx, V, T> HashStable> +impl<'gcx, V, T> HashStable> for mir::ProjectionElem<'gcx, V, T> - where V: HashStable>, - T: HashStable> + where V: HashStable>, + T: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -334,10 +366,30 @@ for mir::ProjectionElem<'gcx, V, T> } impl_stable_hash_for!(struct mir::VisibilityScopeData { span, parent_scope }); +impl_stable_hash_for!(struct mir::VisibilityScopeInfo { + lint_root, safety +}); -impl<'a, 'gcx, 'tcx> HashStable> for mir::Operand<'gcx> { +impl<'gcx> HashStable> for mir::Safety { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + mem::discriminant(self).hash_stable(hcx, hasher); + + match *self { + mir::Safety::Safe | + mir::Safety::BuiltinUnsafe | + mir::Safety::FnUnsafe => {} + mir::Safety::ExplicitUnsafe(node_id) => { + node_id.hash_stable(hcx, hasher); + } + } + } +} + +impl<'gcx> HashStable> for mir::Operand<'gcx> { + fn hash_stable(&self, + hcx: &mut 
StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); @@ -352,9 +404,9 @@ impl<'a, 'gcx, 'tcx> HashStable> for mir::O } } -impl<'a, 'gcx, 'tcx> HashStable> for mir::Rvalue<'gcx> { +impl<'gcx> HashStable> for mir::Rvalue<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); @@ -412,10 +464,10 @@ impl_stable_hash_for!(enum mir::CastKind { Unsize }); -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for mir::AggregateKind<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -433,6 +485,11 @@ for mir::AggregateKind<'gcx> { def_id.hash_stable(hcx, hasher); substs.hash_stable(hcx, hasher); } + mir::AggregateKind::Generator(def_id, ref substs, ref interior) => { + def_id.hash_stable(hcx, hasher); + substs.hash_stable(hcx, hasher); + interior.hash_stable(hcx, hasher); + } } } } @@ -469,16 +526,12 @@ impl_stable_hash_for!(enum mir::NullOp { impl_stable_hash_for!(struct mir::Constant<'tcx> { span, ty, literal }); -impl<'a, 'gcx, 'tcx> HashStable> for mir::Literal<'gcx> { +impl<'gcx> HashStable> for mir::Literal<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { - mir::Literal::Item { def_id, substs } => { - def_id.hash_stable(hcx, hasher); - substs.hash_stable(hcx, hasher); - } mir::Literal::Value { ref value } => { value.hash_stable(hcx, hasher); } diff --git a/src/librustc/ich/impls_misc.rs b/src/librustc/ich/impls_misc.rs new file mode 100644 index 0000000000..951315fb4a --- /dev/null +++ b/src/librustc/ich/impls_misc.rs @@ -0,0 +1,26 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This module contains `HashStable` implementations for various data types +//! that don't fit into any of the other impls_xxx modules. 
+ +impl_stable_hash_for!(enum ::session::search_paths::PathKind { + Native, + Crate, + Dependency, + Framework, + ExternFlag, + All +}); + +impl_stable_hash_for!(enum ::rustc_back::PanicStrategy { + Abort, + Unwind +}); diff --git a/src/librustc/ich/impls_syntax.rs b/src/librustc/ich/impls_syntax.rs index b827284271..799e790b85 100644 --- a/src/librustc/ich/impls_syntax.rs +++ b/src/librustc/ich/impls_syntax.rs @@ -18,35 +18,57 @@ use std::mem; use syntax::ast; use syntax::parse::token; +use syntax::symbol::InternedString; use syntax::tokenstream; -use syntax_pos::{Span, FileMap}; +use syntax_pos::FileMap; use hir::def_id::{DefId, CrateNum, CRATE_DEF_INDEX}; -use rustc_data_structures::stable_hasher::{HashStable, StableHasher, - StableHasherResult}; +use rustc_data_structures::stable_hasher::{HashStable, ToStableHashKey, + StableHasher, StableHasherResult}; use rustc_data_structures::accumulate_vec::AccumulateVec; -impl<'a, 'gcx, 'tcx> HashStable> -for ::syntax::symbol::InternedString { +impl<'gcx> HashStable> for InternedString { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let s: &str = &**self; s.hash_stable(hcx, hasher); } } -impl<'a, 'gcx, 'tcx> HashStable> for ast::Name { +impl<'gcx> ToStableHashKey> for InternedString { + type KeyType = InternedString; + + #[inline] + fn to_stable_hash_key(&self, + _: &StableHashingContext<'gcx>) + -> InternedString { + self.clone() + } +} + +impl<'gcx> HashStable> for ast::Name { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { self.as_str().hash_stable(hcx, hasher); } } +impl<'gcx> ToStableHashKey> for ast::Name { + type KeyType = InternedString; + + #[inline] + fn to_stable_hash_key(&self, + _: &StableHashingContext<'gcx>) + -> InternedString { + self.as_str() + } +} + impl_stable_hash_for!(enum ::syntax::ast::AsmDialect { Att, Intel @@ -81,12 +103,17 @@ impl_stable_hash_for!(enum ::syntax::abi::Abi { }); impl_stable_hash_for!(struct ::syntax::attr::Deprecation { since, note }); -impl_stable_hash_for!(struct ::syntax::attr::Stability { level, feature, rustc_depr }); +impl_stable_hash_for!(struct ::syntax::attr::Stability { + level, + feature, + rustc_depr, + rustc_const_unstable +}); -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for ::syntax::attr::StabilityLevel { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -102,6 +129,7 @@ for ::syntax::attr::StabilityLevel { } impl_stable_hash_for!(struct ::syntax::attr::RustcDeprecation { since, reason }); +impl_stable_hash_for!(struct ::syntax::attr::RustcConstUnstable { feature }); impl_stable_hash_for!(enum ::syntax::attr::IntType { @@ -137,10 +165,15 @@ impl_stable_hash_for!(struct ::syntax::ast::Lifetime { id, span, ident }); impl_stable_hash_for!(enum ::syntax::ast::StrStyle { Cooked, Raw(pounds) }); impl_stable_hash_for!(enum ::syntax::ast::AttrStyle { Outer, Inner }); -impl<'a, 'gcx, 'tcx> HashStable> for [ast::Attribute] { +impl<'gcx> HashStable> for [ast::Attribute] { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { + if self.len() == 0 { + self.len().hash_stable(hcx, hasher); + return + } + // Some attributes are always ignored 
during hashing. let filtered: AccumulateVec<[&ast::Attribute; 8]> = self .iter() @@ -157,9 +190,9 @@ impl<'a, 'gcx, 'tcx> HashStable> for [ast:: } } -impl<'a, 'gcx, 'tcx> HashStable> for ast::Attribute { +impl<'gcx> HashStable> for ast::Attribute { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { // Make sure that these have been filtered out. debug_assert!(self.name().map(|name| !hcx.is_ignored_attr(name)).unwrap_or(true)); @@ -186,16 +219,16 @@ impl<'a, 'gcx, 'tcx> HashStable> for ast::A } } -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for tokenstream::TokenTree { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { tokenstream::TokenTree::Token(span, ref token) => { span.hash_stable(hcx, hasher); - hash_token(token, hcx, hasher, span); + hash_token(token, hcx, hasher); } tokenstream::TokenTree::Delimited(span, ref delimited) => { span.hash_stable(hcx, hasher); @@ -208,10 +241,10 @@ for tokenstream::TokenTree { } } -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for tokenstream::TokenStream { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { for sub_tt in self.trees() { sub_tt.hash_stable(hcx, hasher); @@ -219,10 +252,9 @@ for tokenstream::TokenStream { } } -fn hash_token<'a, 'gcx, 'tcx, W: StableHasherResult>(token: &token::Token, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, - hasher: &mut StableHasher, - error_reporting_span: Span) { +fn hash_token<'gcx, W: StableHasherResult>(token: &token::Token, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { mem::discriminant(token).hash_stable(hcx, hasher); match *token { token::Token::Eq | @@ -240,6 +272,8 @@ fn hash_token<'a, 'gcx, 'tcx, W: StableHasherResult>(token: &token::Token, token::Token::Dot | token::Token::DotDot | token::Token::DotDotDot | + token::Token::DotDotEq | + token::Token::DotEq | token::Token::Comma | token::Token::Semi | token::Token::Colon | @@ -285,20 +319,8 @@ fn hash_token<'a, 'gcx, 'tcx, W: StableHasherResult>(token: &token::Token, token::Token::Ident(ident) | token::Token::Lifetime(ident) => ident.name.hash_stable(hcx, hasher), - token::Token::Interpolated(ref non_terminal) => { - // FIXME(mw): This could be implemented properly. It's just a - // lot of work, since we would need to hash the AST - // in a stable way, in addition to the HIR. - // Since this is hardly used anywhere, just emit a - // warning for now. 
- if hcx.tcx().sess.opts.debugging_opts.incremental.is_some() { - let msg = format!("Quasi-quoting might make incremental \ - compilation very inefficient: {:?}", - non_terminal); - hcx.tcx().sess.span_warn(error_reporting_span, &msg[..]); - } - - std_hash::Hash::hash(non_terminal, hasher); + token::Token::Interpolated(_) => { + bug!("interpolated tokens should not be present in the HIR") } token::Token::DocComment(val) | @@ -325,13 +347,14 @@ impl_stable_hash_for!(enum ::syntax::ast::MetaItemKind { NameValue(lit) }); -impl<'a, 'gcx, 'tcx> HashStable> for FileMap { +impl<'gcx> HashStable> for FileMap { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let FileMap { ref name, name_was_remapped, + unmapped_path: _, crate_of_origin, // Do not hash the source as it is not encoded src: _, diff --git a/src/librustc/ich/impls_ty.rs b/src/librustc/ich/impls_ty.rs index 6a22147fed..582c4e13a8 100644 --- a/src/librustc/ich/impls_ty.rs +++ b/src/librustc/ich/impls_ty.rs @@ -11,38 +11,39 @@ //! This module contains `HashStable` implementations for various data types //! from rustc::ty in no particular order. -use ich::StableHashingContext; -use rustc_data_structures::stable_hasher::{HashStable, StableHasher, - StableHasherResult}; +use ich::{StableHashingContext, NodeIdHashingMode}; +use rustc_data_structures::stable_hasher::{HashStable, ToStableHashKey, + StableHasher, StableHasherResult}; use std::hash as std_hash; use std::mem; -use syntax_pos::symbol::InternedString; +use middle::region; +use traits; use ty; -impl<'a, 'gcx, 'tcx, T> HashStable> +impl<'gcx, T> HashStable> for &'gcx ty::Slice - where T: HashStable> { + where T: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { (&self[..]).hash_stable(hcx, hasher); } } -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for ty::subst::Kind<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { self.as_type().hash_stable(hcx, hasher); self.as_region().hash_stable(hcx, hasher); } } -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for ty::RegionKind { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -60,13 +61,16 @@ for ty::RegionKind { def_id.hash_stable(hcx, hasher); name.hash_stable(hcx, hasher); } + ty::ReLateBound(db, ty::BrEnv) => { + db.depth.hash_stable(hcx, hasher); + } ty::ReEarlyBound(ty::EarlyBoundRegion { def_id, index, name }) => { def_id.hash_stable(hcx, hasher); index.hash_stable(hcx, hasher); name.hash_stable(hcx, hasher); } - ty::ReScope(code_extent) => { - code_extent.hash_stable(hcx, hasher); + ty::ReScope(scope) => { + scope.hash_stable(hcx, hasher); } ty::ReFree(ref free_region) => { free_region.hash_stable(hcx, hasher); @@ -80,10 +84,10 @@ for ty::RegionKind { } } -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for ty::adjustment::AutoBorrow<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -98,10 +102,10 @@ for ty::adjustment::AutoBorrow<'gcx> { } } -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> 
HashStable> for ty::adjustment::Adjust<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -123,19 +127,20 @@ for ty::adjustment::Adjust<'gcx> { impl_stable_hash_for!(struct ty::adjustment::Adjustment<'tcx> { kind, target }); impl_stable_hash_for!(struct ty::adjustment::OverloadedDeref<'tcx> { region, mutbl }); -impl_stable_hash_for!(struct ty::UpvarId { var_id, closure_expr_id }); impl_stable_hash_for!(struct ty::UpvarBorrow<'tcx> { kind, region }); +impl_stable_hash_for!(struct ty::UpvarId { var_id, closure_expr_id }); + impl_stable_hash_for!(enum ty::BorrowKind { ImmBorrow, UniqueImmBorrow, MutBorrow }); -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for ty::UpvarCapture<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -147,6 +152,11 @@ for ty::UpvarCapture<'gcx> { } } +impl_stable_hash_for!(struct ty::GenSig<'tcx> { + yield_ty, + return_ty +}); + impl_stable_hash_for!(struct ty::FnSig<'tcx> { inputs_and_output, variadic, @@ -154,11 +164,11 @@ impl_stable_hash_for!(struct ty::FnSig<'tcx> { abi }); -impl<'a, 'gcx, 'tcx, T> HashStable> for ty::Binder - where T: HashStable> +impl<'gcx, T> HashStable> for ty::Binder + where T: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let ty::Binder(ref inner) = *self; inner.hash_stable(hcx, hasher); @@ -178,13 +188,13 @@ impl_stable_hash_for!(struct ty::TraitPredicate<'tcx> { trait_ref }); impl_stable_hash_for!(tuple_struct ty::EquatePredicate<'tcx> { t1, t2 }); impl_stable_hash_for!(struct ty::SubtypePredicate<'tcx> { a_is_expected, a, b }); -impl<'a, 'gcx, 'tcx, A, B> HashStable> +impl<'gcx, A, B> HashStable> for ty::OutlivesPredicate - where A: HashStable>, - B: HashStable>, + where A: HashStable>, + B: HashStable>, { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let ty::OutlivesPredicate(ref a, ref b) = *self; a.hash_stable(hcx, hasher); @@ -196,9 +206,9 @@ impl_stable_hash_for!(struct ty::ProjectionPredicate<'tcx> { projection_ty, ty } impl_stable_hash_for!(struct ty::ProjectionTy<'tcx> { substs, item_def_id }); -impl<'a, 'gcx, 'tcx> HashStable> for ty::Predicate<'gcx> { +impl<'gcx> HashStable> for ty::Predicate<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -230,13 +240,17 @@ impl<'a, 'gcx, 'tcx> HashStable> for ty::Pr def_id.hash_stable(hcx, hasher); closure_kind.hash_stable(hcx, hasher); } + ty::Predicate::ConstEvaluatable(def_id, substs) => { + def_id.hash_stable(hcx, hasher); + substs.hash_stable(hcx, hasher); + } } } } -impl<'a, 'gcx, 'tcx> HashStable> for ty::AdtFlags { +impl<'gcx> HashStable> for ty::AdtFlags { fn hash_stable(&self, - _: &mut StableHashingContext<'a, 'gcx, 'tcx>, + _: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { std_hash::Hash::hash(self, hasher); } @@ -261,66 +275,128 @@ impl_stable_hash_for!(struct ty::FieldDef { vis }); -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for 
::middle::const_val::ConstVal<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { - use middle::const_val::ConstVal; + use middle::const_val::ConstVal::*; + use middle::const_val::ConstAggregate::*; mem::discriminant(self).hash_stable(hcx, hasher); match *self { - ConstVal::Float(ref value) => { + Integral(ref value) => { value.hash_stable(hcx, hasher); } - ConstVal::Integral(ref value) => { + Float(ref value) => { value.hash_stable(hcx, hasher); } - ConstVal::Str(ref value) => { + Str(ref value) => { value.hash_stable(hcx, hasher); } - ConstVal::ByteStr(ref value) => { + ByteStr(ref value) => { value.hash_stable(hcx, hasher); } - ConstVal::Bool(value) => { + Bool(value) => { value.hash_stable(hcx, hasher); } - ConstVal::Char(value) => { + Char(value) => { value.hash_stable(hcx, hasher); } - ConstVal::Variant(def_id) => { + Variant(def_id) => { def_id.hash_stable(hcx, hasher); } - ConstVal::Function(def_id, substs) => { + Function(def_id, substs) => { def_id.hash_stable(hcx, hasher); - substs.hash_stable(hcx, hasher); + hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { + substs.hash_stable(hcx, hasher); + }); } - ConstVal::Struct(ref name_value_map) => { - let mut values: Vec<(InternedString, &ConstVal)> = - name_value_map.iter() - .map(|(name, val)| (name.as_str(), val)) - .collect(); - + Aggregate(Struct(ref name_values)) => { + let mut values = name_values.to_vec(); values.sort_unstable_by_key(|&(ref name, _)| name.clone()); values.hash_stable(hcx, hasher); } - ConstVal::Tuple(ref value) => { + Aggregate(Tuple(ref value)) => { value.hash_stable(hcx, hasher); } - ConstVal::Array(ref value) => { + Aggregate(Array(ref value)) => { value.hash_stable(hcx, hasher); } - ConstVal::Repeat(ref value, times) => { + Aggregate(Repeat(ref value, times)) => { value.hash_stable(hcx, hasher); times.hash_stable(hcx, hasher); } + Unevaluated(def_id, substs) => { + def_id.hash_stable(hcx, hasher); + substs.hash_stable(hcx, hasher); + } + } + } +} + +impl_stable_hash_for!(struct ::middle::const_val::ByteArray<'tcx> { + data +}); + +impl_stable_hash_for!(struct ty::Const<'tcx> { + ty, + val +}); + +impl_stable_hash_for!(struct ::middle::const_val::ConstEvalErr<'tcx> { + span, + kind +}); + +impl<'gcx> HashStable> +for ::middle::const_val::ErrKind<'gcx> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + use middle::const_val::ErrKind::*; + + mem::discriminant(self).hash_stable(hcx, hasher); + + match *self { + CannotCast | + MissingStructField | + NonConstPath | + ExpectedConstTuple | + ExpectedConstStruct | + IndexedNonVec | + IndexNotUsize | + MiscBinaryOp | + MiscCatchAll | + IndexOpFeatureGated | + TypeckError => { + // nothing to do + } + UnimplementedConstVal(s) => { + s.hash_stable(hcx, hasher); + } + IndexOutOfBounds { len, index } => { + len.hash_stable(hcx, hasher); + index.hash_stable(hcx, hasher); + } + Math(ref const_math_err) => { + const_math_err.hash_stable(hcx, hasher); + } + LayoutError(ref layout_error) => { + layout_error.hash_stable(hcx, hasher); + } + ErroneousReferencedConstant(ref const_val) => { + const_val.hash_stable(hcx, hasher); + } } } } impl_stable_hash_for!(struct ty::ClosureSubsts<'tcx> { substs }); +impl_stable_hash_for!(struct ty::GeneratorInterior<'tcx> { witness }); + impl_stable_hash_for!(struct ty::GenericPredicates<'tcx> { parent, predicates @@ -337,9 +413,9 @@ impl_stable_hash_for!(enum 
ty::adjustment::CustomCoerceUnsized { Struct(index) }); -impl<'a, 'gcx, 'tcx> HashStable> for ty::Generics { +impl<'gcx> HashStable> for ty::Generics { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let ty::Generics { parent, @@ -365,16 +441,15 @@ impl<'a, 'gcx, 'tcx> HashStable> for ty::Ge } } -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for ty::RegionParameterDef { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let ty::RegionParameterDef { name, def_id, index, - issue_32330: _, pure_wrt_drop } = *self; @@ -391,16 +466,16 @@ impl_stable_hash_for!(struct ty::TypeParameterDef { index, has_default, object_lifetime_default, - pure_wrt_drop + pure_wrt_drop, + synthetic }); - -impl<'a, 'gcx, 'tcx, T> HashStable> +impl<'gcx, T> HashStable> for ::middle::resolve_lifetime::Set1 - where T: HashStable> + where T: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { use middle::resolve_lifetime::Set1; @@ -443,28 +518,15 @@ impl_stable_hash_for!(enum ty::cast::CastKind { FnPtrAddrCast }); -impl<'a, 'gcx, 'tcx> HashStable> -for ::middle::region::CodeExtent -{ - fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, - hasher: &mut StableHasher) { - use middle::region::CodeExtent; +impl_stable_hash_for!(struct ::middle::region::FirstStatementIndex { idx }); +impl_stable_hash_for!(struct ::middle::region::Scope { id, code }); - mem::discriminant(self).hash_stable(hcx, hasher); - match *self { - CodeExtent::Misc(node_id) | - CodeExtent::DestructionScope(node_id) => { - node_id.hash_stable(hcx, hasher); - } - CodeExtent::CallSiteScope(body_id) | - CodeExtent::ParameterScope(body_id) => { - body_id.hash_stable(hcx, hasher); - } - CodeExtent::Remainder(block_remainder) => { - block_remainder.hash_stable(hcx, hasher); - } - } +impl<'gcx> ToStableHashKey> for region::Scope { + type KeyType = region::Scope; + + #[inline] + fn to_stable_hash_key(&self, _: &StableHashingContext<'gcx>) -> region::Scope { + *self } } @@ -489,11 +551,11 @@ impl_stable_hash_for!(enum ty::BoundRegion { BrEnv }); -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for ty::TypeVariants<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { use ty::TypeVariants::*; @@ -502,6 +564,7 @@ for ty::TypeVariants<'gcx> TyBool | TyChar | TyStr | + TyError | TyNever => { // Nothing more to hash. } @@ -547,6 +610,12 @@ for ty::TypeVariants<'gcx> def_id.hash_stable(hcx, hasher); closure_substs.hash_stable(hcx, hasher); } + TyGenerator(def_id, closure_substs, interior) + => { + def_id.hash_stable(hcx, hasher); + closure_substs.hash_stable(hcx, hasher); + interior.hash_stable(hcx, hasher); + } TyTuple(inner_tys, from_diverging_type_var) => { inner_tys.hash_stable(hcx, hasher); from_diverging_type_var.hash_stable(hcx, hasher); @@ -561,10 +630,8 @@ for ty::TypeVariants<'gcx> TyParam(param_ty) => { param_ty.hash_stable(hcx, hasher); } - - TyError | TyInfer(..) 
=> { - bug!("ty::TypeVariants::hash_stable() - Unexpected variant.") + bug!("ty::TypeVariants::hash_stable() - Unexpected variant {:?}.", *self) } } } @@ -580,11 +647,11 @@ impl_stable_hash_for!(struct ty::TypeAndMut<'tcx> { mutbl }); -impl<'a, 'gcx, 'tcx> HashStable> +impl<'gcx> HashStable> for ty::ExistentialPredicate<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -612,33 +679,14 @@ impl_stable_hash_for!(struct ty::ExistentialProjection<'tcx> { ty }); -impl_stable_hash_for!(enum ty::fast_reject::SimplifiedType { - BoolSimplifiedType, - CharSimplifiedType, - IntSimplifiedType(int_ty), - UintSimplifiedType(int_ty), - FloatSimplifiedType(float_ty), - AdtSimplifiedType(def_id), - StrSimplifiedType, - ArraySimplifiedType, - PtrSimplifiedType, - NeverSimplifiedType, - TupleSimplifiedType(size), - TraitSimplifiedType(def_id), - ClosureSimplifiedType(def_id), - AnonSimplifiedType(def_id), - FunctionSimplifiedType(params), - ParameterSimplifiedType -}); - impl_stable_hash_for!(struct ty::Instance<'tcx> { def, substs }); -impl<'a, 'gcx, 'tcx> HashStable> for ty::InstanceDef<'gcx> { +impl<'gcx> HashStable> for ty::InstanceDef<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); @@ -672,3 +720,127 @@ impl<'a, 'gcx, 'tcx> HashStable> for ty::In } } +impl<'gcx> HashStable> for ty::TraitDef { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + let ty::TraitDef { + // We already have the def_path_hash below, no need to hash it twice + def_id: _, + unsafety, + paren_sugar, + has_default_impl, + def_path_hash, + } = *self; + + unsafety.hash_stable(hcx, hasher); + paren_sugar.hash_stable(hcx, hasher); + has_default_impl.hash_stable(hcx, hasher); + def_path_hash.hash_stable(hcx, hasher); + } +} + +impl_stable_hash_for!(struct ty::Destructor { + did +}); + +impl_stable_hash_for!(struct ty::DtorckConstraint<'tcx> { + outlives, + dtorck_types +}); + + +impl<'gcx> HashStable> for ty::CrateVariancesMap { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + let ty::CrateVariancesMap { + ref dependencies, + ref variances, + // This is just an irrelevant helper value. 
+ empty_variance: _, + } = *self; + + dependencies.hash_stable(hcx, hasher); + variances.hash_stable(hcx, hasher); + } +} + +impl_stable_hash_for!(struct ty::AssociatedItem { + def_id, + name, + kind, + vis, + defaultness, + container, + method_has_self_argument +}); + +impl_stable_hash_for!(enum ty::AssociatedKind { + Const, + Method, + Type +}); + +impl_stable_hash_for!(enum ty::AssociatedItemContainer { + TraitContainer(def_id), + ImplContainer(def_id) +}); + + +impl<'gcx, T> HashStable> +for ty::steal::Steal + where T: HashStable> +{ + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + self.borrow().hash_stable(hcx, hasher); + } +} + +impl_stable_hash_for!(struct ty::ParamEnv<'tcx> { + caller_bounds, + reveal +}); + +impl_stable_hash_for!(enum traits::Reveal { + UserFacing, + All +}); + +impl_stable_hash_for!(enum ::middle::privacy::AccessLevel { + Reachable, + Exported, + Public +}); + +impl<'gcx> HashStable> +for ::middle::privacy::AccessLevels { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { + let ::middle::privacy::AccessLevels { + ref map + } = *self; + + map.hash_stable(hcx, hasher); + }); + } +} + +impl_stable_hash_for!(struct ty::CrateInherentImpls { + inherent_impls +}); + +impl_stable_hash_for!(enum ::session::CompileIncomplete { + Stopped, + Errored(error_reported) +}); + +impl_stable_hash_for!(struct ::util::common::ErrorReported {}); + +impl_stable_hash_for!(tuple_struct ::middle::reachable::ReachableSet { + reachable_set +}); diff --git a/src/librustc/ich/mod.rs b/src/librustc/ich/mod.rs index dcf84be0ee..cd0749a686 100644 --- a/src/librustc/ich/mod.rs +++ b/src/librustc/ich/mod.rs @@ -12,9 +12,8 @@ pub use self::fingerprint::Fingerprint; pub use self::caching_codemap_view::CachingCodemapView; -pub use self::hcx::{StableHashingContext, NodeIdHashingMode, hash_stable_hashmap, - hash_stable_hashset, hash_stable_nodemap, - hash_stable_btreemap, hash_stable_itemlocalmap}; +pub use self::hcx::{StableHashingContext, NodeIdHashingMode, + hash_stable_trait_impls}; mod fingerprint; mod caching_codemap_view; mod hcx; @@ -23,6 +22,7 @@ mod impls_const_math; mod impls_cstore; mod impls_hir; mod impls_mir; +mod impls_misc; mod impls_ty; mod impls_syntax; diff --git a/src/librustc/infer/error_reporting/anon_anon_conflict.rs b/src/librustc/infer/error_reporting/anon_anon_conflict.rs deleted file mode 100644 index c80ce3c96f..0000000000 --- a/src/librustc/infer/error_reporting/anon_anon_conflict.rs +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Error Reporting for Anonymous Region Lifetime Errors -//! where both the regions are anonymous. 
-use hir; -use infer::InferCtxt; -use ty::{self, Region}; -use infer::region_inference::RegionResolutionError::*; -use infer::region_inference::RegionResolutionError; -use hir::map as hir_map; -use middle::resolve_lifetime as rl; -use hir::intravisit::{self, Visitor, NestedVisitorMap}; - -impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { - // This method prints the error message for lifetime errors when both the concerned regions - // are anonymous. - // Consider a case where we have - // fn foo(x: &mut Vec<&u8>, y: &u8) - // { x.push(y); }. - // The example gives - // fn foo(x: &mut Vec<&u8>, y: &u8) { - // --- --- these references are declared with different lifetimes... - // x.push(y); - // ^ ...but data from `y` flows into `x` here - // It has been extended for the case of structs too. - // Consider the example - // struct Ref<'a> { x: &'a u32 } - // fn foo(mut x: Vec, y: Ref) { - // --- --- these structs are declared with different lifetimes... - // x.push(y); - // ^ ...but data from `y` flows into `x` here - // } - // It will later be extended to trait objects. - pub fn try_report_anon_anon_conflict(&self, error: &RegionResolutionError<'tcx>) -> bool { - let (span, sub, sup) = match *error { - ConcreteFailure(ref origin, sub, sup) => (origin.span(), sub, sup), - _ => return false, // inapplicable - }; - - // Determine whether the sub and sup consist of both anonymous (elided) regions. - let anon_reg_sup = or_false!(self.is_suitable_anonymous_region(sup)); - - let anon_reg_sub = or_false!(self.is_suitable_anonymous_region(sub)); - let scope_def_id_sup = anon_reg_sup.def_id; - let bregion_sup = anon_reg_sup.boundregion; - let scope_def_id_sub = anon_reg_sub.def_id; - let bregion_sub = anon_reg_sub.boundregion; - - let ty_sup = or_false!(self.find_anon_type(sup, &bregion_sup)); - - let ty_sub = or_false!(self.find_anon_type(sub, &bregion_sub)); - - let (main_label, label1, label2) = if let (Some(sup_arg), Some(sub_arg)) = - (self.find_arg_with_anonymous_region(sup, sup), - self.find_arg_with_anonymous_region(sub, sub)) { - - let (anon_arg_sup, is_first_sup, anon_arg_sub, is_first_sub) = - (sup_arg.arg, sup_arg.is_first, sub_arg.arg, sub_arg.is_first); - if self.is_self_anon(is_first_sup, scope_def_id_sup) || - self.is_self_anon(is_first_sub, scope_def_id_sub) { - return false; - } - - if self.is_return_type_anon(scope_def_id_sup, bregion_sup) || - self.is_return_type_anon(scope_def_id_sub, bregion_sub) { - return false; - } - - if anon_arg_sup == anon_arg_sub { - (format!("this type was declared with multiple lifetimes..."), - format!(" with one lifetime"), - format!(" into the other")) - } else { - let span_label_var1 = if let Some(simple_name) = anon_arg_sup.pat.simple_name() { - format!(" from `{}`", simple_name) - } else { - format!("") - }; - - let span_label_var2 = if let Some(simple_name) = anon_arg_sub.pat.simple_name() { - format!(" into `{}`", simple_name) - } else { - format!("") - }; - - let span_label = - format!("these two types are declared with different lifetimes...",); - - (span_label, span_label_var1, span_label_var2) - } - } else { - return false; - }; - - struct_span_err!(self.tcx.sess, span, E0623, "lifetime mismatch") - .span_label(ty_sup.span, main_label) - .span_label(ty_sub.span, format!("")) - .span_label(span, format!("...but data{} flows{} here", label1, label2)) - .emit(); - return true; - } - - /// This function calls the `visit_ty` method for the parameters - /// corresponding to the anonymous regions. 
The `nested_visitor.found_type` - /// contains the anonymous type. - /// - /// # Arguments - /// region - the anonymous region corresponding to the anon_anon conflict - /// br - the bound region corresponding to the above region which is of type `BrAnon(_)` - /// - /// # Example - /// ``` - /// fn foo(x: &mut Vec<&u8>, y: &u8) - /// { x.push(y); } - /// ``` - /// The function returns the nested type corresponding to the anonymous region - /// for e.g. `&u8` and Vec<`&u8`. - pub fn find_anon_type(&self, region: Region<'tcx>, br: &ty::BoundRegion) -> Option<&hir::Ty> { - if let Some(anon_reg) = self.is_suitable_anonymous_region(region) { - let def_id = anon_reg.def_id; - if let Some(node_id) = self.tcx.hir.as_local_node_id(def_id) { - let ret_ty = self.tcx.type_of(def_id); - if let ty::TyFnDef(_, _) = ret_ty.sty { - let inputs: &[_] = - match self.tcx.hir.get(node_id) { - hir_map::NodeItem(&hir::Item { - node: hir::ItemFn(ref fndecl, ..), .. - }) => &fndecl.inputs, - hir_map::NodeTraitItem(&hir::TraitItem { - node: hir::TraitItemKind::Method(ref fndecl, ..), - .. - }) => &fndecl.decl.inputs, - hir_map::NodeImplItem(&hir::ImplItem { - node: hir::ImplItemKind::Method(ref fndecl, ..), - .. - }) => &fndecl.decl.inputs, - - _ => &[], - }; - - return inputs - .iter() - .filter_map(|arg| { - self.find_component_for_bound_region(&**arg, br) - }) - .next(); - } - } - } - None - } - - // This method creates a FindNestedTypeVisitor which returns the type corresponding - // to the anonymous region. - fn find_component_for_bound_region(&self, - arg: &'gcx hir::Ty, - br: &ty::BoundRegion) - -> Option<(&'gcx hir::Ty)> { - let mut nested_visitor = FindNestedTypeVisitor { - infcx: &self, - hir_map: &self.tcx.hir, - bound_region: *br, - found_type: None, - }; - nested_visitor.visit_ty(arg); - nested_visitor.found_type - } -} - -// The FindNestedTypeVisitor captures the corresponding `hir::Ty` of the -// anonymous region. The example above would lead to a conflict between -// the two anonymous lifetimes for &u8 in x and y respectively. This visitor -// would be invoked twice, once for each lifetime, and would -// walk the types like &mut Vec<&u8> and &u8 looking for the HIR -// where that lifetime appears. This allows us to highlight the -// specific part of the type in the error message. -struct FindNestedTypeVisitor<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { - infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, - hir_map: &'a hir::map::Map<'gcx>, - // The bound_region corresponding to the Refree(freeregion) - // associated with the anonymous region we are looking for. - bound_region: ty::BoundRegion, - // The type where the anonymous lifetime appears - // for e.g. Vec<`&u8`> and <`&u8`> - found_type: Option<&'gcx hir::Ty>, -} - -impl<'a, 'gcx, 'tcx> Visitor<'gcx> for FindNestedTypeVisitor<'a, 'gcx, 'tcx> { - fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'gcx> { - NestedVisitorMap::OnlyBodies(&self.hir_map) - } - - fn visit_ty(&mut self, arg: &'gcx hir::Ty) { - // Find the index of the anonymous region that was part of the - // error. We will then search the function parameters for a bound - // region at the right depth with the same index. 
- let br_index = match self.bound_region { - ty::BrAnon(index) => index, - _ => return, - }; - - match arg.node { - hir::TyRptr(ref lifetime, _) => { - match self.infcx.tcx.named_region_map.defs.get(&lifetime.id) { - // the lifetime of the TyRptr - Some(&rl::Region::LateBoundAnon(debruijn_index, anon_index)) => { - if debruijn_index.depth == 1 && anon_index == br_index { - self.found_type = Some(arg); - return; // we can stop visiting now - } - } - Some(&rl::Region::Static) | - Some(&rl::Region::EarlyBound(_, _)) | - Some(&rl::Region::LateBound(_, _)) | - Some(&rl::Region::Free(_, _)) | - None => { - debug!("no arg found"); - } - } - } - // Checks if it is of type `hir::TyPath` which corresponds to a struct. - hir::TyPath(_) => { - let subvisitor = &mut TyPathVisitor { - infcx: self.infcx, - found_it: false, - bound_region: self.bound_region, - hir_map: self.hir_map, - }; - intravisit::walk_ty(subvisitor, arg); // call walk_ty; as visit_ty is empty, - // this will visit only outermost type - if subvisitor.found_it { - self.found_type = Some(arg); - } - } - _ => {} - } - // walk the embedded contents: e.g., if we are visiting `Vec<&Foo>`, - // go on to visit `&Foo` - intravisit::walk_ty(self, arg); - } -} - -// The visitor captures the corresponding `hir::Ty` of the anonymous region -// in the case of structs ie. `hir::TyPath`. -// This visitor would be invoked for each lifetime corresponding to a struct, -// and would walk the types like Vec in the above example and Ref looking for the HIR -// where that lifetime appears. This allows us to highlight the -// specific part of the type in the error message. -struct TyPathVisitor<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { - infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, - hir_map: &'a hir::map::Map<'gcx>, - found_it: bool, - bound_region: ty::BoundRegion, -} - -impl<'a, 'gcx, 'tcx> Visitor<'gcx> for TyPathVisitor<'a, 'gcx, 'tcx> { - fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'gcx> { - NestedVisitorMap::OnlyBodies(&self.hir_map) - } - - fn visit_lifetime(&mut self, lifetime: &hir::Lifetime) { - let br_index = match self.bound_region { - ty::BrAnon(index) => index, - _ => return, - }; - - match self.infcx.tcx.named_region_map.defs.get(&lifetime.id) { - // the lifetime of the TyPath! - Some(&rl::Region::LateBoundAnon(debruijn_index, anon_index)) => { - if debruijn_index.depth == 1 && anon_index == br_index { - self.found_it = true; - } - } - Some(&rl::Region::Static) | - Some(&rl::Region::EarlyBound(_, _)) | - Some(&rl::Region::LateBound(_, _)) | - Some(&rl::Region::Free(_, _)) | - None => { - debug!("no arg found"); - } - } - } - - fn visit_ty(&mut self, arg: &'gcx hir::Ty) { - // ignore nested types - // - // If you have a type like `Foo<'a, &Ty>` we - // are only interested in the immediate lifetimes ('a). - // - // Making `visit_ty` empty will ignore the `&Ty` embedded - // inside, it will get reached by the outer visitor. - debug!("`Ty` corresponding to a struct is {:?}", arg); - } -} diff --git a/src/librustc/infer/error_reporting/different_lifetimes.rs b/src/librustc/infer/error_reporting/different_lifetimes.rs new file mode 100644 index 0000000000..ee30db2625 --- /dev/null +++ b/src/librustc/infer/error_reporting/different_lifetimes.rs @@ -0,0 +1,394 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +//! Error Reporting for Anonymous Region Lifetime Errors +//! where both the regions are anonymous. +use hir; +use infer::InferCtxt; +use ty::{self, Region}; +use infer::region_inference::RegionResolutionError::*; +use infer::region_inference::RegionResolutionError; +use hir::map as hir_map; +use middle::resolve_lifetime as rl; +use hir::intravisit::{self, Visitor, NestedVisitorMap}; +use infer::error_reporting::util::AnonymousArgInfo; + +impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { + // This method prints the error message for lifetime errors when both the concerned regions + // are anonymous. + // Consider a case where we have + // fn foo(x: &mut Vec<&u8>, y: &u8) + // { x.push(y); }. + // The example gives + // fn foo(x: &mut Vec<&u8>, y: &u8) { + // --- --- these references are declared with different lifetimes... + // x.push(y); + // ^ ...but data from `y` flows into `x` here + // It has been extended for the case of structs too. + // Consider the example + // struct Ref<'a> { x: &'a u32 } + // fn foo(mut x: Vec, y: Ref) { + // --- --- these structs are declared with different lifetimes... + // x.push(y); + // ^ ...but data from `y` flows into `x` here + // } + // It will later be extended to trait objects. + pub fn try_report_anon_anon_conflict(&self, error: &RegionResolutionError<'tcx>) -> bool { + let (span, sub, sup) = match *error { + ConcreteFailure(ref origin, sub, sup) => (origin.span(), sub, sup), + _ => return false, // inapplicable + }; + + // Determine whether the sub and sup consist of both anonymous (elided) regions. + let anon_reg_sup = or_false!(self.is_suitable_region(sup)); + + let anon_reg_sub = or_false!(self.is_suitable_region(sub)); + let scope_def_id_sup = anon_reg_sup.def_id; + let bregion_sup = anon_reg_sup.boundregion; + let scope_def_id_sub = anon_reg_sub.def_id; + let bregion_sub = anon_reg_sub.boundregion; + + let ty_sup = or_false!(self.find_anon_type(sup, &bregion_sup)); + + let ty_sub = or_false!(self.find_anon_type(sub, &bregion_sub)); + + debug!("try_report_anon_anon_conflict: found_arg1={:?} sup={:?} br1={:?}", + ty_sub, + sup, + bregion_sup); + debug!("try_report_anon_anon_conflict: found_arg2={:?} sub={:?} br2={:?}", + ty_sup, + sub, + bregion_sub); + + let (ty_sup, ty_fndecl_sup) = ty_sup; + let (ty_sub, ty_fndecl_sub) = ty_sub; + + let AnonymousArgInfo { arg: anon_arg_sup, .. } = + or_false!(self.find_arg_with_region(sup, sup)); + let AnonymousArgInfo { arg: anon_arg_sub, .. 
} = + or_false!(self.find_arg_with_region(sub, sub)); + + let sup_is_ret_type = + self.is_return_type_anon(scope_def_id_sup, bregion_sup, ty_fndecl_sup); + let sub_is_ret_type = + self.is_return_type_anon(scope_def_id_sub, bregion_sub, ty_fndecl_sub); + + let span_label_var1 = if let Some(simple_name) = anon_arg_sup.pat.simple_name() { + format!(" from `{}`", simple_name) + } else { + format!("") + }; + + let span_label_var2 = if let Some(simple_name) = anon_arg_sub.pat.simple_name() { + format!(" into `{}`", simple_name) + } else { + format!("") + }; + + + let (span_1, span_2, main_label, span_label) = match (sup_is_ret_type, sub_is_ret_type) { + (None, None) => { + let (main_label_1, span_label_1) = if ty_sup == ty_sub { + + (format!("this type is declared with multiple lifetimes..."), + format!("...but data{} flows{} here", + format!(" with one lifetime"), + format!(" into the other"))) + } else { + (format!("these two types are declared with different lifetimes..."), + format!("...but data{} flows{} here", + span_label_var1, + span_label_var2)) + }; + (ty_sup.span, ty_sub.span, main_label_1, span_label_1) + } + + (Some(ret_span), _) => { + (ty_sub.span, + ret_span, + format!("this parameter and the return type are declared \ + with different lifetimes...",), + format!("...but data{} is returned here", span_label_var1)) + } + (_, Some(ret_span)) => { + (ty_sup.span, + ret_span, + format!("this parameter and the return type are declared \ + with different lifetimes...",), + format!("...but data{} is returned here", span_label_var1)) + } + }; + + + struct_span_err!(self.tcx.sess, span, E0623, "lifetime mismatch") + .span_label(span_1, main_label) + .span_label(span_2, format!("")) + .span_label(span, span_label) + .emit(); + return true; + } + + /// This function calls the `visit_ty` method for the parameters + /// corresponding to the anonymous regions. The `nested_visitor.found_type` + /// contains the anonymous type. + /// + /// # Arguments + /// region - the anonymous region corresponding to the anon_anon conflict + /// br - the bound region corresponding to the above region which is of type `BrAnon(_)` + /// + /// # Example + /// ``` + /// fn foo(x: &mut Vec<&u8>, y: &u8) + /// { x.push(y); } + /// ``` + /// The function returns the nested type corresponding to the anonymous region + /// for e.g. `&u8` and Vec<`&u8`. + pub fn find_anon_type(&self, + region: Region<'tcx>, + br: &ty::BoundRegion) + -> Option<(&hir::Ty, &hir::FnDecl)> { + if let Some(anon_reg) = self.is_suitable_region(region) { + let def_id = anon_reg.def_id; + if let Some(node_id) = self.tcx.hir.as_local_node_id(def_id) { + let fndecl = match self.tcx.hir.get(node_id) { + hir_map::NodeItem(&hir::Item { node: hir::ItemFn(ref fndecl, ..), .. }) => { + &fndecl + } + hir_map::NodeTraitItem(&hir::TraitItem { + node: hir::TraitItemKind::Method(ref m, ..), .. + }) | + hir_map::NodeImplItem(&hir::ImplItem { + node: hir::ImplItemKind::Method(ref m, ..), .. + }) => &m.decl, + _ => return None, + }; + + return fndecl + .inputs + .iter() + .filter_map(|arg| self.find_component_for_bound_region(arg, br)) + .next() + .map(|ty| (ty, &**fndecl)); + } + } + None + } + + // This method creates a FindNestedTypeVisitor which returns the type corresponding + // to the anonymous region. 
+ fn find_component_for_bound_region(&self, + arg: &'gcx hir::Ty, + br: &ty::BoundRegion) + -> Option<(&'gcx hir::Ty)> { + let mut nested_visitor = FindNestedTypeVisitor { + infcx: &self, + hir_map: &self.tcx.hir, + bound_region: *br, + found_type: None, + depth: 1, + }; + nested_visitor.visit_ty(arg); + nested_visitor.found_type + } +} + +// The FindNestedTypeVisitor captures the corresponding `hir::Ty` of the +// anonymous region. The example above would lead to a conflict between +// the two anonymous lifetimes for &u8 in x and y respectively. This visitor +// would be invoked twice, once for each lifetime, and would +// walk the types like &mut Vec<&u8> and &u8 looking for the HIR +// where that lifetime appears. This allows us to highlight the +// specific part of the type in the error message. +struct FindNestedTypeVisitor<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { + infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, + hir_map: &'a hir::map::Map<'gcx>, + // The bound_region corresponding to the Refree(freeregion) + // associated with the anonymous region we are looking for. + bound_region: ty::BoundRegion, + // The type where the anonymous lifetime appears + // for e.g. Vec<`&u8`> and <`&u8`> + found_type: Option<&'gcx hir::Ty>, + depth: u32, +} + +impl<'a, 'gcx, 'tcx> Visitor<'gcx> for FindNestedTypeVisitor<'a, 'gcx, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'gcx> { + NestedVisitorMap::OnlyBodies(&self.hir_map) + } + + fn visit_ty(&mut self, arg: &'gcx hir::Ty) { + match arg.node { + hir::TyBareFn(_) => { + self.depth += 1; + intravisit::walk_ty(self, arg); + self.depth -= 1; + return; + } + + hir::TyTraitObject(ref bounds, _) => { + for bound in bounds { + self.depth += 1; + self.visit_poly_trait_ref(bound, hir::TraitBoundModifier::None); + self.depth -= 1; + } + } + + hir::TyRptr(ref lifetime, _) => { + // the lifetime of the TyRptr + let hir_id = self.infcx.tcx.hir.node_to_hir_id(lifetime.id); + match (self.infcx.tcx.named_region(hir_id), self.bound_region) { + // Find the index of the anonymous region that was part of the + // error. We will then search the function parameters for a bound + // region at the right depth with the same index + (Some(rl::Region::LateBoundAnon(debruijn_index, anon_index)), + ty::BrAnon(br_index)) => { + debug!("LateBoundAnon depth = {:?} anon_index = {:?} br_index={:?}", + debruijn_index.depth, + anon_index, + br_index); + if debruijn_index.depth == self.depth && anon_index == br_index { + self.found_type = Some(arg); + return; // we can stop visiting now + } + } + + // Find the index of the named region that was part of the + // error. We will then search the function parameters for a bound + // region at the right depth with the same index + (Some(rl::Region::EarlyBound(_, id)), ty::BrNamed(def_id, _)) => { + debug!("EarlyBound self.infcx.tcx.hir.local_def_id(id)={:?} \ + def_id={:?}", id, def_id); + if id == def_id { + self.found_type = Some(arg); + return; // we can stop visiting now + } + } + + // Find the index of the named region that was part of the + // error. 
We will then search the function parameters for a bound + // region at the right depth with the same index + (Some(rl::Region::LateBound(debruijn_index, id)), ty::BrNamed(def_id, _)) => { + debug!("FindNestedTypeVisitor::visit_ty: LateBound depth = {:?}", + debruijn_index.depth); + debug!("self.infcx.tcx.hir.local_def_id(id)={:?}", id); + debug!("def_id={:?}", def_id); + if debruijn_index.depth == self.depth && id == def_id { + self.found_type = Some(arg); + return; // we can stop visiting now + } + } + + (Some(rl::Region::Static), _) | + (Some(rl::Region::Free(_, _)), _) | + (Some(rl::Region::EarlyBound(_, _)), _) | + (Some(rl::Region::LateBound(_, _)), _) | + (Some(rl::Region::LateBoundAnon(_, _)), _) | + (None, _) => { + debug!("no arg found"); + } + } + } + // Checks if it is of type `hir::TyPath` which corresponds to a struct. + hir::TyPath(_) => { + let subvisitor = &mut TyPathVisitor { + infcx: self.infcx, + found_it: false, + bound_region: self.bound_region, + hir_map: self.hir_map, + depth: self.depth, + }; + intravisit::walk_ty(subvisitor, arg); // call walk_ty; as visit_ty is empty, + // this will visit only outermost type + if subvisitor.found_it { + self.found_type = Some(arg); + } + } + _ => {} + } + // walk the embedded contents: e.g., if we are visiting `Vec<&Foo>`, + // go on to visit `&Foo` + intravisit::walk_ty(self, arg); + } +} + +// The visitor captures the corresponding `hir::Ty` of the anonymous region +// in the case of structs ie. `hir::TyPath`. +// This visitor would be invoked for each lifetime corresponding to a struct, +// and would walk the types like Vec in the above example and Ref looking for the HIR +// where that lifetime appears. This allows us to highlight the +// specific part of the type in the error message. +struct TyPathVisitor<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { + infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, + hir_map: &'a hir::map::Map<'gcx>, + found_it: bool, + bound_region: ty::BoundRegion, + depth: u32, +} + +impl<'a, 'gcx, 'tcx> Visitor<'gcx> for TyPathVisitor<'a, 'gcx, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'gcx> { + NestedVisitorMap::OnlyBodies(&self.hir_map) + } + + fn visit_lifetime(&mut self, lifetime: &hir::Lifetime) { + + let hir_id = self.infcx.tcx.hir.node_to_hir_id(lifetime.id); + match (self.infcx.tcx.named_region(hir_id), self.bound_region) { + // the lifetime of the TyPath! 
+ (Some(rl::Region::LateBoundAnon(debruijn_index, anon_index)), ty::BrAnon(br_index)) => { + if debruijn_index.depth == self.depth && anon_index == br_index { + self.found_it = true; + return; + } + } + + (Some(rl::Region::EarlyBound(_, id)), ty::BrNamed(def_id, _)) => { + debug!("EarlyBound self.infcx.tcx.hir.local_def_id(id)={:?} \ + def_id={:?}", id, def_id); + if id == def_id { + self.found_it = true; + return; // we can stop visiting now + } + } + + (Some(rl::Region::LateBound(debruijn_index, id)), ty::BrNamed(def_id, _)) => { + debug!("FindNestedTypeVisitor::visit_ty: LateBound depth = {:?}", + debruijn_index.depth); + debug!("id={:?}", id); + debug!("def_id={:?}", def_id); + if debruijn_index.depth == self.depth && id == def_id { + self.found_it = true; + return; // we can stop visiting now + } + } + + (Some(rl::Region::Static), _) | + (Some(rl::Region::EarlyBound(_, _)), _) | + (Some(rl::Region::LateBound(_, _)), _) | + (Some(rl::Region::LateBoundAnon(_, _)), _) | + (Some(rl::Region::Free(_, _)), _) | + (None, _) => { + debug!("no arg found"); + } + } + } + + fn visit_ty(&mut self, arg: &'gcx hir::Ty) { + // ignore nested types + // + // If you have a type like `Foo<'a, &Ty>` we + // are only interested in the immediate lifetimes ('a). + // + // Making `visit_ty` empty will ignore the `&Ty` embedded + // inside, it will get reached by the outer visitor. + debug!("`Ty` corresponding to a struct is {:?}", arg); + } +} diff --git a/src/librustc/infer/error_reporting/mod.rs b/src/librustc/infer/error_reporting/mod.rs index 68eb4639bc..895894a0bb 100644 --- a/src/librustc/infer/error_reporting/mod.rs +++ b/src/librustc/infer/error_reporting/mod.rs @@ -66,13 +66,14 @@ use hir::map as hir_map; use hir::def_id::DefId; use middle::region; use traits::{ObligationCause, ObligationCauseCode}; -use ty::{self, TyCtxt, TypeFoldable}; -use ty::{Region, Issue32330}; +use ty::{self, Region, Ty, TyCtxt, TypeFoldable}; use ty::error::TypeError; use syntax::ast::DUMMY_NODE_ID; use syntax_pos::{Pos, Span}; use errors::{DiagnosticBuilder, DiagnosticStyledString}; +use rustc_data_structures::indexed_vec::Idx; + mod note; mod need_type_info; @@ -80,10 +81,11 @@ mod need_type_info; mod named_anon_conflict; #[macro_use] mod util; -mod anon_anon_conflict; +mod different_lifetimes; impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn note_and_explain_region(self, + region_scope_tree: ®ion::ScopeTree, err: &mut DiagnosticBuilder, prefix: &str, region: ty::Region<'tcx>, @@ -119,7 +121,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { fn explain_span<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, heading: &str, span: Span) -> (String, Option) { - let lo = tcx.sess.codemap().lookup_char_pos_adj(span.lo); + let lo = tcx.sess.codemap().lookup_char_pos_adj(span.lo()); (format!("the {} at {}:{}", heading, lo.line, lo.col.to_usize() + 1), Some(span)) } @@ -131,14 +133,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { format!("{}unknown scope: {:?}{}. Please report a bug.", prefix, scope, suffix) }; - let span = match scope.span(&self.hir) { - Some(s) => s, - None => { - err.note(&unknown_scope()); - return; - } - }; - let tag = match self.hir.find(scope.node_id()) { + let span = scope.span(self, region_scope_tree); + let tag = match self.hir.find(scope.node_id(self, region_scope_tree)) { Some(hir_map::NodeBlock(_)) => "block", Some(hir_map::NodeExpr(expr)) => match expr.node { hir::ExprCall(..) 
=> "call", @@ -158,21 +154,21 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { return; } }; - let scope_decorated_tag = match scope { - region::CodeExtent::Misc(_) => tag, - region::CodeExtent::CallSiteScope(_) => { + let scope_decorated_tag = match scope.data() { + region::ScopeData::Node(_) => tag, + region::ScopeData::CallSite(_) => { "scope of call-site for function" } - region::CodeExtent::ParameterScope(_) => { + region::ScopeData::Arguments(_) => { "scope of function body" } - region::CodeExtent::DestructionScope(_) => { + region::ScopeData::Destruction(_) => { new_string = format!("destruction scope surrounding {}", tag); &new_string[..] } - region::CodeExtent::Remainder(r) => { + region::ScopeData::Remainder(r) => { new_string = format!("block suffix following statement {}", - r.first_statement_index); + r.first_statement_index.index()); &new_string[..] } }; @@ -261,8 +257,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { - - pub fn report_region_errors(&self, errors: &Vec>) { + pub fn report_region_errors(&self, + region_scope_tree: ®ion::ScopeTree, + errors: &Vec>) { debug!("report_region_errors(): {} errors to start", errors.len()); // try to pre-process the errors, which will group some of them @@ -286,16 +283,16 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // the error. If all of these fails, we fall back to a rather // general bit of code that displays the error information ConcreteFailure(origin, sub, sup) => { - - self.report_concrete_failure(origin, sub, sup).emit(); + self.report_concrete_failure(region_scope_tree, origin, sub, sup).emit(); } GenericBoundFailure(kind, param_ty, sub) => { - self.report_generic_bound_failure(kind, param_ty, sub); + self.report_generic_bound_failure(region_scope_tree, kind, param_ty, sub); } SubSupConflict(var_origin, sub_origin, sub_r, sup_origin, sup_r) => { - self.report_sub_sup_conflict(var_origin, + self.report_sub_sup_conflict(region_scope_tree, + var_origin, sub_origin, sub_r, sup_origin, @@ -338,11 +335,20 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { GenericBoundFailure(..) => true, }; - if errors.iter().all(|e| is_bound_failure(e)) { + + let mut errors = if errors.iter().all(|e| is_bound_failure(e)) { errors.clone() } else { errors.iter().filter(|&e| !is_bound_failure(e)).cloned().collect() - } + }; + + // sort the errors by span, for better error message stability. + errors.sort_by_key(|u| match *u { + ConcreteFailure(ref sro, _, _) => sro.span(), + GenericBoundFailure(ref sro, _, _) => sro.span(), + SubSupConflict(ref rvo, _, _, _, _) => rvo.span(), + }); + errors } /// Adds a note if the types come from similarly named crates @@ -362,7 +368,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // for imported and non-imported crates if exp_path == found_path || exp_abs_path == found_abs_path { - let crate_name = self.tcx.sess.cstore.crate_name(did1.krate); + let crate_name = self.tcx.crate_name(did1.krate); err.span_note(sp, &format!("Perhaps two different versions \ of crate `{}` are being used?", crate_name)); @@ -423,7 +429,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { name: String, sub: &ty::subst::Substs<'tcx>, pos: usize, - other_ty: &ty::Ty<'tcx>) { + other_ty: &Ty<'tcx>) { // `value` and `other_value` hold two incomplete type representation for display. // `name` is the path of both types being compared. 
`sub` value.push_highlighted(name); @@ -496,7 +502,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { path: String, sub: &ty::subst::Substs<'tcx>, other_path: String, - other_ty: &ty::Ty<'tcx>) -> Option<()> { + other_ty: &Ty<'tcx>) -> Option<()> { for (i, ta) in sub.types().enumerate() { if &ta == other_ty { self.highlight_outer(&mut t1_out, &mut t2_out, path, sub, i, &other_ty); @@ -527,7 +533,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { /// Compare two given types, eliding parts that are the same between them and highlighting /// relevant differences, and return two representation of those types for highlighted printing. - fn cmp(&self, t1: ty::Ty<'tcx>, t2: ty::Ty<'tcx>) + fn cmp(&self, t1: Ty<'tcx>, t2: Ty<'tcx>) -> (DiagnosticStyledString, DiagnosticStyledString) { match (&t1.sty, &t2.sty) { @@ -715,35 +721,6 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.tcx.note_and_explain_type_err(diag, terr, span); } - pub fn note_issue_32330(&self, - diag: &mut DiagnosticBuilder<'tcx>, - terr: &TypeError<'tcx>) - { - debug!("note_issue_32330: terr={:?}", terr); - match *terr { - TypeError::RegionsInsufficientlyPolymorphic(_, _, Some(box Issue32330 { - fn_def_id, region_name - })) | - TypeError::RegionsOverlyPolymorphic(_, _, Some(box Issue32330 { - fn_def_id, region_name - })) => { - diag.note( - &format!("lifetime parameter `{0}` declared on fn `{1}` \ - appears only in the return type, \ - but here is required to be higher-ranked, \ - which means that `{0}` must appear in both \ - argument and return types", - region_name, - self.tcx.item_path_str(fn_def_id))); - diag.note( - &format!("this error is the result of a recent bug fix; \ - for more information, see issue #33685 \ - ")); - } - _ => {} - } - } - pub fn report_and_explain_type_error(&self, trace: TypeTrace<'tcx>, terr: &TypeError<'tcx>) @@ -763,7 +740,6 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } }; self.note_type_err(&mut diag, &trace.cause, None, Some(trace.values), terr); - self.note_issue_32330(&mut diag, terr); diag } @@ -778,7 +754,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } fn expected_found_str_ty(&self, - exp_found: &ty::error::ExpectedFound>) + exp_found: &ty::error::ExpectedFound>) -> Option<(DiagnosticStyledString, DiagnosticStyledString)> { let exp_found = self.resolve_type_vars_if_possible(exp_found); if exp_found.references_error() { @@ -804,14 +780,49 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } fn report_generic_bound_failure(&self, + region_scope_tree: ®ion::ScopeTree, origin: SubregionOrigin<'tcx>, bound_kind: GenericKind<'tcx>, sub: Region<'tcx>) { - // FIXME: it would be better to report the first error message - // with the span of the parameter itself, rather than the span - // where the error was detected. But that span is not readily - // accessible. + // Attempt to obtain the span of the parameter so we can + // suggest adding an explicit lifetime bound to it. + let type_param_span = match (self.in_progress_tables, bound_kind) { + (Some(ref table), GenericKind::Param(ref param)) => { + let table = table.borrow(); + table.local_id_root.and_then(|did| { + let generics = self.tcx.generics_of(did); + // Account for the case where `did` corresponds to `Self`, which doesn't have + // the expected type argument. + if !param.is_self() { + let type_param = generics.type_param(param, self.tcx); + let hir = &self.tcx.hir; + hir.as_local_node_id(type_param.def_id).map(|id| { + // Get the `hir::TyParam` to verify wether it already has any bounds. 
+ // We do this to avoid suggesting code that ends up as `T: 'a'b`, + // instead we suggest `T: 'a + 'b` in that case. + let has_lifetimes = if let hir_map::NodeTyParam(ref p) = hir.get(id) { + p.bounds.len() > 0 + } else { + false + }; + let sp = hir.span(id); + // `sp` only covers `T`, change it so that it covers + // `T:` when appropriate + let sp = if has_lifetimes { + sp.to(sp.next_point().next_point()) + } else { + sp + }; + (sp, has_lifetimes) + }) + } else { + None + } + }) + } + _ => None, + }; let labeled_user_string = match bound_kind { GenericKind::Param(ref p) => @@ -833,6 +844,26 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { return; } + fn binding_suggestion<'tcx, S: fmt::Display>(err: &mut DiagnosticBuilder<'tcx>, + type_param_span: Option<(Span, bool)>, + bound_kind: GenericKind<'tcx>, + sub: S) { + let consider = &format!("consider adding an explicit lifetime bound `{}: {}`...", + bound_kind, + sub); + if let Some((sp, has_lifetimes)) = type_param_span { + let tail = if has_lifetimes { + " + " + } else { + "" + }; + let suggestion = format!("{}: {}{}", bound_kind, sub, tail); + err.span_suggestion_short(sp, consider, suggestion); + } else { + err.help(consider); + } + } + let mut err = match *sub { ty::ReEarlyBound(_) | ty::ReFree(ty::FreeRegion {bound_region: ty::BrNamed(..), ..}) => { @@ -842,9 +873,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { E0309, "{} may not live long enough", labeled_user_string); - err.help(&format!("consider adding an explicit lifetime bound `{}: {}`...", - bound_kind, - sub)); + binding_suggestion(&mut err, type_param_span, bound_kind, sub); err } @@ -855,9 +884,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { E0310, "{} may not live long enough", labeled_user_string); - err.help(&format!("consider adding an explicit lifetime \ - bound `{}: 'static`...", - bound_kind)); + binding_suggestion(&mut err, type_param_span, bound_kind, "'static"); err } @@ -871,6 +898,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { err.help(&format!("consider adding an explicit lifetime bound for `{}`", bound_kind)); self.tcx.note_and_explain_region( + region_scope_tree, &mut err, &format!("{} must be valid for ", labeled_user_string), sub, @@ -884,6 +912,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } fn report_sub_sup_conflict(&self, + region_scope_tree: ®ion::ScopeTree, var_origin: RegionVariableOrigin, sub_origin: SubregionOrigin<'tcx>, sub_region: Region<'tcx>, @@ -891,14 +920,14 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { sup_region: Region<'tcx>) { let mut err = self.report_inference_failure(var_origin); - self.tcx.note_and_explain_region(&mut err, + self.tcx.note_and_explain_region(region_scope_tree, &mut err, "first, the lifetime cannot outlive ", sup_region, "..."); self.note_region_origin(&mut err, &sup_origin); - self.tcx.note_and_explain_region(&mut err, + self.tcx.note_and_explain_region(region_scope_tree, &mut err, "but, the lifetime must be valid for ", sub_region, "..."); @@ -936,7 +965,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { format!(" for lifetime parameter {}in trait containing associated type `{}`", br_string(br), self.tcx.associated_item(def_id).name) } - infer::EarlyBoundRegion(_, name, _) => { + infer::EarlyBoundRegion(_, name) => { format!(" for lifetime parameter `{}`", name) } @@ -945,8 +974,9 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { name) } infer::UpvarRegion(ref upvar_id, _) => { - format!(" for capture of `{}` by closure", - 
self.tcx.local_var_name_str_def_index(upvar_id.var_id)) + let var_node_id = self.tcx.hir.hir_to_node_id(upvar_id.var_id); + let var_name = self.tcx.hir.name(var_node_id); + format!(" for capture of `{}` by closure", var_name) } }; diff --git a/src/librustc/infer/error_reporting/named_anon_conflict.rs b/src/librustc/infer/error_reporting/named_anon_conflict.rs index 0aae008396..80fb4ce8e0 100644 --- a/src/librustc/infer/error_reporting/named_anon_conflict.rs +++ b/src/librustc/infer/error_reporting/named_anon_conflict.rs @@ -13,6 +13,7 @@ use infer::InferCtxt; use infer::region_inference::RegionResolutionError::*; use infer::region_inference::RegionResolutionError; +use ty; impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // This method generates the error message for the case when @@ -24,57 +25,82 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { _ => return false, // inapplicable }; + debug!("try_report_named_anon_conflict(sub={:?}, sup={:?})", + sub, + sup); + // Determine whether the sub and sup consist of one named region ('a) // and one anonymous (elided) region. If so, find the parameter arg // where the anonymous region appears (there must always be one; we // only introduced anonymous regions in parameters) as well as a // version new_ty of its type where the anonymous region is replaced // with the named one.//scope_def_id - let (named, anon_arg_info, region_info) = - if sub.is_named_region() && self.is_suitable_anonymous_region(sup).is_some() { + let (named, anon, anon_arg_info, region_info) = + if self.is_named_region(sub) && self.is_suitable_region(sup).is_some() && + self.find_arg_with_region(sup, sub).is_some() { (sub, - self.find_arg_with_anonymous_region(sup, sub).unwrap(), - self.is_suitable_anonymous_region(sup).unwrap()) - } else if sup.is_named_region() && self.is_suitable_anonymous_region(sub).is_some() { + sup, + self.find_arg_with_region(sup, sub).unwrap(), + self.is_suitable_region(sup).unwrap()) + } else if self.is_named_region(sup) && self.is_suitable_region(sub).is_some() && + self.find_arg_with_region(sub, sup).is_some() { (sup, - self.find_arg_with_anonymous_region(sub, sup).unwrap(), - self.is_suitable_anonymous_region(sub).unwrap()) + sub, + self.find_arg_with_region(sub, sup).unwrap(), + self.is_suitable_region(sub).unwrap()) } else { return false; // inapplicable }; + debug!("try_report_named_anon_conflict: named = {:?}", named); + debug!("try_report_named_anon_conflict: anon_arg_info = {:?}", + anon_arg_info); + debug!("try_report_named_anon_conflict: region_info = {:?}", + region_info); + let (arg, new_ty, br, is_first, scope_def_id, is_impl_item) = (anon_arg_info.arg, anon_arg_info.arg_ty, anon_arg_info.bound_region, anon_arg_info.is_first, region_info.def_id, region_info.is_impl_item); - if is_impl_item { - return false; + match br { + ty::BrAnon(_) => {} + _ => { + /* not an anonymous region */ + debug!("try_report_named_anon_conflict: not an anonymous region"); + return false; + } } - if self.is_return_type_anon(scope_def_id, br) || self.is_self_anon(is_first, scope_def_id) { + if is_impl_item { + debug!("try_report_named_anon_conflict: impl item, bail out"); return false; - } else { - - let (error_var, span_label_var) = if let Some(simple_name) = arg.pat.simple_name() { - (format!("the type of `{}`", simple_name), format!("the type of `{}`", simple_name)) - } else { - ("parameter type".to_owned(), "type".to_owned()) - }; + } - struct_span_err!(self.tcx.sess, - span, - E0621, - "explicit lifetime required in {}", - error_var) - 
.span_label(arg.pat.span, - format!("consider changing {} to `{}`", span_label_var, new_ty)) - .span_label(span, format!("lifetime `{}` required", named)) - .emit(); + if let Some((_, fndecl)) = self.find_anon_type(anon, &br) { + if self.is_return_type_anon(scope_def_id, br, fndecl).is_some() || + self.is_self_anon(is_first, scope_def_id) { + return false; + } + } + let (error_var, span_label_var) = if let Some(simple_name) = arg.pat.simple_name() { + (format!("the type of `{}`", simple_name), format!("the type of `{}`", simple_name)) + } else { + ("parameter type".to_owned(), "type".to_owned()) + }; - } + struct_span_err!(self.tcx.sess, + span, + E0621, + "explicit lifetime required in {}", + error_var) + .span_label(arg.pat.span, + format!("consider changing {} to `{}`", span_label_var, new_ty)) + .span_label(span, format!("lifetime `{}` required", named)) + .emit(); return true; + } } diff --git a/src/librustc/infer/error_reporting/note.rs b/src/librustc/infer/error_reporting/note.rs index 87047d0df1..1f0fd7b01d 100644 --- a/src/librustc/infer/error_reporting/note.rs +++ b/src/librustc/infer/error_reporting/note.rs @@ -9,6 +9,7 @@ // except according to those terms. use infer::{self, InferCtxt, SubregionOrigin}; +use middle::region; use ty::{self, Region}; use ty::error::TypeError; use errors::DiagnosticBuilder; @@ -42,10 +43,10 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { "...so that reference does not outlive borrowed content"); } infer::ReborrowUpvar(span, ref upvar_id) => { + let var_node_id = self.tcx.hir.hir_to_node_id(upvar_id.var_id); + let var_name = self.tcx.hir.name(var_node_id); err.span_note(span, - &format!("...so that closure can access `{}`", - self.tcx - .local_var_name_str_def_index(upvar_id.var_id))); + &format!("...so that closure can access `{}`", var_name)); } infer::InfStackClosure(span) => { err.span_note(span, "...so that closure does not outlive its stack frame"); @@ -62,7 +63,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { err.span_note(span, &format!("...so that captured variable `{}` does not outlive the \ enclosing closure", - self.tcx.local_var_name_str(id))); + self.tcx.hir.name(id))); } infer::IndexSlice(span) => { err.span_note(span, "...so that slice is not indexed outside the lifetime"); @@ -144,6 +145,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } pub(super) fn report_concrete_failure(&self, + region_scope_tree: ®ion::ScopeTree, origin: SubregionOrigin<'tcx>, sub: Region<'tcx>, sup: Region<'tcx>) @@ -151,7 +153,11 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { match origin { infer::Subtype(trace) => { let terr = TypeError::RegionsDoesNotOutlive(sup, sub); - self.report_and_explain_type_error(trace, &terr) + let mut err = self.report_and_explain_type_error(trace, &terr); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, "", sup, "..."); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "...does not necessarily outlive ", sub, ""); + err } infer::Reborrow(span) => { let mut err = struct_span_err!(self.tcx.sess, @@ -159,45 +165,45 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { E0312, "lifetime of reference outlives lifetime of \ borrowed content..."); - self.tcx.note_and_explain_region(&mut err, + self.tcx.note_and_explain_region(region_scope_tree, &mut err, "...the reference is valid for ", sub, "..."); - self.tcx.note_and_explain_region(&mut err, + self.tcx.note_and_explain_region(region_scope_tree, &mut err, "...but the borrowed content is only valid for ", sup, ""); err } 
infer::ReborrowUpvar(span, ref upvar_id) => { + let var_node_id = self.tcx.hir.hir_to_node_id(upvar_id.var_id); + let var_name = self.tcx.hir.name(var_node_id); let mut err = struct_span_err!(self.tcx.sess, span, E0313, "lifetime of borrowed pointer outlives lifetime \ of captured variable `{}`...", - self.tcx - .local_var_name_str_def_index(upvar_id.var_id)); - self.tcx.note_and_explain_region(&mut err, + var_name); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, "...the borrowed pointer is valid for ", sub, "..."); - self.tcx - .note_and_explain_region( - &mut err, - &format!("...but `{}` is only valid for ", - self.tcx.local_var_name_str_def_index(upvar_id.var_id)), - sup, - ""); + self.tcx.note_and_explain_region( + region_scope_tree, + &mut err, + &format!("...but `{}` is only valid for ", var_name), + sup, + ""); err } infer::InfStackClosure(span) => { let mut err = struct_span_err!(self.tcx.sess, span, E0314, "closure outlives stack frame"); - self.tcx.note_and_explain_region(&mut err, + self.tcx.note_and_explain_region(region_scope_tree, &mut err, "...the closure must be valid for ", sub, "..."); - self.tcx.note_and_explain_region(&mut err, + self.tcx.note_and_explain_region(region_scope_tree, &mut err, "...but the closure's stack frame is only valid \ for ", sup, @@ -209,8 +215,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { span, E0315, "cannot invoke closure outside of its lifetime"); - self.tcx - .note_and_explain_region(&mut err, "the closure is only valid for ", sup, ""); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "the closure is only valid for ", sup, ""); err } infer::DerefPointer(span) => { @@ -218,8 +224,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { span, E0473, "dereference of reference outside its lifetime"); - self.tcx - .note_and_explain_region(&mut err, "the reference is only valid for ", sup, ""); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "the reference is only valid for ", sup, ""); err } infer::FreeVariable(span, id) => { @@ -228,10 +234,11 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { E0474, "captured variable `{}` does not outlive the \ enclosing closure", - self.tcx.local_var_name_str(id)); - self.tcx - .note_and_explain_region(&mut err, "captured variable is valid for ", sup, ""); - self.tcx.note_and_explain_region(&mut err, "closure is valid for ", sub, ""); + self.tcx.hir.name(id)); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "captured variable is valid for ", sup, ""); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "closure is valid for ", sub, ""); err } infer::IndexSlice(span) => { @@ -239,7 +246,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { span, E0475, "index of slice outside its lifetime"); - self.tcx.note_and_explain_region(&mut err, "the slice is only valid for ", sup, ""); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "the slice is only valid for ", sup, ""); err } infer::RelateObjectBound(span) => { @@ -248,8 +256,9 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { E0476, "lifetime of the source pointer does not outlive \ lifetime bound of the object type"); - self.tcx.note_and_explain_region(&mut err, "object type is valid for ", sub, ""); - self.tcx.note_and_explain_region(&mut err, + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "object type is valid for ", sub, ""); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, "source pointer is only valid for ", sup, ""); 
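For context, a minimal illustrative user program (not part of this patch, and only a sketch) of the kind that exercises the region notes being reworked in the hunks above: a borrow that outlives the data it refers to under the pre-NLL lexical region rules of this era.

// Illustrative only: `r` borrows `x` but is still used after `x` goes out of scope.
fn main() {
    let r;
    {
        let x = 5;
        r = &x;            // error: `x` does not live long enough
    }
    println!("r: {}", r);
}

Compilers of roughly this vintage explain such failures with notes built by `note_and_explain_region`, along the lines of "borrowed value must be valid for the block suffix following statement 0 ...", i.e. the `ScopeData::Remainder` wording constructed in an earlier hunk; the changes here thread the `region::ScopeTree` through so those notes can still be produced.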
@@ -264,10 +273,12 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.ty_to_string(ty)); match *sub { ty::ReStatic => { - self.tcx.note_and_explain_region(&mut err, "type must satisfy ", sub, "") + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "type must satisfy ", sub, "") } _ => { - self.tcx.note_and_explain_region(&mut err, "type must outlive ", sub, "") + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "type must outlive ", sub, "") } } err @@ -275,11 +286,11 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { infer::RelateRegionParamBound(span) => { let mut err = struct_span_err!(self.tcx.sess, span, E0478, "lifetime bound not satisfied"); - self.tcx.note_and_explain_region(&mut err, + self.tcx.note_and_explain_region(region_scope_tree, &mut err, "lifetime parameter instantiated with ", sup, ""); - self.tcx.note_and_explain_region(&mut err, + self.tcx.note_and_explain_region(region_scope_tree, &mut err, "but lifetime parameter must outlive ", sub, ""); @@ -292,7 +303,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { "the type `{}` (provided as the value of a type \ parameter) is not valid at this point", self.ty_to_string(ty)); - self.tcx.note_and_explain_region(&mut err, "type must outlive ", sub, ""); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "type must outlive ", sub, ""); err } infer::CallRcvr(span) => { @@ -301,8 +313,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { E0480, "lifetime of method receiver does not outlive the \ method call"); - self.tcx - .note_and_explain_region(&mut err, "the receiver is only valid for ", sup, ""); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "the receiver is only valid for ", sup, ""); err } infer::CallArg(span) => { @@ -311,7 +323,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { E0481, "lifetime of function argument does not outlive \ the function call"); - self.tcx.note_and_explain_region(&mut err, + self.tcx.note_and_explain_region(region_scope_tree, &mut err, "the function argument is only valid for ", sup, ""); @@ -323,7 +335,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { E0482, "lifetime of return value does not outlive the \ function call"); - self.tcx.note_and_explain_region(&mut err, + self.tcx.note_and_explain_region(region_scope_tree, &mut err, "the return value is only valid for ", sup, ""); @@ -335,8 +347,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { E0483, "lifetime of operand does not outlive the \ operation"); - self.tcx - .note_and_explain_region(&mut err, "the operand is only valid for ", sup, ""); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "the operand is only valid for ", sup, ""); err } infer::AddrOf(span) => { @@ -344,8 +356,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { span, E0484, "reference is not valid at the time of borrow"); - self.tcx - .note_and_explain_region(&mut err, "the borrow is only valid for ", sup, ""); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "the borrow is only valid for ", sup, ""); err } infer::AutoBorrow(span) => { @@ -354,7 +366,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { E0485, "automatically reference is not valid at the time \ of borrow"); - self.tcx.note_and_explain_region(&mut err, + self.tcx.note_and_explain_region(region_scope_tree, &mut err, "the automatic borrow is only valid for ", sup, ""); @@ -367,7 +379,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { "type of expression contains references that are \ not 
valid during the expression: `{}`", self.ty_to_string(t)); - self.tcx.note_and_explain_region(&mut err, "type is only valid for ", sup, ""); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "type is only valid for ", sup, ""); err } infer::SafeDestructor(span) => { @@ -377,8 +390,10 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { "unsafe use of destructor: destructor might be \ called while references are dead"); // FIXME (22171): terms "super/subregion" are suboptimal - self.tcx.note_and_explain_region(&mut err, "superregion: ", sup, ""); - self.tcx.note_and_explain_region(&mut err, "subregion: ", sub, ""); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "superregion: ", sup, ""); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "subregion: ", sub, ""); err } infer::BindingTypeIsNotValidAtDecl(span) => { @@ -387,8 +402,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { E0488, "lifetime of variable does not enclose its \ declaration"); - self.tcx - .note_and_explain_region(&mut err, "the variable is only valid for ", sup, ""); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "the variable is only valid for ", sup, ""); err } infer::ParameterInScope(_, span) => { @@ -396,8 +411,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { span, E0489, "type/lifetime parameter not in scope here"); - self.tcx - .note_and_explain_region(&mut err, "the parameter is only valid for ", sub, ""); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "the parameter is only valid for ", sub, ""); err } infer::DataBorrowed(ty, span) => { @@ -406,8 +421,10 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { E0490, "a value of type `{}` is borrowed for too long", self.ty_to_string(ty)); - self.tcx.note_and_explain_region(&mut err, "the type is valid for ", sub, ""); - self.tcx.note_and_explain_region(&mut err, "but the borrow lasts for ", sup, ""); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "the type is valid for ", sub, ""); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "but the borrow lasts for ", sup, ""); err } infer::ReferenceOutlivesReferent(ty, span) => { @@ -417,8 +434,9 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { "in type `{}`, reference has a longer lifetime \ than the data it references", self.ty_to_string(ty)); - self.tcx.note_and_explain_region(&mut err, "the pointer is valid for ", sub, ""); - self.tcx.note_and_explain_region(&mut err, + self.tcx.note_and_explain_region(region_scope_tree, &mut err, + "the pointer is valid for ", sub, ""); + self.tcx.note_and_explain_region(region_scope_tree, &mut err, "but the referenced data is only valid for ", sup, ""); diff --git a/src/librustc/infer/error_reporting/util.rs b/src/librustc/infer/error_reporting/util.rs index 04153038da..47db3f1b79 100644 --- a/src/librustc/infer/error_reporting/util.rs +++ b/src/librustc/infer/error_reporting/util.rs @@ -12,26 +12,31 @@ //! anonymous regions. use hir; use infer::InferCtxt; -use ty::{self, Region}; +use ty::{self, Region, Ty}; use hir::def_id::DefId; use hir::map as hir_map; +use syntax_pos::Span; macro_rules! or_false { ($v:expr) => { - match $v { - Some(v) => v, - None => return false, - } + match $v { + Some(v) => v, + None => { + debug!("or_false failed: {}", stringify!($v)); + return false; + } + } } } // The struct contains the information about the anonymous region // we are searching for. 
+#[derive(Debug)] pub struct AnonymousArgInfo<'tcx> { // the argument corresponding to the anonymous region pub arg: &'tcx hir::Arg, // the type corresponding to the anonymopus region argument - pub arg_ty: ty::Ty<'tcx>, + pub arg_ty: Ty<'tcx>, // the ty::BoundRegion corresponding to the anonymous region pub bound_region: ty::BoundRegion, // corresponds to id the argument is the first parameter @@ -41,6 +46,7 @@ pub struct AnonymousArgInfo<'tcx> { // This struct contains information regarding the // Refree((FreeRegion) corresponding to lifetime conflict +#[derive(Debug)] pub struct FreeRegionInfo { // def id corresponding to FreeRegion pub def_id: DefId, @@ -62,47 +68,54 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // i32, which is the type of y but with the anonymous region replaced // with 'a, the corresponding bound region and is_first which is true if // the hir::Arg is the first argument in the function declaration. - pub fn find_arg_with_anonymous_region(&self, - anon_region: Region<'tcx>, - replace_region: Region<'tcx>) - -> Option { - - if let ty::ReFree(ref free_region) = *anon_region { - let id = free_region.scope; - let hir = &self.tcx.hir; - if let Some(node_id) = hir.as_local_node_id(id) { - if let Some(body_id) = hir.maybe_body_owned_by(node_id) { - let body = hir.body(body_id); - if let Some(tables) = self.in_progress_tables { - body.arguments - .iter() - .enumerate() - .filter_map(|(index, arg)| { - let ty = tables.borrow().node_id_to_type(arg.hir_id); - let mut found_anon_region = false; - let new_arg_ty = self.tcx - .fold_regions(&ty, &mut false, |r, _| if *r == *anon_region { - found_anon_region = true; - replace_region - } else { - r - }); - if found_anon_region { - let is_first = index == 0; - Some(AnonymousArgInfo { - arg: arg, - arg_ty: new_arg_ty, - bound_region: free_region.bound_region, - is_first: is_first, - }) + pub fn find_arg_with_region(&self, + anon_region: Region<'tcx>, + replace_region: Region<'tcx>) + -> Option { + + let (id, bound_region) = match *anon_region { + ty::ReFree(ref free_region) => (free_region.scope, free_region.bound_region), + ty::ReEarlyBound(ref ebr) => { + (self.tcx.parent_def_id(ebr.def_id).unwrap(), + ty::BoundRegion::BrNamed(ebr.def_id, ebr.name)) + } + _ => return None, // not a free region + }; + + let hir = &self.tcx.hir; + if let Some(node_id) = hir.as_local_node_id(id) { + if let Some(body_id) = hir.maybe_body_owned_by(node_id) { + let body = hir.body(body_id); + if let Some(tables) = self.in_progress_tables { + body.arguments + .iter() + .enumerate() + .filter_map(|(index, arg)| { + let ty = match tables.borrow().node_id_to_type_opt(arg.hir_id) { + Some(v) => v, + None => return None, // sometimes the tables are not yet populated + }; + let mut found_anon_region = false; + let new_arg_ty = self.tcx + .fold_regions(&ty, &mut false, |r, _| if *r == *anon_region { + found_anon_region = true; + replace_region } else { - None - } - }) - .next() - } else { - None - } + r + }); + if found_anon_region { + let is_first = index == 0; + Some(AnonymousArgInfo { + arg: arg, + arg_ty: new_arg_ty, + bound_region: bound_region, + is_first: is_first, + }) + } else { + None + } + }) + .next() } else { None } @@ -114,43 +127,48 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } } - // This method returns whether the given Region is Anonymous - // and returns the DefId and the BoundRegion corresponding to the given region. 
- pub fn is_suitable_anonymous_region(&self, region: Region<'tcx>) -> Option { - if let ty::ReFree(ref free_region) = *region { - if let ty::BrAnon(..) = free_region.bound_region { - let anonymous_region_binding_scope = free_region.scope; - let node_id = self.tcx - .hir - .as_local_node_id(anonymous_region_binding_scope) - .unwrap(); - let mut is_impl_item = false; - match self.tcx.hir.find(node_id) { - - Some(hir_map::NodeItem(..)) | - Some(hir_map::NodeTraitItem(..)) => { - // Success -- proceed to return Some below - } - Some(hir_map::NodeImplItem(..)) => { - is_impl_item = - self.is_bound_region_in_impl_item(anonymous_region_binding_scope); - } - _ => return None, - } - return Some(FreeRegionInfo { - def_id: anonymous_region_binding_scope, - boundregion: free_region.bound_region, - is_impl_item: is_impl_item, - }); + // This method returns the DefId and the BoundRegion corresponding to the given region. + pub fn is_suitable_region(&self, region: Region<'tcx>) -> Option { + + let (suitable_region_binding_scope, bound_region) = match *region { + ty::ReFree(ref free_region) => (free_region.scope, free_region.bound_region), + ty::ReEarlyBound(ref ebr) => { + (self.tcx.parent_def_id(ebr.def_id).unwrap(), + ty::BoundRegion::BrNamed(ebr.def_id, ebr.name)) } - } - None + _ => return None, // not a free region + }; + + let node_id = self.tcx + .hir + .as_local_node_id(suitable_region_binding_scope) + .unwrap(); + let is_impl_item = match self.tcx.hir.find(node_id) { + + Some(hir_map::NodeItem(..)) | + Some(hir_map::NodeTraitItem(..)) => false, + Some(hir_map::NodeImplItem(..)) => { + self.is_bound_region_in_impl_item(suitable_region_binding_scope) + } + _ => return None, + }; + + return Some(FreeRegionInfo { + def_id: suitable_region_binding_scope, + boundregion: bound_region, + is_impl_item: is_impl_item, + }); + } // Here, we check for the case where the anonymous region // is in the return type. // FIXME(#42703) - Need to handle certain cases here. - pub fn is_return_type_anon(&self, scope_def_id: DefId, br: ty::BoundRegion) -> bool { + pub fn is_return_type_anon(&self, + scope_def_id: DefId, + br: ty::BoundRegion, + decl: &hir::FnDecl) + -> Option { let ret_ty = self.tcx.type_of(scope_def_id); match ret_ty.sty { ty::TyFnDef(_, _) => { @@ -158,12 +176,12 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { let late_bound_regions = self.tcx .collect_referenced_late_bound_regions(&sig.output()); if late_bound_regions.iter().any(|r| *r == br) { - return true; + return Some(decl.output.span()); } } _ => {} } - false + None } // Here we check for the case where anonymous region // corresponds to self and if yes, we display E0312. @@ -177,9 +195,9 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } // Here we check if the bound region is in Impl Item. - pub fn is_bound_region_in_impl_item(&self, anonymous_region_binding_scope: DefId) -> bool { + pub fn is_bound_region_in_impl_item(&self, suitable_region_binding_scope: DefId) -> bool { let container_id = self.tcx - .associated_item(anonymous_region_binding_scope) + .associated_item(suitable_region_binding_scope) .container .id(); if self.tcx.impl_trait_ref(container_id).is_some() { @@ -193,4 +211,17 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } false } + + // This method returns whether the given Region is Named + pub fn is_named_region(&self, region: Region<'tcx>) -> bool { + match *region { + ty::ReFree(ref free_region) => { + match free_region.bound_region { + ty::BrNamed(..) 
=> true, + _ => false, + } + } + _ => false, + } + } } diff --git a/src/librustc/infer/freshen.rs b/src/librustc/infer/freshen.rs index 0fd4327cc6..c274f8bda9 100644 --- a/src/librustc/infer/freshen.rs +++ b/src/librustc/infer/freshen.rs @@ -43,6 +43,7 @@ use ty::{self, Ty, TyCtxt, TypeFoldable}; use ty::fold::TypeFolder; +use ty::subst::Substs; use util::nodemap::FxHashMap; use hir::def_id::DefId; @@ -101,6 +102,78 @@ impl<'a, 'gcx, 'tcx> TypeFreshener<'a, 'gcx, 'tcx> { self.freshen_count += 1; self.infcx.tcx.mk_infer(freshener(index)) } + + fn freshen_closure_like(&mut self, + def_id: DefId, + substs: ty::ClosureSubsts<'tcx>, + t: Ty<'tcx>, + markers: M, + combine: C) + -> Ty<'tcx> + where M: FnOnce(&mut Self) -> (Ty<'tcx>, Ty<'tcx>), + C: FnOnce(&'tcx Substs<'tcx>) -> Ty<'tcx> + { + let tcx = self.infcx.tcx; + + let closure_in_progress = self.infcx.in_progress_tables.map_or(false, |tables| { + tcx.hir.as_local_node_id(def_id).map_or(false, |closure_id| { + tables.borrow().local_id_root == + Some(DefId::local(tcx.hir.node_to_hir_id(closure_id).owner)) + }) + }); + + if !closure_in_progress { + // If this closure belongs to another infcx, its kind etc. were + // fully inferred and its signature/kind are exactly what's listed + // in its infcx. So we don't need to add the markers for them. + return t.super_fold_with(self); + } + + // We are encoding a closure in progress. Because we want our freshening + // key to contain all inference information needed to make sense of our + // value, we need to encode the closure signature and kind. The way + // we do that is to add them as 2 variables to the closure substs, + // basically because it's there (and nobody cares about adding extra stuff + // to substs). + // + // This means the "freshened" closure substs ends up looking like + // fresh_substs = [PARENT_SUBSTS* ; UPVARS* ; SIG_MARKER ; KIND_MARKER] + let (marker_1, marker_2) = if self.closure_set.contains(&def_id) { + // We found the closure def-id within its own signature. Just + // leave a new freshened type - any matching operations would + // have found and compared the exterior closure already to + // get here. + // + // In that case, we already know what the signature would + // be - the parent closure on the stack already contains a + // "copy" of the signature, so there is no reason to encode + // it again for injectivity. Just use a fresh type variable + // to make everything comparable. 
+ // + // For example (closure kinds omitted for clarity) + // t=[closure FOO sig=[closure BAR sig=[closure FOO ..]]] + // Would get encoded to + // t=[closure FOO sig=[closure BAR sig=[closure FOO sig=$0]]] + // + // and we can decode by having + // $0=[closure BAR {sig doesn't exist in decode}] + // and get + // t=[closure FOO] + // sig[FOO] = [closure BAR] + // sig[BAR] = [closure FOO] + (self.next_fresh(ty::FreshTy), self.next_fresh(ty::FreshTy)) + } else { + self.closure_set.push(def_id); + let markers = markers(self); + self.closure_set.pop(); + markers + }; + + combine(tcx.mk_substs( + substs.substs.iter().map(|k| k.fold_with(self)).chain( + [marker_1, marker_2].iter().cloned().map(From::from) + ))) + } } impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> { @@ -177,79 +250,48 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> { } ty::TyClosure(def_id, substs) => { - let closure_in_progress = self.infcx.in_progress_tables.map_or(false, |tables| { - tcx.hir.as_local_node_id(def_id).map_or(false, |closure_id| { - tables.borrow().local_id_root == - Some(DefId::local(tcx.hir.node_to_hir_id(closure_id).owner)) - }) - }); - - if !closure_in_progress { - // If this closure belongs to another infcx, its kind etc. were - // fully inferred and its signature/kind are exactly what's listed - // in its infcx. So we don't need to add the markers for them. - return t.super_fold_with(self); - } + self.freshen_closure_like( + def_id, substs, t, + |this| { + // HACK: use a "random" integer type to mark the kind. Because + // different closure kinds shouldn't get unified during + // selection, the "subtyping" relationship (where any kind is + // better than no kind) shouldn't matter here, just that the + // types are different. + let closure_kind = this.infcx.closure_kind(def_id); + let closure_kind_marker = match closure_kind { + None => tcx.types.i8, + Some(ty::ClosureKind::Fn) => tcx.types.i16, + Some(ty::ClosureKind::FnMut) => tcx.types.i32, + Some(ty::ClosureKind::FnOnce) => tcx.types.i64, + }; - // We are encoding a closure in progress. Because we want our freshening - // key to contain all inference information needed to make sense of our - // value, we need to encode the closure signature and kind. The way - // we do that is to add them as 2 variables to the closure substs, - // basically because it's there (and nobody cares about adding extra stuff - // to substs). - // - // This means the "freshened" closure substs ends up looking like - // fresh_substs = [PARENT_SUBSTS* ; UPVARS* ; SIG_MARKER ; KIND_MARKER] - - let closure_sig_marker = if self.closure_set.contains(&def_id) { - // We found the closure def-id within its own signature. Just - // leave a new freshened type - any matching operations would - // have found and compared the exterior closure already to - // get here. - // - // In that case, we already know what the signature would - // be - the parent closure on the stack already contains a - // "copy" of the signature, so there is no reason to encode - // it again for injectivity. Just use a fresh type variable - // to make everything comparable. 
- // - // For example (closure kinds omitted for clarity) - // t=[closure FOO sig=[closure BAR sig=[closure FOO ..]]] - // Would get encoded to - // t=[closure FOO sig=[closure BAR sig=[closure FOO sig=$0]]] - // - // and we can decode by having - // $0=[closure BAR {sig doesn't exist in decode}] - // and get - // t=[closure FOO] - // sig[FOO] = [closure BAR] - // sig[BAR] = [closure FOO] - self.next_fresh(ty::FreshTy) - } else { - self.closure_set.push(def_id); - let closure_sig = self.infcx.fn_sig(def_id); - let closure_sig_marker = tcx.mk_fn_ptr(closure_sig.fold_with(self)); - self.closure_set.pop(); - closure_sig_marker - }; - - // HACK: use a "random" integer type to mark the kind. Because different - // closure kinds shouldn't get unified during selection, the "subtyping" - // relationship (where any kind is better than no kind) shouldn't - // matter here, just that the types are different. - let closure_kind = self.infcx.closure_kind(def_id); - let closure_kind_marker = match closure_kind { - None => tcx.types.i8, - Some(ty::ClosureKind::Fn) => tcx.types.i16, - Some(ty::ClosureKind::FnMut) => tcx.types.i32, - Some(ty::ClosureKind::FnOnce) => tcx.types.i64, - }; + let closure_sig = this.infcx.fn_sig(def_id); + (tcx.mk_fn_ptr(closure_sig.fold_with(this)), + closure_kind_marker) + }, + |substs| tcx.mk_closure(def_id, substs) + ) + } - let params = tcx.mk_substs( - substs.substs.iter().map(|k| k.fold_with(self)).chain( - [closure_sig_marker, closure_kind_marker].iter().cloned().map(From::from) - )); - tcx.mk_closure(def_id, params) + ty::TyGenerator(def_id, substs, interior) => { + self.freshen_closure_like( + def_id, substs, t, + |this| { + let gen_sig = this.infcx.generator_sig(def_id).unwrap(); + // FIXME: want to revise this strategy when generator + // signatures can actually contain LBRs. + let sig = this.tcx().no_late_bound_regions(&gen_sig) + .unwrap_or_else(|| { + bug!("late-bound regions in signature of {:?}", + def_id) + }); + (sig.yield_ty, sig.return_ty).fold_with(this) + }, + |substs| { + tcx.mk_generator(def_id, ty::ClosureSubsts { substs }, interior) + } + ) } ty::TyBool | diff --git a/src/librustc/infer/higher_ranked/mod.rs b/src/librustc/infer/higher_ranked/mod.rs index 760d443f0e..0d02420457 100644 --- a/src/librustc/infer/higher_ranked/mod.rs +++ b/src/librustc/infer/higher_ranked/mod.rs @@ -13,9 +13,7 @@ use super::{CombinedSnapshot, InferCtxt, - LateBoundRegion, HigherRankedType, - RegionVariableOrigin, SubregionOrigin, SkolemizationMap}; use super::combine::CombineFields; @@ -29,15 +27,6 @@ use util::nodemap::{FxHashMap, FxHashSet}; pub struct HrMatchResult { pub value: U, - - /// Normally, when we do a higher-ranked match operation, we - /// expect all higher-ranked regions to be constrained as part of - /// the match operation. However, in the transition period for - /// #32330, it can happen that we sometimes have unconstrained - /// regions that get instantiated with fresh variables. In that - /// case, we collect the set of unconstrained bound regions here - /// and replace them with fresh variables. - pub unconstrained_regions: Vec, } impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { @@ -108,7 +97,6 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { /// that do not appear in `T`. If that happens, those regions are /// unconstrained, and this routine replaces them with `'static`. 
pub fn higher_ranked_match(&mut self, - span: Span, a_pair: &Binder<(T, U)>, b_match: &T, a_is_expected: bool) @@ -158,28 +146,16 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { // be any region from the sets above, except for other members of // `skol_map`. There should always be a representative if things // are properly well-formed. - let mut unconstrained_regions = vec![]; let skol_representatives: FxHashMap<_, _> = skol_resolution_map .iter() - .map(|(&skol, &(br, ref regions))| { + .map(|(&skol, &(_, ref regions))| { let representative = regions.iter() .filter(|&&r| !skol_resolution_map.contains_key(r)) .cloned() .next() - .unwrap_or_else(|| { // [1] - unconstrained_regions.push(br); - self.infcx.next_region_var( - LateBoundRegion(span, br, HigherRankedType)) - }); - - // [1] There should always be a representative, - // unless the higher-ranked region did not appear - // in the values being matched. We should reject - // as ill-formed cases that can lead to this, but - // right now we sometimes issue warnings (see - // #32330). + .expect("no representative region"); (skol, representative) }) @@ -216,10 +192,7 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { // We are now done with these skolemized variables. self.infcx.pop_skolemized(skol_map, snapshot); - Ok(HrMatchResult { - value: a_value, - unconstrained_regions, - }) + Ok(HrMatchResult { value: a_value }) }); } @@ -657,28 +630,13 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { skol_br, tainted_region); - let issue_32330 = if let &ty::ReVar(vid) = tainted_region { - match self.region_vars.var_origin(vid) { - RegionVariableOrigin::EarlyBoundRegion(_, _, issue_32330) => { - issue_32330.map(Box::new) - } - _ => None - } - } else { - None - }; - - if overly_polymorphic { + return Err(if overly_polymorphic { debug!("Overly polymorphic!"); - return Err(TypeError::RegionsOverlyPolymorphic(skol_br, - tainted_region, - issue_32330)); + TypeError::RegionsOverlyPolymorphic(skol_br, tainted_region) } else { debug!("Not as polymorphic!"); - return Err(TypeError::RegionsInsufficientlyPolymorphic(skol_br, - tainted_region, - issue_32330)); - } + TypeError::RegionsInsufficientlyPolymorphic(skol_br, tainted_region) + }) } } diff --git a/src/librustc/infer/mod.rs b/src/librustc/infer/mod.rs index 453a8777ad..39bcd70357 100644 --- a/src/librustc/infer/mod.rs +++ b/src/librustc/infer/mod.rs @@ -20,7 +20,7 @@ pub use self::region_inference::{GenericKind, VerifyBound}; use hir::def_id::DefId; use middle::free_region::{FreeRegionMap, RegionRelations}; -use middle::region::RegionMaps; +use middle::region; use middle::lang_items; use mir::tcx::LvalueTy; use ty::subst::{Kind, Subst, Substs}; @@ -299,7 +299,7 @@ pub enum RegionVariableOrigin { Coercion(Span), // Region variables created as the values for early-bound regions - EarlyBoundRegion(Span, ast::Name, Option), + EarlyBoundRegion(Span, ast::Name), // Region variables created for bound regions // in a function or method that is called @@ -442,6 +442,7 @@ macro_rules! 
impl_trans_normalize { impl_trans_normalize!('gcx, Ty<'gcx>, + &'gcx ty::Const<'gcx>, &'gcx Substs<'gcx>, ty::FnSig<'gcx>, ty::PolyFnSig<'gcx>, @@ -493,7 +494,7 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { let param_env = ty::ParamEnv::empty(Reveal::All); let value = self.erase_regions(value); - if !value.has_projection_types() { + if !value.has_projections() { return value; } @@ -515,7 +516,7 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { let value = self.erase_regions(value); - if !value.has_projection_types() { + if !value.has_projections() { return value; } @@ -643,7 +644,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } } - pub fn unsolved_variables(&self) -> Vec> { + pub fn unsolved_variables(&self) -> Vec> { let mut variables = Vec::new(); let unbound_ty_vars = self.type_variables @@ -989,7 +990,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { span: Span, def: &ty::RegionParameterDef) -> ty::Region<'tcx> { - self.next_region_var(EarlyBoundRegion(span, def.name, def.issue_32330)) + self.next_region_var(EarlyBoundRegion(span, def.name)) } /// Create a type inference variable for the given @@ -1070,7 +1071,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { pub fn resolve_regions_and_report_errors(&self, region_context: DefId, - region_map: &RegionMaps, + region_map: ®ion::ScopeTree, free_regions: &FreeRegionMap<'tcx>) { let region_rels = RegionRelations::new(self.tcx, region_context, @@ -1084,7 +1085,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // this infcx was in use. This is totally hokey but // otherwise we have a hard time separating legit region // errors from silly ones. - self.report_region_errors(&errors); // see error_reporting module + self.report_region_errors(region_map, &errors); // see error_reporting module } } @@ -1290,14 +1291,13 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { -> InferResult<'tcx, HrMatchResult>> { let match_pair = match_a.map_bound(|p| (p.projection_ty.trait_ref(self.tcx), p.ty)); - let span = cause.span; let trace = TypeTrace { cause, values: TraitRefs(ExpectedFound::new(true, match_pair.skip_binder().0, match_b)) }; let mut combine = self.combine_fields(trace, param_env); - let result = combine.higher_ranked_match(span, &match_pair, &match_b, true)?; + let result = combine.higher_ranked_match(&match_pair, &match_b, true)?; Ok(InferOk { value: result, obligations: combine.obligations }) } @@ -1376,6 +1376,19 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.tcx.fn_sig(def_id) } + + pub fn generator_sig(&self, def_id: DefId) -> Option> { + if let Some(tables) = self.in_progress_tables { + if let Some(id) = self.tcx.hir.as_local_node_id(def_id) { + let hir_id = self.tcx.hir.node_to_hir_id(id); + if let Some(&ty) = tables.borrow().generator_sigs().get(hir_id) { + return ty.map(|t| ty::Binder(t)); + } + } + } + + self.tcx.generator_sig(def_id) + } } impl<'a, 'gcx, 'tcx> TypeTrace<'tcx> { diff --git a/src/librustc/infer/region_inference/graphviz.rs b/src/librustc/infer/region_inference/graphviz.rs index 81a8984e75..5cf6aa350b 100644 --- a/src/librustc/infer/region_inference/graphviz.rs +++ b/src/librustc/infer/region_inference/graphviz.rs @@ -21,7 +21,7 @@ use graphviz as dot; use hir::def_id::DefIndex; use ty; use middle::free_region::RegionRelations; -use middle::region::CodeExtent; +use middle::region; use super::Constraint; use infer::SubregionOrigin; use infer::region_inference::RegionVarBindings; @@ -136,7 +136,7 @@ enum Node { #[derive(Clone, PartialEq, Eq, Debug, Copy)] enum Edge<'tcx> { Constraint(Constraint<'tcx>), - 
EnclScope(CodeExtent, CodeExtent), + EnclScope(region::Scope, region::Scope), } impl<'a, 'gcx, 'tcx> ConstraintGraph<'a, 'gcx, 'tcx> { @@ -159,7 +159,7 @@ impl<'a, 'gcx, 'tcx> ConstraintGraph<'a, 'gcx, 'tcx> { add_node(n2); } - region_rels.region_maps.each_encl_scope(|sub, sup| { + region_rels.region_scope_tree.each_encl_scope(|sub, sup| { add_node(Node::Region(ty::ReScope(sub))); add_node(Node::Region(ty::ReScope(sup))); }); @@ -245,7 +245,9 @@ impl<'a, 'gcx, 'tcx> dot::GraphWalk<'a> for ConstraintGraph<'a, 'gcx, 'tcx> { fn edges(&self) -> dot::Edges> { debug!("constraint graph has {} edges", self.map.len()); let mut v: Vec<_> = self.map.keys().map(|e| Edge::Constraint(*e)).collect(); - self.region_rels.region_maps.each_encl_scope(|sub, sup| v.push(Edge::EnclScope(sub, sup))); + self.region_rels.region_scope_tree.each_encl_scope(|sub, sup| { + v.push(Edge::EnclScope(sub, sup)) + }); debug!("region graph has {} edges", v.len()); Cow::Owned(v) } diff --git a/src/librustc/infer/region_inference/mod.rs b/src/librustc/infer/region_inference/mod.rs index 5588b6d9ad..8351be4907 100644 --- a/src/librustc/infer/region_inference/mod.rs +++ b/src/librustc/infer/region_inference/mod.rs @@ -935,14 +935,14 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { // reasonably compare free regions and scopes: let fr_scope = match (a, b) { (&ReEarlyBound(ref br), _) | (_, &ReEarlyBound(ref br)) => { - region_rels.region_maps.early_free_extent(self.tcx, br) + region_rels.region_scope_tree.early_free_scope(self.tcx, br) } (&ReFree(ref fr), _) | (_, &ReFree(ref fr)) => { - region_rels.region_maps.free_extent(self.tcx, fr) + region_rels.region_scope_tree.free_scope(self.tcx, fr) } _ => bug!() }; - let r_id = region_rels.region_maps.nearest_common_ancestor(fr_scope, s_id); + let r_id = region_rels.region_scope_tree.nearest_common_ancestor(fr_scope, s_id); if r_id == fr_scope { // if the free region's scope `fr.scope` is bigger than // the scope region `s_id`, then the LUB is the free @@ -963,7 +963,7 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { // The region corresponding to an outer block is a // subtype of the region corresponding to an inner // block. - let lub = region_rels.region_maps.nearest_common_ancestor(a_id, b_id); + let lub = region_rels.region_scope_tree.nearest_common_ancestor(a_id, b_id); self.tcx.mk_region(ReScope(lub)) } diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs index 82f01c36fe..015dbbb7af 100644 --- a/src/librustc/lib.rs +++ b/src/librustc/lib.rs @@ -8,7 +8,28 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! The Rust compiler. +//! The "main crate" of the Rust compiler. This crate contains common +//! type definitions that are used by the other crates in the rustc +//! "family". Some prominent examples (note that each of these modules +//! has their own README with further details). +//! +//! - **HIR.** The "high-level (H) intermediate representation (IR)" is +//! defined in the `hir` module. +//! - **MIR.** The "mid-level (M) intermediate representation (IR)" is +//! defined in the `mir` module. This module contains only the +//! *definition* of the MIR; the passes that transform and operate +//! on MIR are found in `librustc_mir` crate. +//! - **Types.** The internal representation of types used in rustc is +//! defined in the `ty` module. This includes the **type context** +//! (or `tcx`), which is the central context during most of +//! compilation, containing the interners and other things. +//! 
- **Traits.** Trait resolution is implemented in the `traits` module. +//! - **Type inference.** The type inference code can be found in the `infer` module; +//! this code handles low-level equality and subtyping operations. The +//! type check pass in the compiler is found in the `librustc_typeck` crate. +//! +//! For a deeper explanation of how the compiler works and is +//! organized, see the README.md file in this directory. //! //! # Note //! @@ -36,9 +57,13 @@ #![feature(trace_macros)] #![feature(test)] +#![cfg_attr(stage0, feature(const_fn))] +#![cfg_attr(not(stage0), feature(const_atomic_bool_new))] + #![recursion_limit="256"] extern crate arena; +#[macro_use] extern crate bitflags; extern crate core; extern crate fmt_macros; extern crate getopts; @@ -47,14 +72,13 @@ extern crate graphviz; extern crate libc; extern crate owning_ref; extern crate rustc_back; -extern crate rustc_data_structures; +#[macro_use] extern crate rustc_data_structures; extern crate serialize; extern crate rustc_const_math; extern crate rustc_errors as errors; #[macro_use] extern crate log; #[macro_use] extern crate syntax; extern crate syntax_pos; -#[macro_use] #[no_link] extern crate rustc_bitflags; extern crate jobserver; extern crate serialize as rustc_serialize; // used by deriving @@ -88,8 +112,8 @@ pub mod middle { pub mod dataflow; pub mod dead; pub mod dependency_format; - pub mod effect; pub mod entry; + pub mod exported_symbols; pub mod free_region; pub mod intrinsicck; pub mod lang_items; @@ -101,6 +125,7 @@ pub mod middle { pub mod recursion_limit; pub mod resolve_lifetime; pub mod stability; + pub mod trans; pub mod weak_lang_items; } diff --git a/src/librustc/lint/builtin.rs b/src/librustc/lint/builtin.rs index cbe642a9a7..5fe75d8ca7 100644 --- a/src/librustc/lint/builtin.rs +++ b/src/librustc/lint/builtin.rs @@ -216,6 +216,12 @@ declare_lint! { "detects use of deprecated items" } +declare_lint! { + pub UNUSED_UNSAFE, + Warn, + "unnecessary use of an `unsafe` block" +} + /// Does nothing as a lint pass, but registers some `Lint`s /// which are used by other parts of the compiler. 
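As an aside on the `UNUSED_UNSAFE` lint declared in the lint/builtin.rs hunk above ("unnecessary use of an `unsafe` block"): a minimal stand-alone sketch of the kind of user code it is meant to flag, written here purely for illustration and not part of the patch:

fn main() {
    let x = 5u32;
    let p = &x as *const u32;
    // The outer `unsafe` block already covers the raw-pointer dereference,
    // so the nested block is redundant and triggers `unused_unsafe`.
    let y = unsafe { unsafe { *p } };
    println!("{}", y);
}
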
#[derive(Copy, Clone)] @@ -256,7 +262,8 @@ impl LintPass for HardwiredLints { MISSING_FRAGMENT_SPECIFIER, PARENTHESIZED_PARAMS_IN_TYPES_AND_MODULES, LATE_BOUND_LIFETIME_ARGUMENTS, - DEPRECATED + DEPRECATED, + UNUSED_UNSAFE ) } } diff --git a/src/librustc/lint/levels.rs b/src/librustc/lint/levels.rs index ab086e5b8e..4bc37747f2 100644 --- a/src/librustc/lint/levels.rs +++ b/src/librustc/lint/levels.rs @@ -12,9 +12,12 @@ use std::cmp; use errors::DiagnosticBuilder; use hir::HirId; +use ich::StableHashingContext; use lint::builtin; use lint::context::CheckLintNameResult; use lint::{self, Lint, LintId, Level, LintSource}; +use rustc_data_structures::stable_hasher::{HashStable, ToStableHashKey, + StableHasher, StableHasherResult}; use session::Session; use syntax::ast; use syntax::attr; @@ -247,13 +250,27 @@ impl<'a> LintLevelsBuilder<'a> { self.cur, Some(&specs)); let msg = format!("unknown lint: `{}`", name); - lint::struct_lint_level(self.sess, + let mut db = lint::struct_lint_level(self.sess, lint, level, src, Some(li.span.into()), - &msg) - .emit(); + &msg); + if name.as_str().chars().any(|c| c.is_uppercase()) { + let name_lower = name.as_str().to_lowercase(); + if let CheckLintNameResult::NoLint = + store.check_lint_name(&name_lower) { + db.emit(); + } else { + db.span_suggestion( + li.span, + "lowercase the lint name", + name_lower + ).emit(); + } + } else { + db.emit(); + } } } } @@ -367,4 +384,68 @@ impl LintLevelMap { self.sets.get_lint_level(lint, *idx, None) }) } + + /// Returns if this `id` has lint level information. + pub fn lint_level_set(&self, id: HirId) -> Option { + self.id_to_set.get(&id).cloned() + } +} + +impl<'gcx> HashStable> for LintLevelMap { + #[inline] + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + let LintLevelMap { + ref sets, + ref id_to_set, + } = *self; + + id_to_set.hash_stable(hcx, hasher); + + let LintLevelSets { + ref list, + lint_cap, + } = *sets; + + lint_cap.hash_stable(hcx, hasher); + + hcx.while_hashing_spans(true, |hcx| { + list.len().hash_stable(hcx, hasher); + + // We are working under the assumption here that the list of + // lint-sets is built in a deterministic order. + for lint_set in list { + ::std::mem::discriminant(lint_set).hash_stable(hcx, hasher); + + match *lint_set { + LintSet::CommandLine { ref specs } => { + specs.hash_stable(hcx, hasher); + } + LintSet::Node { ref specs, parent } => { + specs.hash_stable(hcx, hasher); + parent.hash_stable(hcx, hasher); + } + } + } + }) + } +} + +impl HashStable for LintId { + #[inline] + fn hash_stable(&self, + hcx: &mut HCX, + hasher: &mut StableHasher) { + self.lint_name_raw().hash_stable(hcx, hasher); + } +} + +impl ToStableHashKey for LintId { + type KeyType = &'static str; + + #[inline] + fn to_stable_hash_key(&self, _: &HCX) -> &'static str { + self.lint_name_raw() + } } diff --git a/src/librustc/lint/mod.rs b/src/librustc/lint/mod.rs index c64e1c0808..42b5e2dd83 100644 --- a/src/librustc/lint/mod.rs +++ b/src/librustc/lint/mod.rs @@ -305,6 +305,10 @@ impl LintId { } } + pub fn lint_name_raw(&self) -> &'static str { + self.lint.name + } + /// Get the name of the lint. pub fn to_string(&self) -> String { self.lint.name_lower() @@ -317,6 +321,13 @@ pub enum Level { Allow, Warn, Deny, Forbid } +impl_stable_hash_for!(enum self::Level { + Allow, + Warn, + Deny, + Forbid +}); + impl Level { /// Convert a level to a lower-case string. 
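For context on the new suggestion path added in lint/levels.rs above: it only fires when an unknown lint name contains uppercase letters and its lowercased form is a lint the store actually knows about. A small illustrative example of user code that would now get the "lowercase the lint name" suggestion (hypothetical attribute, not taken from the patch):

// `Dead_Code` is not a registered lint name, but `dead_code` is, so the new
// check attaches a span suggestion to the existing "unknown lint" warning
// instead of emitting the warning on its own.
#[allow(Dead_Code)]
fn main() {}
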
pub fn as_str(self) -> &'static str { @@ -354,6 +365,12 @@ pub enum LintSource { CommandLine(Symbol), } +impl_stable_hash_for!(enum self::LintSource { + Default, + Node(name, span), + CommandLine(text) +}); + pub type LevelSource = (Level, LintSource); pub mod builtin; diff --git a/src/librustc/macros.rs b/src/librustc/macros.rs index f3d66b49de..f0285d6a93 100644 --- a/src/librustc/macros.rs +++ b/src/librustc/macros.rs @@ -73,10 +73,10 @@ macro_rules! __impl_stable_hash_field { #[macro_export] macro_rules! impl_stable_hash_for { (enum $enum_name:path { $( $variant:ident $( ( $($arg:ident),* ) )* ),* }) => { - impl<'a, 'tcx, 'lcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a, 'tcx, 'lcx>> for $enum_name { + impl<'tcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'tcx>> for $enum_name { #[inline] fn hash_stable(&self, - __ctx: &mut $crate::ich::StableHashingContext<'a, 'tcx, 'lcx>, + __ctx: &mut $crate::ich::StableHashingContext<'tcx>, __hasher: &mut ::rustc_data_structures::stable_hasher::StableHasher) { use $enum_name::*; ::std::mem::discriminant(self).hash_stable(__ctx, __hasher); @@ -92,10 +92,10 @@ macro_rules! impl_stable_hash_for { } }; (struct $struct_name:path { $($field:ident),* }) => { - impl<'a, 'tcx, 'lcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a, 'tcx, 'lcx>> for $struct_name { + impl<'tcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'tcx>> for $struct_name { #[inline] fn hash_stable(&self, - __ctx: &mut $crate::ich::StableHashingContext<'a, 'tcx, 'lcx>, + __ctx: &mut $crate::ich::StableHashingContext<'tcx>, __hasher: &mut ::rustc_data_structures::stable_hasher::StableHasher) { let $struct_name { $(ref $field),* @@ -106,10 +106,10 @@ macro_rules! impl_stable_hash_for { } }; (tuple_struct $struct_name:path { $($field:ident),* }) => { - impl<'a, 'tcx, 'lcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a, 'tcx, 'lcx>> for $struct_name { + impl<'tcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'tcx>> for $struct_name { #[inline] fn hash_stable(&self, - __ctx: &mut $crate::ich::StableHashingContext<'a, 'tcx, 'lcx>, + __ctx: &mut $crate::ich::StableHashingContext<'tcx>, __hasher: &mut ::rustc_data_structures::stable_hasher::StableHasher) { let $struct_name ( $(ref $field),* @@ -125,11 +125,11 @@ macro_rules! impl_stable_hash_for { macro_rules! impl_stable_hash_for_spanned { ($T:path) => ( - impl<'a, 'tcx, 'lcx> HashStable> for ::syntax::codemap::Spanned<$T> + impl<'tcx> HashStable> for ::syntax::codemap::Spanned<$T> { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'tcx, 'lcx>, + hcx: &mut StableHashingContext<'tcx>, hasher: &mut StableHasher) { self.node.hash_stable(hcx, hasher); self.span.hash_stable(hcx, hasher); diff --git a/src/librustc/middle/const_val.rs b/src/librustc/middle/const_val.rs index b6b1648f39..7b23998046 100644 --- a/src/librustc/middle/const_val.rs +++ b/src/librustc/middle/const_val.rs @@ -8,64 +8,66 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use self::ConstVal::*; pub use rustc_const_math::ConstInt; -use hir; -use hir::def::Def; use hir::def_id::DefId; -use traits::Reveal; use ty::{self, TyCtxt, layout}; use ty::subst::Substs; -use util::common::ErrorReported; use rustc_const_math::*; use graphviz::IntoCow; use errors::DiagnosticBuilder; +use serialize::{self, Encodable, Encoder, Decodable, Decoder}; use syntax::symbol::InternedString; use syntax::ast; use syntax_pos::Span; use std::borrow::Cow; -use std::collections::BTreeMap; -use std::rc::Rc; -pub type EvalResult<'tcx> = Result, ConstEvalErr<'tcx>>; +pub type EvalResult<'tcx> = Result<&'tcx ty::Const<'tcx>, ConstEvalErr<'tcx>>; -#[derive(Clone, Debug, Hash, RustcEncodable, RustcDecodable, Eq, PartialEq)] +#[derive(Copy, Clone, Debug, Hash, RustcEncodable, RustcDecodable, Eq, PartialEq)] pub enum ConstVal<'tcx> { - Float(ConstFloat), Integral(ConstInt), + Float(ConstFloat), Str(InternedString), - ByteStr(Rc>), + ByteStr(ByteArray<'tcx>), Bool(bool), Char(char), Variant(DefId), Function(DefId, &'tcx Substs<'tcx>), - Struct(BTreeMap>), - Tuple(Vec>), - Array(Vec>), - Repeat(Box>, u64), + Aggregate(ConstAggregate<'tcx>), + Unevaluated(DefId, &'tcx Substs<'tcx>), } -impl<'tcx> ConstVal<'tcx> { - pub fn description(&self) -> &'static str { - match *self { - Float(f) => f.description(), - Integral(i) => i.description(), - Str(_) => "string literal", - ByteStr(_) => "byte string literal", - Bool(_) => "boolean", - Char(..) => "char", - Variant(_) => "enum variant", - Struct(_) => "struct", - Tuple(_) => "tuple", - Function(..) => "function definition", - Array(..) => "array", - Repeat(..) => "repeat", - } +#[derive(Copy, Clone, Debug, Hash, RustcEncodable, Eq, PartialEq)] +pub struct ByteArray<'tcx> { + pub data: &'tcx [u8], +} + +impl<'tcx> serialize::UseSpecializedDecodable for ByteArray<'tcx> {} + +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub enum ConstAggregate<'tcx> { + Struct(&'tcx [(ast::Name, &'tcx ty::Const<'tcx>)]), + Tuple(&'tcx [&'tcx ty::Const<'tcx>]), + Array(&'tcx [&'tcx ty::Const<'tcx>]), + Repeat(&'tcx ty::Const<'tcx>, u64), +} + +impl<'tcx> Encodable for ConstAggregate<'tcx> { + fn encode(&self, _: &mut S) -> Result<(), S::Error> { + bug!("should never encode ConstAggregate::{:?}", self) + } +} + +impl<'tcx> Decodable for ConstAggregate<'tcx> { + fn decode(_: &mut D) -> Result { + bug!("should never decode ConstAggregate") } +} +impl<'tcx> ConstVal<'tcx> { pub fn to_const_int(&self) -> Option { match *self { ConstVal::Integral(i) => Some(i), @@ -86,8 +88,6 @@ pub struct ConstEvalErr<'tcx> { pub enum ErrKind<'tcx> { CannotCast, MissingStructField, - NegateOn(ConstVal<'tcx>), - NotOn(ConstVal<'tcx>), NonConstPath, UnimplementedConstVal(&'static str), @@ -146,9 +146,6 @@ impl<'a, 'gcx, 'tcx> ConstEvalErr<'tcx> { match self.kind { CannotCast => simple!("can't cast this type"), - NegateOn(ref const_val) => simple!("negate on {}", const_val.description()), - NotOn(ref const_val) => simple!("not on {}", const_val.description()), - MissingStructField => simple!("nonexistent struct field"), NonConstPath => simple!("non-constant path in constant expression"), UnimplementedConstVal(what) => @@ -221,37 +218,3 @@ impl<'a, 'gcx, 'tcx> ConstEvalErr<'tcx> { self.struct_error(tcx, primary_span, primary_kind).emit(); } } - -/// Returns the value of the length-valued expression -pub fn eval_length(tcx: TyCtxt, - count: hir::BodyId, - reason: &str) - -> Result -{ - let count_expr = &tcx.hir.body(count).value; - let count_def_id = tcx.hir.body_owner_def_id(count); - let 
param_env = ty::ParamEnv::empty(Reveal::UserFacing); - let substs = Substs::identity_for_item(tcx.global_tcx(), count_def_id); - match tcx.at(count_expr.span).const_eval(param_env.and((count_def_id, substs))) { - Ok(Integral(Usize(count))) => { - let val = count.as_u64(tcx.sess.target.uint_type); - assert_eq!(val as usize as u64, val); - Ok(val as usize) - }, - Ok(_) | - Err(ConstEvalErr { kind: ErrKind::TypeckError, .. }) => Err(ErrorReported), - Err(err) => { - let mut diag = err.struct_error(tcx, count_expr.span, reason); - - if let hir::ExprPath(hir::QPath::Resolved(None, ref path)) = count_expr.node { - if let Def::Local(..) = path.def { - diag.note(&format!("`{}` is a variable", - tcx.hir.node_to_pretty_string(count_expr.id))); - } - } - - diag.emit(); - Err(ErrorReported) - } - } -} diff --git a/src/librustc/middle/cstore.rs b/src/librustc/middle/cstore.rs index 288390cb5e..a97bfa0536 100644 --- a/src/librustc/middle/cstore.rs +++ b/src/librustc/middle/cstore.rs @@ -22,19 +22,20 @@ //! are *mostly* used as a part of that interface, but these should //! probably get a better home if someone can find one. +use hir; use hir::def; -use hir::def_id::{CrateNum, DefId, DefIndex}; +use hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE}; use hir::map as hir_map; use hir::map::definitions::{Definitions, DefKey, DefPathTable}; use hir::svh::Svh; use ich; -use middle::lang_items; use ty::{self, TyCtxt}; use session::Session; use session::search_paths::PathKind; -use util::nodemap::{NodeSet, DefIdMap}; +use util::nodemap::NodeSet; use std::any::Any; +use std::collections::BTreeMap; use std::path::{Path, PathBuf}; use std::rc::Rc; use owning_ref::ErasedBoxRef; @@ -43,8 +44,6 @@ use syntax::ext::base::SyntaxExtension; use syntax::symbol::Symbol; use syntax_pos::Span; use rustc_back::target::Target; -use hir; -use rustc_back::PanicStrategy; pub use self::NativeLibraryKind::*; @@ -135,7 +134,7 @@ pub struct NativeLibrary { pub kind: NativeLibraryKind, pub name: Symbol, pub cfg: Option, - pub foreign_items: Vec, + pub foreign_items: Vec, } pub enum LoadedMacro { @@ -221,77 +220,68 @@ pub trait MetadataLoader { -> Result, String>; } +#[derive(Clone)] +pub struct ExternConstBody<'tcx> { + pub body: &'tcx hir::Body, + + // It would require a lot of infrastructure to enable stable-hashing Bodies + // from other crates, so we hash on export and just store the fingerprint + // with them. + pub fingerprint: ich::Fingerprint, +} + +#[derive(Clone)] +pub struct ExternBodyNestedBodies { + pub nested_bodies: Rc>, + + // It would require a lot of infrastructure to enable stable-hashing Bodies + // from other crates, so we hash on export and just store the fingerprint + // with them. + pub fingerprint: ich::Fingerprint, +} + /// A store of Rust crates, through with their metadata /// can be accessed. +/// +/// Note that this trait should probably not be expanding today. All new +/// functionality should be driven through queries instead! +/// +/// If you find a method on this trait named `{name}_untracked` it signifies +/// that it's *not* tracked for dependency information throughout compilation +/// (it'd break incremental compilation) and should only be called pre-HIR (e.g. 
+/// during resolve) pub trait CrateStore { fn crate_data_as_rc_any(&self, krate: CrateNum) -> Rc; // access to the metadata loader fn metadata_loader(&self) -> &MetadataLoader; - // item info - fn visibility(&self, def: DefId) -> ty::Visibility; - fn visible_parent_map<'a>(&'a self, sess: &Session) -> ::std::cell::Ref<'a, DefIdMap>; - fn item_generics_cloned(&self, def: DefId) -> ty::Generics; - - // trait info - fn implementations_of_trait(&self, filter: Option) -> Vec; - - // impl info - fn impl_defaultness(&self, def: DefId) -> hir::Defaultness; - - // trait/impl-item info - fn associated_item_cloned(&self, def: DefId) -> ty::AssociatedItem; - - // flags - fn is_dllimport_foreign_item(&self, def: DefId) -> bool; - fn is_statically_included_foreign_item(&self, def_id: DefId) -> bool; - - // crate metadata - fn dep_kind(&self, cnum: CrateNum) -> DepKind; - fn export_macros(&self, cnum: CrateNum); - fn lang_items(&self, cnum: CrateNum) -> Vec<(DefIndex, usize)>; - fn missing_lang_items(&self, cnum: CrateNum) -> Vec; - fn is_compiler_builtins(&self, cnum: CrateNum) -> bool; - fn is_sanitizer_runtime(&self, cnum: CrateNum) -> bool; - fn is_profiler_runtime(&self, cnum: CrateNum) -> bool; - fn panic_strategy(&self, cnum: CrateNum) -> PanicStrategy; - /// The name of the crate as it is referred to in source code of the current - /// crate. - fn crate_name(&self, cnum: CrateNum) -> Symbol; - /// The name of the crate as it is stored in the crate's metadata. - fn original_crate_name(&self, cnum: CrateNum) -> Symbol; - fn crate_hash(&self, cnum: CrateNum) -> Svh; - fn crate_disambiguator(&self, cnum: CrateNum) -> Symbol; - fn plugin_registrar_fn(&self, cnum: CrateNum) -> Option; - fn derive_registrar_fn(&self, cnum: CrateNum) -> Option; - fn native_libraries(&self, cnum: CrateNum) -> Vec; - fn exported_symbols(&self, cnum: CrateNum) -> Vec; - fn is_no_builtins(&self, cnum: CrateNum) -> bool; - // resolve fn def_key(&self, def: DefId) -> DefKey; fn def_path(&self, def: DefId) -> hir_map::DefPath; fn def_path_hash(&self, def: DefId) -> hir_map::DefPathHash; fn def_path_table(&self, cnum: CrateNum) -> Rc; - fn struct_field_names(&self, def: DefId) -> Vec; - fn item_children(&self, did: DefId, sess: &Session) -> Vec; - fn load_macro(&self, did: DefId, sess: &Session) -> LoadedMacro; - // misc. metadata - fn item_body<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) - -> &'tcx hir::Body; + // "queries" used in resolve that aren't tracked for incremental compilation + fn visibility_untracked(&self, def: DefId) -> ty::Visibility; + fn export_macros_untracked(&self, cnum: CrateNum); + fn dep_kind_untracked(&self, cnum: CrateNum) -> DepKind; + fn crate_name_untracked(&self, cnum: CrateNum) -> Symbol; + fn crate_disambiguator_untracked(&self, cnum: CrateNum) -> Symbol; + fn crate_hash_untracked(&self, cnum: CrateNum) -> Svh; + fn struct_field_names_untracked(&self, def: DefId) -> Vec; + fn item_children_untracked(&self, did: DefId, sess: &Session) -> Vec; + fn load_macro_untracked(&self, did: DefId, sess: &Session) -> LoadedMacro; + fn extern_mod_stmt_cnum_untracked(&self, emod_id: ast::NodeId) -> Option; + fn item_generics_cloned_untracked(&self, def: DefId) -> ty::Generics; + fn associated_item_cloned_untracked(&self, def: DefId) -> ty::AssociatedItem; + fn postorder_cnums_untracked(&self) -> Vec; // This is basically a 1-based range of ints, which is a little // silly - I may fix that. 
- fn crates(&self) -> Vec; - fn used_libraries(&self) -> Vec; - fn used_link_args(&self) -> Vec; + fn crates_untracked(&self) -> Vec; // utility functions - fn used_crates(&self, prefer: LinkagePreference) -> Vec<(CrateNum, LibSource)>; - fn used_crate_source(&self, cnum: CrateNum) -> CrateSource; - fn extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option; fn encode_metadata<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, link_meta: &LinkMeta, @@ -336,57 +326,22 @@ impl CrateStore for DummyCrateStore { fn crate_data_as_rc_any(&self, krate: CrateNum) -> Rc { bug!("crate_data_as_rc_any") } // item info - fn visibility(&self, def: DefId) -> ty::Visibility { bug!("visibility") } - fn visible_parent_map<'a>(&'a self, session: &Session) - -> ::std::cell::Ref<'a, DefIdMap> - { - bug!("visible_parent_map") - } - fn item_generics_cloned(&self, def: DefId) -> ty::Generics + fn visibility_untracked(&self, def: DefId) -> ty::Visibility { bug!("visibility") } + fn item_generics_cloned_untracked(&self, def: DefId) -> ty::Generics { bug!("item_generics_cloned") } - // trait info - fn implementations_of_trait(&self, filter: Option) -> Vec { vec![] } - - // impl info - fn impl_defaultness(&self, def: DefId) -> hir::Defaultness { bug!("impl_defaultness") } - // trait/impl-item info - fn associated_item_cloned(&self, def: DefId) -> ty::AssociatedItem + fn associated_item_cloned_untracked(&self, def: DefId) -> ty::AssociatedItem { bug!("associated_item_cloned") } - // flags - fn is_dllimport_foreign_item(&self, id: DefId) -> bool { false } - fn is_statically_included_foreign_item(&self, def_id: DefId) -> bool { false } - // crate metadata - fn lang_items(&self, cnum: CrateNum) -> Vec<(DefIndex, usize)> - { bug!("lang_items") } - fn missing_lang_items(&self, cnum: CrateNum) -> Vec - { bug!("missing_lang_items") } - fn dep_kind(&self, cnum: CrateNum) -> DepKind { bug!("is_explicitly_linked") } - fn export_macros(&self, cnum: CrateNum) { bug!("export_macros") } - fn is_compiler_builtins(&self, cnum: CrateNum) -> bool { bug!("is_compiler_builtins") } - fn is_profiler_runtime(&self, cnum: CrateNum) -> bool { bug!("is_profiler_runtime") } - fn is_sanitizer_runtime(&self, cnum: CrateNum) -> bool { bug!("is_sanitizer_runtime") } - fn panic_strategy(&self, cnum: CrateNum) -> PanicStrategy { - bug!("panic_strategy") + fn dep_kind_untracked(&self, cnum: CrateNum) -> DepKind { bug!("is_explicitly_linked") } + fn export_macros_untracked(&self, cnum: CrateNum) { bug!("export_macros") } + fn crate_name_untracked(&self, cnum: CrateNum) -> Symbol { bug!("crate_name") } + fn crate_disambiguator_untracked(&self, cnum: CrateNum) -> Symbol { + bug!("crate_disambiguator") } - fn crate_name(&self, cnum: CrateNum) -> Symbol { bug!("crate_name") } - fn original_crate_name(&self, cnum: CrateNum) -> Symbol { - bug!("original_crate_name") - } - fn crate_hash(&self, cnum: CrateNum) -> Svh { bug!("crate_hash") } - fn crate_disambiguator(&self, cnum: CrateNum) - -> Symbol { bug!("crate_disambiguator") } - fn plugin_registrar_fn(&self, cnum: CrateNum) -> Option - { bug!("plugin_registrar_fn") } - fn derive_registrar_fn(&self, cnum: CrateNum) -> Option - { bug!("derive_registrar_fn") } - fn native_libraries(&self, cnum: CrateNum) -> Vec - { bug!("native_libraries") } - fn exported_symbols(&self, cnum: CrateNum) -> Vec { bug!("exported_symbols") } - fn is_no_builtins(&self, cnum: CrateNum) -> bool { bug!("is_no_builtins") } + fn crate_hash_untracked(&self, cnum: CrateNum) -> Svh { bug!("crate_hash") } // resolve fn def_key(&self, def: 
DefId) -> DefKey { bug!("def_key") } @@ -399,29 +354,18 @@ impl CrateStore for DummyCrateStore { fn def_path_table(&self, cnum: CrateNum) -> Rc { bug!("def_path_table") } - fn struct_field_names(&self, def: DefId) -> Vec { bug!("struct_field_names") } - fn item_children(&self, did: DefId, sess: &Session) -> Vec { - bug!("item_children") + fn struct_field_names_untracked(&self, def: DefId) -> Vec { + bug!("struct_field_names") } - fn load_macro(&self, did: DefId, sess: &Session) -> LoadedMacro { bug!("load_macro") } - - // misc. metadata - fn item_body<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) - -> &'tcx hir::Body { - bug!("item_body") + fn item_children_untracked(&self, did: DefId, sess: &Session) -> Vec { + bug!("item_children") } + fn load_macro_untracked(&self, did: DefId, sess: &Session) -> LoadedMacro { bug!("load_macro") } - // This is basically a 1-based range of ints, which is a little - // silly - I may fix that. - fn crates(&self) -> Vec { vec![] } - fn used_libraries(&self) -> Vec { vec![] } - fn used_link_args(&self) -> Vec { vec![] } + fn crates_untracked(&self) -> Vec { vec![] } // utility functions - fn used_crates(&self, prefer: LinkagePreference) -> Vec<(CrateNum, LibSource)> - { vec![] } - fn used_crate_source(&self, cnum: CrateNum) -> CrateSource { bug!("used_crate_source") } - fn extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option { None } + fn extern_mod_stmt_cnum_untracked(&self, emod_id: ast::NodeId) -> Option { None } fn encode_metadata<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, link_meta: &LinkMeta, @@ -430,6 +374,7 @@ impl CrateStore for DummyCrateStore { bug!("encode_metadata") } fn metadata_encoding_version(&self) -> &[u8] { bug!("metadata_encoding_version") } + fn postorder_cnums_untracked(&self) -> Vec { bug!("postorder_cnums_untracked") } // access to the metadata loader fn metadata_loader(&self) -> &MetadataLoader { bug!("metadata_loader") } @@ -439,3 +384,48 @@ pub trait CrateLoader { fn process_item(&mut self, item: &ast::Item, defs: &Definitions); fn postprocess(&mut self, krate: &ast::Crate); } + +// This method is used when generating the command line to pass through to +// system linker. The linker expects undefined symbols on the left of the +// command line to be defined in libraries on the right, not the other way +// around. For more info, see some comments in the add_used_library function +// below. +// +// In order to get this left-to-right dependency ordering, we perform a +// topological sort of all crates putting the leaves at the right-most +// positions. 
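The comment above explains why the `used_crates` helper added immediately below reverses a postorder traversal: undefined symbols must appear to the left of the libraries that define them on the linker command line. As a toy illustration of that ordering idea only — the crate names and dependency graph here are made up, and none of the rustc types from the patch are used:

use std::collections::HashMap;

// Depth-first postorder over a made-up crate dependency graph.
fn postorder(name: &str, deps: &HashMap<&str, Vec<&str>>, out: &mut Vec<String>) {
    if out.iter().any(|n| n == name) {
        return; // already visited
    }
    for &d in deps.get(name).map(|v| v.as_slice()).unwrap_or(&[]) {
        postorder(d, deps, out);
    }
    out.push(name.to_string());
}

fn main() {
    // Hypothetical graph: `app` depends on `serde` and `log`; `serde` on `log`.
    let mut deps = HashMap::new();
    deps.insert("app", vec!["serde", "log"]);
    deps.insert("serde", vec!["log"]);
    deps.insert("log", vec![]);

    let mut order = Vec::new();
    postorder("app", &deps, &mut order);
    // Reversing the postorder puts dependents on the left and leaves on the
    // right, the left-to-right order a traditional linker expects.
    // Prints: "app serde log".
    order.reverse();
    println!("{}", order.join(" "));
}
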
+pub fn used_crates(tcx: TyCtxt, prefer: LinkagePreference) + -> Vec<(CrateNum, LibSource)> +{ + let mut libs = tcx.crates() + .iter() + .cloned() + .filter_map(|cnum| { + if tcx.dep_kind(cnum).macros_only() { + return None + } + let source = tcx.used_crate_source(cnum); + let path = match prefer { + LinkagePreference::RequireDynamic => source.dylib.clone().map(|p| p.0), + LinkagePreference::RequireStatic => source.rlib.clone().map(|p| p.0), + }; + let path = match path { + Some(p) => LibSource::Some(p), + None => { + if source.rmeta.is_some() { + LibSource::MetadataOnly + } else { + LibSource::None + } + } + }; + Some((cnum, path)) + }) + .collect::>(); + let mut ordering = tcx.postorder_cnums(LOCAL_CRATE); + Rc::make_mut(&mut ordering).reverse(); + libs.sort_by_key(|&(a, _)| { + ordering.iter().position(|x| *x == a) + }); + libs +} diff --git a/src/librustc/middle/dataflow.rs b/src/librustc/middle/dataflow.rs index d394c0f0c8..e88678dea1 100644 --- a/src/librustc/middle/dataflow.rs +++ b/src/librustc/middle/dataflow.rs @@ -20,12 +20,11 @@ use ty::TyCtxt; use std::io; use std::mem; use std::usize; -use syntax::ast; use syntax::print::pprust::PrintState; use rustc_data_structures::graph::OUTGOING; -use util::nodemap::NodeMap; +use util::nodemap::FxHashMap; use hir; use hir::intravisit::{self, IdRange}; use hir::print as pprust; @@ -56,7 +55,7 @@ pub struct DataFlowContext<'a, 'tcx: 'a, O> { // mapping from node to cfg node index // FIXME (#6298): Shouldn't this go with CFG? - nodeid_to_index: NodeMap>, + local_id_to_index: FxHashMap>, // Bit sets per cfg node. The following three fields (`gens`, `kills`, // and `on_entry`) all have the same structure. For each id in @@ -97,15 +96,16 @@ struct PropagationContext<'a, 'b: 'a, 'tcx: 'b, O: 'a> { changed: bool } -fn get_cfg_indices<'a>(id: ast::NodeId, index: &'a NodeMap>) -> &'a [CFGIndex] { - let opt_indices = index.get(&id); - opt_indices.map(|v| &v[..]).unwrap_or(&[]) +fn get_cfg_indices<'a>(id: hir::ItemLocalId, + index: &'a FxHashMap>) + -> &'a [CFGIndex] { + index.get(&id).map_or(&[], |v| &v[..]) } impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> { - fn has_bitset_for_nodeid(&self, n: ast::NodeId) -> bool { - assert!(n != ast::DUMMY_NODE_ID); - self.nodeid_to_index.contains_key(&n) + fn has_bitset_for_local_id(&self, n: hir::ItemLocalId) -> bool { + assert!(n != hir::DUMMY_ITEM_LOCAL_ID); + self.local_id_to_index.contains_key(&n) } } @@ -117,19 +117,20 @@ impl<'a, 'tcx, O:DataFlowOperator> pprust::PpAnn for DataFlowContext<'a, 'tcx, O ps: &mut pprust::State, node: pprust::AnnNode) -> io::Result<()> { let id = match node { - pprust::NodeName(_) => ast::CRATE_NODE_ID, - pprust::NodeExpr(expr) => expr.id, - pprust::NodeBlock(blk) => blk.id, - pprust::NodeItem(_) | pprust::NodeSubItem(_) => ast::CRATE_NODE_ID, - pprust::NodePat(pat) => pat.id + pprust::NodeName(_) => return Ok(()), + pprust::NodeExpr(expr) => expr.hir_id.local_id, + pprust::NodeBlock(blk) => blk.hir_id.local_id, + pprust::NodeItem(_) | + pprust::NodeSubItem(_) => return Ok(()), + pprust::NodePat(pat) => pat.hir_id.local_id }; - if !self.has_bitset_for_nodeid(id) { + if !self.has_bitset_for_local_id(id) { return Ok(()); } assert!(self.bits_per_id > 0); - let indices = get_cfg_indices(id, &self.nodeid_to_index); + let indices = get_cfg_indices(id, &self.local_id_to_index); for &cfgidx in indices { let (start, end) = self.compute_id_range(cfgidx); let on_entry = &self.on_entry[start.. 
end]; @@ -157,7 +158,7 @@ impl<'a, 'tcx, O:DataFlowOperator> pprust::PpAnn for DataFlowContext<'a, 'tcx, O }; ps.synth_comment( - format!("id {}: {}{}{}{}", id, entry_str, + format!("id {}: {}{}{}{}", id.as_usize(), entry_str, gens_str, action_kills_str, scope_kills_str))?; ps.s.space()?; } @@ -165,9 +166,10 @@ impl<'a, 'tcx, O:DataFlowOperator> pprust::PpAnn for DataFlowContext<'a, 'tcx, O } } -fn build_nodeid_to_index(body: Option<&hir::Body>, - cfg: &cfg::CFG) -> NodeMap> { - let mut index = NodeMap(); +fn build_local_id_to_index(body: Option<&hir::Body>, + cfg: &cfg::CFG) + -> FxHashMap> { + let mut index = FxHashMap(); // FIXME (#6298): Would it be better to fold formals from decl // into cfg itself? i.e. introduce a fn-based flow-graph in @@ -188,14 +190,14 @@ fn build_nodeid_to_index(body: Option<&hir::Body>, /// Add mappings from the ast nodes for the formal bindings to /// the entry-node in the graph. - fn add_entries_from_fn_body(index: &mut NodeMap>, + fn add_entries_from_fn_body(index: &mut FxHashMap>, body: &hir::Body, entry: CFGIndex) { use hir::intravisit::Visitor; struct Formals<'a> { entry: CFGIndex, - index: &'a mut NodeMap>, + index: &'a mut FxHashMap>, } let mut formals = Formals { entry: entry, index: index }; for arg in &body.arguments { @@ -207,7 +209,7 @@ fn build_nodeid_to_index(body: Option<&hir::Body>, } fn visit_pat(&mut self, p: &hir::Pat) { - self.index.entry(p.id).or_insert(vec![]).push(self.entry); + self.index.entry(p.hir_id.local_id).or_insert(vec![]).push(self.entry); intravisit::walk_pat(self, p) } } @@ -259,13 +261,13 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> { let kills2 = zeroes; let on_entry = vec![entry; num_nodes * words_per_id]; - let nodeid_to_index = build_nodeid_to_index(body, cfg); + let local_id_to_index = build_local_id_to_index(body, cfg); DataFlowContext { tcx, analysis_name, words_per_id, - nodeid_to_index, + local_id_to_index, bits_per_id, oper, gens, @@ -275,14 +277,14 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> { } } - pub fn add_gen(&mut self, id: ast::NodeId, bit: usize) { + pub fn add_gen(&mut self, id: hir::ItemLocalId, bit: usize) { //! Indicates that `id` generates `bit` - debug!("{} add_gen(id={}, bit={})", + debug!("{} add_gen(id={:?}, bit={})", self.analysis_name, id, bit); - assert!(self.nodeid_to_index.contains_key(&id)); + assert!(self.local_id_to_index.contains_key(&id)); assert!(self.bits_per_id > 0); - let indices = get_cfg_indices(id, &self.nodeid_to_index); + let indices = get_cfg_indices(id, &self.local_id_to_index); for &cfgidx in indices { let (start, end) = self.compute_id_range(cfgidx); let gens = &mut self.gens[start.. end]; @@ -290,14 +292,14 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> { } } - pub fn add_kill(&mut self, kind: KillFrom, id: ast::NodeId, bit: usize) { + pub fn add_kill(&mut self, kind: KillFrom, id: hir::ItemLocalId, bit: usize) { //! 
Indicates that `id` kills `bit` - debug!("{} add_kill(id={}, bit={})", + debug!("{} add_kill(id={:?}, bit={})", self.analysis_name, id, bit); - assert!(self.nodeid_to_index.contains_key(&id)); + assert!(self.local_id_to_index.contains_key(&id)); assert!(self.bits_per_id > 0); - let indices = get_cfg_indices(id, &self.nodeid_to_index); + let indices = get_cfg_indices(id, &self.local_id_to_index); for &cfgidx in indices { let (start, end) = self.compute_id_range(cfgidx); let kills = match kind { @@ -341,15 +343,15 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> { } - pub fn each_bit_on_entry(&self, id: ast::NodeId, mut f: F) -> bool where + pub fn each_bit_on_entry(&self, id: hir::ItemLocalId, mut f: F) -> bool where F: FnMut(usize) -> bool, { //! Iterates through each bit that is set on entry to `id`. //! Only useful after `propagate()` has been called. - if !self.has_bitset_for_nodeid(id) { + if !self.has_bitset_for_local_id(id) { return true; } - let indices = get_cfg_indices(id, &self.nodeid_to_index); + let indices = get_cfg_indices(id, &self.local_id_to_index); for &cfgidx in indices { if !self.each_bit_for_node(EntryOrExit::Entry, cfgidx, |i| f(i)) { return false; @@ -387,11 +389,11 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> { self.each_bit(slice, f) } - pub fn each_gen_bit(&self, id: ast::NodeId, mut f: F) -> bool where + pub fn each_gen_bit(&self, id: hir::ItemLocalId, mut f: F) -> bool where F: FnMut(usize) -> bool, { //! Iterates through each bit in the gen set for `id`. - if !self.has_bitset_for_nodeid(id) { + if !self.has_bitset_for_local_id(id) { return true; } @@ -401,11 +403,11 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> { return true; } - let indices = get_cfg_indices(id, &self.nodeid_to_index); + let indices = get_cfg_indices(id, &self.local_id_to_index); for &cfgidx in indices { let (start, end) = self.compute_id_range(cfgidx); let gens = &self.gens[start.. end]; - debug!("{} each_gen_bit(id={}, gens={})", + debug!("{} each_gen_bit(id={:?}, gens={})", self.analysis_name, id, bits_to_string(gens)); if !self.each_bit(gens, |i| f(i)) { return false; @@ -472,17 +474,17 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> { let mut orig_kills = self.scope_kills[start.. end].to_vec(); let mut changed = false; - for &node_id in &edge.data.exiting_scopes { - let opt_cfg_idx = self.nodeid_to_index.get(&node_id); + for &id in &edge.data.exiting_scopes { + let opt_cfg_idx = self.local_id_to_index.get(&id); match opt_cfg_idx { Some(indices) => { for &cfg_idx in indices { let (start, end) = self.compute_id_range(cfg_idx); let kills = &self.scope_kills[start.. 
end]; if bitwise(&mut orig_kills, kills, &Union) { - debug!("scope exits: scope id={} \ + debug!("scope exits: scope id={:?} \ (node={:?} of {:?}) added killset: {}", - node_id, cfg_idx, indices, + id, cfg_idx, indices, bits_to_string(kills)); changed = true; } @@ -490,8 +492,8 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> { } None => { debug!("{} add_kills_from_flow_exits flow_exit={:?} \ - no cfg_idx for exiting_scope={}", - self.analysis_name, flow_exit, node_id); + no cfg_idx for exiting_scope={:?}", + self.analysis_name, flow_exit, id); } } } @@ -559,7 +561,7 @@ impl<'a, 'b, 'tcx, O:DataFlowOperator> PropagationContext<'a, 'b, 'tcx, O> { // Iterate over nodes in reverse postorder for &node_index in nodes_po.iter().rev() { let node = cfg.graph.node(node_index); - debug!("DataFlowContext::walk_cfg idx={:?} id={} begin in_out={}", + debug!("DataFlowContext::walk_cfg idx={:?} id={:?} begin in_out={}", node_index, node.data.id(), bits_to_string(in_out)); let (start, end) = self.dfcx.compute_id_range(node_index); diff --git a/src/librustc/middle/dead.rs b/src/librustc/middle/dead.rs index b862472269..a9d9f6f28e 100644 --- a/src/librustc/middle/dead.rs +++ b/src/librustc/middle/dead.rs @@ -79,7 +79,8 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { self.check_def_id(def.def_id()); } _ if self.ignore_non_const_paths => (), - Def::PrimTy(..) | Def::SelfTy(..) => (), + Def::PrimTy(..) | Def::SelfTy(..) | + Def::Local(..) | Def::Upvar(..) => {} Def::Variant(variant_id) | Def::VariantCtor(variant_id, ..) => { if let Some(enum_id) = self.tcx.parent_def_id(variant_id) { self.check_def_id(enum_id); @@ -469,7 +470,7 @@ impl<'a, 'tcx> DeadVisitor<'a, 'tcx> { fn should_warn_about_field(&mut self, field: &hir::StructField) -> bool { let field_type = self.tcx.type_of(self.tcx.hir.local_def_id(field.id)); let is_marker_field = match field_type.ty_to_def_id() { - Some(def_id) => self.tcx.lang_items.items().iter().any(|item| *item == Some(def_id)), + Some(def_id) => self.tcx.lang_items().items().iter().any(|item| *item == Some(def_id)), _ => false }; !field.is_positional() @@ -552,9 +553,22 @@ impl<'a, 'tcx> Visitor<'tcx> for DeadVisitor<'a, 'tcx> { fn visit_item(&mut self, item: &'tcx hir::Item) { if self.should_warn_about_item(item) { + // For items that have a definition with a signature followed by a + // block, point only at the signature. + let span = match item.node { + hir::ItemFn(..) | + hir::ItemMod(..) | + hir::ItemEnum(..) | + hir::ItemStruct(..) | + hir::ItemUnion(..) | + hir::ItemTrait(..) | + hir::ItemDefaultImpl(..) | + hir::ItemImpl(..) 
=> self.tcx.sess.codemap().def_span(item.span), + _ => item.span, + }; self.warn_dead_code( item.id, - item.span, + span, item.name, item.node.descriptive_variant() ); @@ -569,8 +583,7 @@ impl<'a, 'tcx> Visitor<'tcx> for DeadVisitor<'a, 'tcx> { g: &'tcx hir::Generics, id: ast::NodeId) { if self.should_warn_about_variant(&variant.node) { - self.warn_dead_code(variant.node.data.id(), variant.span, - variant.node.name, "variant"); + self.warn_dead_code(variant.node.data.id(), variant.span, variant.node.name, "variant"); } else { intravisit::walk_variant(self, variant, g, id); } @@ -595,15 +608,17 @@ impl<'a, 'tcx> Visitor<'tcx> for DeadVisitor<'a, 'tcx> { match impl_item.node { hir::ImplItemKind::Const(_, body_id) => { if !self.symbol_is_live(impl_item.id, None) { - self.warn_dead_code(impl_item.id, impl_item.span, - impl_item.name, "associated const"); + self.warn_dead_code(impl_item.id, + impl_item.span, + impl_item.name, + "associated const"); } self.visit_nested_body(body_id) } hir::ImplItemKind::Method(_, body_id) => { if !self.symbol_is_live(impl_item.id, None) { - self.warn_dead_code(impl_item.id, impl_item.span, - impl_item.name, "method"); + let span = self.tcx.sess.codemap().def_span(impl_item.span); + self.warn_dead_code(impl_item.id, span, impl_item.name, "method"); } self.visit_nested_body(body_id) } diff --git a/src/librustc/middle/dependency_format.rs b/src/librustc/middle/dependency_format.rs index 837ab4fd4a..db0ecb6aa5 100644 --- a/src/librustc/middle/dependency_format.rs +++ b/src/librustc/middle/dependency_format.rs @@ -66,7 +66,7 @@ use hir::def_id::CrateNum; use session; use session::config; use ty::TyCtxt; -use middle::cstore::DepKind; +use middle::cstore::{self, DepKind}; use middle::cstore::LinkagePreference::{self, RequireStatic, RequireDynamic}; use util::nodemap::FxHashMap; use rustc_back::PanicStrategy; @@ -112,52 +112,61 @@ fn calculate_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, return Vec::new(); } - match ty { - // If the global prefer_dynamic switch is turned off, first attempt - // static linkage (this can fail). - config::CrateTypeExecutable if !sess.opts.cg.prefer_dynamic => { - if let Some(v) = attempt_static(tcx) { - return v; - } - } - - // No linkage happens with rlibs, we just needed the metadata (which we - // got long ago), so don't bother with anything. - config::CrateTypeRlib => return Vec::new(), - - // Staticlibs and cdylibs must have all static dependencies. If any fail - // to be found, we generate some nice pretty errors. - config::CrateTypeStaticlib | - config::CrateTypeCdylib => { - if let Some(v) = attempt_static(tcx) { - return v; - } - for cnum in sess.cstore.crates() { - if sess.cstore.dep_kind(cnum).macros_only() { continue } - let src = sess.cstore.used_crate_source(cnum); - if src.rlib.is_some() { continue } - sess.err(&format!("dependency `{}` not found in rlib format", - sess.cstore.crate_name(cnum))); - } - return Vec::new(); - } + let preferred_linkage = match ty { + // cdylibs must have all static dependencies. + config::CrateTypeCdylib => Linkage::Static, // Generating a dylib without `-C prefer-dynamic` means that we're going // to try to eagerly statically link all dependencies. This is normally // done for end-product dylibs, not intermediate products. 
- config::CrateTypeDylib if !sess.opts.cg.prefer_dynamic => { - if let Some(v) = attempt_static(tcx) { - return v; - } - } + config::CrateTypeDylib if !sess.opts.cg.prefer_dynamic => Linkage::Static, + config::CrateTypeDylib => Linkage::Dynamic, + + // If the global prefer_dynamic switch is turned off, or the final + // executable will be statically linked, prefer static crate linkage. + config::CrateTypeExecutable if !sess.opts.cg.prefer_dynamic || + sess.crt_static() => Linkage::Static, + config::CrateTypeExecutable => Linkage::Dynamic, - // Everything else falls through below. This will happen either with the - // `-C prefer-dynamic` or because we're a proc-macro crate. Note that // proc-macro crates are required to be dylibs, and they're currently // required to link to libsyntax as well. - config::CrateTypeExecutable | - config::CrateTypeDylib | - config::CrateTypeProcMacro => {}, + config::CrateTypeProcMacro => Linkage::Dynamic, + + // No linkage happens with rlibs, we just needed the metadata (which we + // got long ago), so don't bother with anything. + config::CrateTypeRlib => Linkage::NotLinked, + + // staticlibs must have all static dependencies. + config::CrateTypeStaticlib => Linkage::Static, + }; + + if preferred_linkage == Linkage::NotLinked { + // If the crate is not linked, there are no link-time dependencies. + return Vec::new(); + } + + if preferred_linkage == Linkage::Static { + // Attempt static linkage first. For dylibs and executables, we may be + // able to retry below with dynamic linkage. + if let Some(v) = attempt_static(tcx) { + return v; + } + + // Staticlibs, cdylibs, and static executables must have all static + // dependencies. If any are not found, generate some nice pretty errors. + if ty == config::CrateTypeCdylib || ty == config::CrateTypeStaticlib || + (ty == config::CrateTypeExecutable && sess.crt_static() && + !sess.target.target.options.crt_static_allows_dylibs) { + for &cnum in tcx.crates().iter() { + if tcx.dep_kind(cnum).macros_only() { continue } + let src = tcx.used_crate_source(cnum); + if src.rlib.is_some() { continue } + sess.err(&format!("crate `{}` required to be available in rlib format, \ + but was not found in this form", + tcx.crate_name(cnum))); + } + return Vec::new(); + } } let mut formats = FxHashMap(); @@ -165,24 +174,23 @@ fn calculate_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // Sweep all crates for found dylibs. Add all dylibs, as well as their // dependencies, ensuring there are no conflicts. The only valid case for a // dependency to be relied upon twice is for both cases to rely on a dylib. 
- for cnum in sess.cstore.crates() { - if sess.cstore.dep_kind(cnum).macros_only() { continue } - let name = sess.cstore.crate_name(cnum); - let src = sess.cstore.used_crate_source(cnum); + for &cnum in tcx.crates().iter() { + if tcx.dep_kind(cnum).macros_only() { continue } + let name = tcx.crate_name(cnum); + let src = tcx.used_crate_source(cnum); if src.dylib.is_some() { info!("adding dylib: {}", name); - add_library(sess, cnum, RequireDynamic, &mut formats); - let deps = tcx.dylib_dependency_formats(cnum.as_def_id()); + add_library(tcx, cnum, RequireDynamic, &mut formats); + let deps = tcx.dylib_dependency_formats(cnum); for &(depnum, style) in deps.iter() { - info!("adding {:?}: {}", style, - sess.cstore.crate_name(depnum)); - add_library(sess, depnum, style, &mut formats); + info!("adding {:?}: {}", style, tcx.crate_name(depnum)); + add_library(tcx, depnum, style, &mut formats); } } } // Collect what we've got so far in the return vector. - let last_crate = sess.cstore.crates().len(); + let last_crate = tcx.crates().len(); let mut ret = (1..last_crate+1).map(|cnum| { match formats.get(&CrateNum::new(cnum)) { Some(&RequireDynamic) => Linkage::Dynamic, @@ -196,14 +204,14 @@ fn calculate_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // // If the crate hasn't been included yet and it's not actually required // (e.g. it's an allocator) then we skip it here as well. - for cnum in sess.cstore.crates() { - let src = sess.cstore.used_crate_source(cnum); + for &cnum in tcx.crates().iter() { + let src = tcx.used_crate_source(cnum); if src.dylib.is_none() && !formats.contains_key(&cnum) && - sess.cstore.dep_kind(cnum) == DepKind::Explicit { + tcx.dep_kind(cnum) == DepKind::Explicit { assert!(src.rlib.is_some() || src.rmeta.is_some()); - info!("adding staticlib: {}", sess.cstore.crate_name(cnum)); - add_library(sess, cnum, RequireStatic, &mut formats); + info!("adding staticlib: {}", tcx.crate_name(cnum)); + add_library(tcx, cnum, RequireStatic, &mut formats); ret[cnum.as_usize() - 1] = Linkage::Static; } } @@ -215,7 +223,7 @@ fn calculate_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // Things like allocators and panic runtimes may not have been activated // quite yet, so do so here. activate_injected_dep(sess.injected_panic_runtime.get(), &mut ret, - &|cnum| tcx.is_panic_runtime(cnum.as_def_id())); + &|cnum| tcx.is_panic_runtime(cnum)); activate_injected_allocator(sess, &mut ret); // When dylib B links to dylib A, then when using B we must also link to A. @@ -226,7 +234,7 @@ fn calculate_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // making sure that everything is available in the requested format. 
for (cnum, kind) in ret.iter().enumerate() { let cnum = CrateNum::new(cnum + 1); - let src = sess.cstore.used_crate_source(cnum); + let src = tcx.used_crate_source(cnum); match *kind { Linkage::NotLinked | Linkage::IncludedFromDylib => {} @@ -237,10 +245,9 @@ fn calculate_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, Linkage::Static => "rlib", _ => "dylib", }; - let name = sess.cstore.crate_name(cnum); - sess.err(&format!("crate `{}` required to be available in {}, \ - but it was not available in this form", - name, kind)); + sess.err(&format!("crate `{}` required to be available in {} format, \ + but was not found in this form", + tcx.crate_name(cnum), kind)); } } } @@ -248,7 +255,7 @@ fn calculate_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, return ret; } -fn add_library(sess: &session::Session, +fn add_library(tcx: TyCtxt, cnum: CrateNum, link: LinkagePreference, m: &mut FxHashMap) { @@ -262,8 +269,8 @@ fn add_library(sess: &session::Session, // This error is probably a little obscure, but I imagine that it // can be refined over time. if link2 != link || link == RequireStatic { - sess.struct_err(&format!("cannot satisfy dependencies so `{}` only \ - shows up once", sess.cstore.crate_name(cnum))) + tcx.sess.struct_err(&format!("cannot satisfy dependencies so `{}` only \ + shows up once", tcx.crate_name(cnum))) .help("having upstream crates all available in one format \ will likely make this go away") .emit(); @@ -275,16 +282,16 @@ fn add_library(sess: &session::Session, fn attempt_static<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Option { let sess = &tcx.sess; - let crates = sess.cstore.used_crates(RequireStatic); + let crates = cstore::used_crates(tcx, RequireStatic); if !crates.iter().by_ref().all(|&(_, ref p)| p.is_some()) { return None } // All crates are available in an rlib format, so we're just going to link // everything in explicitly so long as it's actually required. 
- let last_crate = sess.cstore.crates().len(); + let last_crate = tcx.crates().len(); let mut ret = (1..last_crate+1).map(|cnum| { - if sess.cstore.dep_kind(CrateNum::new(cnum)) == DepKind::Explicit { + if tcx.dep_kind(CrateNum::new(cnum)) == DepKind::Explicit { Linkage::Static } else { Linkage::NotLinked @@ -295,7 +302,7 @@ fn attempt_static<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Option(tcx: TyCtxt<'a, 'tcx, 'tcx>, list: &[Linkage]) { } let cnum = CrateNum::new(i + 1); - if tcx.is_panic_runtime(cnum.as_def_id()) { + if tcx.is_panic_runtime(cnum) { if let Some((prev, _)) = panic_runtime { - let prev_name = sess.cstore.crate_name(prev); - let cur_name = sess.cstore.crate_name(cnum); + let prev_name = tcx.crate_name(prev); + let cur_name = tcx.crate_name(cnum); sess.err(&format!("cannot link together two \ panic runtimes: {} and {}", prev_name, cur_name)); } - panic_runtime = Some((cnum, sess.cstore.panic_strategy(cnum))); + panic_runtime = Some((cnum, tcx.panic_strategy(cnum))); } } @@ -379,7 +386,7 @@ fn verify_ok<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, list: &[Linkage]) { sess.err(&format!("the linked panic runtime `{}` is \ not compiled with this crate's \ panic strategy `{}`", - sess.cstore.crate_name(cnum), + tcx.crate_name(cnum), desired_strategy.desc())); } @@ -395,8 +402,8 @@ fn verify_ok<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, list: &[Linkage]) { continue } let cnum = CrateNum::new(i + 1); - let found_strategy = sess.cstore.panic_strategy(cnum); - let is_compiler_builtins = sess.cstore.is_compiler_builtins(cnum); + let found_strategy = tcx.panic_strategy(cnum); + let is_compiler_builtins = tcx.is_compiler_builtins(cnum); if is_compiler_builtins || desired_strategy == found_strategy { continue } @@ -405,7 +412,7 @@ fn verify_ok<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, list: &[Linkage]) { panic strategy `{}` which is \ incompatible with this crate's \ strategy of `{}`", - sess.cstore.crate_name(cnum), + tcx.crate_name(cnum), found_strategy.desc(), desired_strategy.desc())); } diff --git a/src/librustc/middle/effect.rs b/src/librustc/middle/effect.rs deleted file mode 100644 index 98934d6070..0000000000 --- a/src/librustc/middle/effect.rs +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Enforces the Rust effect system. Currently there is just one effect, -//! `unsafe`. -use self::RootUnsafeContext::*; - -use ty::{self, TyCtxt}; -use lint; - -use syntax::ast; -use syntax_pos::Span; -use hir::{self, PatKind}; -use hir::def::Def; -use hir::intravisit::{self, FnKind, Visitor, NestedVisitorMap}; - -#[derive(Copy, Clone)] -struct UnsafeContext { - push_unsafe_count: usize, - root: RootUnsafeContext, -} - -impl UnsafeContext { - fn new(root: RootUnsafeContext) -> UnsafeContext { - UnsafeContext { root: root, push_unsafe_count: 0 } - } -} - -#[derive(Copy, Clone, PartialEq)] -enum RootUnsafeContext { - SafeContext, - UnsafeFn, - UnsafeBlock(ast::NodeId), -} - -struct EffectCheckVisitor<'a, 'tcx: 'a> { - tcx: TyCtxt<'a, 'tcx, 'tcx>, - tables: &'a ty::TypeckTables<'tcx>, - body_id: hir::BodyId, - - /// Whether we're in an unsafe context. 
- unsafe_context: UnsafeContext, -} - -impl<'a, 'tcx> EffectCheckVisitor<'a, 'tcx> { - fn require_unsafe_ext(&mut self, node_id: ast::NodeId, span: Span, - description: &str, is_lint: bool) { - if self.unsafe_context.push_unsafe_count > 0 { return; } - match self.unsafe_context.root { - SafeContext => { - if is_lint { - self.tcx.lint_node(lint::builtin::SAFE_EXTERN_STATICS, - node_id, - span, - &format!("{} requires unsafe function or \ - block (error E0133)", description)); - } else { - // Report an error. - struct_span_err!( - self.tcx.sess, span, E0133, - "{} requires unsafe function or block", description) - .span_label(span, description) - .emit(); - } - } - UnsafeBlock(block_id) => { - // OK, but record this. - debug!("effect: recording unsafe block as used: {}", block_id); - self.tcx.used_unsafe.borrow_mut().insert(block_id); - } - UnsafeFn => {} - } - } - - fn require_unsafe(&mut self, span: Span, description: &str) { - self.require_unsafe_ext(ast::DUMMY_NODE_ID, span, description, false) - } -} - -impl<'a, 'tcx> Visitor<'tcx> for EffectCheckVisitor<'a, 'tcx> { - fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { - NestedVisitorMap::None - } - - fn visit_nested_body(&mut self, body: hir::BodyId) { - let old_tables = self.tables; - let old_body_id = self.body_id; - self.tables = self.tcx.body_tables(body); - self.body_id = body; - let body = self.tcx.hir.body(body); - self.visit_body(body); - self.tables = old_tables; - self.body_id = old_body_id; - } - - fn visit_fn(&mut self, fn_kind: FnKind<'tcx>, fn_decl: &'tcx hir::FnDecl, - body_id: hir::BodyId, span: Span, id: ast::NodeId) { - - let (is_item_fn, is_unsafe_fn) = match fn_kind { - FnKind::ItemFn(_, _, unsafety, ..) => - (true, unsafety == hir::Unsafety::Unsafe), - FnKind::Method(_, sig, ..) => - (true, sig.unsafety == hir::Unsafety::Unsafe), - _ => (false, false), - }; - - let old_unsafe_context = self.unsafe_context; - if is_unsafe_fn { - self.unsafe_context = UnsafeContext::new(UnsafeFn) - } else if is_item_fn { - self.unsafe_context = UnsafeContext::new(SafeContext) - } - - intravisit::walk_fn(self, fn_kind, fn_decl, body_id, span, id); - - self.unsafe_context = old_unsafe_context - } - - fn visit_block(&mut self, block: &'tcx hir::Block) { - let old_unsafe_context = self.unsafe_context; - match block.rules { - hir::UnsafeBlock(source) => { - // By default only the outermost `unsafe` block is - // "used" and so nested unsafe blocks are pointless - // (the inner ones are unnecessary and we actually - // warn about them). As such, there are two cases when - // we need to create a new context, when we're - // - outside `unsafe` and found a `unsafe` block - // (normal case) - // - inside `unsafe`, found an `unsafe` block - // created internally to the compiler - // - // The second case is necessary to ensure that the - // compiler `unsafe` blocks don't accidentally "use" - // external blocks (e.g. `unsafe { println("") }`, - // expands to `unsafe { ... unsafe { ... } }` where - // the inner one is compiler generated). - if self.unsafe_context.root == SafeContext || source == hir::CompilerGenerated { - self.unsafe_context.root = UnsafeBlock(block.id) - } - } - hir::PushUnsafeBlock(..) => { - self.unsafe_context.push_unsafe_count = - self.unsafe_context.push_unsafe_count.checked_add(1).unwrap(); - } - hir::PopUnsafeBlock(..) 
=> { - self.unsafe_context.push_unsafe_count = - self.unsafe_context.push_unsafe_count.checked_sub(1).unwrap(); - } - hir::DefaultBlock => {} - } - - intravisit::walk_block(self, block); - - self.unsafe_context = old_unsafe_context - } - - fn visit_expr(&mut self, expr: &'tcx hir::Expr) { - match expr.node { - hir::ExprMethodCall(..) => { - let def_id = self.tables.type_dependent_defs()[expr.hir_id].def_id(); - let sig = self.tcx.fn_sig(def_id); - debug!("effect: method call case, signature is {:?}", - sig); - - if sig.0.unsafety == hir::Unsafety::Unsafe { - self.require_unsafe(expr.span, - "invocation of unsafe method") - } - } - hir::ExprCall(ref base, _) => { - let base_type = self.tables.expr_ty_adjusted(base); - debug!("effect: call case, base type is {:?}", - base_type); - match base_type.sty { - ty::TyFnDef(..) | ty::TyFnPtr(_) => { - if base_type.fn_sig(self.tcx).unsafety() == hir::Unsafety::Unsafe { - self.require_unsafe(expr.span, "call to unsafe function") - } - } - _ => {} - } - } - hir::ExprUnary(hir::UnDeref, ref base) => { - let base_type = self.tables.expr_ty_adjusted(base); - debug!("effect: unary case, base type is {:?}", - base_type); - if let ty::TyRawPtr(_) = base_type.sty { - self.require_unsafe(expr.span, "dereference of raw pointer") - } - } - hir::ExprInlineAsm(..) => { - self.require_unsafe(expr.span, "use of inline assembly"); - } - hir::ExprPath(hir::QPath::Resolved(_, ref path)) => { - if let Def::Static(def_id, mutbl) = path.def { - if mutbl { - self.require_unsafe(expr.span, "use of mutable static"); - } else if match self.tcx.hir.get_if_local(def_id) { - Some(hir::map::NodeForeignItem(..)) => true, - Some(..) => false, - None => self.tcx.is_foreign_item(def_id), - } { - self.require_unsafe_ext(expr.id, expr.span, "use of extern static", true); - } - } - } - hir::ExprField(ref base_expr, field) => { - if let ty::TyAdt(adt, ..) = self.tables.expr_ty_adjusted(base_expr).sty { - if adt.is_union() { - self.require_unsafe(field.span, "access to union field"); - } - } - } - hir::ExprAssign(ref lhs, ref rhs) => { - if let hir::ExprField(ref base_expr, field) = lhs.node { - if let ty::TyAdt(adt, ..) = self.tables.expr_ty_adjusted(base_expr).sty { - if adt.is_union() { - let field_ty = self.tables.expr_ty_adjusted(lhs); - let owner_def_id = self.tcx.hir.body_owner_def_id(self.body_id); - let param_env = self.tcx.param_env(owner_def_id); - if field_ty.moves_by_default(self.tcx, param_env, field.span) { - self.require_unsafe(field.span, - "assignment to non-`Copy` union field"); - } - // Do not walk the field expr again. - intravisit::walk_expr(self, base_expr); - intravisit::walk_expr(self, rhs); - return - } - } - } - } - _ => {} - } - - intravisit::walk_expr(self, expr); - } - - fn visit_pat(&mut self, pat: &'tcx hir::Pat) { - if let PatKind::Struct(_, ref fields, _) = pat.node { - if let ty::TyAdt(adt, ..) 
= self.tables.pat_ty(pat).sty { - if adt.is_union() { - for field in fields { - self.require_unsafe(field.span, "matching on union field"); - } - } - } - } - - intravisit::walk_pat(self, pat); - } -} - -pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { - let mut visitor = EffectCheckVisitor { - tcx, - tables: &ty::TypeckTables::empty(None), - body_id: hir::BodyId { node_id: ast::CRATE_NODE_ID }, - unsafe_context: UnsafeContext::new(SafeContext), - }; - - tcx.hir.krate().visit_all_item_likes(&mut visitor.as_deep_visitor()); -} diff --git a/src/librustc/middle/exported_symbols.rs b/src/librustc/middle/exported_symbols.rs new file mode 100644 index 0000000000..d650dbe88b --- /dev/null +++ b/src/librustc/middle/exported_symbols.rs @@ -0,0 +1,36 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/// The SymbolExportLevel of a symbols specifies from which kinds of crates +/// the symbol will be exported. `C` symbols will be exported from any +/// kind of crate, including cdylibs which export very few things. +/// `Rust` will only be exported if the crate produced is a Rust +/// dylib. +#[derive(Eq, PartialEq, Debug, Copy, Clone)] +pub enum SymbolExportLevel { + C, + Rust, +} + +impl_stable_hash_for!(enum self::SymbolExportLevel { + C, + Rust +}); + +impl SymbolExportLevel { + pub fn is_below_threshold(self, threshold: SymbolExportLevel) -> bool { + if threshold == SymbolExportLevel::Rust { + // We export everything from Rust dylibs + true + } else { + self == SymbolExportLevel::C + } + } +} diff --git a/src/librustc/middle/expr_use_visitor.rs b/src/librustc/middle/expr_use_visitor.rs index 324f9a6e90..b036b145a9 100644 --- a/src/librustc/middle/expr_use_visitor.rs +++ b/src/librustc/middle/expr_use_visitor.rs @@ -23,7 +23,7 @@ use hir::def::Def; use hir::def_id::{DefId}; use infer::InferCtxt; use middle::mem_categorization as mc; -use middle::region::RegionMaps; +use middle::region; use ty::{self, TyCtxt, adjustment}; use hir::{self, PatKind}; @@ -211,9 +211,9 @@ enum OverloadedCallType { impl OverloadedCallType { fn from_trait_id(tcx: TyCtxt, trait_id: DefId) -> OverloadedCallType { for &(maybe_function_trait, overloaded_call_type) in &[ - (tcx.lang_items.fn_once_trait(), FnOnceOverloadedCall), - (tcx.lang_items.fn_mut_trait(), FnMutOverloadedCall), - (tcx.lang_items.fn_trait(), FnOverloadedCall) + (tcx.lang_items().fn_once_trait(), FnOnceOverloadedCall), + (tcx.lang_items().fn_mut_trait(), FnMutOverloadedCall), + (tcx.lang_items().fn_trait(), FnOverloadedCall) ] { match maybe_function_trait { Some(function_trait) if function_trait == trait_id => { @@ -265,12 +265,12 @@ impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx, 'tcx> { pub fn new(delegate: &'a mut (Delegate<'tcx>+'a), tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, - region_maps: &'a RegionMaps, + region_scope_tree: &'a region::ScopeTree, tables: &'a ty::TypeckTables<'tcx>) -> Self { ExprUseVisitor { - mc: mc::MemCategorizationContext::new(tcx, region_maps, tables), + mc: mc::MemCategorizationContext::new(tcx, region_scope_tree, tables), delegate, param_env, } @@ -281,12 +281,12 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { pub fn with_infer(delegate: &'a mut (Delegate<'tcx>+'a), infcx: &'a InferCtxt<'a, 
'gcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, - region_maps: &'a RegionMaps, + region_scope_tree: &'a region::ScopeTree, tables: &'a ty::TypeckTables<'tcx>) -> Self { ExprUseVisitor { - mc: mc::MemCategorizationContext::with_infer(infcx, region_maps, tables), + mc: mc::MemCategorizationContext::with_infer(infcx, region_scope_tree, tables), delegate, param_env, } @@ -298,7 +298,8 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { for arg in &body.arguments { let arg_ty = return_if_err!(self.mc.node_ty(arg.pat.hir_id)); - let fn_body_scope_r = self.tcx().node_scope_region(body.value.id); + let fn_body_scope_r = + self.tcx().mk_region(ty::ReScope(region::Scope::Node(body.value.hir_id.local_id))); let arg_cmt = self.mc.cat_rvalue( arg.id, arg.pat.span, @@ -517,13 +518,17 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { self.consume_expr(&base); } - hir::ExprClosure(.., fn_decl_span) => { + hir::ExprClosure(.., fn_decl_span, _) => { self.walk_captures(expr, fn_decl_span) } hir::ExprBox(ref base) => { self.consume_expr(&base); } + + hir::ExprYield(ref value) => { + self.consume_expr(&value); + } } } @@ -538,16 +543,17 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { ty::TyError => { } _ => { let def_id = self.mc.tables.type_dependent_defs()[call.hir_id].def_id(); + let call_scope = region::Scope::Node(call.hir_id.local_id); match OverloadedCallType::from_method_id(self.tcx(), def_id) { FnMutOverloadedCall => { - let call_scope_r = self.tcx().node_scope_region(call.id); + let call_scope_r = self.tcx().mk_region(ty::ReScope(call_scope)); self.borrow_expr(callee, call_scope_r, ty::MutBorrow, ClosureInvocation); } FnOverloadedCall => { - let call_scope_r = self.tcx().node_scope_region(call.id); + let call_scope_r = self.tcx().mk_region(ty::ReScope(call_scope)); self.borrow_expr(callee, call_scope_r, ty::ImmBorrow, @@ -745,7 +751,8 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { // Converting from a &T to *T (or &mut T to *mut T) is // treated as borrowing it for the enclosing temporary // scope. - let r = self.tcx().node_scope_region(expr.id); + let r = self.tcx().mk_region(ty::ReScope( + region::Scope::Node(expr.hir_id.local_id))); self.delegate.borrow(expr.id, expr.span, @@ -821,7 +828,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { let ExprUseVisitor { ref mc, ref mut delegate, param_env } = *self; return_if_err!(mc.cat_pattern(cmt_discr.clone(), pat, |cmt_pat, pat| { - if let PatKind::Binding(_, def_id, ..) = pat.node { + if let PatKind::Binding(_, canonical_id, ..) = pat.node { debug!("binding cmt_pat={:?} pat={:?} match_mode={:?}", cmt_pat, pat, match_mode); let bm = *mc.tables.pat_binding_modes().get(pat.hir_id) .expect("missing binding mode"); @@ -831,7 +838,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { // Each match binding is effectively an assignment to the // binding being produced. 
- let def = Def::Local(def_id); + let def = Def::Local(canonical_id); if let Ok(binding_cmt) = mc.cat_def(pat.id, pat.span, pat_ty, def) { delegate.mutate(pat.id, pat.span, binding_cmt, MutateMode::Init); } @@ -888,17 +895,16 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { self.tcx().with_freevars(closure_expr.id, |freevars| { for freevar in freevars { - let var_def_id = freevar.def.def_id(); - debug_assert!(var_def_id.is_local()); + let var_hir_id = self.tcx().hir.node_to_hir_id(freevar.var_id()); let closure_def_id = self.tcx().hir.local_def_id(closure_expr.id); let upvar_id = ty::UpvarId { - var_id: var_def_id.index, + var_id: var_hir_id, closure_expr_id: closure_def_id.index }; let upvar_capture = self.mc.tables.upvar_capture(upvar_id); let cmt_var = return_if_err!(self.cat_captured_var(closure_expr.id, fn_decl_span, - freevar.def)); + freevar)); match upvar_capture { ty::UpvarCapture::ByValue => { let mode = copy_or_move(&self.mc, @@ -923,14 +929,13 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { fn cat_captured_var(&mut self, closure_id: ast::NodeId, closure_span: Span, - upvar_def: Def) + upvar: &hir::Freevar) -> mc::McResult> { // Create the cmt for the variable being borrowed, from the // caller's perspective - let var_node_id = self.tcx().hir.as_local_node_id(upvar_def.def_id()).unwrap(); - let var_hir_id = self.tcx().hir.node_to_hir_id(var_node_id); + let var_hir_id = self.tcx().hir.node_to_hir_id(upvar.var_id()); let var_ty = self.mc.node_ty(var_hir_id)?; - self.mc.cat_def(closure_id, closure_span, var_ty, upvar_def) + self.mc.cat_def(closure_id, closure_span, var_ty, upvar.def) } } diff --git a/src/librustc/middle/free_region.rs b/src/librustc/middle/free_region.rs index de738fba30..49a241b86e 100644 --- a/src/librustc/middle/free_region.rs +++ b/src/librustc/middle/free_region.rs @@ -16,11 +16,11 @@ //! region outlives another and so forth. use hir::def_id::DefId; -use middle::region::RegionMaps; +use middle::region; use ty::{self, Lift, TyCtxt, Region}; use rustc_data_structures::transitive_relation::TransitiveRelation; -/// Combines a `RegionMaps` (which governs relationships between +/// Combines a `region::ScopeTree` (which governs relationships between /// scopes) and a `FreeRegionMap` (which governs relationships between /// free regions) to yield a complete relation between concrete /// regions. 
@@ -34,7 +34,7 @@ pub struct RegionRelations<'a, 'gcx: 'tcx, 'tcx: 'a> { pub context: DefId, /// region maps for the given context - pub region_maps: &'a RegionMaps, + pub region_scope_tree: &'a region::ScopeTree, /// free-region relationships pub free_regions: &'a FreeRegionMap<'tcx>, @@ -44,13 +44,13 @@ impl<'a, 'gcx, 'tcx> RegionRelations<'a, 'gcx, 'tcx> { pub fn new( tcx: TyCtxt<'a, 'gcx, 'tcx>, context: DefId, - region_maps: &'a RegionMaps, + region_scope_tree: &'a region::ScopeTree, free_regions: &'a FreeRegionMap<'tcx>, ) -> Self { Self { tcx, context, - region_maps, + region_scope_tree, free_regions, } } @@ -68,16 +68,16 @@ impl<'a, 'gcx, 'tcx> RegionRelations<'a, 'gcx, 'tcx> { true, (&ty::ReScope(sub_scope), &ty::ReScope(super_scope)) => - self.region_maps.is_subscope_of(sub_scope, super_scope), + self.region_scope_tree.is_subscope_of(sub_scope, super_scope), (&ty::ReScope(sub_scope), &ty::ReEarlyBound(ref br)) => { - let fr_scope = self.region_maps.early_free_extent(self.tcx, br); - self.region_maps.is_subscope_of(sub_scope, fr_scope) + let fr_scope = self.region_scope_tree.early_free_scope(self.tcx, br); + self.region_scope_tree.is_subscope_of(sub_scope, fr_scope) } (&ty::ReScope(sub_scope), &ty::ReFree(ref fr)) => { - let fr_scope = self.region_maps.free_extent(self.tcx, fr); - self.region_maps.is_subscope_of(sub_scope, fr_scope) + let fr_scope = self.region_scope_tree.free_scope(self.tcx, fr); + self.region_scope_tree.is_subscope_of(sub_scope, fr_scope) } (&ty::ReEarlyBound(_), &ty::ReEarlyBound(_)) | @@ -117,7 +117,7 @@ impl<'a, 'gcx, 'tcx> RegionRelations<'a, 'gcx, 'tcx> { } } -#[derive(Clone, RustcEncodable, RustcDecodable)] +#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] pub struct FreeRegionMap<'tcx> { // Stores the relation `a < b`, where `a` and `b` are regions. // @@ -147,7 +147,8 @@ impl<'tcx> FreeRegionMap<'tcx> { ty::Predicate::WellFormed(..) | ty::Predicate::ObjectSafe(..) | ty::Predicate::ClosureKind(..) | - ty::Predicate::TypeOutlives(..) => { + ty::Predicate::TypeOutlives(..) | + ty::Predicate::ConstEvaluatable(..) => { // No region bounds here } ty::Predicate::RegionOutlives(ty::Binder(ty::OutlivesPredicate(r_a, r_b))) => { diff --git a/src/librustc/middle/lang_items.rs b/src/librustc/middle/lang_items.rs index 9ba4252b52..679c4f17a6 100644 --- a/src/librustc/middle/lang_items.rs +++ b/src/librustc/middle/lang_items.rs @@ -21,10 +21,8 @@ pub use self::LangItem::*; -use hir::map as hir_map; -use session::Session; use hir::def_id::DefId; -use ty; +use ty::{self, TyCtxt}; use middle::weak_lang_items; use util::nodemap::FxHashMap; @@ -48,6 +46,14 @@ enum_from_u32! { } } +impl LangItem { + fn name(self) -> &'static str { + match self { + $( $variant => $name, )* + } + } +} + pub struct LanguageItems { pub items: Vec>, pub missing: Vec, @@ -67,42 +73,17 @@ impl LanguageItems { &*self.items } - pub fn item_name(index: usize) -> &'static str { - let item: Option = LangItem::from_u32(index as u32); - match item { - $( Some($variant) => $name, )* - None => "???" 
- } - } - pub fn require(&self, it: LangItem) -> Result { - match self.items[it as usize] { - Some(id) => Ok(id), - None => { - Err(format!("requires `{}` lang_item", - LanguageItems::item_name(it as usize))) - } - } - } - - pub fn require_owned_box(&self) -> Result { - self.require(OwnedBoxLangItem) + self.items[it as usize].ok_or_else(|| format!("requires `{}` lang_item", it.name())) } pub fn fn_trait_kind(&self, id: DefId) -> Option { - let def_id_kinds = [ - (self.fn_trait(), ty::ClosureKind::Fn), - (self.fn_mut_trait(), ty::ClosureKind::FnMut), - (self.fn_once_trait(), ty::ClosureKind::FnOnce), - ]; - - for &(opt_def_id, kind) in &def_id_kinds { - if Some(id) == opt_def_id { - return Some(kind); - } + match Some(id) { + x if x == self.fn_trait() => Some(ty::ClosureKind::Fn), + x if x == self.fn_mut_trait() => Some(ty::ClosureKind::FnMut), + x if x == self.fn_once_trait() => Some(ty::ClosureKind::FnOnce), + _ => None } - - None } $( @@ -116,9 +97,7 @@ impl LanguageItems { struct LanguageItemCollector<'a, 'tcx: 'a> { items: LanguageItems, - hir_map: &'a hir_map::Map<'tcx>, - - session: &'a Session, + tcx: TyCtxt<'a, 'tcx, 'tcx>, item_refs: FxHashMap<&'static str, usize>, } @@ -129,10 +108,11 @@ impl<'a, 'v, 'tcx> ItemLikeVisitor<'v> for LanguageItemCollector<'a, 'tcx> { let item_index = self.item_refs.get(&*value.as_str()).cloned(); if let Some(item_index) = item_index { - self.collect_item(item_index, self.hir_map.local_def_id(item.id)) + let def_id = self.tcx.hir.local_def_id(item.id); + self.collect_item(item_index, def_id); } else { - let span = self.hir_map.span(item.id); - span_err!(self.session, span, E0522, + let span = self.tcx.hir.span(item.id); + span_err!(self.tcx.sess, span, E0522, "definition of an unknown language item: `{}`.", value); } @@ -149,45 +129,41 @@ impl<'a, 'v, 'tcx> ItemLikeVisitor<'v> for LanguageItemCollector<'a, 'tcx> { } impl<'a, 'tcx> LanguageItemCollector<'a, 'tcx> { - pub fn new(session: &'a Session, hir_map: &'a hir_map::Map<'tcx>) - -> LanguageItemCollector<'a, 'tcx> { + fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> LanguageItemCollector<'a, 'tcx> { let mut item_refs = FxHashMap(); $( item_refs.insert($name, $variant as usize); )* LanguageItemCollector { - session, - hir_map, + tcx, items: LanguageItems::new(), item_refs, } } - pub fn collect_item(&mut self, item_index: usize, - item_def_id: DefId) { + fn collect_item(&mut self, item_index: usize, item_def_id: DefId) { // Check for duplicates. 
match self.items.items[item_index] { Some(original_def_id) if original_def_id != item_def_id => { - let cstore = &self.session.cstore; - let name = LanguageItems::item_name(item_index); - let mut err = match self.hir_map.span_if_local(item_def_id) { + let name = LangItem::from_u32(item_index as u32).unwrap().name(); + let mut err = match self.tcx.hir.span_if_local(item_def_id) { Some(span) => struct_span_err!( - self.session, + self.tcx.sess, span, E0152, "duplicate lang item found: `{}`.", name), - None => self.session.struct_err(&format!( + None => self.tcx.sess.struct_err(&format!( "duplicate lang item in crate `{}`: `{}`.", - cstore.crate_name(item_def_id.krate), + self.tcx.crate_name(item_def_id.krate), name)), }; - if let Some(span) = self.hir_map.span_if_local(original_def_id) { + if let Some(span) = self.tcx.hir.span_if_local(original_def_id) { span_note!(&mut err, span, "first defined here."); } else { err.note(&format!("first defined in crate `{}`.", - cstore.crate_name(original_def_id.krate))); + self.tcx.crate_name(original_def_id.krate))); } err.emit(); } @@ -199,26 +175,6 @@ impl<'a, 'tcx> LanguageItemCollector<'a, 'tcx> { // Matched. self.items.items[item_index] = Some(item_def_id); } - - pub fn collect_local_language_items(&mut self, krate: &hir::Crate) { - krate.visit_all_item_likes(self); - } - - pub fn collect_external_language_items(&mut self) { - let cstore = &self.session.cstore; - - for cnum in cstore.crates() { - for (index, item_index) in cstore.lang_items(cnum) { - let def_id = DefId { krate: cnum, index: index }; - self.collect_item(item_index, def_id); - } - } - } - - pub fn collect(&mut self, krate: &hir::Crate) { - self.collect_external_language_items(); - self.collect_local_language_items(krate); - } } pub fn extract(attrs: &[ast::Attribute]) -> Option { @@ -233,14 +189,16 @@ pub fn extract(attrs: &[ast::Attribute]) -> Option { return None; } -pub fn collect_language_items(session: &Session, - map: &hir_map::Map) - -> LanguageItems { - let krate: &hir::Crate = map.krate(); - let mut collector = LanguageItemCollector::new(session, map); - collector.collect(krate); +pub fn collect<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> LanguageItems { + let mut collector = LanguageItemCollector::new(tcx); + for &cnum in tcx.crates().iter() { + for &(def_id, item_index) in tcx.defined_lang_items(cnum).iter() { + collector.collect_item(item_index, def_id); + } + } + tcx.hir.krate().visit_all_item_likes(&mut collector); let LanguageItemCollector { mut items, .. } = collector; - weak_lang_items::check_crate(krate, session, &mut items); + weak_lang_items::check_crate(tcx, &mut items); items } @@ -316,11 +274,12 @@ language_item_table! { FnMutTraitLangItem, "fn_mut", fn_mut_trait; FnOnceTraitLangItem, "fn_once", fn_once_trait; + GeneratorStateLangItem, "generator_state", gen_state; + GeneratorTraitLangItem, "generator", gen_trait; + EqTraitLangItem, "eq", eq_trait; OrdTraitLangItem, "ord", ord_trait; - StrEqFnLangItem, "str_eq", str_eq_fn; - // A number of panic-related lang items. The `panic` item corresponds to // divide-by-zero and various panic cases with `match`. The // `panic_bounds_check` item is for indexing arrays. @@ -348,22 +307,14 @@ language_item_table! 
{ PhantomDataItem, "phantom_data", phantom_data; - // Deprecated: - CovariantTypeItem, "covariant_type", covariant_type; - ContravariantTypeItem, "contravariant_type", contravariant_type; - InvariantTypeItem, "invariant_type", invariant_type; - CovariantLifetimeItem, "covariant_lifetime", covariant_lifetime; - ContravariantLifetimeItem, "contravariant_lifetime", contravariant_lifetime; - InvariantLifetimeItem, "invariant_lifetime", invariant_lifetime; - NonZeroItem, "non_zero", non_zero; DebugTraitLangItem, "debug_trait", debug_trait; } -impl<'a, 'tcx, 'gcx> ty::TyCtxt<'a, 'tcx, 'gcx> { +impl<'a, 'tcx, 'gcx> TyCtxt<'a, 'tcx, 'gcx> { pub fn require_lang_item(&self, lang_item: LangItem) -> DefId { - self.lang_items.require(lang_item).unwrap_or_else(|msg| { + self.lang_items().require(lang_item).unwrap_or_else(|msg| { self.sess.fatal(&msg) }) } diff --git a/src/librustc/middle/liveness.rs b/src/librustc/middle/liveness.rs index 8e5f748c78..d4fa03b508 100644 --- a/src/librustc/middle/liveness.rs +++ b/src/librustc/middle/liveness.rs @@ -429,8 +429,7 @@ fn visit_expr<'a, 'tcx>(ir: &mut IrMaps<'a, 'tcx>, expr: &'tcx Expr) { let mut call_caps = Vec::new(); ir.tcx.with_freevars(expr.id, |freevars| { for fv in freevars { - if let Def::Local(def_id) = fv.def { - let rv = ir.tcx.hir.as_local_node_id(def_id).unwrap(); + if let Def::Local(rv) = fv.def { let fv_ln = ir.add_live_node(FreeVarNode(fv.span)); call_caps.push(CaptureInfo {ln: fv_ln, var_nid: rv}); @@ -460,7 +459,7 @@ fn visit_expr<'a, 'tcx>(ir: &mut IrMaps<'a, 'tcx>, expr: &'tcx Expr) { hir::ExprAgain(_) | hir::ExprLit(_) | hir::ExprRet(..) | hir::ExprBlock(..) | hir::ExprAssign(..) | hir::ExprAssignOp(..) | hir::ExprStruct(..) | hir::ExprRepeat(..) | - hir::ExprInlineAsm(..) | hir::ExprBox(..) | + hir::ExprInlineAsm(..) | hir::ExprBox(..) | hir::ExprYield(..) | hir::ExprType(..) 
| hir::ExprPath(hir::QPath::TypeRelative(..)) => { intravisit::walk_expr(ir, expr); } @@ -881,7 +880,6 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { match expr.node { // Interesting cases with control flow or which gen/kill - hir::ExprPath(hir::QPath::Resolved(_, ref path)) => { self.access_path(expr.id, path, succ, ACC_READ | ACC_USE) } @@ -894,7 +892,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { self.propagate_through_expr(&e, succ) } - hir::ExprClosure(.., blk_id, _) => { + hir::ExprClosure(.., blk_id, _, _) => { debug!("{} is an ExprClosure", self.ir.tcx.hir.node_to_pretty_string(expr.id)); /* @@ -1116,6 +1114,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { hir::ExprCast(ref e, _) | hir::ExprType(ref e, _) | hir::ExprUnary(_, ref e) | + hir::ExprYield(ref e) | hir::ExprRepeat(ref e, _) => { self.propagate_through_expr(&e, succ) } @@ -1224,18 +1223,22 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { } } + fn access_var(&mut self, id: NodeId, nid: NodeId, succ: LiveNode, acc: u32, span: Span) + -> LiveNode { + let ln = self.live_node(id, span); + if acc != 0 { + self.init_from_succ(ln, succ); + let var = self.variable(nid, span); + self.acc(ln, var, acc); + } + ln + } + fn access_path(&mut self, id: NodeId, path: &hir::Path, succ: LiveNode, acc: u32) -> LiveNode { match path.def { - Def::Local(def_id) => { - let nid = self.ir.tcx.hir.as_local_node_id(def_id).unwrap(); - let ln = self.live_node(id, path.span); - if acc != 0 { - self.init_from_succ(ln, succ); - let var = self.variable(nid, path.span); - self.acc(ln, var, acc); - } - ln + Def::Local(nid) => { + self.access_var(id, nid, succ, acc, path.span) } _ => succ } @@ -1398,7 +1401,7 @@ fn check_expr<'a, 'tcx>(this: &mut Liveness<'a, 'tcx>, expr: &'tcx Expr) { hir::ExprBreak(..) | hir::ExprAgain(..) | hir::ExprLit(_) | hir::ExprBlock(..) | hir::ExprAddrOf(..) | hir::ExprStruct(..) | hir::ExprRepeat(..) | - hir::ExprClosure(..) | hir::ExprPath(_) | + hir::ExprClosure(..) | hir::ExprPath(_) | hir::ExprYield(..) | hir::ExprBox(..) | hir::ExprType(..) => { intravisit::walk_expr(this, expr); } @@ -1409,12 +1412,11 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { fn check_lvalue(&mut self, expr: &'tcx Expr) { match expr.node { hir::ExprPath(hir::QPath::Resolved(_, ref path)) => { - if let Def::Local(def_id) = path.def { + if let Def::Local(nid) = path.def { // Assignment to an immutable variable or argument: only legal // if there is no later assignment. If this local is actually // mutable, then check for a reassignment to flag the mutability // as being used. 
- let nid = self.ir.tcx.hir.as_local_node_id(def_id).unwrap(); let ln = self.live_node(expr.id, expr.span); let var = self.variable(nid, expr.span); self.warn_about_dead_assign(expr.span, expr.id, ln, var); @@ -1485,12 +1487,12 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { self.ir.tcx.lint_node_note(lint::builtin::UNUSED_VARIABLES, id, sp, &format!("variable `{}` is assigned to, but never used", name), - &format!("to disable this warning, consider using `_{}` instead", + &format!("to avoid this warning, consider using `_{}` instead", name)); } else if name != "self" { self.ir.tcx.lint_node_note(lint::builtin::UNUSED_VARIABLES, id, sp, &format!("unused variable: `{}`", name), - &format!("to disable this warning, consider using `_{}` instead", + &format!("to avoid this warning, consider using `_{}` instead", name)); } } diff --git a/src/librustc/middle/mem_categorization.rs b/src/librustc/middle/mem_categorization.rs index 8cd023b8e6..c973881c98 100644 --- a/src/librustc/middle/mem_categorization.rs +++ b/src/librustc/middle/mem_categorization.rs @@ -69,7 +69,7 @@ pub use self::Note::*; use self::Aliasability::*; -use middle::region::RegionMaps; +use middle::region; use hir::def_id::{DefId, DefIndex}; use hir::map as hir_map; use infer::InferCtxt; @@ -283,7 +283,7 @@ impl ast_node for hir::Pat { #[derive(Clone)] pub struct MemCategorizationContext<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { pub tcx: TyCtxt<'a, 'gcx, 'tcx>, - pub region_maps: &'a RegionMaps, + pub region_scope_tree: &'a region::ScopeTree, pub tables: &'a ty::TypeckTables<'tcx>, infcx: Option<&'a InferCtxt<'a, 'gcx, 'tcx>>, } @@ -391,21 +391,21 @@ impl MutabilityCategory { impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx, 'tcx> { pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, - region_maps: &'a RegionMaps, + region_scope_tree: &'a region::ScopeTree, tables: &'a ty::TypeckTables<'tcx>) -> MemCategorizationContext<'a, 'tcx, 'tcx> { - MemCategorizationContext { tcx, region_maps, tables, infcx: None } + MemCategorizationContext { tcx, region_scope_tree, tables, infcx: None } } } impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { pub fn with_infer(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, - region_maps: &'a RegionMaps, + region_scope_tree: &'a region::ScopeTree, tables: &'a ty::TypeckTables<'tcx>) -> MemCategorizationContext<'a, 'gcx, 'tcx> { MemCategorizationContext { tcx: infcx.tcx, - region_maps, + region_scope_tree, tables, infcx: Some(infcx), } @@ -625,7 +625,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { hir::ExprAddrOf(..) | hir::ExprCall(..) | hir::ExprAssign(..) | hir::ExprAssignOp(..) | hir::ExprClosure(..) | hir::ExprRet(..) | - hir::ExprUnary(..) | + hir::ExprUnary(..) | hir::ExprYield(..) | hir::ExprMethodCall(..) | hir::ExprCast(..) | hir::ExprArray(..) | hir::ExprTup(..) | hir::ExprIf(..) | hir::ExprBinary(..) | hir::ExprWhile(..) 
| @@ -670,13 +670,11 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { })) } - Def::Upvar(def_id, _, fn_node_id) => { - let var_id = self.tcx.hir.as_local_node_id(def_id).unwrap(); + Def::Upvar(var_id, _, fn_node_id) => { self.cat_upvar(id, span, var_id, fn_node_id) } - Def::Local(def_id) => { - let vid = self.tcx.hir.as_local_node_id(def_id).unwrap(); + Def::Local(vid) => { Ok(Rc::new(cmt_ { id, span, @@ -725,19 +723,23 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { // FnMut | copied -> &'env mut | upvar -> &'env mut -> &'up bk // FnOnce | copied | upvar -> &'up bk - let kind = match self.tables.closure_kinds().get(fn_hir_id) { - Some(&(kind, _)) => kind, - None => span_bug!(span, "missing closure kind") + let kind = match self.node_ty(fn_hir_id)?.sty { + ty::TyGenerator(..) => ty::ClosureKind::FnOnce, + _ => { + match self.tables.closure_kinds().get(fn_hir_id) { + Some(&(kind, _)) => kind, + None => span_bug!(span, "missing closure kind"), + } + } }; let closure_expr_def_index = self.tcx.hir.local_def_id(fn_node_id).index; - let var_def_index = self.tcx.hir.local_def_id(var_id).index; - + let var_hir_id = self.tcx.hir.node_to_hir_id(var_id); let upvar_id = ty::UpvarId { - var_id: var_def_index, + var_id: var_hir_id, closure_expr_id: closure_expr_def_index }; - let var_hir_id = self.tcx.hir.node_to_hir_id(var_id); + let var_ty = self.node_ty(var_hir_id)?; // Mutability of original variable itself @@ -856,9 +858,8 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { /// Returns the lifetime of a temporary created by expr with id `id`. /// This could be `'static` if `id` is part of a constant expression. - pub fn temporary_scope(&self, id: ast::NodeId) -> ty::Region<'tcx> - { - let scope = self.region_maps.temporary_scope(id); + pub fn temporary_scope(&self, id: hir::ItemLocalId) -> ty::Region<'tcx> { + let scope = self.region_scope_tree.temporary_scope(id); self.tcx.mk_region(match scope { Some(scope) => ty::ReScope(scope), None => ty::ReStatic @@ -875,7 +876,8 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { // Always promote `[T; 0]` (even when e.g. borrowed mutably). let promotable = match expr_ty.sty { - ty::TyArray(_, 0) => true, + ty::TyArray(_, len) if + len.val.to_const_int().and_then(|i| i.to_u64()) == Some(0) => true, _ => promotable, }; @@ -885,7 +887,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { let re = if promotable { self.tcx.types.re_static } else { - self.temporary_scope(id) + self.temporary_scope(self.tcx.hir.node_to_hir_id(id).local_id) }; let ret = self.cat_rvalue(id, span, re, expr_ty); debug!("cat_rvalue_node ret {:?}", ret); @@ -1092,7 +1094,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { } // FIXME(#19596) This is a workaround, but there should be a better way to do this - fn cat_pattern_(&self, cmt: cmt<'tcx>, pat: &hir::Pat, op: &mut F) -> McResult<()> + fn cat_pattern_(&self, mut cmt: cmt<'tcx>, pat: &hir::Pat, op: &mut F) -> McResult<()> where F : FnMut(cmt<'tcx>, &hir::Pat) { // Here, `cmt` is the categorization for the value being @@ -1142,6 +1144,56 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { debug!("cat_pattern: {:?} cmt={:?}", pat, cmt); + // If (pattern) adjustments are active for this pattern, adjust the `cmt` correspondingly. + // `cmt`s are constructed differently from patterns. For example, in + // + // ``` + // match foo { + // &&Some(x, ) => { ... }, + // _ => { ... 
}, + // } + // ``` + // + // the pattern `&&Some(x,)` is represented as `Ref { Ref { TupleStruct }}`. To build the + // corresponding `cmt` we start with a `cmt` for `foo`, and then, by traversing the + // pattern, try to answer the question: given the address of `foo`, how is `x` reached? + // + // `&&Some(x,)` `cmt_foo` + // `&Some(x,)` `deref { cmt_foo}` + // `Some(x,)` `deref { deref { cmt_foo }}` + // (x,)` `field0 { deref { deref { cmt_foo }}}` <- resulting cmt + // + // The above example has no adjustments. If the code were instead the (after adjustments, + // equivalent) version + // + // ``` + // match foo { + // Some(x, ) => { ... }, + // _ => { ... }, + // } + // ``` + // + // Then we see that to get the same result, we must start with `deref { deref { cmt_foo }}` + // instead of `cmt_foo` since the pattern is now `Some(x,)` and not `&&Some(x,)`, even + // though its assigned type is that of `&&Some(x,)`. + for _ in 0..self.tables + .pat_adjustments() + .get(pat.hir_id) + .map(|v| v.len()) + .unwrap_or(0) { + cmt = self.cat_deref(pat, cmt, true /* implicit */)?; + } + let cmt = cmt; // lose mutability + + // Invoke the callback, but only now, after the `cmt` has adjusted. + // + // To see that this makes sense, consider `match &Some(3) { Some(x) => { ... }}`. In that + // case, the initial `cmt` will be that for `&Some(3)` and the pattern is `Some(x)`. We + // don't want to call `op` with these incompatible values. As written, what happens instead + // is that `op` is called with the adjusted cmt (that for `*&Some(3)`) and the pattern + // `Some(x)` (which matches). Recursing once more, `*&Some(3)` and the pattern `Some(x)` + // result in the cmt `Downcast(*&Some(3)).0` associated to `x` and invoke `op` with + // that (where the `ref` on `x` is implied). op(cmt.clone(), pat); match pat.node { @@ -1437,7 +1489,7 @@ impl<'tcx> fmt::Debug for Categorization<'tcx> { Categorization::StaticItem => write!(f, "static"), Categorization::Rvalue(r) => { write!(f, "rvalue({:?})", r) } Categorization::Local(id) => { - let name = ty::tls::with(|tcx| tcx.local_var_name_str(id)); + let name = ty::tls::with(|tcx| tcx.hir.name(id)); write!(f, "local({})", name) } Categorization::Upvar(upvar) => { diff --git a/src/librustc/middle/reachable.rs b/src/librustc/middle/reachable.rs index 666f71cca0..55d0c6b4c6 100644 --- a/src/librustc/middle/reachable.rs +++ b/src/librustc/middle/reachable.rs @@ -115,28 +115,34 @@ impl<'a, 'tcx> Visitor<'tcx> for ReachableContext<'a, 'tcx> { _ => None }; - if let Some(def) = def { - let def_id = def.def_id(); - if let Some(node_id) = self.tcx.hir.as_local_node_id(def_id) { - if self.def_id_represents_local_inlined_item(def_id) { - self.worklist.push(node_id); - } else { - match def { - // If this path leads to a constant, then we need to - // recurse into the constant to continue finding - // items that are reachable. - Def::Const(..) | Def::AssociatedConst(..) => { - self.worklist.push(node_id); - } + match def { + Some(Def::Local(node_id)) | Some(Def::Upvar(node_id, ..)) => { + self.reachable_symbols.insert(node_id); + } + Some(def) => { + let def_id = def.def_id(); + if let Some(node_id) = self.tcx.hir.as_local_node_id(def_id) { + if self.def_id_represents_local_inlined_item(def_id) { + self.worklist.push(node_id); + } else { + match def { + // If this path leads to a constant, then we need to + // recurse into the constant to continue finding + // items that are reachable. + Def::Const(..) | Def::AssociatedConst(..) 
=> { + self.worklist.push(node_id); + } - // If this wasn't a static, then the destination is - // surely reachable. - _ => { - self.reachable_symbols.insert(node_id); + // If this wasn't a static, then the destination is + // surely reachable. + _ => { + self.reachable_symbols.insert(node_id); + } } } } } + _ => {} } intravisit::walk_expr(self, expr) @@ -227,8 +233,8 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { } else { false }; - let is_extern = attr::contains_extern_indicator(&self.tcx.sess.diagnostic(), - &item.attrs); + let def_id = self.tcx.hir.local_def_id(item.id); + let is_extern = self.tcx.contains_extern_indicator(def_id); if reachable || is_extern { self.reachable_symbols.insert(search_item); } @@ -296,7 +302,7 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { hir::ImplItemKind::Type(_) => {} } } - hir_map::NodeExpr(&hir::Expr { node: hir::ExprClosure(.., body, _), .. }) => { + hir_map::NodeExpr(&hir::Expr { node: hir::ExprClosure(.., body, _, _), .. }) => { self.visit_nested_body(body); } // Nothing to recurse on for these @@ -304,7 +310,8 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { hir_map::NodeVariant(_) | hir_map::NodeStructCtor(_) | hir_map::NodeField(_) | - hir_map::NodeTy(_) => {} + hir_map::NodeTy(_) | + hir_map::NodeMacroDef(_) => {} _ => { bug!("found unexpected thingy in worklist: {}", self.tcx.hir.node_to_string(search_item)) @@ -363,11 +370,13 @@ impl<'a, 'tcx: 'a> ItemLikeVisitor<'tcx> for CollectPrivateImplItemsVisitor<'a, } } -pub fn find_reachable<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Rc { - tcx.reachable_set(LOCAL_CRATE) -} +// We introduce a new-type here, so we can have a specialized HashStable +// implementation for it. +#[derive(Clone)] +pub struct ReachableSet(pub Rc); + -fn reachable_set<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) -> Rc { +fn reachable_set<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) -> ReachableSet { debug_assert!(crate_num == LOCAL_CRATE); let access_levels = &tcx.privacy_access_levels(LOCAL_CRATE); @@ -392,7 +401,7 @@ fn reachable_set<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) -> for (id, _) in &access_levels.map { reachable_context.worklist.push(*id); } - for item in tcx.lang_items.items().iter() { + for item in tcx.lang_items().items().iter() { if let Some(did) = *item { if let Some(node_id) = tcx.hir.as_local_node_id(did) { reachable_context.worklist.push(node_id); @@ -412,7 +421,7 @@ fn reachable_set<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) -> reachable_context.propagate(); // Return the set of reachable symbols. - Rc::new(reachable_context.reachable_symbols) + ReachableSet(Rc::new(reachable_context.reachable_symbols)) } pub fn provide(providers: &mut Providers) { diff --git a/src/librustc/middle/region.rs b/src/librustc/middle/region.rs index 45a3080ed9..b909ee9f93 100644 --- a/src/librustc/middle/region.rs +++ b/src/librustc/middle/region.rs @@ -8,23 +8,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! This file actually contains two passes related to regions. The first -//! pass builds up the `scope_map`, which describes the parent links in -//! the region hierarchy. The second pass infers which types must be -//! region parameterized. +//! This file builds up the `ScopeTree`, which describes +//! the parent links in the region hierarchy. //! //! Most of the documentation on regions can be found in //! 
`middle/infer/region_inference/README.md` -use hir::map as hir_map; -use util::nodemap::{FxHashMap, NodeMap, NodeSet}; +use ich::{StableHashingContext, NodeIdHashingMode}; +use util::nodemap::{FxHashMap, FxHashSet}; use ty; +use std::fmt; use std::mem; use std::rc::Rc; use syntax::codemap; use syntax::ast; -use syntax_pos::Span; +use syntax_pos::{Span, DUMMY_SP}; use ty::TyCtxt; use ty::maps::Providers; @@ -33,25 +32,28 @@ use hir::def_id::DefId; use hir::intravisit::{self, Visitor, NestedVisitorMap}; use hir::{Block, Arm, Pat, PatKind, Stmt, Expr, Local}; use mir::transform::MirSource; +use rustc_data_structures::indexed_vec::Idx; +use rustc_data_structures::stable_hasher::{HashStable, StableHasher, + StableHasherResult}; -/// CodeExtent represents a statically-describable extent that can be +/// Scope represents a statically-describable scope that can be /// used to bound the lifetime/region for values. /// -/// `Misc(node_id)`: Any AST node that has any extent at all has the -/// `Misc(node_id)` extent. Other variants represent special cases not +/// `Node(node_id)`: Any AST node that has any scope at all has the +/// `Node(node_id)` scope. Other variants represent special cases not /// immediately derivable from the abstract syntax tree structure. /// -/// `DestructionScope(node_id)` represents the extent of destructors +/// `DestructionScope(node_id)` represents the scope of destructors /// implicitly-attached to `node_id` that run immediately after the /// expression for `node_id` itself. Not every AST node carries a /// `DestructionScope`, but those that are `terminating_scopes` do; -/// see discussion with `RegionMaps`. +/// see discussion with `ScopeTree`. /// /// `Remainder(BlockRemainder { block, statement_index })` represents -/// the extent of user code running immediately after the initializer +/// the scope of user code running immediately after the initializer /// expression for the indexed statement, until the end of the block. /// -/// So: the following code can be broken down into the extents beneath: +/// So: the following code can be broken down into the scopes beneath: /// ``` /// let a = f().g( 'b: { let x = d(); let y = d(); x.h(y) } ) ; /// ``` @@ -69,21 +71,21 @@ use mir::transform::MirSource; /// +--+ (M2.) /// +-----------------------------------------------------------+ (M1.) /// -/// (M1.): Misc extent of the whole `let a = ...;` statement. -/// (M2.): Misc extent of the `f()` expression. -/// (M3.): Misc extent of the `f().g(..)` expression. -/// (M4.): Misc extent of the block labeled `'b:`. -/// (M5.): Misc extent of the `let x = d();` statement +/// (M1.): Node scope of the whole `let a = ...;` statement. +/// (M2.): Node scope of the `f()` expression. +/// (M3.): Node scope of the `f().g(..)` expression. +/// (M4.): Node scope of the block labeled `'b:`. +/// (M5.): Node scope of the `let x = d();` statement /// (D6.): DestructionScope for temporaries created during M5. -/// (R7.): Remainder extent for block `'b:`, stmt 0 (let x = ...). -/// (M8.): Misc Extent of the `let y = d();` statement. +/// (R7.): Remainder scope for block `'b:`, stmt 0 (let x = ...). +/// (M8.): Node scope of the `let y = d();` statement. /// (D9.): DestructionScope for temporaries created during M8. -/// (R10.): Remainder extent for block `'b:`, stmt 1 (let y = ...). +/// (R10.): Remainder scope for block `'b:`, stmt 1 (let y = ...). /// (D11.): DestructionScope for temporaries and bindings from block `'b:`. /// (D12.): DestructionScope for temporaries created during M1 (e.g. 
f()). /// /// Note that while the above picture shows the destruction scopes -/// as following their corresponding misc extents, in the internal +/// as following their corresponding node scopes, in the internal /// data structures of the compiler the destruction scopes are /// represented as enclosing parents. This is sound because we use the /// enclosing parent relationship just to ensure that referenced @@ -95,22 +97,38 @@ use mir::transform::MirSource; /// placate the same deriving in `ty::FreeRegion`, but we may want to /// actually attach a more meaningful ordering to scopes than the one /// generated via deriving here. +/// +/// Scope is a bit-packed to save space - if `code` is SCOPE_DATA_REMAINDER_MAX +/// or less, it is a `ScopeData::Remainder`, otherwise it is a type specified +/// by the bitpacking. +#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Copy, RustcEncodable, RustcDecodable)] +pub struct Scope { + pub(crate) id: hir::ItemLocalId, + pub(crate) code: u32 +} + +const SCOPE_DATA_NODE: u32 = !0; +const SCOPE_DATA_CALLSITE: u32 = !1; +const SCOPE_DATA_ARGUMENTS: u32 = !2; +const SCOPE_DATA_DESTRUCTION: u32 = !3; +const SCOPE_DATA_REMAINDER_MAX: u32 = !4; + #[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Debug, Copy, RustcEncodable, RustcDecodable)] -pub enum CodeExtent { - Misc(ast::NodeId), +pub enum ScopeData { + Node(hir::ItemLocalId), - // extent of the call-site for a function or closure (outlives - // the parameters as well as the body). - CallSiteScope(hir::BodyId), + // Scope of the call-site for a function or closure + // (outlives the arguments as well as the body). + CallSite(hir::ItemLocalId), - // extent of parameters passed to a function or closure (they - // outlive its body) - ParameterScope(hir::BodyId), + // Scope of arguments passed to a function or closure + // (they outlive its body). + Arguments(hir::ItemLocalId), - // extent of destructors for temporaries of node-id - DestructionScope(ast::NodeId), + // Scope of destructors for temporaries of node-id. + Destruction(hir::ItemLocalId), - // extent of code following a `let id = expr;` binding in a block + // Scope following a `let id = expr;` binding in a block. Remainder(BlockRemainder) } @@ -125,95 +143,180 @@ pub enum CodeExtent { /// * the subscope with `first_statement_index == 0` is scope of both /// `a` and `b`; it does not include EXPR_1, but does include /// everything after that first `let`. (If you want a scope that -/// includes EXPR_1 as well, then do not use `CodeExtent::Remainder`, -/// but instead another `CodeExtent` that encompasses the whole block, -/// e.g. `CodeExtent::Misc`. +/// includes EXPR_1 as well, then do not use `Scope::Remainder`, +/// but instead another `Scope` that encompasses the whole block, +/// e.g. `Scope::Node`. /// /// * the subscope with `first_statement_index == 1` is scope of `c`, /// and thus does not include EXPR_2, but covers the `...`. 
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, RustcEncodable, RustcDecodable, Debug, Copy)] pub struct BlockRemainder { - pub block: ast::NodeId, - pub first_statement_index: u32, + pub block: hir::ItemLocalId, + pub first_statement_index: FirstStatementIndex, +} + +#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, RustcEncodable, + RustcDecodable, Copy)] +pub struct FirstStatementIndex { pub idx: u32 } + +impl Idx for FirstStatementIndex { + fn new(idx: usize) -> Self { + assert!(idx <= SCOPE_DATA_REMAINDER_MAX as usize); + FirstStatementIndex { idx: idx as u32 } + } + + fn index(self) -> usize { + self.idx as usize + } } -impl CodeExtent { - /// Returns a node id associated with this scope. +impl fmt::Debug for FirstStatementIndex { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.index(), formatter) + } +} + +impl From for Scope { + #[inline] + fn from(scope_data: ScopeData) -> Self { + let (id, code) = match scope_data { + ScopeData::Node(id) => (id, SCOPE_DATA_NODE), + ScopeData::CallSite(id) => (id, SCOPE_DATA_CALLSITE), + ScopeData::Arguments(id) => (id, SCOPE_DATA_ARGUMENTS), + ScopeData::Destruction(id) => (id, SCOPE_DATA_DESTRUCTION), + ScopeData::Remainder(r) => (r.block, r.first_statement_index.index() as u32) + }; + Self { id, code } + } +} + +impl fmt::Debug for Scope { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.data(), formatter) + } +} + +#[allow(non_snake_case)] +impl Scope { + #[inline] + pub fn data(self) -> ScopeData { + match self.code { + SCOPE_DATA_NODE => ScopeData::Node(self.id), + SCOPE_DATA_CALLSITE => ScopeData::CallSite(self.id), + SCOPE_DATA_ARGUMENTS => ScopeData::Arguments(self.id), + SCOPE_DATA_DESTRUCTION => ScopeData::Destruction(self.id), + idx => ScopeData::Remainder(BlockRemainder { + block: self.id, + first_statement_index: FirstStatementIndex { idx } + }) + } + } + + #[inline] + pub fn Node(id: hir::ItemLocalId) -> Self { + Self::from(ScopeData::Node(id)) + } + + #[inline] + pub fn CallSite(id: hir::ItemLocalId) -> Self { + Self::from(ScopeData::CallSite(id)) + } + + #[inline] + pub fn Arguments(id: hir::ItemLocalId) -> Self { + Self::from(ScopeData::Arguments(id)) + } + + #[inline] + pub fn Destruction(id: hir::ItemLocalId) -> Self { + Self::from(ScopeData::Destruction(id)) + } + + #[inline] + pub fn Remainder(r: BlockRemainder) -> Self { + Self::from(ScopeData::Remainder(r)) + } +} + +impl Scope { + /// Returns a item-local id associated with this scope. /// /// NB: likely to be replaced as API is refined; e.g. pnkfelix /// anticipates `fn entry_node_id` and `fn each_exit_node_id`. - pub fn node_id(&self) -> ast::NodeId { - match *self { - CodeExtent::Misc(node_id) => node_id, - - // These cases all return rough approximations to the - // precise extent denoted by `self`. - CodeExtent::Remainder(br) => br.block, - CodeExtent::DestructionScope(node_id) => node_id, - CodeExtent::CallSiteScope(body_id) | - CodeExtent::ParameterScope(body_id) => body_id.node_id, + pub fn item_local_id(&self) -> hir::ItemLocalId { + self.id + } + + pub fn node_id(&self, tcx: TyCtxt, scope_tree: &ScopeTree) -> ast::NodeId { + match scope_tree.root_body { + Some(hir_id) => { + tcx.hir.hir_to_node_id(hir::HirId { + owner: hir_id.owner, + local_id: self.item_local_id() + }) + } + None => ast::DUMMY_NODE_ID } } - /// Returns the span of this CodeExtent. Note that in general the + /// Returns the span of this Scope. 
Note that in general the /// returned span may not correspond to the span of any node id in /// the AST. - pub fn span(&self, hir_map: &hir_map::Map) -> Option { - match hir_map.find(self.node_id()) { - Some(hir_map::NodeBlock(ref blk)) => { - match *self { - CodeExtent::CallSiteScope(_) | - CodeExtent::ParameterScope(_) | - CodeExtent::Misc(_) | - CodeExtent::DestructionScope(_) => Some(blk.span), - - CodeExtent::Remainder(r) => { - assert_eq!(r.block, blk.id); - // Want span for extent starting after the - // indexed statement and ending at end of - // `blk`; reuse span of `blk` and shift `lo` - // forward to end of indexed statement. - // - // (This is the special case aluded to in the - // doc-comment for this method) - let stmt_span = blk.stmts[r.first_statement_index as usize].span; - Some(Span { lo: stmt_span.hi, hi: blk.span.hi, ctxt: stmt_span.ctxt }) - } + pub fn span(&self, tcx: TyCtxt, scope_tree: &ScopeTree) -> Span { + let node_id = self.node_id(tcx, scope_tree); + if node_id == ast::DUMMY_NODE_ID { + return DUMMY_SP; + } + let span = tcx.hir.span(node_id); + if let ScopeData::Remainder(r) = self.data() { + if let hir::map::NodeBlock(ref blk) = tcx.hir.get(node_id) { + // Want span for scope starting after the + // indexed statement and ending at end of + // `blk`; reuse span of `blk` and shift `lo` + // forward to end of indexed statement. + // + // (This is the special case aluded to in the + // doc-comment for this method) + + let stmt_span = blk.stmts[r.first_statement_index.index()].span; + + // To avoid issues with macro-generated spans, the span + // of the statement must be nested in that of the block. + if span.lo() <= stmt_span.lo() && stmt_span.lo() <= span.hi() { + return Span::new(stmt_span.lo(), span.hi(), span.ctxt()); } } - Some(hir_map::NodeExpr(ref expr)) => Some(expr.span), - Some(hir_map::NodeStmt(ref stmt)) => Some(stmt.span), - Some(hir_map::NodeItem(ref item)) => Some(item.span), - Some(_) | None => None, } + span } } -/// The region maps encode information about region relationships. -pub struct RegionMaps { +/// The region scope tree encodes information about region relationships. +#[derive(Default, Debug)] +pub struct ScopeTree { /// If not empty, this body is the root of this region hierarchy. - root_body: Option, + root_body: Option, /// The parent of the root body owner, if the latter is an /// an associated const or method, as impls/traits can also /// have lifetime parameters free in this body. root_parent: Option, - /// `scope_map` maps from a scope id to the enclosing scope id; + /// `parent_map` maps from a scope id to the enclosing scope id; /// this is usually corresponding to the lexical nesting, though /// in the case of closures the parent scope is the innermost /// conditional expression or repeating block. (Note that the /// enclosing scope id for the block associated with a closure is /// the closure itself.) - scope_map: FxHashMap, + parent_map: FxHashMap, /// `var_map` maps from a variable or binding id to the block in /// which that variable is declared. - var_map: NodeMap, + var_map: FxHashMap, /// maps from a node-id to the associated destruction scope (if any) - destruction_scopes: NodeMap, + destruction_scopes: FxHashMap, /// `rvalue_scopes` includes entries for those expressions whose cleanup scope is /// larger than the default. 
The map goes from the expression id @@ -221,7 +324,9 @@ pub struct RegionMaps { /// table, the appropriate cleanup scope is the innermost /// enclosing statement, conditional expression, or repeating /// block (see `terminating_scopes`). - rvalue_scopes: NodeMap, + /// In constants, None is used to indicate that certain expressions + /// escape into 'static and should have no local cleanup scope. + rvalue_scopes: FxHashMap>, /// Encodes the hierarchy of fn bodies. Every fn body (including /// closures) forms its own distinct region hierarchy, rooted in @@ -233,7 +338,83 @@ pub struct RegionMaps { /// closure defined by that fn. See the "Modeling closures" /// section of the README in infer::region_inference for /// more details. - fn_tree: NodeMap, + closure_tree: FxHashMap, + + /// If there are any `yield` nested within a scope, this map + /// stores the `Span` of the last one and its index in the + /// postorder of the Visitor traversal on the HIR. + /// + /// HIR Visitor postorder indexes might seem like a peculiar + /// thing to care about. but it turns out that HIR bindings + /// and the temporary results of HIR expressions are never + /// storage-live at the end of HIR nodes with postorder indexes + /// lower than theirs, and therefore don't need to be suspended + /// at yield-points at these indexes. + /// + /// For an example, suppose we have some code such as: + /// ```rust,ignore (example) + /// foo(f(), yield y, bar(g())) + /// ``` + /// + /// With the HIR tree (calls numbered for expository purposes) + /// ``` + /// Call#0(foo, [Call#1(f), Yield(y), Call#2(bar, Call#3(g))]) + /// ``` + /// + /// Obviously, the result of `f()` was created before the yield + /// (and therefore needs to be kept valid over the yield) while + /// the result of `g()` occurs after the yield (and therefore + /// doesn't). If we want to infer that, we can look at the + /// postorder traversal: + /// ``` + /// `foo` `f` Call#1 `y` Yield `bar` `g` Call#3 Call#2 Call#0 + /// ``` + /// + /// In which we can easily see that `Call#1` occurs before the yield, + /// and `Call#3` after it. + /// + /// To see that this method works, consider: + /// + /// Let `D` be our binding/temporary and `U` be our other HIR node, with + /// `HIR-postorder(U) < HIR-postorder(D)` (in our example, U would be + /// the yield and D would be one of the calls). Let's show that + /// `D` is storage-dead at `U`. + /// + /// Remember that storage-live/storage-dead refers to the state of + /// the *storage*, and does not consider moves/drop flags. + /// + /// Then: + /// 1. From the ordering guarantee of HIR visitors (see + /// `rustc::hir::intravisit`), `D` does not dominate `U`. + /// 2. Therefore, `D` is *potentially* storage-dead at `U` (because + /// we might visit `U` without ever getting to `D`). + /// 3. However, we guarantee that at each HIR point, each + /// binding/temporary is always either always storage-live + /// or always storage-dead. This is what is being guaranteed + /// by `terminating_scopes` including all blocks where the + /// count of executions is not guaranteed. + /// 4. By `2.` and `3.`, `D` is *statically* storage-dead at `U`, + /// QED. + /// + /// I don't think this property relies on `3.` in an essential way - it + /// is probably still correct even if we have "unrestricted" terminating + /// scopes. However, why use the complicated proof when a simple one + /// works? + /// + /// A subtle thing: `box` expressions, such as `box (&x, yield 2, &y)`. 
It + /// might seem that a `box` expression creates a `Box` temporary + /// when it *starts* executing, at `HIR-preorder(BOX-EXPR)`. That might + /// be true in the MIR desugaring, but it is not important in the semantics. + /// + /// The reason is that semantically, until the `box` expression returns, + /// the values are still owned by their containing expressions. So + /// we'll see that `&x`. + yield_in_scope: FxHashMap, + + /// The number of visit_expr and visit_pat calls done in the body. + /// Used to sanity check visit_expr/visit_pat call count when + /// calculating generator interiors. + body_expr_count: FxHashMap, } #[derive(Debug, Copy, Clone)] @@ -244,20 +425,23 @@ pub struct Context { /// arranged into a tree. See the "Modeling closures" section of /// the README in infer::region_inference for more /// details. - root_id: Option, + root_id: Option, /// the scope that contains any new variables declared - var_parent: Option, + var_parent: Option, /// region parent of expressions etc - parent: Option, + parent: Option, } struct RegionResolutionVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, - // Generated maps: - region_maps: RegionMaps, + // The number of expressions and patterns visited in the current body + expr_and_pat_count: usize, + + // Generated scope tree: + scope_tree: ScopeTree, cx: Context, @@ -281,124 +465,117 @@ struct RegionResolutionVisitor<'a, 'tcx: 'a> { /// arbitrary amounts of stack space. Terminating scopes end /// up being contained in a DestructionScope that contains the /// destructor's execution. - terminating_scopes: NodeSet, + terminating_scopes: FxHashSet, } -impl<'tcx> RegionMaps { - pub fn new() -> Self { - RegionMaps { - root_body: None, - root_parent: None, - scope_map: FxHashMap(), - destruction_scopes: FxHashMap(), - var_map: NodeMap(), - rvalue_scopes: NodeMap(), - fn_tree: NodeMap(), - } - } - - pub fn record_code_extent(&mut self, - child: CodeExtent, - parent: Option) { +impl<'tcx> ScopeTree { + pub fn record_scope_parent(&mut self, child: Scope, parent: Option) { debug!("{:?}.parent = {:?}", child, parent); if let Some(p) = parent { - let prev = self.scope_map.insert(child, p); + let prev = self.parent_map.insert(child, p); assert!(prev.is_none()); } // record the destruction scopes for later so we can query them - if let CodeExtent::DestructionScope(n) = child { + if let ScopeData::Destruction(n) = child.data() { self.destruction_scopes.insert(n, child); } } - pub fn each_encl_scope(&self, mut e:E) where E: FnMut(CodeExtent, CodeExtent) { - for (&child, &parent) in &self.scope_map { + pub fn each_encl_scope(&self, mut e:E) where E: FnMut(Scope, Scope) { + for (&child, &parent) in &self.parent_map { e(child, parent) } } - pub fn each_var_scope(&self, mut e:E) where E: FnMut(&ast::NodeId, CodeExtent) { + pub fn each_var_scope(&self, mut e:E) where E: FnMut(&hir::ItemLocalId, Scope) { for (child, &parent) in self.var_map.iter() { e(child, parent) } } - pub fn opt_destruction_extent(&self, n: ast::NodeId) -> Option { + pub fn opt_destruction_scope(&self, n: hir::ItemLocalId) -> Option { self.destruction_scopes.get(&n).cloned() } - /// Records that `sub_fn` is defined within `sup_fn`. These ids + /// Records that `sub_closure` is defined within `sup_closure`. These ids /// should be the id of the block that is the fn body, which is /// also the root of the region hierarchy for that fn. 
- fn record_fn_parent(&mut self, sub_fn: ast::NodeId, sup_fn: ast::NodeId) { - debug!("record_fn_parent(sub_fn={:?}, sup_fn={:?})", sub_fn, sup_fn); - assert!(sub_fn != sup_fn); - let previous = self.fn_tree.insert(sub_fn, sup_fn); + fn record_closure_parent(&mut self, + sub_closure: hir::ItemLocalId, + sup_closure: hir::ItemLocalId) { + debug!("record_closure_parent(sub_closure={:?}, sup_closure={:?})", + sub_closure, sup_closure); + assert!(sub_closure != sup_closure); + let previous = self.closure_tree.insert(sub_closure, sup_closure); assert!(previous.is_none()); } - fn fn_is_enclosed_by(&self, mut sub_fn: ast::NodeId, sup_fn: ast::NodeId) -> bool { + fn closure_is_enclosed_by(&self, + mut sub_closure: hir::ItemLocalId, + sup_closure: hir::ItemLocalId) -> bool { loop { - if sub_fn == sup_fn { return true; } - match self.fn_tree.get(&sub_fn) { - Some(&s) => { sub_fn = s; } + if sub_closure == sup_closure { return true; } + match self.closure_tree.get(&sub_closure) { + Some(&s) => { sub_closure = s; } None => { return false; } } } } - fn record_var_scope(&mut self, var: ast::NodeId, lifetime: CodeExtent) { + fn record_var_scope(&mut self, var: hir::ItemLocalId, lifetime: Scope) { debug!("record_var_scope(sub={:?}, sup={:?})", var, lifetime); - assert!(var != lifetime.node_id()); + assert!(var != lifetime.item_local_id()); self.var_map.insert(var, lifetime); } - fn record_rvalue_scope(&mut self, var: ast::NodeId, lifetime: CodeExtent) { + fn record_rvalue_scope(&mut self, var: hir::ItemLocalId, lifetime: Option) { debug!("record_rvalue_scope(sub={:?}, sup={:?})", var, lifetime); - assert!(var != lifetime.node_id()); + if let Some(lifetime) = lifetime { + assert!(var != lifetime.item_local_id()); + } self.rvalue_scopes.insert(var, lifetime); } - pub fn opt_encl_scope(&self, id: CodeExtent) -> Option { + pub fn opt_encl_scope(&self, id: Scope) -> Option { //! Returns the narrowest scope that encloses `id`, if any. - self.scope_map.get(&id).cloned() + self.parent_map.get(&id).cloned() } #[allow(dead_code)] // used in cfg - pub fn encl_scope(&self, id: CodeExtent) -> CodeExtent { + pub fn encl_scope(&self, id: Scope) -> Scope { //! Returns the narrowest scope that encloses `id`, if any. self.opt_encl_scope(id).unwrap() } /// Returns the lifetime of the local variable `var_id` - pub fn var_scope(&self, var_id: ast::NodeId) -> CodeExtent { + pub fn var_scope(&self, var_id: hir::ItemLocalId) -> Scope { match self.var_map.get(&var_id) { Some(&r) => r, None => { bug!("no enclosing scope for id {:?}", var_id); } } } - pub fn temporary_scope(&self, expr_id: ast::NodeId) -> Option { + pub fn temporary_scope(&self, expr_id: hir::ItemLocalId) -> Option { //! Returns the scope when temp created by expr_id will be cleaned up // check for a designated rvalue scope if let Some(&s) = self.rvalue_scopes.get(&expr_id) { debug!("temporary_scope({:?}) = {:?} [custom]", expr_id, s); - return Some(s); + return s; } // else, locate the innermost terminating scope // if there's one. Static items, for instance, won't // have an enclosing scope, hence no scope will be // returned. - let mut id = CodeExtent::Misc(expr_id); + let mut id = Scope::Node(expr_id); - while let Some(&p) = self.scope_map.get(&id) { - match p { - CodeExtent::DestructionScope(..) => { + while let Some(&p) = self.parent_map.get(&id) { + match p.data() { + ScopeData::Destruction(..) 
=> { debug!("temporary_scope({:?}) = {:?} [enclosing]", expr_id, id); return Some(id); @@ -411,7 +588,7 @@ impl<'tcx> RegionMaps { return None; } - pub fn var_region(&self, id: ast::NodeId) -> ty::RegionKind { + pub fn var_region(&self, id: hir::ItemLocalId) -> ty::RegionKind { //! Returns the lifetime of the variable `id`. let scope = ty::ReScope(self.var_scope(id)); @@ -419,7 +596,7 @@ impl<'tcx> RegionMaps { scope } - pub fn scopes_intersect(&self, scope1: CodeExtent, scope2: CodeExtent) + pub fn scopes_intersect(&self, scope1: Scope, scope2: Scope) -> bool { self.is_subscope_of(scope1, scope2) || self.is_subscope_of(scope2, scope1) @@ -428,8 +605,8 @@ impl<'tcx> RegionMaps { /// Returns true if `subscope` is equal to or is lexically nested inside `superscope` and false /// otherwise. pub fn is_subscope_of(&self, - subscope: CodeExtent, - superscope: CodeExtent) + subscope: Scope, + superscope: Scope) -> bool { let mut s = subscope; debug!("is_subscope_of({:?}, {:?})", subscope, superscope); @@ -453,22 +630,22 @@ impl<'tcx> RegionMaps { /// Finds the nearest common ancestor (if any) of two scopes. That is, finds the smallest /// scope which is greater than or equal to both `scope_a` and `scope_b`. pub fn nearest_common_ancestor(&self, - scope_a: CodeExtent, - scope_b: CodeExtent) - -> CodeExtent { + scope_a: Scope, + scope_b: Scope) + -> Scope { if scope_a == scope_b { return scope_a; } // [1] The initial values for `a_buf` and `b_buf` are not used. // The `ancestors_of` function will return some prefix that // is re-initialized with new values (or else fallback to a // heap-allocated vector). - let mut a_buf: [CodeExtent; 32] = [scope_a /* [1] */; 32]; - let mut a_vec: Vec = vec![]; - let mut b_buf: [CodeExtent; 32] = [scope_b /* [1] */; 32]; - let mut b_vec: Vec = vec![]; - let scope_map = &self.scope_map; - let a_ancestors = ancestors_of(scope_map, scope_a, &mut a_buf, &mut a_vec); - let b_ancestors = ancestors_of(scope_map, scope_b, &mut b_buf, &mut b_vec); + let mut a_buf: [Scope; 32] = [scope_a /* [1] */; 32]; + let mut a_vec: Vec = vec![]; + let mut b_buf: [Scope; 32] = [scope_b /* [1] */; 32]; + let mut b_vec: Vec = vec![]; + let parent_map = &self.parent_map; + let a_ancestors = ancestors_of(parent_map, scope_a, &mut a_buf, &mut a_vec); + let b_ancestors = ancestors_of(parent_map, scope_b, &mut b_buf, &mut b_vec); let mut a_index = a_ancestors.len() - 1; let mut b_index = b_ancestors.len() - 1; @@ -488,13 +665,13 @@ impl<'tcx> RegionMaps { // infer::region_inference for more details. 
let a_root_scope = a_ancestors[a_index]; let b_root_scope = a_ancestors[a_index]; - return match (a_root_scope, b_root_scope) { - (CodeExtent::DestructionScope(a_root_id), - CodeExtent::DestructionScope(b_root_id)) => { - if self.fn_is_enclosed_by(a_root_id, b_root_id) { + return match (a_root_scope.data(), b_root_scope.data()) { + (ScopeData::Destruction(a_root_id), + ScopeData::Destruction(b_root_id)) => { + if self.closure_is_enclosed_by(a_root_id, b_root_id) { // `a` is enclosed by `b`, hence `b` is the ancestor of everything in `a` scope_b - } else if self.fn_is_enclosed_by(b_root_id, a_root_id) { + } else if self.closure_is_enclosed_by(b_root_id, a_root_id) { // `b` is enclosed by `a`, hence `a` is the ancestor of everything in `b` scope_a } else { @@ -503,7 +680,7 @@ impl<'tcx> RegionMaps { } } _ => { - // root ids are always Misc right now + // root ids are always Node right now bug!() } }; @@ -521,18 +698,18 @@ impl<'tcx> RegionMaps { } } - fn ancestors_of<'a, 'tcx>(scope_map: &FxHashMap, - scope: CodeExtent, - buf: &'a mut [CodeExtent; 32], - vec: &'a mut Vec) - -> &'a [CodeExtent] { + fn ancestors_of<'a, 'tcx>(parent_map: &FxHashMap, + scope: Scope, + buf: &'a mut [Scope; 32], + vec: &'a mut Vec) + -> &'a [Scope] { // debug!("ancestors_of(scope={:?})", scope); let mut scope = scope; let mut i = 0; while i < 32 { buf[i] = scope; - match scope_map.get(&scope) { + match parent_map.get(&scope) { Some(&superscope) => scope = superscope, _ => return &buf[..i+1] } @@ -543,7 +720,7 @@ impl<'tcx> RegionMaps { vec.extend_from_slice(buf); loop { vec.push(scope); - match scope_map.get(&scope) { + match parent_map.get(&scope) { Some(&superscope) => scope = superscope, _ => return &*vec } @@ -551,34 +728,38 @@ impl<'tcx> RegionMaps { } } - /// Assuming that the provided region was defined within this `RegionMaps`, - /// returns the outermost `CodeExtent` that the region outlives. - pub fn early_free_extent<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, + /// Assuming that the provided region was defined within this `ScopeTree`, + /// returns the outermost `Scope` that the region outlives. + pub fn early_free_scope<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, br: &ty::EarlyBoundRegion) - -> CodeExtent { + -> Scope { let param_owner = tcx.parent_def_id(br.def_id).unwrap(); let param_owner_id = tcx.hir.as_local_node_id(param_owner).unwrap(); - let body_id = tcx.hir.maybe_body_owned_by(param_owner_id).unwrap_or_else(|| { + let scope = tcx.hir.maybe_body_owned_by(param_owner_id).map(|body_id| { + tcx.hir.body(body_id).value.hir_id.local_id + }).unwrap_or_else(|| { // The lifetime was defined on node that doesn't own a body, // which in practice can only mean a trait or an impl, that // is the parent of a method, and that is enforced below. assert_eq!(Some(param_owner_id), self.root_parent, - "free_extent: {:?} not recognized by the region maps for {:?}", + "free_scope: {:?} not recognized by the \ + region scope tree for {:?} / {:?}", param_owner, - self.root_body.map(|body| tcx.hir.body_owner_def_id(body))); + self.root_parent.map(|id| tcx.hir.local_def_id(id)), + self.root_body.map(|hir_id| DefId::local(hir_id.owner))); // The trait/impl lifetime is in scope for the method's body. - self.root_body.unwrap() + self.root_body.unwrap().local_id }); - CodeExtent::CallSiteScope(body_id) + Scope::CallSite(scope) } - /// Assuming that the provided region was defined within this `RegionMaps`, - /// returns the outermost `CodeExtent` that the region outlives. 
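The ancestor walk performed by `nearest_common_ancestor` can be pictured with a plain `HashMap` in place of the real `Scope`/`parent_map` types. This is an illustrative sketch only, and it assumes both scopes live in the same tree (the real code additionally consults the closure tree when the roots differ):

```rust
use std::collections::HashMap;

// `parents` maps each child scope to its parent scope.
fn ancestors(parents: &HashMap<u32, u32>, mut s: u32) -> Vec<u32> {
    let mut chain = vec![s];
    while let Some(&p) = parents.get(&s) {
        chain.push(p);
        s = p;
    }
    chain // from `s` up to the root
}

fn nearest_common_ancestor(parents: &HashMap<u32, u32>, a: u32, b: u32) -> u32 {
    let (a_chain, b_chain) = (ancestors(parents, a), ancestors(parents, b));
    // Walk both chains from the shared root towards the leaves and keep the
    // deepest scope on which they still agree.
    let mut common = *a_chain.last().unwrap();
    for (x, y) in a_chain.iter().rev().zip(b_chain.iter().rev()) {
        if x == y { common = *x; } else { break; }
    }
    common
}

fn main() {
    // 1 is the root; 2 and 5 are its children; 3 and 4 are children of 2.
    let parents: HashMap<u32, u32> =
        [(2, 1), (5, 1), (3, 2), (4, 2)].iter().cloned().collect();
    assert_eq!(nearest_common_ancestor(&parents, 3, 4), 2);
    assert_eq!(nearest_common_ancestor(&parents, 3, 5), 1);
}
```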
- pub fn free_extent<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, fr: &ty::FreeRegion) - -> CodeExtent { + /// Assuming that the provided region was defined within this `ScopeTree`, + /// returns the outermost `Scope` that the region outlives. + pub fn free_scope<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, fr: &ty::FreeRegion) + -> Scope { let param_owner = match fr.bound_region { ty::BoundRegion::BrNamed(def_id, _) => { tcx.parent_def_id(def_id).unwrap() @@ -591,13 +772,28 @@ impl<'tcx> RegionMaps { assert_eq!(param_owner, fr.scope); let param_owner_id = tcx.hir.as_local_node_id(param_owner).unwrap(); - CodeExtent::CallSiteScope(tcx.hir.body_owned_by(param_owner_id)) + let body_id = tcx.hir.body_owned_by(param_owner_id); + Scope::CallSite(tcx.hir.body(body_id).value.hir_id.local_id) + } + + /// Checks whether the given scope contains a `yield`. If so, + /// returns `Some((span, expr_count))` with the span of a yield we found and + /// the number of expressions appearing before the `yield` in the body. + pub fn yield_in_scope(&self, scope: Scope) -> Option<(Span, usize)> { + self.yield_in_scope.get(&scope).cloned() + } + + /// Gives the number of expressions visited in a body. + /// Used to sanity check visit_expr call count when + /// calculating generator interiors. + pub fn body_expr_count(&self, body_id: hir::BodyId) -> Option { + self.body_expr_count.get(&body_id).map(|r| *r) } } /// Records the lifetime of a local variable as `cx.var_parent` fn record_var_lifetime(visitor: &mut RegionResolutionVisitor, - var_id: ast::NodeId, + var_id: hir::ItemLocalId, _sp: Span) { match visitor.cx.var_parent { None => { @@ -606,7 +802,7 @@ fn record_var_lifetime(visitor: &mut RegionResolutionVisitor, // extern fn isalnum(c: c_int) -> c_int } Some(parent_scope) => - visitor.region_maps.record_var_scope(var_id, parent_scope), + visitor.scope_tree.record_var_scope(var_id, parent_scope), } } @@ -640,7 +836,7 @@ fn resolve_block<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, blk: // `other_argument()` has run and also the call to `quux(..)` // itself has returned. - visitor.enter_node_extent_with_dtor(blk.id); + visitor.enter_node_scope_with_dtor(blk.hir_id.local_id); visitor.cx.var_parent = visitor.cx.parent; { @@ -657,10 +853,10 @@ fn resolve_block<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, blk: // has the previous subscope in the block as a parent, // except for the first such subscope, which has the // block itself as a parent. 
- visitor.enter_code_extent( - CodeExtent::Remainder(BlockRemainder { - block: blk.id, - first_statement_index: i as u32 + visitor.enter_scope( + Scope::Remainder(BlockRemainder { + block: blk.hir_id.local_id, + first_statement_index: FirstStatementIndex::new(i) }) ); visitor.cx.var_parent = visitor.cx.parent; @@ -674,39 +870,41 @@ fn resolve_block<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, blk: } fn resolve_arm<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, arm: &'tcx hir::Arm) { - visitor.terminating_scopes.insert(arm.body.id); + visitor.terminating_scopes.insert(arm.body.hir_id.local_id); if let Some(ref expr) = arm.guard { - visitor.terminating_scopes.insert(expr.id); + visitor.terminating_scopes.insert(expr.hir_id.local_id); } intravisit::walk_arm(visitor, arm); } fn resolve_pat<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, pat: &'tcx hir::Pat) { - visitor.record_code_extent(CodeExtent::Misc(pat.id)); + visitor.record_child_scope(Scope::Node(pat.hir_id.local_id)); // If this is a binding then record the lifetime of that binding. if let PatKind::Binding(..) = pat.node { - record_var_lifetime(visitor, pat.id, pat.span); + record_var_lifetime(visitor, pat.hir_id.local_id, pat.span); } intravisit::walk_pat(visitor, pat); + + visitor.expr_and_pat_count += 1; } fn resolve_stmt<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, stmt: &'tcx hir::Stmt) { - let stmt_id = stmt.node.id(); + let stmt_id = visitor.tcx.hir.node_to_hir_id(stmt.node.id()).local_id; debug!("resolve_stmt(stmt.id={:?})", stmt_id); // Every statement will clean up the temporaries created during // execution of that statement. Therefore each statement has an - // associated destruction scope that represents the extent of the - // statement plus its destructors, and thus the extent for which + // associated destruction scope that represents the scope of the + // statement plus its destructors, and thus the scope for which // regions referenced by the destructors need to survive. visitor.terminating_scopes.insert(stmt_id); let prev_parent = visitor.cx.parent; - visitor.enter_node_extent_with_dtor(stmt_id); + visitor.enter_node_scope_with_dtor(stmt_id); intravisit::walk_stmt(visitor, stmt); @@ -717,11 +915,11 @@ fn resolve_expr<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, expr: debug!("resolve_expr(expr.id={:?})", expr.id); let prev_cx = visitor.cx; - visitor.enter_node_extent_with_dtor(expr.id); + visitor.enter_node_scope_with_dtor(expr.hir_id.local_id); { let terminating_scopes = &mut visitor.terminating_scopes; - let mut terminating = |id: ast::NodeId| { + let mut terminating = |id: hir::ItemLocalId| { terminating_scopes.insert(id); }; match expr.node { @@ -733,27 +931,27 @@ fn resolve_expr<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, expr: hir::ExprBinary(codemap::Spanned { node: hir::BiOr, .. }, _, ref r) => { // For shortcircuiting operators, mark the RHS as a terminating // scope since it only executes conditionally. 
- terminating(r.id); + terminating(r.hir_id.local_id); } hir::ExprIf(ref expr, ref then, Some(ref otherwise)) => { - terminating(expr.id); - terminating(then.id); - terminating(otherwise.id); + terminating(expr.hir_id.local_id); + terminating(then.hir_id.local_id); + terminating(otherwise.hir_id.local_id); } hir::ExprIf(ref expr, ref then, None) => { - terminating(expr.id); - terminating(then.id); + terminating(expr.hir_id.local_id); + terminating(then.hir_id.local_id); } hir::ExprLoop(ref body, _, _) => { - terminating(body.id); + terminating(body.hir_id.local_id); } hir::ExprWhile(ref expr, ref body, _) => { - terminating(expr.id); - terminating(body.id); + terminating(expr.hir_id.local_id); + terminating(body.hir_id.local_id); } hir::ExprMatch(..) => { @@ -789,7 +987,7 @@ fn resolve_expr<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, expr: match expr.node { // Manually recurse over closures, because they are the only // case of nested bodies that share the parent environment. - hir::ExprClosure(.., body, _) => { + hir::ExprClosure(.., body, _, _) => { let body = visitor.tcx.hir.body(body); visitor.visit_body(body); } @@ -797,20 +995,36 @@ fn resolve_expr<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, expr: _ => intravisit::walk_expr(visitor, expr) } + visitor.expr_and_pat_count += 1; + + if let hir::ExprYield(..) = expr.node { + // Mark this expr's scope and all parent scopes as containing `yield`. + let mut scope = Scope::Node(expr.hir_id.local_id); + loop { + visitor.scope_tree.yield_in_scope.insert(scope, + (expr.span, visitor.expr_and_pat_count)); + + // Keep traversing up while we can. + match visitor.scope_tree.parent_map.get(&scope) { + // Don't cross from closure bodies to their parent. + Some(&superscope) => match superscope.data() { + ScopeData::CallSite(_) => break, + _ => scope = superscope + }, + None => break + } + } + } + visitor.cx = prev_cx; } fn resolve_local<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, - local: &'tcx hir::Local) { - debug!("resolve_local(local.id={:?},local.init={:?})", - local.id,local.init.is_some()); + pat: Option<&'tcx hir::Pat>, + init: Option<&'tcx hir::Expr>) { + debug!("resolve_local(pat={:?}, init={:?})", pat, init); - // For convenience in trans, associate with the local-id the var - // scope that will be used for any bindings declared in this - // pattern. let blk_scope = visitor.cx.var_parent; - let blk_scope = blk_scope.expect("locals must be within a block"); - visitor.region_maps.record_var_scope(local.id, blk_scope); // As an exception to the normal rules governing temporary // lifetimes, initializers in a let have a temporary lifetime @@ -870,15 +1084,22 @@ fn resolve_local<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, // // FIXME(#6308) -- Note that `[]` patterns work more smoothly post-DST. 
- if let Some(ref expr) = local.init { + if let Some(expr) = init { record_rvalue_scope_if_borrow_expr(visitor, &expr, blk_scope); - if is_binding_pat(&local.pat) { - record_rvalue_scope(visitor, &expr, blk_scope); + if let Some(pat) = pat { + if is_binding_pat(pat) { + record_rvalue_scope(visitor, &expr, blk_scope); + } } } - intravisit::walk_local(visitor, local); + if let Some(pat) = pat { + visitor.visit_pat(pat); + } + if let Some(expr) = init { + visitor.visit_expr(expr); + } /// True if `pat` match the `P&` nonterminal: /// @@ -952,7 +1173,7 @@ fn resolve_local<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, fn record_rvalue_scope_if_borrow_expr<'a, 'tcx>( visitor: &mut RegionResolutionVisitor<'a, 'tcx>, expr: &hir::Expr, - blk_id: CodeExtent) + blk_id: Option) { match expr.node { hir::ExprAddrOf(_, ref subexpr) => { @@ -1002,7 +1223,7 @@ fn resolve_local<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, /// Note: ET is intended to match "rvalues or lvalues based on rvalues". fn record_rvalue_scope<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, expr: &hir::Expr, - blk_scope: CodeExtent) { + blk_scope: Option) { let mut expr = expr; loop { // Note: give all the expressions matching `ET` with the @@ -1010,7 +1231,7 @@ fn resolve_local<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, // because in trans if we must compile e.g. `*rvalue()` // into a temporary, we request the temporary scope of the // outer expression. - visitor.region_maps.record_rvalue_scope(expr.id, blk_scope); + visitor.scope_tree.record_rvalue_scope(expr.hir_id.local_id, blk_scope); match expr.node { hir::ExprAddrOf(_, ref subexpr) | @@ -1030,27 +1251,27 @@ fn resolve_local<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, impl<'a, 'tcx> RegionResolutionVisitor<'a, 'tcx> { /// Records the current parent (if any) as the parent of `child_scope`. - fn record_code_extent(&mut self, child_scope: CodeExtent) { + fn record_child_scope(&mut self, child_scope: Scope) { let parent = self.cx.parent; - self.region_maps.record_code_extent(child_scope, parent); + self.scope_tree.record_scope_parent(child_scope, parent); } /// Records the current parent (if any) as the parent of `child_scope`, /// and sets `child_scope` as the new current parent. - fn enter_code_extent(&mut self, child_scope: CodeExtent) { - self.record_code_extent(child_scope); + fn enter_scope(&mut self, child_scope: Scope) { + self.record_child_scope(child_scope); self.cx.parent = Some(child_scope); } - fn enter_node_extent_with_dtor(&mut self, id: ast::NodeId) { + fn enter_node_scope_with_dtor(&mut self, id: hir::ItemLocalId) { // If node was previously marked as a terminating scope during the // recursive visit of its parent node in the AST, then we need to - // account for the destruction scope representing the extent of + // account for the destruction scope representing the scope of // the destructors that run immediately after it completes. 
if self.terminating_scopes.contains(&id) { - self.enter_code_extent(CodeExtent::DestructionScope(id)); + self.enter_scope(Scope::Destruction(id)); } - self.enter_code_extent(CodeExtent::Misc(id)); + self.enter_scope(Scope::Node(id)); } } @@ -1073,22 +1294,18 @@ impl<'a, 'tcx> Visitor<'tcx> for RegionResolutionVisitor<'a, 'tcx> { body_id, self.cx.parent); + let outer_ec = mem::replace(&mut self.expr_and_pat_count, 0); let outer_cx = self.cx; - let outer_ts = mem::replace(&mut self.terminating_scopes, NodeSet()); - - // Only functions have an outer terminating (drop) scope, - // while temporaries in constant initializers are 'static. - if let MirSource::Fn(_) = MirSource::from_node(self.tcx, owner_id) { - self.terminating_scopes.insert(body_id.node_id); - } + let outer_ts = mem::replace(&mut self.terminating_scopes, FxHashSet()); + self.terminating_scopes.insert(body.value.hir_id.local_id); if let Some(root_id) = self.cx.root_id { - self.region_maps.record_fn_parent(body_id.node_id, root_id); + self.scope_tree.record_closure_parent(body.value.hir_id.local_id, root_id); } - self.cx.root_id = Some(body_id.node_id); + self.cx.root_id = Some(body.value.hir_id.local_id); - self.enter_code_extent(CodeExtent::CallSiteScope(body_id)); - self.enter_code_extent(CodeExtent::ParameterScope(body_id)); + self.enter_scope(Scope::CallSite(body.value.hir_id.local_id)); + self.enter_scope(Scope::Arguments(body.value.hir_id.local_id)); // The arguments and `self` are parented to the fn. self.cx.var_parent = self.cx.parent.take(); @@ -1098,9 +1315,37 @@ impl<'a, 'tcx> Visitor<'tcx> for RegionResolutionVisitor<'a, 'tcx> { // The body of the every fn is a root scope. self.cx.parent = self.cx.var_parent; - self.visit_expr(&body.value); + if let MirSource::Fn(_) = MirSource::from_node(self.tcx, owner_id) { + self.visit_expr(&body.value); + } else { + // Only functions have an outer terminating (drop) scope, while + // temporaries in constant initializers may be 'static, but only + // according to rvalue lifetime semantics, using the same + // syntactical rules used for let initializers. + // + // E.g. in `let x = &f();`, the temporary holding the result from + // the `f()` call lives for the entirety of the surrounding block. + // + // Similarly, `const X: ... = &f();` would have the result of `f()` + // live for `'static`, implying (if Drop restrictions on constants + // ever get lifted) that the value *could* have a destructor, but + // it'd get leaked instead of the destructor running during the + // evaluation of `X` (if at all allowed by CTFE). + // + // However, `const Y: ... = g(&f());`, like `let y = g(&f());`, + // would *not* let the `f()` temporary escape into an outer scope + // (i.e. `'static`), which means that after `g` returns, it drops, + // and all the associated destruction scope rules apply. + self.cx.var_parent = None; + resolve_local(self, None, Some(&body.value)); + } + + if body.is_generator { + self.scope_tree.body_expr_count.insert(body_id, self.expr_and_pat_count); + } // Restore context we had at the start. 
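To restate the comment above in ordinary code (the `f`/`g` calls of the comment are replaced here by expressions that are actually legal in constants; this is an illustration, not part of the patch):

```rust
// In a constant initializer the borrowed temporary escapes into `'static`;
// this is the case that the `None` entries in `rvalue_scopes` encode.
const X: &'static [i32] = &[1, 2, 3];

fn len_of(s: &str) -> usize { s.len() }

fn demo() {
    // In a fn body the temporary created by `String::from` lives only for
    // the enclosing block, by the usual `let x = &f();` rule.
    let x: &String = &String::from("hello");

    // A borrow passed *through* another call (`g(&f())` in the comment) is
    // never extended: the `String` below is dropped at the end of the
    // statement, in a constant just as in a fn body.
    let n = len_of(&String::from("world"));

    println!("{} {} {}", X.len(), x.len(), n);
}
```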
+ self.expr_and_pat_count = outer_ec; self.cx = outer_cx; self.terminating_scopes = outer_ts; } @@ -1118,32 +1363,34 @@ impl<'a, 'tcx> Visitor<'tcx> for RegionResolutionVisitor<'a, 'tcx> { resolve_expr(self, ex); } fn visit_local(&mut self, l: &'tcx Local) { - resolve_local(self, l); + resolve_local(self, Some(&l.pat), l.init.as_ref().map(|e| &**e)); } } -fn region_maps<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) - -> Rc +fn region_scope_tree<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) + -> Rc { let closure_base_def_id = tcx.closure_base_def_id(def_id); if closure_base_def_id != def_id { - return tcx.region_maps(closure_base_def_id); + return tcx.region_scope_tree(closure_base_def_id); } let id = tcx.hir.as_local_node_id(def_id).unwrap(); - let maps = if let Some(body) = tcx.hir.maybe_body_owned_by(id) { + let scope_tree = if let Some(body_id) = tcx.hir.maybe_body_owned_by(id) { let mut visitor = RegionResolutionVisitor { tcx, - region_maps: RegionMaps::new(), + scope_tree: ScopeTree::default(), + expr_and_pat_count: 0, cx: Context { root_id: None, parent: None, var_parent: None, }, - terminating_scopes: NodeSet(), + terminating_scopes: FxHashSet(), }; - visitor.region_maps.root_body = Some(body); + let body = tcx.hir.body(body_id); + visitor.scope_tree.root_body = Some(body.value.hir_id); // If the item is an associated const or a method, // record its impl/trait parent, as it can also have @@ -1151,24 +1398,55 @@ fn region_maps<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) match tcx.hir.get(id) { hir::map::NodeImplItem(_) | hir::map::NodeTraitItem(_) => { - visitor.region_maps.root_parent = Some(tcx.hir.get_parent(id)); + visitor.scope_tree.root_parent = Some(tcx.hir.get_parent(id)); } _ => {} } - visitor.visit_body(tcx.hir.body(body)); + visitor.visit_body(body); - visitor.region_maps + visitor.scope_tree } else { - RegionMaps::new() + ScopeTree::default() }; - Rc::new(maps) + Rc::new(scope_tree) } pub fn provide(providers: &mut Providers) { *providers = Providers { - region_maps, + region_scope_tree, ..*providers }; } + +impl<'gcx> HashStable> for ScopeTree { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + let ScopeTree { + root_body, + root_parent, + ref body_expr_count, + ref parent_map, + ref var_map, + ref destruction_scopes, + ref rvalue_scopes, + ref closure_tree, + ref yield_in_scope, + } = *self; + + hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { + root_body.hash_stable(hcx, hasher); + root_parent.hash_stable(hcx, hasher); + }); + + body_expr_count.hash_stable(hcx, hasher); + parent_map.hash_stable(hcx, hasher); + var_map.hash_stable(hcx, hasher); + destruction_scopes.hash_stable(hcx, hasher); + rvalue_scopes.hash_stable(hcx, hasher); + closure_tree.hash_stable(hcx, hasher); + yield_in_scope.hash_stable(hcx, hasher); + } +} diff --git a/src/librustc/middle/resolve_lifetime.rs b/src/librustc/middle/resolve_lifetime.rs index d346ac9dd9..d0c5460fa9 100644 --- a/src/librustc/middle/resolve_lifetime.rs +++ b/src/librustc/middle/resolve_lifetime.rs @@ -16,9 +16,10 @@ //! way. Therefore we break lifetime name resolution into a separate pass. 
use hir::map::Map; -use session::Session; use hir::def::Def; use hir::def_id::DefId; +use middle::cstore::CrateStore; +use session::Session; use ty; use std::cell::Cell; @@ -38,22 +39,26 @@ use hir::intravisit::{self, Visitor, NestedVisitorMap}; #[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug)] pub enum Region { Static, - EarlyBound(/* index */ u32, /* lifetime decl */ ast::NodeId), - LateBound(ty::DebruijnIndex, /* lifetime decl */ ast::NodeId), + EarlyBound(/* index */ u32, /* lifetime decl */ DefId), + LateBound(ty::DebruijnIndex, /* lifetime decl */ DefId), LateBoundAnon(ty::DebruijnIndex, /* anon index */ u32), - Free(DefId, /* lifetime decl */ ast::NodeId), + Free(DefId, /* lifetime decl */ DefId), } impl Region { - fn early(index: &mut u32, def: &hir::LifetimeDef) -> (ast::Name, Region) { + fn early(hir_map: &Map, index: &mut u32, def: &hir::LifetimeDef) + -> (hir::LifetimeName, Region) + { let i = *index; *index += 1; - (def.lifetime.name, Region::EarlyBound(i, def.lifetime.id)) + let def_id = hir_map.local_def_id(def.lifetime.id); + (def.lifetime.name, Region::EarlyBound(i, def_id)) } - fn late(def: &hir::LifetimeDef) -> (ast::Name, Region) { + fn late(hir_map: &Map, def: &hir::LifetimeDef) -> (hir::LifetimeName, Region) { let depth = ty::DebruijnIndex::new(1); - (def.lifetime.name, Region::LateBound(depth, def.lifetime.id)) + let def_id = hir_map.local_def_id(def.lifetime.id); + (def.lifetime.name, Region::LateBound(depth, def_id)) } fn late_anon(index: &Cell) -> Region { @@ -63,7 +68,7 @@ impl Region { Region::LateBoundAnon(depth, i) } - fn id(&self) -> Option { + fn id(&self) -> Option { match *self { Region::Static | Region::LateBoundAnon(..) => None, @@ -153,10 +158,6 @@ pub struct NamedRegionMap { // (b) it DOES appear in the arguments. pub late_bound: NodeSet, - // Contains the node-ids for lifetimes that were (incorrectly) categorized - // as late-bound, until #32330 was fixed. - pub issue_32330: NodeMap, - // For each type and trait definition, maps type parameters // to the trait object lifetime defaults computed from them. pub object_lifetime_defaults: NodeMap>, @@ -164,6 +165,7 @@ pub struct NamedRegionMap { struct LifetimeContext<'a, 'tcx: 'a> { sess: &'a Session, + cstore: &'a CrateStore, hir_map: &'a Map<'tcx>, map: &'a mut NamedRegionMap, scope: ScopeRef<'a>, @@ -198,7 +200,7 @@ enum Scope<'a> { /// it should be shifted by the number of `Binder`s in between the /// declaration `Binder` and the location it's referenced from. 
Binder { - lifetimes: FxHashMap, + lifetimes: FxHashMap, s: ScopeRef<'a> }, @@ -255,18 +257,19 @@ type ScopeRef<'a> = &'a Scope<'a>; const ROOT_SCOPE: ScopeRef<'static> = &Scope::Root; pub fn krate(sess: &Session, + cstore: &CrateStore, hir_map: &Map) -> Result { let krate = hir_map.krate(); let mut map = NamedRegionMap { defs: NodeMap(), late_bound: NodeSet(), - issue_32330: NodeMap(), object_lifetime_defaults: compute_object_lifetime_defaults(sess, hir_map), }; sess.track_errors(|| { let mut visitor = LifetimeContext { sess, + cstore, hir_map, map: &mut map, scope: ROOT_SCOPE, @@ -303,7 +306,7 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { fn visit_item(&mut self, item: &'tcx hir::Item) { match item.node { hir::ItemFn(ref decl, _, _, _, ref generics, _) => { - self.visit_early_late(item.id, None, decl, generics, |this| { + self.visit_early_late(None, decl, generics, |this| { intravisit::walk_item(this, item); }); } @@ -338,7 +341,7 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { 0 }; let lifetimes = generics.lifetimes.iter().map(|def| { - Region::early(&mut index, def) + Region::early(self.hir_map, &mut index, def) }).collect(); let scope = Scope::Binder { lifetimes, @@ -355,7 +358,7 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem) { match item.node { hir::ForeignItemFn(ref decl, _, ref generics) => { - self.visit_early_late(item.id, None, decl, generics, |this| { + self.visit_early_late(None, decl, generics, |this| { intravisit::walk_foreign_item(this, item); }) } @@ -369,7 +372,9 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { match ty.node { hir::TyBareFn(ref c) => { let scope = Scope::Binder { - lifetimes: c.lifetimes.iter().map(Region::late).collect(), + lifetimes: c.lifetimes.iter().map(|def| { + Region::late(self.hir_map, def) + }).collect(), s: self.scope }; self.with(scope, |old_scope, this| { @@ -406,7 +411,6 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem) { if let hir::TraitItemKind::Method(ref sig, _) = trait_item.node { self.visit_early_late( - trait_item.id, Some(self.hir_map.get_parent(trait_item.id)), &sig.decl, &sig.generics, |this| intravisit::walk_trait_item(this, trait_item)) @@ -418,7 +422,6 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem) { if let hir::ImplItemKind::Method(ref sig, _) = impl_item.node { self.visit_early_late( - impl_item.id, Some(self.hir_map.get_parent(impl_item.id)), &sig.decl, &sig.generics, |this| intravisit::walk_impl_item(this, impl_item)) @@ -442,7 +445,9 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { fn visit_path(&mut self, path: &'tcx hir::Path, _: ast::NodeId) { for (i, segment) in path.segments.iter().enumerate() { let depth = path.segments.len() - i - 1; - self.visit_segment_parameters(path.def, depth, &segment.parameters); + if let Some(ref parameters) = segment.parameters { + self.visit_segment_parameters(path.def, depth, parameters); + } } } @@ -470,7 +475,9 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { if !bound_lifetimes.is_empty() { self.trait_ref_hack = true; let scope = Scope::Binder { - lifetimes: bound_lifetimes.iter().map(Region::late).collect(), + lifetimes: bound_lifetimes.iter().map(|def| { + Region::late(self.hir_map, def) + }).collect(), s: self.scope }; let result = self.with(scope, |old_scope, this| { @@ 
-515,7 +522,9 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { "nested quantification of lifetimes"); } let scope = Scope::Binder { - lifetimes: trait_ref.bound_lifetimes.iter().map(Region::late).collect(), + lifetimes: trait_ref.bound_lifetimes.iter().map(|def| { + Region::late(self.hir_map, def) + }).collect(), s: self.scope }; self.with(scope, |old_scope, this| { @@ -649,11 +658,14 @@ fn extract_labels(ctxt: &mut LifetimeContext, body: &hir::Body) { Scope::Binder { ref lifetimes, s } => { // FIXME (#24278): non-hygienic comparison - if let Some(def) = lifetimes.get(&label) { + if let Some(def) = lifetimes.get(&hir::LifetimeName::Name(label)) { + let node_id = hir_map.as_local_node_id(def.id().unwrap()) + .unwrap(); + signal_shadowing_problem( sess, label, - original_lifetime(hir_map.span(def.id().unwrap())), + original_lifetime(hir_map.span(node_id)), shadower_label(label_span)); return; } @@ -684,7 +696,7 @@ fn compute_object_lifetime_defaults(sess: &Session, hir_map: &Map) Set1::Empty => "BaseDefault".to_string(), Set1::One(Region::Static) => "'static".to_string(), Set1::One(Region::EarlyBound(i, _)) => { - generics.lifetimes[i as usize].lifetime.name.to_string() + generics.lifetimes[i as usize].lifetime.name.name().to_string() } Set1::One(_) => bug!(), Set1::Many => "Ambiguous".to_string(), @@ -706,7 +718,7 @@ fn compute_object_lifetime_defaults(sess: &Session, hir_map: &Map) /// for each type parameter. fn object_lifetime_defaults_for_item(hir_map: &Map, generics: &hir::Generics) -> Vec { - fn add_bounds(set: &mut Set1, bounds: &[hir::TyParamBound]) { + fn add_bounds(set: &mut Set1, bounds: &[hir::TyParamBound]) { for bound in bounds { if let hir::RegionTyParamBound(ref lifetime) = *bound { set.insert(lifetime.name); @@ -746,13 +758,14 @@ fn object_lifetime_defaults_for_item(hir_map: &Map, generics: &hir::Generics) match set { Set1::Empty => Set1::Empty, Set1::One(name) => { - if name == "'static" { + if name == hir::LifetimeName::Static { Set1::One(Region::Static) } else { generics.lifetimes.iter().enumerate().find(|&(_, def)| { def.lifetime.name == name }).map_or(Set1::Many, |(i, def)| { - Set1::One(Region::EarlyBound(i as u32, def.lifetime.id)) + let def_id = hir_map.local_def_id(def.lifetime.id); + Set1::One(Region::EarlyBound(i as u32, def_id)) }) } } @@ -772,12 +785,13 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { fn with(&mut self, wrap_scope: Scope, f: F) where F: for<'b> FnOnce(ScopeRef, &mut LifetimeContext<'b, 'tcx>), { - let LifetimeContext {sess, hir_map, ref mut map, ..} = *self; + let LifetimeContext {sess, cstore, hir_map, ref mut map, ..} = *self; let labels_in_fn = replace(&mut self.labels_in_fn, vec![]); let xcrate_object_lifetime_defaults = replace(&mut self.xcrate_object_lifetime_defaults, DefIdMap()); let mut this = LifetimeContext { sess, + cstore, hir_map, map: *map, scope: &wrap_scope, @@ -811,18 +825,13 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { /// bound lifetimes are resolved by name and associated with a binder id (`binder_id`), so the /// ordering is not important there. fn visit_early_late(&mut self, - fn_id: ast::NodeId, parent_id: Option, decl: &'tcx hir::FnDecl, generics: &'tcx hir::Generics, walk: F) where F: for<'b, 'c> FnOnce(&'b mut LifetimeContext<'c, 'tcx>), { - let fn_def_id = self.hir_map.local_def_id(fn_id); - insert_late_bound_lifetimes(self.map, - fn_def_id, - decl, - generics); + insert_late_bound_lifetimes(self.map, decl, generics); // Find the start of nested early scopes, e.g. in methods. 
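The object-lifetime defaults computed a few hunks above reflect a language rule that is easy to demonstrate directly. The following snippet is illustrative and not from the patch: when a type parameter carries exactly one lifetime bound, a trait object substituted for it defaults to that lifetime.

```rust
trait Trait {}

// `T` has exactly one lifetime bound (`'a`), so the `Set1::One(...)` case
// applies and a trait object used for `T` defaults to that lifetime.
struct Holder<'a, T: 'a + ?Sized>(&'a T);

// `Holder<'a, dyn Trait>` is read as `Holder<'a, dyn Trait + 'a>`.
type Defaulted<'a> = Holder<'a, dyn Trait>;
```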
let mut index = 0; @@ -842,9 +851,9 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { let lifetimes = generics.lifetimes.iter().map(|def| { if self.map.late_bound.contains(&def.lifetime.id) { - Region::late(def) + Region::late(self.hir_map, def) } else { - Region::early(&mut index, def) + Region::early(self.hir_map, &mut index, def) } }).collect(); @@ -917,7 +926,7 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { self.insert_lifetime(lifetime_ref, def); } else { struct_span_err!(self.sess, lifetime_ref.span, E0261, - "use of undeclared lifetime name `{}`", lifetime_ref.name) + "use of undeclared lifetime name `{}`", lifetime_ref.name.name()) .span_label(lifetime_ref.span, "undeclared lifetime") .emit(); } @@ -944,7 +953,7 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { let def_key = if def_id.is_local() { this.hir_map.def_key(def_id) } else { - this.sess.cstore.def_key(def_id) + this.cstore.def_key(def_id) }; DefId { krate: def_id.krate, @@ -988,9 +997,9 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { let unsubst = if let Some(id) = self.hir_map.as_local_node_id(def_id) { &map.object_lifetime_defaults[&id] } else { - let cstore = &self.sess.cstore; + let cstore = self.cstore; self.xcrate_object_lifetime_defaults.entry(def_id).or_insert_with(|| { - cstore.item_generics_cloned(def_id).types.into_iter().map(|def| { + cstore.item_generics_cloned_untracked(def_id).types.into_iter().map(|def| { def.object_lifetime_default }).collect() }) @@ -1417,13 +1426,17 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { let lifetime_i = &lifetimes[i]; for lifetime in lifetimes { - if lifetime.lifetime.is_static() { - let lifetime = lifetime.lifetime; - let mut err = struct_span_err!(self.sess, lifetime.span, E0262, - "invalid lifetime parameter name: `{}`", lifetime.name); - err.span_label(lifetime.span, - format!("{} is a reserved lifetime name", lifetime.name)); - err.emit(); + match lifetime.lifetime.name { + hir::LifetimeName::Static | hir::LifetimeName::Underscore => { + let lifetime = lifetime.lifetime; + let name = lifetime.name.name(); + let mut err = struct_span_err!(self.sess, lifetime.span, E0262, + "invalid lifetime parameter name: `{}`", name); + err.span_label(lifetime.span, + format!("{} is a reserved lifetime name", name)); + err.emit(); + } + hir::LifetimeName::Implicit | hir::LifetimeName::Name(_) => {} } } @@ -1434,7 +1447,7 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { if lifetime_i.lifetime.name == lifetime_j.lifetime.name { struct_span_err!(self.sess, lifetime_j.lifetime.span, E0263, "lifetime name `{}` declared twice in the same scope", - lifetime_j.lifetime.name) + lifetime_j.lifetime.name.name()) .span_label(lifetime_j.lifetime.span, "declared twice") .span_label(lifetime_i.lifetime.span, @@ -1447,15 +1460,27 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { self.check_lifetime_def_for_shadowing(old_scope, &lifetime_i.lifetime); for bound in &lifetime_i.bounds { - if !bound.is_static() { - self.resolve_lifetime_ref(bound); - } else { - self.insert_lifetime(bound, Region::Static); - self.sess.struct_span_warn(lifetime_i.lifetime.span.to(bound.span), - &format!("unnecessary lifetime parameter `{}`", lifetime_i.lifetime.name)) - .help(&format!("you can use the `'static` lifetime directly, in place \ - of `{}`", lifetime_i.lifetime.name)) - .emit(); + match bound.name { + hir::LifetimeName::Underscore => { + let mut err = struct_span_err!(self.sess, bound.span, E0637, + "invalid lifetime bound name: `'_`"); + err.span_label(bound.span, "`'_` is a reserved lifetime name"); + err.emit(); + } + 
hir::LifetimeName::Static => { + self.insert_lifetime(bound, Region::Static); + self.sess.struct_span_warn(lifetime_i.lifetime.span.to(bound.span), + &format!("unnecessary lifetime parameter `{}`", + lifetime_i.lifetime.name.name())) + .help(&format!( + "you can use the `'static` lifetime directly, in place \ + of `{}`", lifetime_i.lifetime.name.name())) + .emit(); + } + hir::LifetimeName::Implicit | + hir::LifetimeName::Name(_) => { + self.resolve_lifetime_ref(bound); + } } } } @@ -1467,9 +1492,9 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { { for &(label, label_span) in &self.labels_in_fn { // FIXME (#24278): non-hygienic comparison - if lifetime.name == label { + if lifetime.name.name() == label { signal_shadowing_problem(self.sess, - lifetime.name, + label, original_label(label_span), shadower_lifetime(&lifetime)); return; @@ -1490,10 +1515,14 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { Scope::Binder { ref lifetimes, s } => { if let Some(&def) = lifetimes.get(&lifetime.name) { + let node_id = self.hir_map + .as_local_node_id(def.id().unwrap()) + .unwrap(); + signal_shadowing_problem( self.sess, - lifetime.name, - original_lifetime(self.hir_map.span(def.id().unwrap())), + lifetime.name.name(), + original_lifetime(self.hir_map.span(node_id)), shadower_lifetime(&lifetime)); return; } @@ -1534,7 +1563,6 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { /// not amongst the inputs to a projection. In other words, `<&'a /// T as Trait<''b>>::Foo` does not constrain `'a` or `'b`. fn insert_late_bound_lifetimes(map: &mut NamedRegionMap, - fn_def_id: DefId, decl: &hir::FnDecl, generics: &hir::Generics) { debug!("insert_late_bound_lifetimes(decl={:?}, generics={:?})", decl, generics); @@ -1592,22 +1620,9 @@ fn insert_late_bound_lifetimes(map: &mut NamedRegionMap, // any `impl Trait` in the return type? early-bound. if appears_in_output.impl_trait { continue; } - // does not appear in the inputs, but appears in the return - // type? eventually this will be early-bound, but for now we - // just mark it so we can issue warnings. - let constrained_by_input = constrained_by_input.regions.contains(&name); - let appears_in_output = appears_in_output.regions.contains(&name); - if !constrained_by_input && appears_in_output { - debug!("inserting issue_32330 entry for {:?}, {:?} on {:?}", - lifetime.lifetime.id, - name, - fn_def_id); - map.issue_32330.insert( - lifetime.lifetime.id, - ty::Issue32330 { - fn_def_id, - region_name: name, - }); + // does not appear in the inputs, but appears in the return type? early-bound. 
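The early/late classification that the surrounding hunk implements can be summarized with two signatures (illustrative, not from the patch):

```rust
// `'a` appears in the argument types (and in no where-clause), so
// `insert_late_bound_lifetimes` classifies it as late-bound.
fn late<'a>(x: &'a u8) -> &'a u8 { x }

// `'s` appears only in the return type, so it is early-bound.
fn early<'s>() -> &'s str { "literals are 'static and coerce to 's" }
```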
+ if !constrained_by_input.regions.contains(&name) && + appears_in_output.regions.contains(&name) { continue; } @@ -1622,7 +1637,7 @@ fn insert_late_bound_lifetimes(map: &mut NamedRegionMap, return; struct ConstrainedCollector { - regions: FxHashSet, + regions: FxHashSet, } impl<'v> Visitor<'v> for ConstrainedCollector { @@ -1662,7 +1677,7 @@ fn insert_late_bound_lifetimes(map: &mut NamedRegionMap, } struct AllCollector { - regions: FxHashSet, + regions: FxHashSet, impl_trait: bool } diff --git a/src/librustc/middle/stability.rs b/src/librustc/middle/stability.rs index d2ed29a3a0..4e4fc8b311 100644 --- a/src/librustc/middle/stability.rs +++ b/src/librustc/middle/stability.rs @@ -15,20 +15,19 @@ pub use self::StabilityLevel::*; use lint; use hir::def::Def; -use hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId, DefIndex, LOCAL_CRATE}; +use hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId, LOCAL_CRATE}; use ty::{self, TyCtxt}; use middle::privacy::AccessLevels; -use session::Session; use syntax::symbol::Symbol; use syntax_pos::{Span, DUMMY_SP}; use syntax::ast; use syntax::ast::{NodeId, Attribute}; use syntax::feature_gate::{GateIssue, emit_feature_err, find_lang_feature_accepted_version}; use syntax::attr::{self, Stability, Deprecation}; -use util::nodemap::{DefIdMap, FxHashSet, FxHashMap}; +use util::nodemap::{FxHashSet, FxHashMap}; use hir; -use hir::{Item, Generics, StructField, Variant}; +use hir::{Item, Generics, StructField, Variant, HirId}; use hir::intravisit::{self, Visitor, NestedVisitorMap}; use std::mem::replace; @@ -63,19 +62,23 @@ pub struct DeprecationEntry { pub attr: Deprecation, /// The def id where the attr was originally attached. `None` for non-local /// `DefId`'s. - origin: Option, + origin: Option, } +impl_stable_hash_for!(struct self::DeprecationEntry { + attr, + origin +}); + impl DeprecationEntry { - fn local(attr: Deprecation, id: DefId) -> DeprecationEntry { - assert!(id.is_local()); + fn local(attr: Deprecation, id: HirId) -> DeprecationEntry { DeprecationEntry { attr, - origin: Some(id.index), + origin: Some(id), } } - fn external(attr: Deprecation) -> DeprecationEntry { + pub fn external(attr: Deprecation) -> DeprecationEntry { DeprecationEntry { attr, origin: None, @@ -94,19 +97,23 @@ impl DeprecationEntry { pub struct Index<'tcx> { /// This is mostly a cache, except the stabilities of local items /// are filled by the annotator. - stab_map: DefIdMap>, - depr_map: DefIdMap>, + stab_map: FxHashMap, + depr_map: FxHashMap, /// Maps for each crate whether it is part of the staged API. staged_api: FxHashMap, /// Features enabled for this crate. active_features: FxHashSet, - - /// Features used by this crate. Updated before and during typeck. - used_features: FxHashMap } +impl_stable_hash_for!(struct self::Index<'tcx> { + stab_map, + depr_map, + staged_api, + active_features +}); + // A private tree-walker for producing an Index. 
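The inheritance behaviour of the `Annotator` defined just below can be sketched independently of the compiler types (the `Item`/`annotate` names are assumed for this illustration): a node without its own attribute receives the entry recorded for its nearest annotated ancestor.

```rust
use std::collections::HashMap;

struct Item { id: u32, depr: Option<&'static str>, children: Vec<Item> }

fn annotate(item: &Item, parent: Option<&'static str>, index: &mut HashMap<u32, &'static str>) {
    // Use the node's own attribute if present, otherwise the inherited one.
    let current = item.depr.or(parent);
    if let Some(msg) = current {
        index.insert(item.id, msg);
    }
    for child in &item.children {
        annotate(child, current, index);
    }
}

fn main() {
    let tree = Item {
        id: 0,
        depr: Some("use `new_api` instead"),
        children: vec![Item { id: 1, depr: None, children: vec![] }],
    };
    let mut index = HashMap::new();
    annotate(&tree, None, &mut index);
    assert_eq!(index[&1], "use `new_api` instead"); // the child inherits the entry
}
```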
struct Annotator<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -178,8 +185,8 @@ impl<'a, 'tcx: 'a> Annotator<'a, 'tcx> { } } - let def_id = self.tcx.hir.local_def_id(id); - self.index.stab_map.insert(def_id, Some(stab)); + let hir_id = self.tcx.hir.node_to_hir_id(id); + self.index.stab_map.insert(hir_id, stab); let orig_parent_stab = replace(&mut self.parent_stab, Some(stab)); visit_children(self); @@ -188,8 +195,8 @@ impl<'a, 'tcx: 'a> Annotator<'a, 'tcx> { debug!("annotate: not found, parent = {:?}", self.parent_stab); if let Some(stab) = self.parent_stab { if stab.level.is_unstable() { - let def_id = self.tcx.hir.local_def_id(id); - self.index.stab_map.insert(def_id, Some(stab)); + let hir_id = self.tcx.hir.node_to_hir_id(id); + self.index.stab_map.insert(hir_id, stab); } } visit_children(self); @@ -209,8 +216,8 @@ impl<'a, 'tcx: 'a> Annotator<'a, 'tcx> { // -Zforce-unstable-if-unmarked is set. if let Some(stab) = self.parent_stab { if stab.level.is_unstable() { - let def_id = self.tcx.hir.local_def_id(id); - self.index.stab_map.insert(def_id, Some(stab)); + let hir_id = self.tcx.hir.node_to_hir_id(id); + self.index.stab_map.insert(hir_id, stab); } } @@ -220,16 +227,17 @@ impl<'a, 'tcx: 'a> Annotator<'a, 'tcx> { } // `Deprecation` is just two pointers, no need to intern it - let def_id = self.tcx.hir.local_def_id(id); - let depr_entry = Some(DeprecationEntry::local(depr, def_id)); - self.index.depr_map.insert(def_id, depr_entry.clone()); + let hir_id = self.tcx.hir.node_to_hir_id(id); + let depr_entry = DeprecationEntry::local(depr, hir_id); + self.index.depr_map.insert(hir_id, depr_entry.clone()); - let orig_parent_depr = replace(&mut self.parent_depr, depr_entry); + let orig_parent_depr = replace(&mut self.parent_depr, + Some(depr_entry)); visit_children(self); self.parent_depr = orig_parent_depr; - } else if let parent_depr @ Some(_) = self.parent_depr.clone() { - let def_id = self.tcx.hir.local_def_id(id); - self.index.depr_map.insert(def_id, parent_depr); + } else if let Some(parent_depr) = self.parent_depr.clone() { + let hir_id = self.tcx.hir.node_to_hir_id(id); + self.index.depr_map.insert(hir_id, parent_depr); visit_children(self); } else { visit_children(self); @@ -322,10 +330,10 @@ struct MissingStabilityAnnotations<'a, 'tcx: 'a> { impl<'a, 'tcx: 'a> MissingStabilityAnnotations<'a, 'tcx> { fn check_missing_stability(&self, id: NodeId, span: Span) { - let def_id = self.tcx.hir.local_def_id(id); - let stab = self.tcx.stability.borrow().stab_map.get(&def_id).cloned(); + let hir_id = self.tcx.hir.node_to_hir_id(id); + let stab = self.tcx.stability().local_stability(hir_id); let is_error = !self.tcx.sess.opts.test && - (stab == None || stab == Some(None)) && + stab.is_none() && self.access_levels.is_reachable(id); if is_error { self.tcx.sess.span_err(span, "This node does not have a stability attribute"); @@ -386,60 +394,71 @@ impl<'a, 'tcx> Visitor<'tcx> for MissingStabilityAnnotations<'a, 'tcx> { } impl<'a, 'tcx> Index<'tcx> { - /// Construct the stability index for a crate being compiled. 
- pub fn build(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>) { + pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Index<'tcx> { + let is_staged_api = + tcx.sess.opts.debugging_opts.force_unstable_if_unmarked || + tcx.sess.features.borrow().staged_api; + let mut staged_api = FxHashMap(); + staged_api.insert(LOCAL_CRATE, is_staged_api); + let mut index = Index { + staged_api, + stab_map: FxHashMap(), + depr_map: FxHashMap(), + active_features: FxHashSet(), + }; + let ref active_lib_features = tcx.sess.features.borrow().declared_lib_features; // Put the active features into a map for quick lookup - self.active_features = active_lib_features.iter().map(|&(ref s, _)| s.clone()).collect(); + index.active_features = active_lib_features.iter().map(|&(ref s, _)| s.clone()).collect(); + + { + let krate = tcx.hir.krate(); + let mut annotator = Annotator { + tcx, + index: &mut index, + parent_stab: None, + parent_depr: None, + in_trait_impl: false, + }; - let krate = tcx.hir.krate(); - let mut annotator = Annotator { - tcx, - index: self, - parent_stab: None, - parent_depr: None, - in_trait_impl: false, - }; + // If the `-Z force-unstable-if-unmarked` flag is passed then we provide + // a parent stability annotation which indicates that this is private + // with the `rustc_private` feature. This is intended for use when + // compiling librustc crates themselves so we can leverage crates.io + // while maintaining the invariant that all sysroot crates are unstable + // by default and are unable to be used. + if tcx.sess.opts.debugging_opts.force_unstable_if_unmarked { + let reason = "this crate is being loaded from the sysroot, an \ + unstable location; did you mean to load this crate \ + from crates.io via `Cargo.toml` instead?"; + let stability = tcx.intern_stability(Stability { + level: attr::StabilityLevel::Unstable { + reason: Some(Symbol::intern(reason)), + issue: 27812, + }, + feature: Symbol::intern("rustc_private"), + rustc_depr: None, + rustc_const_unstable: None, + }); + annotator.parent_stab = Some(stability); + } - // If the `-Z force-unstable-if-unmarked` flag is passed then we provide - // a parent stability annotation which indicates that this is private - // with the `rustc_private` feature. This is intended for use when - // compiling librustc crates themselves so we can leverage crates.io - // while maintaining the invariant that all sysroot crates are unstable - // by default and are unable to be used. 
- if tcx.sess.opts.debugging_opts.force_unstable_if_unmarked { - let reason = "this crate is being loaded from the sysroot, and \ - unstable location; did you mean to load this crate \ - from crates.io via `Cargo.toml` instead?"; - let stability = tcx.intern_stability(Stability { - level: attr::StabilityLevel::Unstable { - reason: Some(Symbol::intern(reason)), - issue: 27812, - }, - feature: Symbol::intern("rustc_private"), - rustc_depr: None, - }); - annotator.parent_stab = Some(stability); + annotator.annotate(ast::CRATE_NODE_ID, + &krate.attrs, + krate.span, + AnnotationKind::Required, + |v| intravisit::walk_crate(v, krate)); } + return index + } - annotator.annotate(ast::CRATE_NODE_ID, &krate.attrs, krate.span, AnnotationKind::Required, - |v| intravisit::walk_crate(v, krate)); + pub fn local_stability(&self, id: HirId) -> Option<&'tcx Stability> { + self.stab_map.get(&id).cloned() } - pub fn new(sess: &Session) -> Index<'tcx> { - let is_staged_api = - sess.opts.debugging_opts.force_unstable_if_unmarked || - sess.features.borrow().staged_api; - let mut staged_api = FxHashMap(); - staged_api.insert(LOCAL_CRATE, is_staged_api); - Index { - staged_api, - stab_map: DefIdMap(), - depr_map: DefIdMap(), - active_features: FxHashSet(), - used_features: FxHashMap(), - } + pub fn local_deprecation_entry(&self, id: HirId) -> Option { + self.depr_map.get(&id).cloned() } } @@ -476,7 +495,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { _ => {} } - let visibility = self.sess.cstore.visibility(def_id); + let visibility = self.visibility(def_id); match visibility { // must check stability for pub items. @@ -547,10 +566,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { return } - if let Some(&Stability { ref level, ref feature, .. }) = stability { - self.stability.borrow_mut().used_features.insert(feature.clone(), level.clone()); - } - // Issue 38412: private items lack stability markers. if self.skip_stability_check_due_to_privacy(def_id) { return @@ -558,7 +573,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { match stability { Some(&Stability { level: attr::Unstable {ref reason, issue}, ref feature, .. }) => { - if self.stability.borrow().active_features.contains(feature) { + if self.stability().active_features.contains(feature) { return } @@ -610,7 +625,8 @@ impl<'a, 'tcx> Visitor<'tcx> for Checker<'a, 'tcx> { // compiler-generated `extern crate` items have a dummy span. if item.span == DUMMY_SP { return } - let cnum = match self.tcx.sess.cstore.extern_mod_stmt_cnum(item.id) { + let def_id = self.tcx.hir.local_def_id(item.id); + let cnum = match self.tcx.extern_mod_stmt_cnum(def_id) { Some(cnum) => cnum, None => return, }; @@ -663,6 +679,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Checker<'a, 'tcx> { fn visit_path(&mut self, path: &'tcx hir::Path, id: ast::NodeId) { match path.def { + Def::Local(..) | Def::Upvar(..) | Def::PrimTy(..) | Def::SelfTy(..) | Def::Err => {} _ => self.tcx.check_stability(path.def.def_id(), id, path.span) } @@ -671,49 +688,9 @@ impl<'a, 'tcx> Visitor<'tcx> for Checker<'a, 'tcx> { } impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { - /// Lookup the stability for a node, loading external crate - /// metadata as necessary. 
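The decision `check_stability` makes for unstable items, now reading from `self.stability().active_features`, boils down to a membership test. Here is a self-contained sketch with assumed types; `rustc_private` and issue 27812 are taken from the patch, while the second feature name and its issue number are made up for the example.

```rust
use std::collections::HashSet;

#[derive(Clone, Copy)]
enum Level { Stable, Unstable { feature: &'static str, issue: u32 } }

fn check(level: Level, active_features: &HashSet<&str>) -> Result<(), String> {
    match level {
        Level::Stable => Ok(()),
        // The crate opted in with `#![feature(...)]`, so the use is allowed.
        Level::Unstable { feature, .. } if active_features.contains(feature) => Ok(()),
        // Otherwise a feature-gate error is reported.
        Level::Unstable { feature, issue } => Err(format!(
            "use of unstable library feature '{}' (see issue #{})", feature, issue)),
    }
}

fn main() {
    let mut active = HashSet::new();
    active.insert("rustc_private");
    assert!(check(Level::Stable, &active).is_ok());
    assert!(check(Level::Unstable { feature: "rustc_private", issue: 27812 }, &active).is_ok());
    assert!(check(Level::Unstable { feature: "demo_feature", issue: 12345 }, &active).is_err());
}
```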
- pub fn lookup_stability(self, id: DefId) -> Option<&'gcx Stability> { - if let Some(st) = self.stability.borrow().stab_map.get(&id) { - return *st; - } - - let st = self.lookup_stability_uncached(id); - self.stability.borrow_mut().stab_map.insert(id, st); - st - } - pub fn lookup_deprecation(self, id: DefId) -> Option { self.lookup_deprecation_entry(id).map(|depr| depr.attr) } - - pub fn lookup_deprecation_entry(self, id: DefId) -> Option { - if let Some(depr) = self.stability.borrow().depr_map.get(&id) { - return depr.clone(); - } - - let depr = self.lookup_deprecation_uncached(id); - self.stability.borrow_mut().depr_map.insert(id, depr.clone()); - depr - } - - fn lookup_stability_uncached(self, id: DefId) -> Option<&'gcx Stability> { - debug!("lookup(id={:?})", id); - if id.is_local() { - None // The stability cache is filled partially lazily - } else { - self.stability(id).map(|st| self.intern_stability(st)) - } - } - - fn lookup_deprecation_uncached(self, id: DefId) -> Option { - debug!("lookup(id={:?})", id); - if id.is_local() { - None // The stability cache is filled partially lazily - } else { - self.deprecation(id).map(DeprecationEntry::external) - } - } } /// Given the list of enabled features that were not language features (i.e. that @@ -724,7 +701,7 @@ pub fn check_unused_or_stable_features<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { let access_levels = &tcx.privacy_access_levels(LOCAL_CRATE); - if tcx.stability.borrow().staged_api[&LOCAL_CRATE] { + if tcx.stability().staged_api[&LOCAL_CRATE] { let krate = tcx.hir.krate(); let mut missing = MissingStabilityAnnotations { tcx, @@ -740,10 +717,6 @@ pub fn check_unused_or_stable_features<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { = declared_lib_features.clone().into_iter().collect(); remaining_lib_features.remove(&Symbol::intern("proc_macro")); - fn format_stable_since_msg(version: &str) -> String { - format!("this feature has been stable since {}. Attribute no longer needed", version) - } - for &(ref stable_lang_feature, span) in &sess.features.borrow().declared_stable_lang_features { let version = find_lang_feature_accepted_version(&stable_lang_feature.as_str()) .expect("unexpectedly couldn't find version feature was stabilized"); @@ -753,25 +726,23 @@ pub fn check_unused_or_stable_features<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { &format_stable_since_msg(version)); } - let index = tcx.stability.borrow(); - for (used_lib_feature, level) in &index.used_features { - match remaining_lib_features.remove(used_lib_feature) { - Some(span) => { - if let &attr::StabilityLevel::Stable { since: ref version } = level { - tcx.lint_node(lint::builtin::STABLE_FEATURES, - ast::CRATE_NODE_ID, - span, - &format_stable_since_msg(&version.as_str())); - } - } - None => ( /* used but undeclared, handled during the previous ast visit */ ) - } - } + // FIXME(#44232) the `used_features` table no longer exists, so we don't + // lint about unknown or unused features. We should reenable + // this one day! 
+ // + // let index = tcx.stability(); + // for (used_lib_feature, level) in &index.used_features { + // remaining_lib_features.remove(used_lib_feature); + // } + // + // for &span in remaining_lib_features.values() { + // tcx.lint_node(lint::builtin::UNUSED_FEATURES, + // ast::CRATE_NODE_ID, + // span, + // "unused or unknown feature"); + // } +} - for &span in remaining_lib_features.values() { - tcx.lint_node(lint::builtin::UNUSED_FEATURES, - ast::CRATE_NODE_ID, - span, - "unused or unknown feature"); - } +fn format_stable_since_msg(version: &str) -> String { + format!("this feature has been stable since {}. Attribute no longer needed", version) } diff --git a/src/librustc/middle/trans.rs b/src/librustc/middle/trans.rs new file mode 100644 index 0000000000..7744c9c3d1 --- /dev/null +++ b/src/librustc/middle/trans.rs @@ -0,0 +1,189 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use syntax::ast::NodeId; +use syntax::symbol::InternedString; +use ty::Instance; +use util::nodemap::FxHashMap; +use rustc_data_structures::stable_hasher::{HashStable, StableHasherResult, + StableHasher}; +use ich::{Fingerprint, StableHashingContext, NodeIdHashingMode}; + +#[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)] +pub enum TransItem<'tcx> { + Fn(Instance<'tcx>), + Static(NodeId), + GlobalAsm(NodeId), +} + +impl<'tcx> HashStable> for TransItem<'tcx> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'tcx>, + hasher: &mut StableHasher) { + ::std::mem::discriminant(self).hash_stable(hcx, hasher); + + match *self { + TransItem::Fn(ref instance) => { + instance.hash_stable(hcx, hasher); + } + TransItem::Static(node_id) | + TransItem::GlobalAsm(node_id) => { + hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { + node_id.hash_stable(hcx, hasher); + }) + } + } + } +} + +pub struct CodegenUnit<'tcx> { + /// A name for this CGU. Incremental compilation requires that + /// name be unique amongst **all** crates. Therefore, it should + /// contain something unique to this crate (e.g., a module path) + /// as well as the crate name and disambiguator. 
+ name: InternedString, + items: FxHashMap, (Linkage, Visibility)>, +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub enum Linkage { + External, + AvailableExternally, + LinkOnceAny, + LinkOnceODR, + WeakAny, + WeakODR, + Appending, + Internal, + Private, + ExternalWeak, + Common, +} + +impl_stable_hash_for!(enum self::Linkage { + External, + AvailableExternally, + LinkOnceAny, + LinkOnceODR, + WeakAny, + WeakODR, + Appending, + Internal, + Private, + ExternalWeak, + Common +}); + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub enum Visibility { + Default, + Hidden, + Protected, +} + +impl_stable_hash_for!(enum self::Visibility { + Default, + Hidden, + Protected +}); + +impl<'tcx> CodegenUnit<'tcx> { + pub fn new(name: InternedString) -> CodegenUnit<'tcx> { + CodegenUnit { + name: name, + items: FxHashMap(), + } + } + + pub fn name(&self) -> &InternedString { + &self.name + } + + pub fn set_name(&mut self, name: InternedString) { + self.name = name; + } + + pub fn items(&self) -> &FxHashMap, (Linkage, Visibility)> { + &self.items + } + + pub fn items_mut(&mut self) + -> &mut FxHashMap, (Linkage, Visibility)> + { + &mut self.items + } +} + +impl<'tcx> HashStable> for CodegenUnit<'tcx> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'tcx>, + hasher: &mut StableHasher) { + let CodegenUnit { + ref items, + name, + } = *self; + + name.hash_stable(hcx, hasher); + + let mut items: Vec<(Fingerprint, _)> = items.iter().map(|(trans_item, &attrs)| { + let mut hasher = StableHasher::new(); + trans_item.hash_stable(hcx, &mut hasher); + let trans_item_fingerprint = hasher.finish(); + (trans_item_fingerprint, attrs) + }).collect(); + + items.sort_unstable_by_key(|i| i.0); + items.hash_stable(hcx, hasher); + } +} + +#[derive(Clone, Default)] +pub struct Stats { + pub n_glues_created: usize, + pub n_null_glues: usize, + pub n_real_glues: usize, + pub n_fns: usize, + pub n_inlines: usize, + pub n_closures: usize, + pub n_llvm_insns: usize, + pub llvm_insns: FxHashMap, + // (ident, llvm-instructions) + pub fn_stats: Vec<(String, usize)>, +} + +impl_stable_hash_for!(struct self::Stats { + n_glues_created, + n_null_glues, + n_real_glues, + n_fns, + n_inlines, + n_closures, + n_llvm_insns, + llvm_insns, + fn_stats +}); + +impl Stats { + pub fn extend(&mut self, stats: Stats) { + self.n_glues_created += stats.n_glues_created; + self.n_null_glues += stats.n_null_glues; + self.n_real_glues += stats.n_real_glues; + self.n_fns += stats.n_fns; + self.n_inlines += stats.n_inlines; + self.n_closures += stats.n_closures; + self.n_llvm_insns += stats.n_llvm_insns; + + for (k, v) in stats.llvm_insns { + *self.llvm_insns.entry(k).or_insert(0) += v; + } + self.fn_stats.extend(stats.fn_stats); + } +} + diff --git a/src/librustc/middle/weak_lang_items.rs b/src/librustc/middle/weak_lang_items.rs index acb506878e..50fb584070 100644 --- a/src/librustc/middle/weak_lang_items.rs +++ b/src/librustc/middle/weak_lang_items.rs @@ -11,7 +11,6 @@ //! Validity checking for weak lang items use session::config; -use session::Session; use middle::lang_items; use rustc_back::PanicStrategy; @@ -21,38 +20,38 @@ use syntax_pos::Span; use hir::intravisit::{Visitor, NestedVisitorMap}; use hir::intravisit; use hir; +use ty::TyCtxt; use std::collections::HashSet; macro_rules! 
weak_lang_items { ($($name:ident, $item:ident, $sym:ident;)*) => ( -struct Context<'a> { - sess: &'a Session, +struct Context<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, items: &'a mut lang_items::LanguageItems, } /// Checks the crate for usage of weak lang items, returning a vector of all the /// language items required by this crate, but not defined yet. -pub fn check_crate(krate: &hir::Crate, - sess: &Session, - items: &mut lang_items::LanguageItems) { +pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + items: &mut lang_items::LanguageItems) { // These are never called by user code, they're generated by the compiler. // They will never implicitly be added to the `missing` array unless we do // so here. if items.eh_personality().is_none() { items.missing.push(lang_items::EhPersonalityLangItem); } - if sess.target.target.options.custom_unwind_resume & + if tcx.sess.target.target.options.custom_unwind_resume & items.eh_unwind_resume().is_none() { items.missing.push(lang_items::EhUnwindResumeLangItem); } { - let mut cx = Context { sess: sess, items: items }; - krate.visit_all_item_likes(&mut cx.as_deep_visitor()); + let mut cx = Context { tcx, items }; + tcx.hir.krate().visit_all_item_likes(&mut cx.as_deep_visitor()); } - verify(sess, items); + verify(tcx, items); } pub fn link_name(attrs: &[ast::Attribute]) -> Option { @@ -65,10 +64,11 @@ pub fn link_name(attrs: &[ast::Attribute]) -> Option { }) } -fn verify(sess: &Session, items: &lang_items::LanguageItems) { +fn verify<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + items: &lang_items::LanguageItems) { // We only need to check for the presence of weak lang items if we're // emitting something that's not an rlib. - let needs_check = sess.crate_types.borrow().iter().any(|kind| { + let needs_check = tcx.sess.crate_types.borrow().iter().any(|kind| { match *kind { config::CrateTypeDylib | config::CrateTypeProcMacro | @@ -83,8 +83,8 @@ fn verify(sess: &Session, items: &lang_items::LanguageItems) { } let mut missing = HashSet::new(); - for cnum in sess.cstore.crates() { - for item in sess.cstore.missing_lang_items(cnum) { + for &cnum in tcx.crates().iter() { + for &item in tcx.missing_lang_items(cnum).iter() { missing.insert(item); } } @@ -93,7 +93,7 @@ fn verify(sess: &Session, items: &lang_items::LanguageItems) { // symbols. Other panic runtimes ensure that the relevant symbols are // available to link things together, but they're never exercised. 
let mut whitelisted = HashSet::new(); - if sess.panic_strategy() != PanicStrategy::Unwind { + if tcx.sess.panic_strategy() != PanicStrategy::Unwind { whitelisted.insert(lang_items::EhPersonalityLangItem); whitelisted.insert(lang_items::EhUnwindResumeLangItem); } @@ -102,28 +102,28 @@ fn verify(sess: &Session, items: &lang_items::LanguageItems) { if missing.contains(&lang_items::$item) && !whitelisted.contains(&lang_items::$item) && items.$name().is_none() { - sess.err(&format!("language item required, but not found: `{}`", - stringify!($name))); + tcx.sess.err(&format!("language item required, but not found: `{}`", + stringify!($name))); } )* } -impl<'a> Context<'a> { +impl<'a, 'tcx> Context<'a, 'tcx> { fn register(&mut self, name: &str, span: Span) { $(if name == stringify!($name) { if self.items.$name().is_none() { self.items.missing.push(lang_items::$item); } } else)* { - span_err!(self.sess, span, E0264, + span_err!(self.tcx.sess, span, E0264, "unknown external lang item: `{}`", name); } } } -impl<'a, 'v> Visitor<'v> for Context<'a> { +impl<'a, 'tcx, 'v> Visitor<'v> for Context<'a, 'tcx> { fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> { NestedVisitorMap::None } diff --git a/src/librustc/mir/cache.rs b/src/librustc/mir/cache.rs index 73c702fedb..efc2f647cf 100644 --- a/src/librustc/mir/cache.rs +++ b/src/librustc/mir/cache.rs @@ -35,9 +35,9 @@ impl serialize::Decodable for Cache { } } -impl<'a, 'gcx, 'tcx> HashStable> for Cache { +impl<'gcx> HashStable> for Cache { fn hash_stable(&self, - _: &mut StableHashingContext<'a, 'gcx, 'tcx>, + _: &mut StableHashingContext<'gcx>, _: &mut StableHasher) { // do nothing } diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index 74ce68b351..75efe6eb90 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -12,16 +12,17 @@ use graphviz::IntoCow; use middle::const_val::ConstVal; -use middle::region::CodeExtent; +use middle::region; use rustc_const_math::{ConstUsize, ConstInt, ConstMathErr}; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use rustc_data_structures::control_flow_graph::dominators::{Dominators, dominators}; use rustc_data_structures::control_flow_graph::{GraphPredecessors, GraphSuccessors}; use rustc_data_structures::control_flow_graph::ControlFlowGraph; +use rustc_serialize as serialize; use hir::def::CtorKind; use hir::def_id::DefId; use ty::subst::{Subst, Substs}; -use ty::{self, AdtDef, ClosureSubsts, Region, Ty}; +use ty::{self, AdtDef, ClosureSubsts, Region, Ty, TyCtxt, GeneratorInterior}; use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; use util::ppaux; use rustc_back::slice; @@ -32,8 +33,9 @@ use std::cell::Ref; use std::fmt::{self, Debug, Formatter, Write}; use std::{iter, u32}; use std::ops::{Index, IndexMut}; +use std::rc::Rc; use std::vec::IntoIter; -use syntax::ast::Name; +use syntax::ast::{self, Name}; use syntax_pos::Span; mod cache; @@ -42,30 +44,6 @@ pub mod visit; pub mod transform; pub mod traversal; -macro_rules! 
newtype_index { - ($name:ident, $debug_name:expr) => ( - #[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, - RustcEncodable, RustcDecodable)] - pub struct $name(u32); - - impl Idx for $name { - fn new(value: usize) -> Self { - assert!(value < (u32::MAX) as usize); - $name(value as u32) - } - fn index(self) -> usize { - self.0 as usize - } - } - - impl Debug for $name { - fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { - write!(fmt, "{}{}", $debug_name, self.0) - } - } - ) -} - /// Types for locals type LocalDecls<'tcx> = IndexVec>; @@ -96,6 +74,10 @@ pub struct Mir<'tcx> { /// and used (eventually) for debuginfo. Indexed by a `VisibilityScope`. pub visibility_scopes: IndexVec, + /// Crate-local information for each visibility scope, that can't (and + /// needn't) be tracked across crates. + pub visibility_scope_info: ClearOnDecode>, + /// Rvalues promoted from this function, such as borrows of constants. /// Each of them is the Mir of a constant with the fn's type parameters /// in scope, but a separate set of locals. @@ -104,6 +86,15 @@ pub struct Mir<'tcx> { /// Return type of the function. pub return_ty: Ty<'tcx>, + /// Yield type of the function, if it is a generator. + pub yield_ty: Option>, + + /// Generator drop glue + pub generator_drop: Option>>, + + /// The layout of a generator. Produced by the state transformation. + pub generator_layout: Option>, + /// Declarations of locals. /// /// The first local is the return value pointer, followed by `arg_count` @@ -142,8 +133,11 @@ pub const START_BLOCK: BasicBlock = BasicBlock(0); impl<'tcx> Mir<'tcx> { pub fn new(basic_blocks: IndexVec>, visibility_scopes: IndexVec, + visibility_scope_info: ClearOnDecode>, promoted: IndexVec>, return_ty: Ty<'tcx>, + yield_ty: Option>, local_decls: IndexVec>, arg_count: usize, upvar_decls: Vec, @@ -157,8 +151,12 @@ impl<'tcx> Mir<'tcx> { Mir { basic_blocks, visibility_scopes, + visibility_scope_info, promoted, return_ty, + yield_ty, + generator_drop: None, + generator_layout: None, local_decls, arg_count, upvar_decls, @@ -263,13 +261,49 @@ impl<'tcx> Mir<'tcx> { debug_assert!(location.statement_index < block.statements.len()); block.statements[location.statement_index].make_nop() } + + /// Returns the source info associated with `location`. + pub fn source_info(&self, location: Location) -> &SourceInfo { + let block = &self[location.block]; + let stmts = &block.statements; + let idx = location.statement_index; + if location.statement_index < stmts.len() { + &stmts[idx].source_info + } else { + assert!(location.statement_index == stmts.len()); + &block.terminator().source_info + } + } +} + +#[derive(Clone, Debug)] +pub struct VisibilityScopeInfo { + /// A NodeId with lint levels equivalent to this scope's lint levels. + pub lint_root: ast::NodeId, + /// The unsafe block that contains this node. 
+ pub safety: Safety, +} + +#[derive(Copy, Clone, Debug)] +pub enum Safety { + Safe, + /// Unsafe because of a PushUnsafeBlock + BuiltinUnsafe, + /// Unsafe because of an unsafe fn + FnUnsafe, + /// Unsafe because of an `unsafe` block + ExplicitUnsafe(ast::NodeId) } impl_stable_hash_for!(struct Mir<'tcx> { basic_blocks, visibility_scopes, + visibility_scope_info, promoted, return_ty, + yield_ty, + generator_drop, + generator_layout, local_decls, arg_count, upvar_decls, @@ -294,10 +328,28 @@ impl<'tcx> IndexMut for Mir<'tcx> { } } +#[derive(Clone, Debug)] +pub enum ClearOnDecode { + Clear, + Set(T) +} + +impl serialize::Encodable for ClearOnDecode { + fn encode(&self, s: &mut S) -> Result<(), S::Error> { + serialize::Encodable::encode(&(), s) + } +} + +impl serialize::Decodable for ClearOnDecode { + fn decode(d: &mut D) -> Result { + serialize::Decodable::decode(d).map(|()| ClearOnDecode::Clear) + } +} + /// Grouped information about the source code origin of a MIR entity. /// Intended to be inspected by diagnostics and debuginfo. /// Most passes can work with it as a whole, within a single function. -#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)] pub struct SourceInfo { /// Source span for the AST pertaining to this MIR entity. pub span: Span, @@ -395,6 +447,25 @@ pub struct LocalDecl<'tcx> { /// True if this corresponds to a user-declared local variable. pub is_user_variable: bool, + /// True if this is an internal local + /// + /// These locals are not based on types in the source code and are only used + /// for a few desugarings at the moment. + /// + /// The generator transformation will sanity check the locals which are live + /// across a suspension point against the type components of the generator + /// which type checking knows are live across a suspension point. We need to + /// flag drop flags to avoid triggering this check as they are introduced + /// after typeck. + /// + /// Unsafety checking will also ignore dereferences of these locals, + /// so they can be used for raw pointers only used in a desugaring. + /// + /// This should be sound because the drop flags are fully algebraic, and + /// therefore don't affect the OIBIT or outlives properties of the + /// generator. + pub internal: bool, + /// Type of this local. pub ty: Ty<'tcx>, @@ -406,6 +477,12 @@ pub struct LocalDecl<'tcx> { /// Source info of the local. pub source_info: SourceInfo, + + /// The *lexical* visibility scope the local is defined + /// in. If the local was defined in a let-statement, this + /// is *within* the let-statement, rather than outside + /// of it. + pub lexical_scope: VisibilityScope, } impl<'tcx> LocalDecl<'tcx> { @@ -420,6 +497,25 @@ impl<'tcx> LocalDecl<'tcx> { span, scope: ARGUMENT_VISIBILITY_SCOPE }, + lexical_scope: ARGUMENT_VISIBILITY_SCOPE, + internal: false, + is_user_variable: false + } + } + + /// Create a new `LocalDecl` for a internal temporary. + #[inline] + pub fn new_internal(ty: Ty<'tcx>, span: Span) -> Self { + LocalDecl { + mutability: Mutability::Mut, + ty, + name: None, + source_info: SourceInfo { + span, + scope: ARGUMENT_VISIBILITY_SCOPE + }, + lexical_scope: ARGUMENT_VISIBILITY_SCOPE, + internal: true, is_user_variable: false } } @@ -436,6 +532,8 @@ impl<'tcx> LocalDecl<'tcx> { span, scope: ARGUMENT_VISIBILITY_SCOPE }, + lexical_scope: ARGUMENT_VISIBILITY_SCOPE, + internal: false, name: None, // FIXME maybe we do want some name here? 
is_user_variable: false } @@ -567,7 +665,20 @@ pub enum TerminatorKind<'tcx> { msg: AssertMessage<'tcx>, target: BasicBlock, cleanup: Option - } + }, + + /// A suspend point + Yield { + /// The value to return + value: Operand<'tcx>, + /// Where to resume to + resume: BasicBlock, + /// Cleanup to be done if the generator is dropped at this suspend point + drop: Option, + }, + + /// Indicates the end of the dropping of a generator + GeneratorDrop, } impl<'tcx> Terminator<'tcx> { @@ -581,7 +692,7 @@ impl<'tcx> Terminator<'tcx> { } impl<'tcx> TerminatorKind<'tcx> { - pub fn if_<'a, 'gcx>(tcx: ty::TyCtxt<'a, 'gcx, 'tcx>, cond: Operand<'tcx>, + pub fn if_<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, cond: Operand<'tcx>, t: BasicBlock, f: BasicBlock) -> TerminatorKind<'tcx> { static BOOL_SWITCH_FALSE: &'static [ConstInt] = &[ConstInt::U8(0)]; TerminatorKind::SwitchInt { @@ -597,7 +708,7 @@ impl<'tcx> TerminatorKind<'tcx> { match *self { Goto { target: ref b } => slice::ref_slice(b).into_cow(), SwitchInt { targets: ref b, .. } => b[..].into_cow(), - Resume => (&[]).into_cow(), + Resume | GeneratorDrop => (&[]).into_cow(), Return => (&[]).into_cow(), Unreachable => (&[]).into_cow(), Call { destination: Some((_, t)), cleanup: Some(c), .. } => vec![t, c].into_cow(), @@ -605,6 +716,8 @@ impl<'tcx> TerminatorKind<'tcx> { slice::ref_slice(t).into_cow(), Call { destination: None, cleanup: Some(ref c), .. } => slice::ref_slice(c).into_cow(), Call { destination: None, cleanup: None, .. } => (&[]).into_cow(), + Yield { resume: t, drop: Some(c), .. } => vec![t, c].into_cow(), + Yield { resume: ref t, drop: None, .. } => slice::ref_slice(t).into_cow(), DropAndReplace { target, unwind: Some(unwind), .. } | Drop { target, unwind: Some(unwind), .. } => { vec![target, unwind].into_cow() @@ -625,13 +738,15 @@ impl<'tcx> TerminatorKind<'tcx> { match *self { Goto { target: ref mut b } => vec![b], SwitchInt { targets: ref mut b, .. } => b.iter_mut().collect(), - Resume => Vec::new(), + Resume | GeneratorDrop => Vec::new(), Return => Vec::new(), Unreachable => Vec::new(), Call { destination: Some((_, ref mut t)), cleanup: Some(ref mut c), .. } => vec![t, c], Call { destination: Some((_, ref mut t)), cleanup: None, .. } => vec![t], Call { destination: None, cleanup: Some(ref mut c), .. } => vec![c], Call { destination: None, cleanup: None, .. } => vec![], + Yield { resume: ref mut t, drop: Some(ref mut c), .. } => vec![t, c], + Yield { resume: ref mut t, drop: None, .. } => vec![t], DropAndReplace { ref mut target, unwind: Some(ref mut unwind), .. } | Drop { ref mut target, unwind: Some(ref mut unwind), .. } => vec![target, unwind], DropAndReplace { ref mut target, unwind: None, .. } | @@ -664,6 +779,14 @@ impl<'tcx> BasicBlockData<'tcx> { pub fn terminator_mut(&mut self) -> &mut Terminator<'tcx> { self.terminator.as_mut().expect("invalid terminator state") } + + pub fn retain_statements(&mut self, mut f: F) where F: FnMut(&mut Statement) -> bool { + for s in &mut self.statements { + if !f(s) { + s.kind = StatementKind::Nop; + } + } + } } impl<'tcx> Debug for TerminatorKind<'tcx> { @@ -703,7 +826,9 @@ impl<'tcx> TerminatorKind<'tcx> { Goto { .. } => write!(fmt, "goto"), SwitchInt { discr: ref lv, .. } => write!(fmt, "switchInt({:?})", lv), Return => write!(fmt, "return"), + GeneratorDrop => write!(fmt, "generator_drop"), Resume => write!(fmt, "resume"), + Yield { ref value, .. } => write!(fmt, "_1 = suspend({:?})", value), Unreachable => write!(fmt, "unreachable"), Drop { ref location, .. 
} => write!(fmt, "drop({:?})", location), DropAndReplace { ref location, ref value, .. } => @@ -737,6 +862,12 @@ impl<'tcx> TerminatorKind<'tcx> { AssertMessage::Math(ref err) => { write!(fmt, "{:?}", err.description())?; } + AssertMessage::GeneratorResumedAfterReturn => { + write!(fmt, "{:?}", "generator resumed after completion")?; + } + AssertMessage::GeneratorResumedAfterPanic => { + write!(fmt, "{:?}", "generator resumed after panicking")?; + } } write!(fmt, ")") @@ -748,7 +879,7 @@ impl<'tcx> TerminatorKind<'tcx> { pub fn fmt_successor_labels(&self) -> Vec> { use self::TerminatorKind::*; match *self { - Return | Resume | Unreachable => vec![], + Return | Resume | Unreachable | GeneratorDrop => vec![], Goto { .. } => vec!["".into()], SwitchInt { ref values, .. } => { values.iter() @@ -765,6 +896,9 @@ impl<'tcx> TerminatorKind<'tcx> { Call { destination: Some(_), cleanup: None, .. } => vec!["return".into_cow()], Call { destination: None, cleanup: Some(_), .. } => vec!["unwind".into_cow()], Call { destination: None, cleanup: None, .. } => vec![], + Yield { drop: Some(_), .. } => + vec!["resume".into_cow(), "drop".into_cow()], + Yield { drop: None, .. } => vec!["resume".into_cow()], DropAndReplace { unwind: None, .. } | Drop { unwind: None, .. } => vec!["return".into_cow()], DropAndReplace { unwind: Some(_), .. } | @@ -784,7 +918,9 @@ pub enum AssertMessage<'tcx> { len: Operand<'tcx>, index: Operand<'tcx> }, - Math(ConstMathErr) + Math(ConstMathErr), + GeneratorResumedAfterReturn, + GeneratorResumedAfterPanic, } /////////////////////////////////////////////////////////////////////////// @@ -813,10 +949,10 @@ pub enum StatementKind<'tcx> { SetDiscriminant { lvalue: Lvalue<'tcx>, variant_index: usize }, /// Start a live range for the storage of the local. - StorageLive(Lvalue<'tcx>), + StorageLive(Local), /// End the current live range for the storage of the local. - StorageDead(Lvalue<'tcx>), + StorageDead(Local), /// Execute a piece of inline Assembly. InlineAsm { @@ -830,9 +966,9 @@ pub enum StatementKind<'tcx> { /// See for more details. Validate(ValidationOp, Vec>>), - /// Mark one terminating point of an extent (i.e. static region). + /// Mark one terminating point of a region scope (i.e. static region). /// (The starting point(s) arise implicitly from borrows.) - EndRegion(CodeExtent), + EndRegion(region::Scope), /// No-op. Useful for deleting instructions without affecting statement indices. Nop, @@ -851,7 +987,7 @@ pub enum ValidationOp { Release, /// Recursive traverse the *mutable* part of the type and relinquish all exclusive /// access *until* the given region ends. Then, access will be recovered. - Suspend(CodeExtent), + Suspend(region::Scope), } impl Debug for ValidationOp { @@ -871,7 +1007,7 @@ impl Debug for ValidationOp { pub struct ValidationOperand<'tcx, T> { pub lval: T, pub ty: Ty<'tcx>, - pub re: Option, + pub re: Option, pub mutbl: hir::Mutability, } @@ -989,12 +1125,12 @@ pub enum ProjectionElem<'tcx, V, T> { } /// Alias for projections as they appear in lvalues, where the base is an lvalue -/// and the index is an operand. -pub type LvalueProjection<'tcx> = Projection<'tcx, Lvalue<'tcx>, Operand<'tcx>, Ty<'tcx>>; +/// and the index is a local. +pub type LvalueProjection<'tcx> = Projection<'tcx, Lvalue<'tcx>, Local, Ty<'tcx>>; /// Alias for projections as they appear in lvalues, where the base is an lvalue -/// and the index is an operand. -pub type LvalueElem<'tcx> = ProjectionElem<'tcx, Operand<'tcx>, Ty<'tcx>>; +/// and the index is a local. 
+pub type LvalueElem<'tcx> = ProjectionElem<'tcx, Local, Ty<'tcx>>; newtype_index!(Field, "field"); @@ -1011,7 +1147,7 @@ impl<'tcx> Lvalue<'tcx> { self.elem(ProjectionElem::Downcast(adt_def, variant_index)) } - pub fn index(self, index: Operand<'tcx>) -> Lvalue<'tcx> { + pub fn index(self, index: Local) -> Lvalue<'tcx> { self.elem(ProjectionElem::Index(index)) } @@ -1094,15 +1230,21 @@ impl<'tcx> Debug for Operand<'tcx> { impl<'tcx> Operand<'tcx> { pub fn function_handle<'a>( - tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, substs: &'tcx Substs<'tcx>, span: Span, ) -> Self { + let ty = tcx.type_of(def_id).subst(tcx, substs); Operand::Constant(box Constant { span, - ty: tcx.type_of(def_id).subst(tcx, substs), - literal: Literal::Value { value: ConstVal::Function(def_id, substs) }, + ty, + literal: Literal::Value { + value: tcx.mk_const(ty::Const { + val: ConstVal::Function(def_id, substs), + ty + }) + }, }) } @@ -1178,6 +1320,7 @@ pub enum AggregateKind<'tcx> { /// number and is present only for union expressions. Adt(&'tcx AdtDef, usize, &'tcx Substs<'tcx>, Option), Closure(DefId, ClosureSubsts<'tcx>), + Generator(DefId, ClosureSubsts<'tcx>, GeneratorInterior<'tcx>), } #[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] @@ -1327,10 +1470,8 @@ impl<'tcx> Debug for Rvalue<'tcx> { tcx.with_freevars(node_id, |freevars| { for (freevar, lv) in freevars.iter().zip(lvs) { - let def_id = freevar.def.def_id(); - let var_id = tcx.hir.as_local_node_id(def_id).unwrap(); - let var_name = tcx.local_var_name_str(var_id); - struct_fmt.field(&var_name, lv); + let var_name = tcx.hir.name(freevar.var_id()); + struct_fmt.field(&var_name.as_str(), lv); } }); @@ -1339,6 +1480,29 @@ impl<'tcx> Debug for Rvalue<'tcx> { write!(fmt, "[closure]") } }), + + AggregateKind::Generator(def_id, _, _) => ty::tls::with(|tcx| { + if let Some(node_id) = tcx.hir.as_local_node_id(def_id) { + let name = format!("[generator@{:?}]", tcx.hir.span(node_id)); + let mut struct_fmt = fmt.debug_struct(&name); + + tcx.with_freevars(node_id, |freevars| { + for (freevar, lv) in freevars.iter().zip(lvs) { + let var_name = tcx.hir.name(freevar.var_id()); + struct_fmt.field(&var_name.as_str(), lv); + } + struct_fmt.field("$state", &lvs[freevars.len()]); + for i in (freevars.len() + 1)..lvs.len() { + struct_fmt.field(&format!("${}", i - freevars.len() - 1), + &lvs[i]); + } + }); + + struct_fmt.finish() + } else { + write!(fmt, "[generator]") + } + }), } } } @@ -1363,12 +1527,8 @@ newtype_index!(Promoted, "promoted"); #[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub enum Literal<'tcx> { - Item { - def_id: DefId, - substs: &'tcx Substs<'tcx>, - }, Value { - value: ConstVal<'tcx>, + value: &'tcx ty::Const<'tcx>, }, Promoted { // Index into the `promoted` vector of `Mir`. 
@@ -1386,12 +1546,9 @@ impl<'tcx> Debug for Literal<'tcx> { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { use self::Literal::*; match *self { - Item { def_id, substs } => { - ppaux::parameterized(fmt, substs, def_id, &[]) - } - Value { ref value } => { + Value { value } => { write!(fmt, "const ")?; - fmt_const_val(fmt, value) + fmt_const_val(fmt, &value.val) } Promoted { index } => { write!(fmt, "{:?}", index) @@ -1406,9 +1563,9 @@ fn fmt_const_val(fmt: &mut W, const_val: &ConstVal) -> fmt::Result { match *const_val { Float(f) => write!(fmt, "{:?}", f), Integral(n) => write!(fmt, "{}", n), - Str(ref s) => write!(fmt, "{:?}", s), - ByteStr(ref bytes) => { - let escaped: String = bytes + Str(s) => write!(fmt, "{:?}", s), + ByteStr(bytes) => { + let escaped: String = bytes.data .iter() .flat_map(|&ch| ascii::escape_default(ch).map(|c| c as char)) .collect(); @@ -1418,8 +1575,8 @@ fn fmt_const_val(fmt: &mut W, const_val: &ConstVal) -> fmt::Result { Char(c) => write!(fmt, "{:?}", c), Variant(def_id) | Function(def_id, _) => write!(fmt, "{}", item_path_str(def_id)), - Struct(_) | Tuple(_) | Array(_) | Repeat(..) => - bug!("ConstVal `{:?}` should not be in MIR", const_val), + Aggregate(_) => bug!("`ConstVal::{:?}` should not be in MIR", const_val), + Unevaluated(..) => write!(fmt, "{:?}", const_val) } } @@ -1483,6 +1640,27 @@ impl Location { } } +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub struct UnsafetyViolation { + pub source_info: SourceInfo, + pub description: &'static str, + pub lint_node_id: Option, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct UnsafetyCheckResult { + /// Violations that are propagated *upwards* from this function + pub violations: Rc<[UnsafetyViolation]>, + /// unsafe blocks in this function, along with whether they are used. This is + /// used for the "unused_unsafe" lint. 
+ pub unsafe_blocks: Rc<[(ast::NodeId, bool)]>, +} + +/// The layout of generator state +#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +pub struct GeneratorLayout<'tcx> { + pub fields: Vec>, +} /* * TypeFoldable implementations for MIR types @@ -1493,8 +1671,12 @@ impl<'tcx> TypeFoldable<'tcx> for Mir<'tcx> { Mir { basic_blocks: self.basic_blocks.fold_with(folder), visibility_scopes: self.visibility_scopes.clone(), + visibility_scope_info: self.visibility_scope_info.clone(), promoted: self.promoted.fold_with(folder), return_ty: self.return_ty.fold_with(folder), + yield_ty: self.yield_ty.fold_with(folder), + generator_drop: self.generator_drop.fold_with(folder), + generator_layout: self.generator_layout.fold_with(folder), local_decls: self.local_decls.fold_with(folder), arg_count: self.arg_count, upvar_decls: self.upvar_decls.clone(), @@ -1506,12 +1688,27 @@ impl<'tcx> TypeFoldable<'tcx> for Mir<'tcx> { fn super_visit_with>(&self, visitor: &mut V) -> bool { self.basic_blocks.visit_with(visitor) || + self.generator_drop.visit_with(visitor) || + self.generator_layout.visit_with(visitor) || + self.yield_ty.visit_with(visitor) || self.promoted.visit_with(visitor) || self.return_ty.visit_with(visitor) || self.local_decls.visit_with(visitor) } } +impl<'tcx> TypeFoldable<'tcx> for GeneratorLayout<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + GeneratorLayout { + fields: self.fields.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.fields.visit_with(visitor) + } +} + impl<'tcx> TypeFoldable<'tcx> for LocalDecl<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { LocalDecl { @@ -1564,19 +1761,19 @@ impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> { lvalue: lvalue.fold_with(folder), variant_index, }, - StorageLive(ref lval) => StorageLive(lval.fold_with(folder)), - StorageDead(ref lval) => StorageDead(lval.fold_with(folder)), + StorageLive(ref local) => StorageLive(local.fold_with(folder)), + StorageDead(ref local) => StorageDead(local.fold_with(folder)), InlineAsm { ref asm, ref outputs, ref inputs } => InlineAsm { asm: asm.clone(), outputs: outputs.fold_with(folder), inputs: inputs.fold_with(folder) }, - // Note for future: If we want to expose the extents + // Note for future: If we want to expose the region scopes // during the fold, we need to either generalize EndRegion // to carry `[ty::Region]`, or extend the `TypeFolder` - // trait with a `fn fold_extent`. - EndRegion(ref extent) => EndRegion(extent.clone()), + // trait with a `fn fold_scope`. + EndRegion(ref region_scope) => EndRegion(region_scope.clone()), Validate(ref op, ref lvals) => Validate(op.clone(), @@ -1595,17 +1792,17 @@ impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> { match self.kind { Assign(ref lval, ref rval) => { lval.visit_with(visitor) || rval.visit_with(visitor) } - SetDiscriminant { ref lvalue, .. } | - StorageLive(ref lvalue) | - StorageDead(ref lvalue) => lvalue.visit_with(visitor), + SetDiscriminant { ref lvalue, .. } => lvalue.visit_with(visitor), + StorageLive(ref local) | + StorageDead(ref local) => local.visit_with(visitor), InlineAsm { ref outputs, ref inputs, .. 
} => outputs.visit_with(visitor) || inputs.visit_with(visitor), - // Note for future: If we want to expose the extents + // Note for future: If we want to expose the region scopes // during the visit, we need to either generalize EndRegion // to carry `[ty::Region]`, or extend the `TypeVisitor` - // trait with a `fn visit_extent`. - EndRegion(ref _extent) => false, + // trait with a `fn visit_scope`. + EndRegion(ref _scope) => false, Validate(ref _op, ref lvalues) => lvalues.iter().any(|ty_and_lvalue| ty_and_lvalue.visit_with(visitor)), @@ -1638,6 +1835,11 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { target, unwind, }, + Yield { ref value, resume, drop } => Yield { + value: value.fold_with(folder), + resume: resume, + drop: drop, + }, Call { ref func, ref args, ref destination, cleanup } => { let dest = destination.as_ref().map(|&(ref loc, dest)| { (loc.fold_with(folder), dest) @@ -1667,6 +1869,7 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { cleanup, } }, + GeneratorDrop => GeneratorDrop, Resume => Resume, Return => Return, Unreachable => Unreachable, @@ -1686,6 +1889,8 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { Drop { ref location, ..} => location.visit_with(visitor), DropAndReplace { ref location, ref value, ..} => location.visit_with(visitor) || value.visit_with(visitor), + Yield { ref value, ..} => + value.visit_with(visitor), Call { ref func, ref args, ref destination, .. } => { let dest = if let Some((ref loc, _)) = *destination { loc.visit_with(visitor) @@ -1706,6 +1911,7 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { Goto { .. } | Resume | Return | + GeneratorDrop | Unreachable => false } } @@ -1751,7 +1957,11 @@ impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> { AggregateKind::Adt(def, v, substs, n) => AggregateKind::Adt(def, v, substs.fold_with(folder), n), AggregateKind::Closure(id, substs) => - AggregateKind::Closure(id, substs.fold_with(folder)) + AggregateKind::Closure(id, substs.fold_with(folder)), + AggregateKind::Generator(id, substs, interior) => + AggregateKind::Generator(id, + substs.fold_with(folder), + interior.fold_with(folder)), }; Aggregate(kind, fields.fold_with(folder)) } @@ -1777,7 +1987,9 @@ impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> { AggregateKind::Array(ty) => ty.visit_with(visitor), AggregateKind::Tuple => false, AggregateKind::Adt(_, _, substs, _) => substs.visit_with(visitor), - AggregateKind::Closure(_, substs) => substs.visit_with(visitor) + AggregateKind::Closure(_, substs) => substs.visit_with(visitor), + AggregateKind::Generator(_, substs, interior) => substs.visit_with(visitor) || + interior.visit_with(visitor), }) || fields.visit_with(visitor) } } @@ -1848,17 +2060,16 @@ impl<'tcx> TypeFoldable<'tcx> for Constant<'tcx> { impl<'tcx> TypeFoldable<'tcx> for Literal<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { match *self { - Literal::Item { def_id, substs } => Literal::Item { - def_id, - substs: substs.fold_with(folder) + Literal::Value { value } => Literal::Value { + value: value.fold_with(folder) }, - _ => self.clone() + Literal::Promoted { index } => Literal::Promoted { index } } } fn super_visit_with>(&self, visitor: &mut V) -> bool { match *self { - Literal::Item { substs, .. } => substs.visit_with(visitor), - _ => false + Literal::Value { value } => value.visit_with(visitor), + Literal::Promoted { .. 
} => false } } } diff --git a/src/librustc/mir/tcx.rs b/src/librustc/mir/tcx.rs index a801804d50..d645a00e15 100644 --- a/src/librustc/mir/tcx.rs +++ b/src/librustc/mir/tcx.rs @@ -70,7 +70,9 @@ impl<'a, 'gcx, 'tcx> LvalueTy<'tcx> { LvalueTy::Ty { ty: match ty.sty { ty::TyArray(inner, size) => { - tcx.mk_array(inner, size-(from as usize)-(to as usize)) + let size = size.val.to_const_int().unwrap().to_u64().unwrap(); + let len = size - (from as u64) - (to as u64); + tcx.mk_array(inner, len) } ty::TySlice(..) => ty, _ => { @@ -146,11 +148,8 @@ impl<'tcx> Rvalue<'tcx> { { match *self { Rvalue::Use(ref operand) => operand.ty(local_decls, tcx), - Rvalue::Repeat(ref operand, ref count) => { - let op_ty = operand.ty(local_decls, tcx); - let count = count.as_u64(tcx.sess.target.uint_type); - assert_eq!(count as usize as u64, count); - tcx.mk_array(op_ty, count as usize) + Rvalue::Repeat(ref operand, count) => { + tcx.mk_array_const_usize(operand.ty(local_decls, tcx), count) } Rvalue::Ref(reg, bk, ref lv) => { let lv_ty = lv.ty(local_decls, tcx).to_ty(tcx); @@ -193,7 +192,7 @@ impl<'tcx> Rvalue<'tcx> { Rvalue::Aggregate(ref ak, ref ops) => { match **ak { AggregateKind::Array(ty) => { - tcx.mk_array(ty, ops.len()) + tcx.mk_array(ty, ops.len() as u64) } AggregateKind::Tuple => { tcx.mk_tup( @@ -207,6 +206,9 @@ impl<'tcx> Rvalue<'tcx> { AggregateKind::Closure(did, substs) => { tcx.mk_closure_from_closure_substs(did, substs) } + AggregateKind::Generator(did, substs, interior) => { + tcx.mk_generator(did, substs, interior) + } } } } diff --git a/src/librustc/mir/transform.rs b/src/librustc/mir/transform.rs index aa91123ef6..f29405e665 100644 --- a/src/librustc/mir/transform.rs +++ b/src/librustc/mir/transform.rs @@ -33,7 +33,10 @@ pub enum MirSource { Static(NodeId, hir::Mutability), /// Promoted rvalues within a function. - Promoted(NodeId, Promoted) + Promoted(NodeId, Promoted), + + /// Drop glue for a generator. + GeneratorDrop(NodeId), } impl<'a, 'tcx> MirSource { @@ -70,6 +73,7 @@ impl<'a, 'tcx> MirSource { match *self { MirSource::Fn(id) | MirSource::Const(id) | + MirSource::GeneratorDrop(id) | MirSource::Static(id, _) | MirSource::Promoted(id, _) => id } diff --git a/src/librustc/mir/visit.rs b/src/librustc/mir/visit.rs index a6d115bf03..63652980f9 100644 --- a/src/librustc/mir/visit.rs +++ b/src/librustc/mir/visit.rs @@ -8,10 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use middle::const_val::ConstVal; use hir::def_id::DefId; use ty::subst::Substs; -use ty::{ClosureSubsts, Region, Ty}; +use ty::{ClosureSubsts, Region, Ty, GeneratorInterior}; use mir::*; use rustc_const_math::ConstUsize; use syntax_pos::Span; @@ -214,6 +213,18 @@ macro_rules! make_mir_visitor { self.super_ty(ty); } + fn visit_region(&mut self, + region: & $($mutability)* ty::Region<'tcx>, + _: Location) { + self.super_region(region); + } + + fn visit_const(&mut self, + constant: & $($mutability)* &'tcx ty::Const<'tcx>, + _: Location) { + self.super_const(constant); + } + fn visit_substs(&mut self, substs: & $($mutability)* &'tcx Substs<'tcx>, _: Location) { @@ -226,10 +237,10 @@ macro_rules! 
make_mir_visitor { self.super_closure_substs(substs); } - fn visit_const_val(&mut self, - const_val: & $($mutability)* ConstVal, - _: Location) { - self.super_const_val(const_val); + fn visit_generator_interior(&mut self, + interior: & $($mutability)* GeneratorInterior<'tcx>, + _: Location) { + self.super_generator_interior(interior); } fn visit_const_int(&mut self, @@ -249,6 +260,12 @@ macro_rules! make_mir_visitor { self.super_local_decl(local_decl); } + fn visit_local(&mut self, + _local: & $($mutability)* Local, + _context: LvalueContext<'tcx>, + _location: Location) { + } + fn visit_visibility_scope(&mut self, scope: & $($mutability)* VisibilityScope) { self.super_visibility_scope(scope); @@ -348,11 +365,11 @@ macro_rules! make_mir_visitor { StatementKind::SetDiscriminant{ ref $($mutability)* lvalue, .. } => { self.visit_lvalue(lvalue, LvalueContext::Store, location); } - StatementKind::StorageLive(ref $($mutability)* lvalue) => { - self.visit_lvalue(lvalue, LvalueContext::StorageLive, location); + StatementKind::StorageLive(ref $($mutability)* local) => { + self.visit_local(local, LvalueContext::StorageLive, location); } - StatementKind::StorageDead(ref $($mutability)* lvalue) => { - self.visit_lvalue(lvalue, LvalueContext::StorageDead, location); + StatementKind::StorageDead(ref $($mutability)* local) => { + self.visit_local(local, LvalueContext::StorageDead, location); } StatementKind::InlineAsm { ref $($mutability)* outputs, ref $($mutability)* inputs, @@ -415,6 +432,7 @@ macro_rules! make_mir_visitor { TerminatorKind::Resume | TerminatorKind::Return | + TerminatorKind::GeneratorDrop | TerminatorKind::Unreachable => { } @@ -461,6 +479,15 @@ macro_rules! make_mir_visitor { self.visit_branch(block, target); cleanup.map(|t| self.visit_branch(block, t)); } + + TerminatorKind::Yield { ref $($mutability)* value, + resume, + drop } => { + self.visit_operand(value, source_location); + self.visit_branch(block, resume); + drop.map(|t| self.visit_branch(block, t)); + } + } } @@ -475,7 +502,9 @@ macro_rules! make_mir_visitor { self.visit_operand(len, location); self.visit_operand(index, location); } - AssertMessage::Math(_) => {} + AssertMessage::Math(_) => {}, + AssertMessage::GeneratorResumedAfterReturn => {}, + AssertMessage::GeneratorResumedAfterPanic => {}, } } @@ -493,9 +522,10 @@ macro_rules! make_mir_visitor { self.visit_const_usize(length, location); } - Rvalue::Ref(r, bk, ref $($mutability)* path) => { + Rvalue::Ref(ref $($mutability)* r, bk, ref $($mutability)* path) => { + self.visit_region(r, location); self.visit_lvalue(path, LvalueContext::Borrow { - region: r, + region: *r, kind: bk }, location); } @@ -553,6 +583,13 @@ macro_rules! make_mir_visitor { self.visit_def_id(def_id, location); self.visit_closure_substs(closure_substs, location); } + AggregateKind::Generator(ref $($mutability)* def_id, + ref $($mutability)* closure_substs, + ref $($mutability)* interior) => { + self.visit_def_id(def_id, location); + self.visit_closure_substs(closure_substs, location); + self.visit_generator_interior(interior, location); + } } for operand in operands { @@ -580,7 +617,8 @@ macro_rules! make_mir_visitor { context: LvalueContext<'tcx>, location: Location) { match *lvalue { - Lvalue::Local(_) => { + Lvalue::Local(ref $($mutability)* local) => { + self.visit_local(local, context, location); } Lvalue::Static(ref $($mutability)* static_) => { self.visit_static(static_, context, location); @@ -632,8 +670,8 @@ macro_rules! 
make_mir_visitor { ProjectionElem::Field(_field, ref $($mutability)* ty) => { self.visit_ty(ty, Lookup::Loc(location)); } - ProjectionElem::Index(ref $($mutability)* operand) => { - self.visit_operand(operand, location); + ProjectionElem::Index(ref $($mutability)* local) => { + self.visit_local(local, LvalueContext::Consume, location); } ProjectionElem::ConstantIndex { offset: _, min_length: _, @@ -651,11 +689,14 @@ macro_rules! make_mir_visitor { ref $($mutability)* ty, name: _, ref $($mutability)* source_info, + internal: _, + ref $($mutability)* lexical_scope, is_user_variable: _, } = *local_decl; self.visit_ty(ty, Lookup::Src(*source_info)); self.visit_source_info(source_info); + self.visit_visibility_scope(lexical_scope); } fn super_visibility_scope(&mut self, @@ -685,13 +726,8 @@ macro_rules! make_mir_visitor { literal: & $($mutability)* Literal<'tcx>, location: Location) { match *literal { - Literal::Item { ref $($mutability)* def_id, - ref $($mutability)* substs } => { - self.visit_def_id(def_id, location); - self.visit_substs(substs, location); - } Literal::Value { ref $($mutability)* value } => { - self.visit_const_val(value, location); + self.visit_const(value, location); } Literal::Promoted { index: _ } => {} } @@ -716,14 +752,21 @@ macro_rules! make_mir_visitor { fn super_ty(&mut self, _ty: & $($mutability)* Ty<'tcx>) { } + fn super_region(&mut self, _region: & $($mutability)* ty::Region<'tcx>) { + } + + fn super_const(&mut self, _const: & $($mutability)* &'tcx ty::Const<'tcx>) { + } + fn super_substs(&mut self, _substs: & $($mutability)* &'tcx Substs<'tcx>) { } - fn super_closure_substs(&mut self, - _substs: & $($mutability)* ClosureSubsts<'tcx>) { + fn super_generator_interior(&mut self, + _interior: & $($mutability)* GeneratorInterior<'tcx>) { } - fn super_const_val(&mut self, _const_val: & $($mutability)* ConstVal) { + fn super_closure_substs(&mut self, + _substs: & $($mutability)* ClosureSubsts<'tcx>) { } fn super_const_int(&mut self, _const_int: &ConstInt) { diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs index 99fe8e60ae..fc1c5e187e 100644 --- a/src/librustc/session/config.rs +++ b/src/librustc/session/config.rs @@ -19,8 +19,10 @@ pub use self::DebugInfoLevel::*; use session::{early_error, early_warn, Session}; use session::search_paths::SearchPaths; +use ich::StableHashingContext; use rustc_back::{LinkerFlavor, PanicStrategy, RelroLevel}; use rustc_back::target::Target; +use rustc_data_structures::stable_hasher::ToStableHashKey; use lint; use middle::cstore; @@ -48,8 +50,8 @@ use std::path::PathBuf; pub struct Config { pub target: Target, - pub int_type: IntTy, - pub uint_type: UintTy, + pub isize_ty: IntTy, + pub usize_ty: UintTy, } #[derive(Clone, Hash, Debug)] @@ -90,6 +92,25 @@ pub enum OutputType { DepInfo, } +impl_stable_hash_for!(enum self::OutputType { + Bitcode, + Assembly, + LlvmAssembly, + Mir, + Metadata, + Object, + Exe, + DepInfo +}); + +impl<'tcx> ToStableHashKey> for OutputType { + type KeyType = OutputType; + #[inline] + fn to_stable_hash_key(&self, _: &StableHashingContext<'tcx>) -> Self::KeyType { + *self + } +} + impl OutputType { fn is_compatible_with_codegen_units_and_single_output_file(&self) -> bool { match *self { @@ -149,6 +170,10 @@ impl Default for ErrorOutputType { #[derive(Clone, Hash)] pub struct OutputTypes(BTreeMap>); +impl_stable_hash_for!(tuple_struct self::OutputTypes { + map +}); + impl OutputTypes { pub fn new(entries: &[(OutputType, Option)]) -> OutputTypes { 
OutputTypes(BTreeMap::from_iter(entries.iter() @@ -325,6 +350,9 @@ top_level_options!( // is currently just a hack and will be removed eventually, so please // try to not rely on this too much. actually_rustdoc: bool [TRACKED], + + // Number of object files/codegen units to produce on the backend + codegen_units: usize [UNTRACKED], } ); @@ -340,6 +368,7 @@ pub enum PrintRequest { RelocationModels, CodeModels, TargetSpec, + NativeStaticLibs, } pub enum Input { @@ -372,9 +401,15 @@ pub struct OutputFilenames { pub outputs: OutputTypes, } -/// Codegen unit names generated by the numbered naming scheme will contain this -/// marker right before the index of the codegen unit. -pub const NUMBERED_CODEGEN_UNIT_MARKER: &'static str = ".cgu-"; +impl_stable_hash_for!(struct self::OutputFilenames { + out_directory, + out_filestem, + single_output_file, + extra, + outputs +}); + +pub const RUST_CGU_EXT: &str = "rust-cgu"; impl OutputFilenames { pub fn path(&self, flavor: OutputType) -> PathBuf { @@ -405,22 +440,14 @@ impl OutputFilenames { let mut extension = String::new(); if let Some(codegen_unit_name) = codegen_unit_name { - if codegen_unit_name.contains(NUMBERED_CODEGEN_UNIT_MARKER) { - // If we use the numbered naming scheme for modules, we don't want - // the files to look like ... - // but simply .. - let marker_offset = codegen_unit_name.rfind(NUMBERED_CODEGEN_UNIT_MARKER) - .unwrap(); - let index_offset = marker_offset + NUMBERED_CODEGEN_UNIT_MARKER.len(); - extension.push_str(&codegen_unit_name[index_offset .. ]); - } else { - extension.push_str(codegen_unit_name); - }; + extension.push_str(codegen_unit_name); } if !ext.is_empty() { if !extension.is_empty() { extension.push_str("."); + extension.push_str(RUST_CGU_EXT); + extension.push_str("."); } extension.push_str(ext); @@ -478,6 +505,7 @@ pub fn basic_options() -> Options { unstable_features: UnstableFeatures::Disallow, debug_assertions: true, actually_rustdoc: false, + codegen_units: 1, } } @@ -495,11 +523,6 @@ impl Options { (self.debugging_opts.query_dep_graph || self.debugging_opts.incremental_info) } - pub fn single_codegen_unit(&self) -> bool { - self.incremental.is_none() || - self.cg.codegen_units == 1 - } - pub fn file_path_mapping(&self) -> FilePathMapping { FilePathMapping::new( self.debugging_opts.remap_path_prefix_from.iter().zip( @@ -757,7 +780,7 @@ macro_rules! options { fn parse_opt_uint(slot: &mut Option, v: Option<&str>) -> bool { match v { Some(s) => { *slot = s.parse().ok(); slot.is_some() } - None => { *slot = None; true } + None => { *slot = None; false } } } @@ -841,7 +864,8 @@ options! {CodegenOptions, CodegenSetter, basic_codegen_options, build_codegen_options, "C", "codegen", CG_OPTIONS, cg_type_desc, cgsetters, ar: Option = (None, parse_opt_string, [UNTRACKED], - "tool to assemble archives with"), + "tool to assemble archives with (has no effect currently, \ + rustc doesn't use an external archiver)"), linker: Option = (None, parse_opt_string, [UNTRACKED], "system linker to link outputs with"), link_arg: Vec = (vec![], parse_string_push, [UNTRACKED], @@ -890,7 +914,7 @@ options! 
{CodegenOptions, CodegenSetter, basic_codegen_options, "metadata to mangle symbol names with"), extra_filename: String = ("".to_string(), parse_string, [UNTRACKED], "extra data to put in each output filename"), - codegen_units: usize = (1, parse_uint, [UNTRACKED], + codegen_units: Option = (None, parse_opt_uint, [UNTRACKED], "divide crate into N units to optimize in parallel"), remark: Passes = (SomePasses(Vec::new()), parse_passes, [UNTRACKED], "print remarks for these optimization passes (space separated, or \"all\")"), @@ -904,7 +928,7 @@ options! {CodegenOptions, CodegenSetter, basic_codegen_options, debug_assertions: Option = (None, parse_opt_bool, [TRACKED], "explicitly enable the cfg(debug_assertions) directive"), inline_threshold: Option = (None, parse_opt_uint, [TRACKED], - "set the inlining threshold for"), + "set the threshold for inlining a function (default: 225)"), panic: Option = (None, parse_panic_strategy, [TRACKED], "panic strategy to compile crate with"), } @@ -918,6 +942,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "when debug-printing compiler state, do not include spans"), // o/w tests have closure@path identify_regions: bool = (false, parse_bool, [UNTRACKED], "make unnamed regions display as '# (where # is some non-ident unique id)"), + emit_end_regions: bool = (false, parse_bool, [UNTRACKED], + "emit EndRegion as part of MIR; enable transforms that solely process EndRegion"), borrowck_mir: bool = (false, parse_bool, [UNTRACKED], "implicitly treat functions as if they have `#[rustc_mir_borrowck]` attribute"), time_passes: bool = (false, parse_bool, [UNTRACKED], @@ -985,7 +1011,7 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "attempt to recover from parse errors (experimental)"), incremental: Option = (None, parse_opt_string, [UNTRACKED], "enable incremental compilation (experimental)"), - incremental_cc: bool = (true, parse_bool, [UNTRACKED], + incremental_cc: bool = (false, parse_bool, [UNTRACKED], "enable cross-crate incremental compilation (even more experimental)"), incremental_info: bool = (false, parse_bool, [UNTRACKED], "print high-level information about incremental reuse (or the lack thereof)"), @@ -1025,6 +1051,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "print the result of the translation item collection pass"), mir_opt_level: usize = (1, parse_uint, [TRACKED], "set the MIR optimization level (0-3, default: 1)"), + mutable_noalias: bool = (false, parse_bool, [UNTRACKED], + "emit noalias metadata for mutable references"), dump_mir: Option = (None, parse_opt_string, [UNTRACKED], "dump MIR state at various points in translation"), dump_mir_dir: Option = (None, parse_opt_string, [UNTRACKED], @@ -1049,9 +1077,9 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, linker_flavor: Option = (None, parse_linker_flavor, [UNTRACKED], "Linker flavor"), fuel: Option<(String, u64)> = (None, parse_optimization_fuel, [TRACKED], - "Set the optimization fuel quota for a crate."), + "set the optimization fuel quota for a crate"), print_fuel: Option = (None, parse_opt_string, [TRACKED], - "Make Rustc print the total optimization fuel used by a crate."), + "make Rustc print the total optimization fuel used by a crate"), remap_path_prefix_from: Vec = (vec![], parse_string_push, [TRACKED], "add a source pattern to the file path remapping config"), remap_path_prefix_to: Vec = (vec![], parse_string_push, [TRACKED], @@ -1070,6 +1098,10 @@ options! 
{DebuggingOptions, DebuggingSetter, basic_debugging_options, "run the non-lexical lifetimes MIR pass"), trans_time_graph: bool = (false, parse_bool, [UNTRACKED], "generate a graphical HTML report of time spent in trans and LLVM"), + thinlto: bool = (false, parse_bool, [TRACKED], + "enable ThinLTO when possible"), + inline_in_all_cgus: Option = (None, parse_opt_bool, [TRACKED], + "control whether #[inline] functions are in all cgus"), } pub fn default_lib_output() -> CrateType { @@ -1146,7 +1178,7 @@ pub fn build_target_config(opts: &Options, sp: &Handler) -> Config { } }; - let (int_type, uint_type) = match &target.target_pointer_width[..] { + let (isize_ty, usize_ty) = match &target.target_pointer_width[..] { "16" => (ast::IntTy::I16, ast::UintTy::U16), "32" => (ast::IntTy::I32, ast::UintTy::U32), "64" => (ast::IntTy::I64, ast::UintTy::U64), @@ -1156,8 +1188,8 @@ pub fn build_target_config(opts: &Options, sp: &Handler) -> Config { Config { target, - int_type, - uint_type, + isize_ty, + usize_ty, } } @@ -1296,7 +1328,7 @@ pub fn rustc_short_optgroups() -> Vec { print on stdout", "[crate-name|file-names|sysroot|cfg|target-list|\ target-cpus|target-features|relocation-models|\ - code-models|target-spec-json]"), + code-models|target-spec-json|native-static-libs]"), opt::flagmulti_s("g", "", "Equivalent to -C debuginfo=2"), opt::flagmulti_s("O", "", "Equivalent to -C opt-level=2"), opt::opt_s("o", "", "Write output to ", "FILENAME"), @@ -1337,20 +1369,20 @@ pub fn rustc_optgroups() -> Vec { always = always colorize output; never = never colorize output", "auto|always|never"), - opt::flagopt("", "pretty", - "Pretty-print the input instead of compiling; - valid types are: `normal` (un-annotated source), - `expanded` (crates expanded), or - `expanded,identified` (fully parenthesized, AST nodes with IDs).", - "TYPE"), - opt::flagopt("", "unpretty", - "Present the input source, unstable (and less-pretty) variants; - valid types are any of the types for `--pretty`, as well as: - `flowgraph=` (graphviz formatted flowgraph for node), - `everybody_loops` (all function bodies replaced with `loop {}`), - `hir` (the HIR), `hir,identified`, or - `hir,typed` (HIR with types for each node).", - "TYPE"), + opt::opt("", "pretty", + "Pretty-print the input instead of compiling; + valid types are: `normal` (un-annotated source), + `expanded` (crates expanded), or + `expanded,identified` (fully parenthesized, AST nodes with IDs).", + "TYPE"), + opt::opt("", "unpretty", + "Present the input source, unstable (and less-pretty) variants; + valid types are any of the types for `--pretty`, as well as: + `flowgraph=` (graphviz formatted flowgraph for node), + `everybody_loops` (all function bodies replaced with `loop {}`), + `hir` (the HIR), `hir,identified`, or + `hir,typed` (HIR with types for each node).", + "TYPE"), ]); opts } @@ -1485,43 +1517,38 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) } let mut cg = build_codegen_options(matches, error_format); + let mut codegen_units = cg.codegen_units; // Issue #30063: if user requests llvm-related output to one // particular path, disable codegen-units. 
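In this hunk `-C codegen-units` becomes an `Option<usize>` that is only resolved into a concrete count at the end of option parsing: an explicit zero is rejected, a clash with single-output emission falls back to one unit, and an unspecified value picks a default keyed to the optimization level (16 at -O0, 1 otherwise, as the code below settles on). A compressed, standalone sketch of that resolution (not the actual rustc code; `OptLevel` here is a hypothetical two-variant stand-in):

    #[derive(Clone, Copy, PartialEq)]
    enum OptLevel { No, Default }

    fn resolve_codegen_units(
        requested: Option<usize>,
        single_output_requested: bool,
        opt_level: OptLevel,
    ) -> Result<usize, &'static str> {
        match requested {
            Some(0) => Err("codegen units must be a positive nonzero integer"),
            // A single named output file cannot be split across units.
            Some(n) if n > 1 && single_output_requested => Ok(1),
            Some(n) => Ok(n),
            // Unspecified: many small units at -O0 for parallelism, one unit otherwise.
            None => Ok(if opt_level == OptLevel::No { 16 } else { 1 }),
        }
    }

    fn main() {
        assert_eq!(resolve_codegen_units(None, false, OptLevel::No), Ok(16));
        assert_eq!(resolve_codegen_units(None, false, OptLevel::Default), Ok(1));
        assert_eq!(resolve_codegen_units(Some(8), true, OptLevel::Default), Ok(1));
        assert!(resolve_codegen_units(Some(0), false, OptLevel::Default).is_err());
    }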
- if matches.opt_present("o") && cg.codegen_units != 1 { - let incompatible: Vec<_> = output_types.iter() - .map(|ot_path| ot_path.0) - .filter(|ot| { - !ot.is_compatible_with_codegen_units_and_single_output_file() - }).collect(); - if !incompatible.is_empty() { - for ot in &incompatible { - early_warn(error_format, &format!("--emit={} with -o incompatible with \ - -C codegen-units=N for N > 1", - ot.shorthand())); + let incompatible: Vec<_> = output_types.iter() + .map(|ot_path| ot_path.0) + .filter(|ot| { + !ot.is_compatible_with_codegen_units_and_single_output_file() + }) + .map(|ot| ot.shorthand()) + .collect(); + if !incompatible.is_empty() { + match codegen_units { + Some(n) if n > 1 => { + if matches.opt_present("o") { + for ot in &incompatible { + early_warn(error_format, &format!("--emit={} with -o incompatible with \ + -C codegen-units=N for N > 1", + ot)); + } + early_warn(error_format, "resetting to default -C codegen-units=1"); + codegen_units = Some(1); + } } - early_warn(error_format, "resetting to default -C codegen-units=1"); - cg.codegen_units = 1; + _ => codegen_units = Some(1), } } - if cg.codegen_units < 1 { + if codegen_units == Some(0) { early_error(error_format, "Value for codegen units must be a positive nonzero integer"); } - // It's possible that we have `codegen_units > 1` but only one item in - // `trans.modules`. We could theoretically proceed and do LTO in that - // case, but it would be confusing to have the validity of - // `-Z lto -C codegen-units=2` depend on details of the crate being - // compiled, so we complain regardless. - if cg.lto && cg.codegen_units > 1 { - // This case is impossible to handle because LTO expects to be able - // to combine the entire crate and all its dependencies into a - // single compilation unit, but each codegen unit is in a separate - // LLVM context, so they can't easily be combined. - early_error(error_format, "can't perform LTO when using multiple codegen units"); - } - if cg.lto && debugging_opts.incremental.is_some() { early_error(error_format, "can't perform LTO when compiling incrementally"); } @@ -1642,6 +1669,7 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) "target-features" => PrintRequest::TargetFeatures, "relocation-models" => PrintRequest::RelocationModels, "code-models" => PrintRequest::CodeModels, + "native-static-libs" => PrintRequest::NativeStaticLibs, "target-spec-json" => { if nightly_options::is_unstable_enabled(matches) { PrintRequest::TargetSpec @@ -1683,6 +1711,48 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) let incremental = debugging_opts.incremental.as_ref().map(|m| PathBuf::from(m)); + let codegen_units = codegen_units.unwrap_or_else(|| { + match opt_level { + // If we're compiling at `-O0` then default to 16 codegen units. + // The number here shouldn't matter too too much as debug mode + // builds don't rely on performance at all, meaning that lost + // opportunities for inlining through multiple codegen units is + // a non-issue. + // + // Note that the high number here doesn't mean that we'll be + // spawning a large number of threads in parallel. The backend + // of rustc contains global rate limiting through the + // `jobserver` crate so we'll never overload the system with too + // much work, but rather we'll only be optimizing when we're + // otherwise cooperating with other instances of rustc. + // + // Rather the high number here means that we should be able to + // keep a lot of idle cpus busy. 
By ensuring that no codegen + // unit takes *too* long to build we'll be guaranteed that all + // cpus will finish pretty closely to one another and we should + // make relatively optimal use of system resources + // + // Another note worth mentioning here, however, is that this number + // isn't *too* high. When codegen units are increased that means we + // currently have to codegen `#[inline]` functions into each codegen + // unit, which means the more codegen units we're using the more we + // may be generating. In other words, increasing codegen units may + // increase the overall work the compiler does. If we don't have + // enough cores to make up for this loss then increasing the number + // of codegen units could become an overall loss! + // + // As a result we choose a hopefully conservative value 16, which + // should be more than the number of cpus of most hardware compiling + // Rust but also not too much for 2-4 core machines to have too much + // loss of compile time. + OptLevel::No => 16, + + // All other optimization levels default use one codegen unit, + // the historical default in Rust for a Long Time. + _ => 1, + } + }); + (Options { crate_types, optimize: opt_level, @@ -1707,6 +1777,7 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) unstable_features: UnstableFeatures::from_environment(), debug_assertions, actually_rustdoc: false, + codegen_units, }, cfg) } @@ -1945,17 +2016,15 @@ mod dep_tracking { #[cfg(test)] mod tests { - use dep_graph::DepGraph; use errors; use getopts; use lint; - use middle::cstore::{self, DummyCrateStore}; + use middle::cstore; use session::config::{build_configuration, build_session_options_and_crate_config}; use session::build_session; use std::collections::{BTreeMap, BTreeSet}; use std::iter::FromIterator; use std::path::PathBuf; - use std::rc::Rc; use super::{OutputType, OutputTypes, Externs}; use rustc_back::{PanicStrategy, RelroLevel}; use syntax::symbol::Symbol; @@ -1979,7 +2048,6 @@ mod tests { // When the user supplies --test we should implicitly supply --cfg test #[test] fn test_switch_implies_cfg_test() { - let dep_graph = DepGraph::new(false); let matches = &match optgroups().parse(&["--test".to_string()]) { Ok(m) => m, @@ -1987,7 +2055,7 @@ mod tests { }; let registry = errors::registry::Registry::new(&[]); let (sessopts, cfg) = build_session_options_and_crate_config(matches); - let sess = build_session(sessopts, &dep_graph, None, registry, Rc::new(DummyCrateStore)); + let sess = build_session(sessopts, None, registry); let cfg = build_configuration(&sess, cfg); assert!(cfg.contains(&(Symbol::intern("test"), None))); } @@ -1996,7 +2064,6 @@ mod tests { // another --cfg test #[test] fn test_switch_implies_cfg_test_unless_cfg_test() { - let dep_graph = DepGraph::new(false); let matches = &match optgroups().parse(&["--test".to_string(), "--cfg=test".to_string()]) { Ok(m) => m, @@ -2006,8 +2073,7 @@ mod tests { }; let registry = errors::registry::Registry::new(&[]); let (sessopts, cfg) = build_session_options_and_crate_config(matches); - let sess = build_session(sessopts, &dep_graph, None, registry, - Rc::new(DummyCrateStore)); + let sess = build_session(sessopts, None, registry); let cfg = build_configuration(&sess, cfg); let mut test_items = cfg.iter().filter(|&&(name, _)| name == "test"); assert!(test_items.next().is_some()); @@ -2016,15 +2082,13 @@ mod tests { #[test] fn test_can_print_warnings() { - let dep_graph = DepGraph::new(false); { let matches = optgroups().parse(&[ "-Awarnings".to_string() 
]).unwrap(); let registry = errors::registry::Registry::new(&[]); let (sessopts, _) = build_session_options_and_crate_config(&matches); - let sess = build_session(sessopts, &dep_graph, None, registry, - Rc::new(DummyCrateStore)); + let sess = build_session(sessopts, None, registry); assert!(!sess.diagnostic().can_emit_warnings); } @@ -2035,8 +2099,7 @@ mod tests { ]).unwrap(); let registry = errors::registry::Registry::new(&[]); let (sessopts, _) = build_session_options_and_crate_config(&matches); - let sess = build_session(sessopts, &dep_graph, None, registry, - Rc::new(DummyCrateStore)); + let sess = build_session(sessopts, None, registry); assert!(sess.diagnostic().can_emit_warnings); } @@ -2046,8 +2109,7 @@ mod tests { ]).unwrap(); let registry = errors::registry::Registry::new(&[]); let (sessopts, _) = build_session_options_and_crate_config(&matches); - let sess = build_session(sessopts, &dep_graph, None, registry, - Rc::new(DummyCrateStore)); + let sess = build_session(sessopts, None, registry); assert!(sess.diagnostic().can_emit_warnings); } } @@ -2419,7 +2481,7 @@ mod tests { opts.cg.extra_filename = String::from("extra-filename"); assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); - opts.cg.codegen_units = 42; + opts.cg.codegen_units = Some(42); assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); opts.cg.remark = super::SomePasses(vec![String::from("pass1"), diff --git a/src/librustc/session/mod.rs b/src/librustc/session/mod.rs index 7ff9d202c1..bd6e5eb67c 100644 --- a/src/librustc/session/mod.rs +++ b/src/librustc/session/mod.rs @@ -11,11 +11,9 @@ pub use self::code_stats::{CodeStats, DataTypeKind, FieldInfo}; pub use self::code_stats::{SizeKind, TypeSizeInfo, VariantInfo}; -use dep_graph::DepGraph; use hir::def_id::{CrateNum, DefIndex}; use lint; -use middle::cstore::CrateStore; use middle::allocator::AllocatorKind; use middle::dependency_format; use session::search_paths::PathKind; @@ -59,11 +57,9 @@ pub mod search_paths; // Represents the data associated with a compilation // session for a single crate. pub struct Session { - pub dep_graph: DepGraph, pub target: config::Config, pub host: Target, pub opts: config::Options, - pub cstore: Rc, pub parse_sess: ParseSess, // For a library crate, this is always none pub entry_fn: RefCell>, @@ -72,8 +68,7 @@ pub struct Session { pub derive_registrar_fn: Cell>, pub default_sysroot: Option, // The name of the root source file of the crate, in the local file system. - // The path is always expected to be absolute. `None` means that there is no - // source file. + // `None` means that there is no source file. pub local_crate_source_file: Option, // The directory the compiler has been executed in plus a flag indicating // if the value stored here has been affected by path remapping. @@ -93,7 +88,7 @@ pub struct Session { // forms a unique global identifier for the crate. It is used to allow // multiple crates with the same name to coexist. See the // trans::back::symbol_names module for more information. 
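The `Session` changes that follow replace the eagerly interned crate disambiguator with an `Option` that is filled in later, so a read before initialization becomes an internal compiler error instead of silently returning an empty symbol. A small sketch of that set-once pattern, with `String` standing in for the interner's `Symbol` and `panic!` for the compiler's `bug!` macro:

```rust
use std::cell::RefCell;

// Sketch of the set-once / bug-if-read-early pattern; the field and method
// names mirror the hunk, but the types are simplified for a standalone example.
struct Session {
    crate_disambiguator: RefCell<Option<String>>,
}

impl Session {
    fn local_crate_disambiguator(&self) -> String {
        match *self.crate_disambiguator.borrow() {
            Some(ref s) => s.clone(),
            None => panic!("accessing disambiguator before initialization"),
        }
    }
}

fn main() {
    let sess = Session { crate_disambiguator: RefCell::new(None) };
    // Some later pass computes and stores the value exactly once.
    *sess.crate_disambiguator.borrow_mut() = Some("example-disambiguator".to_string());
    assert_eq!(sess.local_crate_disambiguator(), "example-disambiguator");
}
```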
- pub crate_disambiguator: RefCell, + pub crate_disambiguator: RefCell>, pub features: RefCell, /// The maximum recursion limit for potentially infinitely recursive @@ -171,7 +166,10 @@ enum DiagnosticBuilderMethod { impl Session { pub fn local_crate_disambiguator(&self) -> Symbol { - *self.crate_disambiguator.borrow() + match *self.crate_disambiguator.borrow() { + Some(sym) => sym, + None => bug!("accessing disambiguator before initialization"), + } } pub fn struct_span_warn<'a, S: Into>(&'a self, sp: S, @@ -410,6 +408,11 @@ impl Session { pub fn print_llvm_passes(&self) -> bool { self.opts.debugging_opts.print_llvm_passes } + pub fn emit_end_regions(&self) -> bool { + self.opts.debugging_opts.emit_end_regions || + (self.opts.debugging_opts.mir_emit_validate > 0) || + self.opts.debugging_opts.borrowck_mir + } pub fn lto(&self) -> bool { self.opts.cg.lto } @@ -499,9 +502,29 @@ impl Session { kind) } + pub fn set_incr_session_load_dep_graph(&self, load: bool) { + let mut incr_comp_session = self.incr_comp_session.borrow_mut(); + + match *incr_comp_session { + IncrCompSession::Active { ref mut load_dep_graph, .. } => { + *load_dep_graph = load; + } + _ => {} + } + } + + pub fn incr_session_load_dep_graph(&self) -> bool { + let incr_comp_session = self.incr_comp_session.borrow(); + match *incr_comp_session { + IncrCompSession::Active { load_dep_graph, .. } => load_dep_graph, + _ => false, + } + } + pub fn init_incr_comp_session(&self, session_dir: PathBuf, - lock_file: flock::Lock) { + lock_file: flock::Lock, + load_dep_graph: bool) { let mut incr_comp_session = self.incr_comp_session.borrow_mut(); if let IncrCompSession::NotInitialized = *incr_comp_session { } else { @@ -511,6 +534,7 @@ impl Session { *incr_comp_session = IncrCompSession::Active { session_directory: session_dir, lock_file, + load_dep_graph, }; } @@ -615,27 +639,21 @@ impl Session { } pub fn build_session(sopts: config::Options, - dep_graph: &DepGraph, local_crate_source_file: Option, - registry: errors::registry::Registry, - cstore: Rc) + registry: errors::registry::Registry) -> Session { let file_path_mapping = sopts.file_path_mapping(); build_session_with_codemap(sopts, - dep_graph, local_crate_source_file, registry, - cstore, Rc::new(codemap::CodeMap::new(file_path_mapping)), None) } pub fn build_session_with_codemap(sopts: config::Options, - dep_graph: &DepGraph, local_crate_source_file: Option, registry: errors::registry::Registry, - cstore: Rc, codemap: Rc, emitter_dest: Option>) -> Session { @@ -677,19 +695,15 @@ pub fn build_session_with_codemap(sopts: config::Options, emitter); build_session_(sopts, - dep_graph, local_crate_source_file, diagnostic_handler, - codemap, - cstore) + codemap) } pub fn build_session_(sopts: config::Options, - dep_graph: &DepGraph, local_crate_source_file: Option, span_diagnostic: errors::Handler, - codemap: Rc, - cstore: Rc) + codemap: Rc) -> Session { let host = match Target::search(config::host_triple()) { Ok(t) => t, @@ -707,7 +721,6 @@ pub fn build_session_(sopts: config::Options, let file_path_mapping = sopts.file_path_mapping(); - // Make the path absolute, if necessary let local_crate_source_file = local_crate_source_file.map(|path| { file_path_mapping.map_prefix(path.to_string_lossy().into_owned()).0 }); @@ -722,11 +735,9 @@ pub fn build_session_(sopts: config::Options, let working_dir = file_path_mapping.map_prefix(working_dir); let sess = Session { - dep_graph: dep_graph.clone(), target: target_cfg, host, opts: sopts, - cstore, parse_sess: p_s, // For a library crate, this is 
always none entry_fn: RefCell::new(None), @@ -743,7 +754,7 @@ pub fn build_session_(sopts: config::Options, plugin_attributes: RefCell::new(Vec::new()), crate_types: RefCell::new(Vec::new()), dependency_formats: RefCell::new(FxHashMap()), - crate_disambiguator: RefCell::new(Symbol::intern("")), + crate_disambiguator: RefCell::new(None), features: RefCell::new(feature_gate::Features::new()), recursion_limit: Cell::new(64), type_length_limit: Cell::new(1048576), @@ -801,6 +812,7 @@ pub enum IncrCompSession { Active { session_directory: PathBuf, lock_file: flock::Lock, + load_dep_graph: bool, }, // This is the state after the session directory has been finalized. In this // state, the contents of the directory must not be modified any more. diff --git a/src/librustc/traits/coherence.rs b/src/librustc/traits/coherence.rs index 34df447a11..f3682f8d35 100644 --- a/src/librustc/traits/coherence.rs +++ b/src/librustc/traits/coherence.rs @@ -13,6 +13,7 @@ use hir::def_id::{DefId, LOCAL_CRATE}; use syntax_pos::DUMMY_SP; use traits::{self, Normalized, SelectionContext, Obligation, ObligationCause, Reveal}; +use traits::select::IntercrateAmbiguityCause; use ty::{self, Ty, TyCtxt}; use ty::subst::Subst; @@ -21,12 +22,17 @@ use infer::{InferCtxt, InferOk}; #[derive(Copy, Clone)] struct InferIsLocal(bool); +pub struct OverlapResult<'tcx> { + pub impl_header: ty::ImplHeader<'tcx>, + pub intercrate_ambiguity_causes: Vec, +} + /// If there are types that satisfy both impls, returns a suitably-freshened /// `ImplHeader` with those types substituted pub fn overlapping_impls<'cx, 'gcx, 'tcx>(infcx: &InferCtxt<'cx, 'gcx, 'tcx>, impl1_def_id: DefId, impl2_def_id: DefId) - -> Option> + -> Option> { debug!("impl_can_satisfy(\ impl1_def_id={:?}, \ @@ -65,7 +71,7 @@ fn with_fresh_ty_vars<'cx, 'gcx, 'tcx>(selcx: &mut SelectionContext<'cx, 'gcx, ' fn overlap<'cx, 'gcx, 'tcx>(selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, a_def_id: DefId, b_def_id: DefId) - -> Option> + -> Option> { debug!("overlap(a_def_id={:?}, b_def_id={:?})", a_def_id, @@ -113,11 +119,14 @@ fn overlap<'cx, 'gcx, 'tcx>(selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, return None } - Some(selcx.infcx().resolve_type_vars_if_possible(&a_impl_header)) + Some(OverlapResult { + impl_header: selcx.infcx().resolve_type_vars_if_possible(&a_impl_header), + intercrate_ambiguity_causes: selcx.intercrate_ambiguity_causes().to_vec(), + }) } pub fn trait_ref_is_knowable<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, - trait_ref: &ty::TraitRef<'tcx>) -> bool + trait_ref: ty::TraitRef<'tcx>) -> bool { debug!("trait_ref_is_knowable(trait_ref={:?})", trait_ref); @@ -131,10 +140,7 @@ pub fn trait_ref_is_knowable<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, // if the trait is not marked fundamental, then it's always possible that // an ancestor crate will impl this in the future, if they haven't // already - if - trait_ref.def_id.krate != LOCAL_CRATE && - !tcx.has_attr(trait_ref.def_id, "fundamental") - { + if !trait_ref_is_local_or_fundamental(tcx, trait_ref) { debug!("trait_ref_is_knowable: trait is neither local nor fundamental"); return false; } @@ -148,6 +154,12 @@ pub fn trait_ref_is_knowable<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(true)).is_err() } +pub fn trait_ref_is_local_or_fundamental<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + trait_ref: ty::TraitRef<'tcx>) + -> bool { + trait_ref.def_id.krate == LOCAL_CRATE || tcx.has_attr(trait_ref.def_id, "fundamental") +} + pub enum OrphanCheckErr<'tcx> { 
NoLocalInputType, UncoveredTy(Ty<'tcx>), @@ -177,11 +189,11 @@ pub fn orphan_check<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, return Ok(()); } - orphan_check_trait_ref(tcx, &trait_ref, InferIsLocal(false)) + orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(false)) } fn orphan_check_trait_ref<'tcx>(tcx: TyCtxt, - trait_ref: &ty::TraitRef<'tcx>, + trait_ref: ty::TraitRef<'tcx>, infer_is_local: InferIsLocal) -> Result<(), OrphanCheckErr<'tcx>> { @@ -301,7 +313,7 @@ fn ty_is_local_constructor(ty: Ty, infer_is_local: InferIsLocal)-> bool { true } - ty::TyClosure(..) | ty::TyAnon(..) => { + ty::TyClosure(..) | ty::TyGenerator(..) | ty::TyAnon(..) => { bug!("ty_is_local invoked on unexpected type: {:?}", ty) } } diff --git a/src/librustc/traits/error_reporting.rs b/src/librustc/traits/error_reporting.rs index 3e64c8c7de..c7c8141f4f 100644 --- a/src/librustc/traits/error_reporting.rs +++ b/src/librustc/traits/error_reporting.rs @@ -15,8 +15,11 @@ use super::{ Obligation, ObligationCause, ObligationCauseCode, + OnUnimplementedDirective, + OnUnimplementedNote, OutputTypeParameterMismatch, TraitNotObjectSafe, + ConstEvalFailure, PredicateObligation, Reveal, SelectionContext, @@ -25,16 +28,16 @@ use super::{ }; use errors::DiagnosticBuilder; -use fmt_macros::{Parser, Piece, Position}; use hir; use hir::def_id::DefId; use infer::{self, InferCtxt}; use infer::type_variable::TypeVariableOrigin; +use middle::const_val; use rustc::lint::builtin::EXTRA_REQUIREMENT_IN_IMPL; use std::fmt; use syntax::ast; use ty::{self, AdtKind, ToPredicate, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable}; -use ty::error::{ExpectedFound, TypeError}; +use ty::error::ExpectedFound; use ty::fast_reject; use ty::fold::TypeFolder; use ty::subst::Subst; @@ -251,6 +254,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { AdtKind::Union => Some(16), AdtKind::Enum => Some(17), }, + ty::TyGenerator(..) => Some(18), ty::TyInfer(..) | ty::TyError => None } } @@ -315,77 +319,56 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } } - fn on_unimplemented_note(&self, - trait_ref: ty::PolyTraitRef<'tcx>, - obligation: &PredicateObligation<'tcx>) -> Option { + fn on_unimplemented_note( + &self, + trait_ref: ty::PolyTraitRef<'tcx>, + obligation: &PredicateObligation<'tcx>) -> + OnUnimplementedNote + { let def_id = self.impl_similar_to(trait_ref, obligation) .unwrap_or(trait_ref.def_id()); - let trait_ref = trait_ref.skip_binder(); + let trait_ref = *trait_ref.skip_binder(); + + let desugaring; + let method; + let mut flags = vec![]; + let direct = match obligation.cause.code { + ObligationCauseCode::BuiltinDerivedObligation(..) | + ObligationCauseCode::ImplDerivedObligation(..) => false, + _ => true + }; + if direct { + // this is a "direct", user-specified, rather than derived, + // obligation. 
+ flags.push(("direct", None)); + } - let span = obligation.cause.span; - let mut report = None; - if let Some(item) = self.tcx - .get_attrs(def_id) - .into_iter() - .filter(|a| a.check_name("rustc_on_unimplemented")) - .next() - { - let name = self.tcx.item_name(def_id).as_str(); - let err_sp = item.span.substitute_dummy(span); - let trait_str = self.tcx.item_path_str(trait_ref.def_id); - if let Some(istring) = item.value_str() { - let istring = &*istring.as_str(); - let generics = self.tcx.generics_of(trait_ref.def_id); - let generic_map = generics.types.iter().map(|param| { - (param.name.as_str().to_string(), - trait_ref.substs.type_for_def(param).to_string()) - }).collect::>(); - let parser = Parser::new(istring); - let mut errored = false; - let err: String = parser.filter_map(|p| { - match p { - Piece::String(s) => Some(s), - Piece::NextArgument(a) => match a.position { - Position::ArgumentNamed(s) => match generic_map.get(s) { - Some(val) => Some(val), - None if s == name => { - Some(&trait_str) - } - None => { - span_err!(self.tcx.sess, err_sp, E0272, - "the #[rustc_on_unimplemented] attribute on trait \ - definition for {} refers to non-existent type \ - parameter {}", - trait_str, s); - errored = true; - None - } - }, - _ => { - span_err!(self.tcx.sess, err_sp, E0273, - "the #[rustc_on_unimplemented] attribute on trait \ - definition for {} must have named format arguments, eg \ - `#[rustc_on_unimplemented = \"foo {{T}}\"]`", - trait_str); - errored = true; - None - } - } - } - }).collect(); - // Report only if the format string checks out - if !errored { - report = Some(err); - } - } else { - span_err!(self.tcx.sess, err_sp, E0274, - "the #[rustc_on_unimplemented] attribute on \ - trait definition for {} must have a value, \ - eg `#[rustc_on_unimplemented = \"foo\"]`", - trait_str); + if let ObligationCauseCode::ItemObligation(item) = obligation.cause.code { + // FIXME: maybe also have some way of handling methods + // from other traits? That would require name resolution, + // which we might want to be some sort of hygienic. + // + // Currently I'm leaving it for what I need for `try`. 
+ if self.tcx.trait_of_item(item) == Some(trait_ref.def_id) { + method = self.tcx.item_name(item); + flags.push(("from_method", None)); + flags.push(("from_method", Some(&*method))); } } - report + + if let Some(k) = obligation.cause.span.compiler_desugaring_kind() { + desugaring = k.as_symbol().as_str(); + flags.push(("from_desugaring", None)); + flags.push(("from_desugaring", Some(&*desugaring))); + } + + if let Ok(Some(command)) = OnUnimplementedDirective::of_item( + self.tcx, trait_ref.def_id, def_id + ) { + command.evaluate(self.tcx, trait_ref, &flags) + } else { + OnUnimplementedNote::empty() + } } fn find_similar_impl_candidates(&self, @@ -576,17 +559,23 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { let (post_message, pre_message) = self.get_parent_trait_ref(&obligation.cause.code) .map(|t| (format!(" in `{}`", t), format!("within `{}`, ", t))) - .unwrap_or((String::new(), String::new())); + .unwrap_or((String::new(), String::new())); + + let OnUnimplementedNote { message, label } + = self.on_unimplemented_note(trait_ref, obligation); + let have_alt_message = message.is_some() || label.is_some(); + let mut err = struct_span_err!( self.tcx.sess, span, E0277, - "the trait bound `{}` is not satisfied{}", - trait_ref.to_predicate(), - post_message); + "{}", + message.unwrap_or_else(|| { + format!("the trait bound `{}` is not satisfied{}", + trait_ref.to_predicate(), post_message) + })); - let unimplemented_note = self.on_unimplemented_note(trait_ref, obligation); - if let Some(ref s) = unimplemented_note { + if let Some(ref s) = label { // If it has a custom "#[rustc_on_unimplemented]" // error message, let's display it as the label! err.span_label(span, s.as_str()); @@ -614,7 +603,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // which is somewhat confusing. err.help(&format!("consider adding a `where {}` bound", trait_ref.to_predicate())); - } else if unimplemented_note.is_none() { + } else if !have_alt_message { // Can't show anything else useful, try to find similar impls. let impl_candidates = self.find_similar_impl_candidates(trait_ref); self.report_similar_impl_candidates(impl_candidates, &mut err); @@ -711,10 +700,18 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // (which may fail). span_bug!(span, "WF predicate not satisfied for {:?}", ty); } + + ty::Predicate::ConstEvaluatable(..) => { + // Errors for `ConstEvaluatable` predicates show up as + // `SelectionError::ConstEvalFailure`, + // not `Unimplemented`. 
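The flags assembled in the hunk above (`direct`, `from_method`, `from_desugaring`) are matched against the `on(...)` conditions of the new structured `#[rustc_on_unimplemented]` form, whose parser is added in `traits/on_unimplemented.rs` further below. As a hedged illustration of what a trait using the structured form could look like: this is a compiler-internal, nightly-only attribute gated behind `rustc_attrs`, and the trait name and message text below are made up, while the `message`/`label`/`on(...)` keys, the `direct` flag, and the `{Self}`/`{A}` placeholders follow the parser introduced in this patch.

```rust
// Illustration only: made-up trait, real (but internal/unstable) attribute shape.
#![feature(rustc_attrs)]

#[rustc_on_unimplemented(
    on(direct,
       message = "`{Self}` cannot be used as a `MyCollection<{A}>`",
       label = "try implementing `MyCollection<{A}>` for `{Self}`"),
    message = "`{Self}` is not a collection of `{A}`",
    label = "the trait `MyCollection<{A}>` is not implemented for `{Self}`"
)]
trait MyCollection<A> {
    fn add(&mut self, value: A);
}

fn main() {}
```

When such a bound is unsatisfied, `evaluate` applies the root `message`/`label` first and then lets a matching `on(direct, ...)` subcommand override them for direct (non-derived) obligations.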
+ span_bug!(span, + "const-evaluatable requirement gave wrong error: `{:?}`", obligation) + } } } - OutputTypeParameterMismatch(ref expected_trait_ref, ref actual_trait_ref, ref e) => { + OutputTypeParameterMismatch(ref expected_trait_ref, ref actual_trait_ref, _) => { let expected_trait_ref = self.resolve_type_vars_if_possible(&*expected_trait_ref); let actual_trait_ref = self.resolve_type_vars_if_possible(&*actual_trait_ref); if actual_trait_ref.self_ty().references_error() { @@ -725,48 +722,31 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.tcx.hir.span_if_local(did) }); - if let &TypeError::TupleSize(ref expected_found) = e { - // Expected `|x| { }`, found `|x, y| { }` - self.report_arg_count_mismatch(span, - found_span, - expected_found.expected, - expected_found.found, - expected_trait_ty.is_closure()) - } else if let &TypeError::Sorts(ref expected_found) = e { - let expected = if let ty::TyTuple(tys, _) = expected_found.expected.sty { - tys.len() - } else { - 1 + let self_ty_count = + match expected_trait_ref.skip_binder().substs.type_at(1).sty { + ty::TyTuple(ref tys, _) => tys.len(), + _ => 1, }; - let found = if let ty::TyTuple(tys, _) = expected_found.found.sty { - tys.len() - } else { - 1 + let arg_ty_count = + match actual_trait_ref.skip_binder().substs.type_at(1).sty { + ty::TyTuple(ref tys, _) => tys.len(), + _ => 1, }; - - if expected != found { - // Expected `|| { }`, found `|x, y| { }` - // Expected `fn(x) -> ()`, found `|| { }` - self.report_arg_count_mismatch(span, - found_span, - expected, - found, - expected_trait_ty.is_closure()) - } else { - self.report_type_argument_mismatch(span, - found_span, - expected_trait_ty, - expected_trait_ref, - actual_trait_ref, - e) - } + if self_ty_count == arg_ty_count { + self.report_closure_arg_mismatch(span, + found_span, + expected_trait_ref, + actual_trait_ref) } else { - self.report_type_argument_mismatch(span, - found_span, - expected_trait_ty, - expected_trait_ref, - actual_trait_ref, - e) + // Expected `|| { }`, found `|x, y| { }` + // Expected `fn(x) -> ()`, found `|| { }` + self.report_arg_count_mismatch( + span, + found_span, + arg_ty_count, + self_ty_count, + expected_trait_ty.is_closure() + ) } } @@ -775,36 +755,18 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.tcx.report_object_safety_error(span, did, violations) } + + ConstEvalFailure(ref err) => { + if let const_val::ErrKind::TypeckError = err.kind { + return; + } + err.struct_error(self.tcx, span, "constant expression") + } }; self.note_obligation_cause(&mut err, obligation); err.emit(); } - fn report_type_argument_mismatch(&self, - span: Span, - found_span: Option, - expected_ty: Ty<'tcx>, - expected_ref: ty::PolyTraitRef<'tcx>, - found_ref: ty::PolyTraitRef<'tcx>, - type_error: &TypeError<'tcx>) - -> DiagnosticBuilder<'tcx> - { - let mut err = struct_span_err!(self.tcx.sess, span, E0281, - "type mismatch: `{}` implements the trait `{}`, but the trait `{}` is required", - expected_ty, - expected_ref, - found_ref); - - err.span_label(span, format!("{}", type_error)); - - if let Some(sp) = found_span { - err.span_label(span, format!("requires `{}`", found_ref)); - err.span_label(sp, format!("implements `{}`", expected_ref)); - } - - err - } - fn report_arg_count_mismatch(&self, span: Span, found_span: Option, @@ -833,6 +795,57 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } err } + + fn report_closure_arg_mismatch(&self, + span: Span, + found_span: Option, + expected_ref: ty::PolyTraitRef<'tcx>, + found: ty::PolyTraitRef<'tcx>) + -> 
DiagnosticBuilder<'tcx> + { + fn build_fn_sig_string<'a, 'gcx, 'tcx>(tcx: ty::TyCtxt<'a, 'gcx, 'tcx>, + trait_ref: &ty::TraitRef<'tcx>) -> String { + let inputs = trait_ref.substs.type_at(1); + let sig = if let ty::TyTuple(inputs, _) = inputs.sty { + tcx.mk_fn_sig( + inputs.iter().map(|&x| x), + tcx.mk_infer(ty::TyVar(ty::TyVid { index: 0 })), + false, + hir::Unsafety::Normal, + ::syntax::abi::Abi::Rust + ) + } else { + tcx.mk_fn_sig( + ::std::iter::once(inputs), + tcx.mk_infer(ty::TyVar(ty::TyVid { index: 0 })), + false, + hir::Unsafety::Normal, + ::syntax::abi::Abi::Rust + ) + }; + format!("{}", ty::Binder(sig)) + } + + let argument_is_closure = expected_ref.skip_binder().substs.type_at(0).is_closure(); + let mut err = struct_span_err!(self.tcx.sess, span, E0631, + "type mismatch in {} arguments", + if argument_is_closure { "closure" } else { "function" }); + + let found_str = format!( + "expected signature of `{}`", + build_fn_sig_string(self.tcx, found.skip_binder()) + ); + err.span_label(span, found_str); + + let found_span = found_span.unwrap_or(span); + let expected_str = format!( + "found signature of `{}`", + build_fn_sig_string(self.tcx, expected_ref.skip_binder()) + ); + err.span_label(found_span, expected_str); + + err + } } impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { @@ -932,7 +945,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // anyway. In that case, why inundate the user. if !self.tcx.sess.has_errors() { if - self.tcx.lang_items.sized_trait() + self.tcx.lang_items().sized_trait() .map_or(false, |sized_id| sized_id == trait_ref.def_id()) { self.need_type_info(body_id, span, self_ty); diff --git a/src/librustc/traits/fulfill.rs b/src/librustc/traits/fulfill.rs index fbc393cbd9..cc2506d1af 100644 --- a/src/librustc/traits/fulfill.rs +++ b/src/librustc/traits/fulfill.rs @@ -25,7 +25,7 @@ use super::{FulfillmentError, FulfillmentErrorCode}; use super::{ObligationCause, PredicateObligation, Obligation}; use super::project; use super::select::SelectionContext; -use super::Unimplemented; +use super::{Unimplemented, ConstEvalFailure}; impl<'tcx> ForestObligation for PendingPredicateObligation<'tcx> { type Predicate = ty::Predicate<'tcx>; @@ -540,6 +540,29 @@ fn process_predicate<'a, 'gcx, 'tcx>( } } } + + ty::Predicate::ConstEvaluatable(def_id, substs) => { + match selcx.tcx().lift_to_global(&obligation.param_env) { + None => { + Ok(None) + } + Some(param_env) => { + match selcx.tcx().lift_to_global(&substs) { + None => { + pending_obligation.stalled_on = substs.types().collect(); + Ok(None) + } + Some(substs) => { + match selcx.tcx().at(obligation.cause.span) + .const_eval(param_env.and((def_id, substs))) { + Ok(_) => Ok(Some(vec![])), + Err(e) => Err(CodeSelectionError(ConstEvalFailure(e))) + } + } + } + } + } + } } } diff --git a/src/librustc/traits/mod.rs b/src/librustc/traits/mod.rs index 5bae82c1a3..a1817f1810 100644 --- a/src/librustc/traits/mod.rs +++ b/src/librustc/traits/mod.rs @@ -17,7 +17,8 @@ pub use self::ObligationCauseCode::*; use hir; use hir::def_id::DefId; -use middle::region::RegionMaps; +use middle::const_val::ConstEvalErr; +use middle::region; use middle::free_region::FreeRegionMap; use ty::subst::Substs; use ty::{self, AdtKind, Ty, TyCtxt, TypeFoldable, ToPredicate}; @@ -28,17 +29,17 @@ use std::rc::Rc; use syntax::ast; use syntax_pos::{Span, DUMMY_SP}; -pub use self::coherence::orphan_check; -pub use self::coherence::overlapping_impls; -pub use self::coherence::OrphanCheckErr; +pub use self::coherence::{orphan_check, overlapping_impls, 
OrphanCheckErr, OverlapResult}; pub use self::fulfill::{FulfillmentContext, RegionObligation}; pub use self::project::MismatchedProjectionTypes; pub use self::project::{normalize, normalize_projection_type, Normalized}; pub use self::project::{ProjectionCache, ProjectionCacheSnapshot, Reveal}; pub use self::object_safety::ObjectSafetyViolation; pub use self::object_safety::MethodViolationCode; +pub use self::on_unimplemented::{OnUnimplementedDirective, OnUnimplementedNote}; pub use self::select::{EvaluationCache, SelectionContext, SelectionCache}; -pub use self::specialize::{OverlapError, specialization_graph, specializes, translate_substs}; +pub use self::select::IntercrateAmbiguityCause; +pub use self::specialize::{OverlapError, specialization_graph, translate_substs}; pub use self::specialize::{SpecializesCache, find_associated_item}; pub use self::util::elaborate_predicates; pub use self::util::supertraits; @@ -52,6 +53,7 @@ mod error_reporting; mod fulfill; mod project; mod object_safety; +mod on_unimplemented; mod select; mod specialize; mod structural_impls; @@ -217,6 +219,7 @@ pub enum SelectionError<'tcx> { ty::PolyTraitRef<'tcx>, ty::error::TypeError<'tcx>), TraitNotObjectSafe(DefId), + ConstEvalFailure(ConstEvalErr<'tcx>), } pub struct FulfillmentError<'tcx> { @@ -310,6 +313,9 @@ pub enum Vtable<'tcx, N> { /// Same as above, but for a fn pointer type with the given signature. VtableFnPointer(VtableFnPointerData<'tcx, N>), + + /// Vtable automatically generated for a generator + VtableGenerator(VtableGeneratorData<'tcx, N>), } /// Identifies a particular impl in the source, along with a set of @@ -329,6 +335,15 @@ pub struct VtableImplData<'tcx, N> { pub nested: Vec } +#[derive(Clone, PartialEq, Eq)] +pub struct VtableGeneratorData<'tcx, N> { + pub closure_def_id: DefId, + pub substs: ty::ClosureSubsts<'tcx>, + /// Nested obligations. This can be non-empty if the generator + /// signature contains associated types. 
+ pub nested: Vec +} + #[derive(Clone, PartialEq, Eq)] pub struct VtableClosureData<'tcx, N> { pub closure_def_id: DefId, @@ -366,7 +381,7 @@ pub struct VtableObjectData<'tcx, N> { #[derive(Clone, PartialEq, Eq)] pub struct VtableFnPointerData<'tcx, N> { - pub fn_ty: ty::Ty<'tcx>, + pub fn_ty: Ty<'tcx>, pub nested: Vec } @@ -520,9 +535,9 @@ pub fn normalize_param_env_or_error<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, debug!("normalize_param_env_or_error: normalized predicates={:?}", predicates); - let region_maps = RegionMaps::new(); + let region_scope_tree = region::ScopeTree::default(); let free_regions = FreeRegionMap::new(); - infcx.resolve_regions_and_report_errors(region_context, ®ion_maps, &free_regions); + infcx.resolve_regions_and_report_errors(region_context, ®ion_scope_tree, &free_regions); let predicates = match infcx.fully_resolve(&predicates) { Ok(predicates) => predicates, Err(fixup_err) => { @@ -743,6 +758,7 @@ impl<'tcx, N> Vtable<'tcx, N> { VtableBuiltin(i) => i.nested, VtableDefaultImpl(d) => d.nested, VtableClosure(c) => c.nested, + VtableGenerator(c) => c.nested, VtableObject(d) => d.nested, VtableFnPointer(d) => d.nested, } @@ -754,6 +770,7 @@ impl<'tcx, N> Vtable<'tcx, N> { &mut VtableParam(ref mut n) => n, &mut VtableBuiltin(ref mut i) => &mut i.nested, &mut VtableDefaultImpl(ref mut d) => &mut d.nested, + &mut VtableGenerator(ref mut c) => &mut c.nested, &mut VtableClosure(ref mut c) => &mut c.nested, &mut VtableObject(ref mut d) => &mut d.nested, &mut VtableFnPointer(ref mut d) => &mut d.nested, @@ -784,6 +801,11 @@ impl<'tcx, N> Vtable<'tcx, N> { fn_ty: p.fn_ty, nested: p.nested.into_iter().map(f).collect(), }), + VtableGenerator(c) => VtableGenerator(VtableGeneratorData { + closure_def_id: c.closure_def_id, + substs: c.substs, + nested: c.nested.into_iter().map(f).collect(), + }), VtableClosure(c) => VtableClosure(VtableClosureData { closure_def_id: c.closure_def_id, substs: c.substs, @@ -812,6 +834,7 @@ pub fn provide(providers: &mut ty::maps::Providers) { *providers = ty::maps::Providers { is_object_safe: object_safety::is_object_safe_provider, specialization_graph_of: specialize::specialization_graph_provider, + specializes: specialize::specializes, ..*providers }; } @@ -820,6 +843,7 @@ pub fn provide_extern(providers: &mut ty::maps::Providers) { *providers = ty::maps::Providers { is_object_safe: object_safety::is_object_safe_provider, specialization_graph_of: specialize::specialization_graph_provider, + specializes: specialize::specializes, ..*providers }; } diff --git a/src/librustc/traits/object_safety.rs b/src/librustc/traits/object_safety.rs index 9c04c013c4..1e9816095e 100644 --- a/src/librustc/traits/object_safety.rs +++ b/src/librustc/traits/object_safety.rs @@ -169,7 +169,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { ty::Predicate::RegionOutlives(..) | ty::Predicate::ClosureKind(..) | ty::Predicate::Subtype(..) | - ty::Predicate::Equate(..) => { + ty::Predicate::Equate(..) | + ty::Predicate::ConstEvaluatable(..) => { false } } @@ -181,7 +182,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } fn generics_require_sized_self(self, def_id: DefId) -> bool { - let sized_def_id = match self.lang_items.sized_trait() { + let sized_def_id = match self.lang_items().sized_trait() { Some(def_id) => def_id, None => { return false; /* No Sized trait, can't require it! */ } }; @@ -203,7 +204,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { ty::Predicate::WellFormed(..) | ty::Predicate::ObjectSafe(..) | ty::Predicate::ClosureKind(..) 
| - ty::Predicate::TypeOutlives(..) => { + ty::Predicate::TypeOutlives(..) | + ty::Predicate::ConstEvaluatable(..) => { false } } diff --git a/src/librustc/traits/on_unimplemented.rs b/src/librustc/traits/on_unimplemented.rs new file mode 100644 index 0000000000..94f6efcad4 --- /dev/null +++ b/src/librustc/traits/on_unimplemented.rs @@ -0,0 +1,307 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use fmt_macros::{Parser, Piece, Position}; + +use hir::def_id::DefId; +use ty::{self, TyCtxt}; +use util::common::ErrorReported; +use util::nodemap::FxHashMap; + +use syntax::ast::{MetaItem, NestedMetaItem}; +use syntax::attr; +use syntax_pos::Span; +use syntax_pos::symbol::InternedString; + +#[derive(Clone, Debug)] +pub struct OnUnimplementedFormatString(InternedString); + +#[derive(Debug)] +pub struct OnUnimplementedDirective { + pub condition: Option, + pub subcommands: Vec, + pub message: Option, + pub label: Option, +} + +pub struct OnUnimplementedNote { + pub message: Option, + pub label: Option, +} + +impl OnUnimplementedNote { + pub fn empty() -> Self { + OnUnimplementedNote { message: None, label: None } + } +} + +fn parse_error(tcx: TyCtxt, span: Span, + message: &str, + label: &str, + note: Option<&str>) + -> ErrorReported +{ + let mut diag = struct_span_err!( + tcx.sess, span, E0232, "{}", message); + diag.span_label(span, label); + if let Some(note) = note { + diag.note(note); + } + diag.emit(); + ErrorReported +} + +impl<'a, 'gcx, 'tcx> OnUnimplementedDirective { + pub fn parse(tcx: TyCtxt<'a, 'gcx, 'tcx>, + trait_def_id: DefId, + items: &[NestedMetaItem], + span: Span, + is_root: bool) + -> Result + { + let mut errored = false; + let mut item_iter = items.iter(); + + let condition = if is_root { + None + } else { + let cond = item_iter.next().ok_or_else(|| { + parse_error(tcx, span, + "empty `on`-clause in `#[rustc_on_unimplemented]`", + "empty on-clause here", + None) + })?.meta_item().ok_or_else(|| { + parse_error(tcx, span, + "invalid `on`-clause in `#[rustc_on_unimplemented]`", + "invalid on-clause here", + None) + })?; + attr::eval_condition(cond, &tcx.sess.parse_sess, &mut |_| true); + Some(cond.clone()) + }; + + let mut message = None; + let mut label = None; + let mut subcommands = vec![]; + for item in item_iter { + if item.check_name("message") && message.is_none() { + if let Some(message_) = item.value_str() { + message = Some(OnUnimplementedFormatString::try_parse( + tcx, trait_def_id, message_.as_str(), span)?); + continue; + } + } else if item.check_name("label") && label.is_none() { + if let Some(label_) = item.value_str() { + label = Some(OnUnimplementedFormatString::try_parse( + tcx, trait_def_id, label_.as_str(), span)?); + continue; + } + } else if item.check_name("on") && is_root && + message.is_none() && label.is_none() + { + if let Some(items) = item.meta_item_list() { + if let Ok(subcommand) = + Self::parse(tcx, trait_def_id, &items, item.span, false) + { + subcommands.push(subcommand); + } else { + errored = true; + } + continue + } + } + + // nothing found + parse_error(tcx, item.span, + "this attribute must have a valid value", + "expected value here", + Some(r#"eg `#[rustc_on_unimplemented = "foo"]`"#)); + } + + if errored { + Err(ErrorReported) + } 
else { + Ok(OnUnimplementedDirective { condition, message, label, subcommands }) + } + } + + + pub fn of_item(tcx: TyCtxt<'a, 'gcx, 'tcx>, + trait_def_id: DefId, + impl_def_id: DefId) + -> Result, ErrorReported> + { + let attrs = tcx.get_attrs(impl_def_id); + + let attr = if let Some(item) = + attrs.into_iter().find(|a| a.check_name("rustc_on_unimplemented")) + { + item + } else { + return Ok(None); + }; + + let result = if let Some(items) = attr.meta_item_list() { + Self::parse(tcx, trait_def_id, &items, attr.span, true).map(Some) + } else if let Some(value) = attr.value_str() { + Ok(Some(OnUnimplementedDirective { + condition: None, + message: None, + subcommands: vec![], + label: Some(OnUnimplementedFormatString::try_parse( + tcx, trait_def_id, value.as_str(), attr.span)?) + })) + } else { + return Err(parse_error(tcx, attr.span, + "`#[rustc_on_unimplemented]` requires a value", + "value required here", + Some(r#"eg `#[rustc_on_unimplemented = "foo"]`"#))); + }; + debug!("of_item({:?}/{:?}) = {:?}", trait_def_id, impl_def_id, result); + result + } + + pub fn evaluate(&self, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + trait_ref: ty::TraitRef<'tcx>, + options: &[(&str, Option<&str>)]) + -> OnUnimplementedNote + { + let mut message = None; + let mut label = None; + info!("evaluate({:?}, trait_ref={:?}, options={:?})", + self, trait_ref, options); + + for command in self.subcommands.iter().chain(Some(self)).rev() { + if let Some(ref condition) = command.condition { + if !attr::eval_condition(condition, &tcx.sess.parse_sess, &mut |c| { + options.contains(&(&c.name().as_str(), + match c.value_str().map(|s| s.as_str()) { + Some(ref s) => Some(s), + None => None + })) + }) { + debug!("evaluate: skipping {:?} due to condition", command); + continue + } + } + debug!("evaluate: {:?} succeeded", command); + if let Some(ref message_) = command.message { + message = Some(message_.clone()); + } + + if let Some(ref label_) = command.label { + label = Some(label_.clone()); + } + } + + OnUnimplementedNote { + label: label.map(|l| l.format(tcx, trait_ref)), + message: message.map(|m| m.format(tcx, trait_ref)) + } + } +} + +impl<'a, 'gcx, 'tcx> OnUnimplementedFormatString { + pub fn try_parse(tcx: TyCtxt<'a, 'gcx, 'tcx>, + trait_def_id: DefId, + from: InternedString, + err_sp: Span) + -> Result + { + let result = OnUnimplementedFormatString(from); + result.verify(tcx, trait_def_id, err_sp)?; + Ok(result) + } + + fn verify(&self, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + trait_def_id: DefId, + span: Span) + -> Result<(), ErrorReported> + { + let name = tcx.item_name(trait_def_id); + let generics = tcx.generics_of(trait_def_id); + let parser = Parser::new(&self.0); + let types = &generics.types; + let mut result = Ok(()); + for token in parser { + match token { + Piece::String(_) => (), // Normal string, no need to check it + Piece::NextArgument(a) => match a.position { + // `{Self}` is allowed + Position::ArgumentNamed(s) if s == "Self" => (), + // `{ThisTraitsName}` is allowed + Position::ArgumentNamed(s) if s == name => (), + // So is `{A}` if A is a type parameter + Position::ArgumentNamed(s) => match types.iter().find(|t| { + t.name == s + }) { + Some(_) => (), + None => { + span_err!(tcx.sess, span, E0230, + "there is no type parameter \ + {} on trait {}", + s, name); + result = Err(ErrorReported); + } + }, + // `{:1}` and `{}` are not to be used + Position::ArgumentIs(_) => { + span_err!(tcx.sess, span, E0231, + "only named substitution \ + parameters are allowed"); + result = Err(ErrorReported); + } + } + } + } + + 
result + } + + pub fn format(&self, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + trait_ref: ty::TraitRef<'tcx>) + -> String + { + let name = tcx.item_name(trait_ref.def_id); + let trait_str = tcx.item_path_str(trait_ref.def_id); + let generics = tcx.generics_of(trait_ref.def_id); + let generic_map = generics.types.iter().map(|param| { + (param.name.as_str().to_string(), + trait_ref.substs.type_for_def(param).to_string()) + }).collect::>(); + + let parser = Parser::new(&self.0); + parser.map(|p| { + match p { + Piece::String(s) => s, + Piece::NextArgument(a) => match a.position { + Position::ArgumentNamed(s) => match generic_map.get(s) { + Some(val) => val, + None if s == name => { + &trait_str + } + None => { + bug!("broken on_unimplemented {:?} for {:?}: \ + no argument matching {:?}", + self.0, trait_ref, s) + } + }, + _ => { + bug!("broken on_unimplemented {:?} - bad \ + format arg", self.0) + } + } + } + }).collect() + } +} diff --git a/src/librustc/traits/project.rs b/src/librustc/traits/project.rs index 3fb615e29c..0fa1b32ceb 100644 --- a/src/librustc/traits/project.rs +++ b/src/librustc/traits/project.rs @@ -19,6 +19,7 @@ use super::PredicateObligation; use super::SelectionContext; use super::SelectionError; use super::VtableClosureData; +use super::VtableGeneratorData; use super::VtableFnPointerData; use super::VtableImplData; use super::util; @@ -26,10 +27,10 @@ use super::util; use hir::def_id::DefId; use infer::{InferCtxt, InferOk}; use infer::type_variable::TypeVariableOrigin; +use middle::const_val::ConstVal; use rustc_data_structures::snapshot_map::{Snapshot, SnapshotMap}; -use syntax::ast; use syntax::symbol::Symbol; -use ty::subst::Subst; +use ty::subst::{Subst, Substs}; use ty::{self, ToPredicate, ToPolyTraitRef, Ty, TyCtxt}; use ty::fold::{TypeFoldable, TypeFolder}; use util::common::FN_OUTPUT_NAME; @@ -259,7 +260,7 @@ impl<'a, 'b, 'gcx, 'tcx> AssociatedTypeNormalizer<'a, 'b, 'gcx, 'tcx> { fn fold>(&mut self, value: &T) -> T { let value = self.selcx.infcx().resolve_type_vars_if_possible(value); - if !value.has_projection_types() { + if !value.has_projections() { value.clone() } else { value.fold_with(self) @@ -331,6 +332,39 @@ impl<'a, 'b, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for AssociatedTypeNormalizer<'a, } } } + + fn fold_const(&mut self, constant: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> { + if let ConstVal::Unevaluated(def_id, substs) = constant.val { + if substs.needs_infer() { + let identity_substs = Substs::identity_for_item(self.tcx(), def_id); + let data = self.param_env.and((def_id, identity_substs)); + match self.tcx().lift_to_global(&data) { + Some(data) => { + match self.tcx().const_eval(data) { + Ok(evaluated) => { + let evaluated = evaluated.subst(self.tcx(), substs); + return self.fold_const(evaluated); + } + Err(_) => {} + } + } + None => {} + } + } else { + let data = self.param_env.and((def_id, substs)); + match self.tcx().lift_to_global(&data) { + Some(data) => { + match self.tcx().const_eval(data) { + Ok(evaluated) => return self.fold_const(evaluated), + Err(_) => {} + } + } + None => {} + } + } + } + constant + } } #[derive(Clone)] @@ -505,9 +539,7 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( let obligation = Obligation::with_depth(cause.clone(), depth, param_env, projection_ty); match project_type(selcx, &obligation) { - Ok(ProjectedTy::Progress(Progress { ty: projected_ty, - mut obligations, - cacheable })) => { + Ok(ProjectedTy::Progress(Progress { ty: projected_ty, mut obligations })) => { // if projection succeeded, then what we get out 
of this // is also non-normalized (consider: it was derived from // an impl, where-clause etc) and hence we must @@ -516,14 +548,12 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( debug!("opt_normalize_projection_type: \ projected_ty={:?} \ depth={} \ - obligations={:?} \ - cacheable={:?}", + obligations={:?}", projected_ty, depth, - obligations, - cacheable); + obligations); - let result = if projected_ty.has_projection_types() { + let result = if projected_ty.has_projections() { let mut normalizer = AssociatedTypeNormalizer::new(selcx, param_env, cause, @@ -548,7 +578,7 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( }; let cache_value = prune_cache_value_obligations(infcx, &result); - infcx.projection_cache.borrow_mut().insert_ty(cache_key, cache_value, cacheable); + infcx.projection_cache.borrow_mut().insert_ty(cache_key, cache_value); Some(result) } @@ -560,7 +590,7 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( value: projected_ty, obligations: vec![] }; - infcx.projection_cache.borrow_mut().insert_ty(cache_key, result.clone(), true); + infcx.projection_cache.borrow_mut().insert_ty(cache_key, result.clone()); Some(result) } Err(ProjectionTyError::TooManyCandidates) => { @@ -709,7 +739,6 @@ enum ProjectedTy<'tcx> { struct Progress<'tcx> { ty: Ty<'tcx>, obligations: Vec>, - cacheable: bool, } impl<'tcx> Progress<'tcx> { @@ -717,7 +746,6 @@ impl<'tcx> Progress<'tcx> { Progress { ty: tcx.types.err, obligations: vec![], - cacheable: true } } @@ -987,6 +1015,7 @@ fn assemble_candidates_from_impls<'cx, 'gcx, 'tcx>( match vtable { super::VtableClosure(_) | + super::VtableGenerator(_) | super::VtableFnPointer(_) | super::VtableObject(_) => { debug!("assemble_candidates_from_impls: vtable={:?}", @@ -1014,10 +1043,9 @@ fn assemble_candidates_from_impls<'cx, 'gcx, 'tcx>( // In either case, we handle this by not adding a // candidate for an impl if it contains a `default` // type. 
- let item_name = selcx.tcx().associated_item(obligation.predicate.item_def_id).name; let node_item = assoc_ty_def(selcx, impl_data.impl_def_id, - item_name); + obligation.predicate.item_def_id); let is_default = if node_item.node.is_from_trait() { // If true, the impl inherited a `type Foo = Bar` @@ -1146,6 +1174,8 @@ fn confirm_select_candidate<'cx, 'gcx, 'tcx>( match vtable { super::VtableImpl(data) => confirm_impl_candidate(selcx, obligation, data), + super::VtableGenerator(data) => + confirm_generator_candidate(selcx, obligation, data), super::VtableClosure(data) => confirm_closure_candidate(selcx, obligation, data), super::VtableFnPointer(data) => @@ -1228,6 +1258,60 @@ fn confirm_object_candidate<'cx, 'gcx, 'tcx>( confirm_param_env_candidate(selcx, obligation, env_predicate) } +fn confirm_generator_candidate<'cx, 'gcx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, + obligation: &ProjectionTyObligation<'tcx>, + vtable: VtableGeneratorData<'tcx, PredicateObligation<'tcx>>) + -> Progress<'tcx> +{ + let gen_sig = selcx.infcx().generator_sig(vtable.closure_def_id).unwrap() + .subst(selcx.tcx(), vtable.substs.substs); + let Normalized { + value: gen_sig, + obligations + } = normalize_with_depth(selcx, + obligation.param_env, + obligation.cause.clone(), + obligation.recursion_depth+1, + &gen_sig); + + debug!("confirm_generator_candidate: obligation={:?},gen_sig={:?},obligations={:?}", + obligation, + gen_sig, + obligations); + + let tcx = selcx.tcx(); + + let gen_def_id = tcx.lang_items().gen_trait().unwrap(); + + // Note: we unwrap the binder here but re-create it below (1) + let ty::Binder((trait_ref, yield_ty, return_ty)) = + tcx.generator_trait_ref_and_outputs(gen_def_id, + obligation.predicate.self_ty(), + gen_sig); + + let name = tcx.associated_item(obligation.predicate.item_def_id).name; + let ty = if name == Symbol::intern("Return") { + return_ty + } else if name == Symbol::intern("Yield") { + yield_ty + } else { + bug!() + }; + + let predicate = ty::Binder(ty::ProjectionPredicate { // (1) recreate binder here + projection_ty: ty::ProjectionTy { + substs: trait_ref.substs, + item_def_id: obligation.predicate.item_def_id, + }, + ty: ty + }); + + confirm_param_env_candidate(selcx, obligation, predicate) + .with_addl_obligations(vtable.nested) + .with_addl_obligations(obligations) +} + fn confirm_fn_pointer_candidate<'cx, 'gcx, 'tcx>( selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, obligation: &ProjectionTyObligation<'tcx>, @@ -1295,7 +1379,7 @@ fn confirm_callable_candidate<'cx, 'gcx, 'tcx>( fn_sig); // the `Output` associated type is declared on `FnOnce` - let fn_once_def_id = tcx.lang_items.fn_once_trait().unwrap(); + let fn_once_def_id = tcx.lang_items().fn_once_trait().unwrap(); // Note: we unwrap the binder here but re-create it below (1) let ty::Binder((trait_ref, ret_type)) = @@ -1331,7 +1415,6 @@ fn confirm_param_env_candidate<'cx, 'gcx, 'tcx>( Progress { ty: ty_match.value, obligations, - cacheable: ty_match.unconstrained_regions.is_empty(), } } Err(e) => { @@ -1356,8 +1439,7 @@ fn confirm_impl_candidate<'cx, 'gcx, 'tcx>( let tcx = selcx.tcx(); let param_env = obligation.param_env; - let assoc_ty = assoc_ty_def(selcx, impl_def_id, - tcx.associated_item(obligation.predicate.item_def_id).name); + let assoc_ty = assoc_ty_def(selcx, impl_def_id, obligation.predicate.item_def_id); let ty = if !assoc_ty.item.defaultness.has_value() { // This means that the impl is missing a definition for the @@ -1375,7 +1457,6 @@ fn confirm_impl_candidate<'cx, 'gcx, 'tcx>( Progress { ty: 
ty.subst(tcx, substs), obligations: nested, - cacheable: true } } @@ -1387,10 +1468,11 @@ fn confirm_impl_candidate<'cx, 'gcx, 'tcx>( fn assoc_ty_def<'cx, 'gcx, 'tcx>( selcx: &SelectionContext<'cx, 'gcx, 'tcx>, impl_def_id: DefId, - assoc_ty_name: ast::Name) + assoc_ty_def_id: DefId) -> specialization_graph::NodeItem { let tcx = selcx.tcx(); + let assoc_ty_name = tcx.associated_item(assoc_ty_def_id).name; let trait_def_id = tcx.impl_trait_ref(impl_def_id).unwrap().def_id; let trait_def = tcx.trait_def(trait_def_id); @@ -1402,7 +1484,8 @@ fn assoc_ty_def<'cx, 'gcx, 'tcx>( // cycle error if the specialization graph is currently being built. let impl_node = specialization_graph::Node::Impl(impl_def_id); for item in impl_node.items(tcx) { - if item.kind == ty::AssociatedKind::Type && item.name == assoc_ty_name { + if item.kind == ty::AssociatedKind::Type && + tcx.hygienic_eq(item.name, assoc_ty_name, trait_def_id) { return specialization_graph::NodeItem { node: specialization_graph::Node::Impl(impl_def_id), item, @@ -1412,7 +1495,7 @@ fn assoc_ty_def<'cx, 'gcx, 'tcx>( if let Some(assoc_item) = trait_def .ancestors(tcx, impl_def_id) - .defs(tcx, assoc_ty_name, ty::AssociatedKind::Type) + .defs(tcx, assoc_ty_name, ty::AssociatedKind::Type, trait_def_id) .next() { assoc_item } else { @@ -1537,20 +1620,11 @@ impl<'tcx> ProjectionCache<'tcx> { Ok(()) } - /// Indicates that `key` was normalized to `value`. If `cacheable` is false, - /// then this result is sadly not cacheable (this only occurs in weird - /// buggy cases, like #38714). - fn insert_ty(&mut self, - key: ProjectionCacheKey<'tcx>, - value: NormalizedTy<'tcx>, - cacheable: bool) { + /// Indicates that `key` was normalized to `value`. + fn insert_ty(&mut self, key: ProjectionCacheKey<'tcx>, value: NormalizedTy<'tcx>) { debug!("ProjectionCacheEntry::insert_ty: adding cache entry: key={:?}, value={:?}", key, value); - let fresh_key = if cacheable { - self.map.insert(key, ProjectionCacheEntry::NormalizedTy(value)) - } else { - !self.map.remove(key) - }; + let fresh_key = self.map.insert(key, ProjectionCacheEntry::NormalizedTy(value)); assert!(!fresh_key, "never started projecting `{:?}`", key); } diff --git a/src/librustc/traits/select.rs b/src/librustc/traits/select.rs index 551bfb5db4..00f0672822 100644 --- a/src/librustc/traits/select.rs +++ b/src/librustc/traits/select.rs @@ -24,9 +24,9 @@ use super::{ObjectCastObligation, Obligation}; use super::TraitNotObjectSafe; use super::Selection; use super::SelectionResult; -use super::{VtableBuiltin, VtableImpl, VtableParam, VtableClosure, +use super::{VtableBuiltin, VtableImpl, VtableParam, VtableClosure, VtableGenerator, VtableFnPointer, VtableObject, VtableDefaultImpl}; -use super::{VtableImplData, VtableObjectData, VtableBuiltinData, +use super::{VtableImplData, VtableObjectData, VtableBuiltinData, VtableGeneratorData, VtableClosureData, VtableDefaultImplData, VtableFnPointerData}; use super::util; @@ -36,13 +36,13 @@ use infer; use infer::{InferCtxt, InferOk, TypeFreshener}; use ty::subst::{Kind, Subst, Substs}; use ty::{self, ToPredicate, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable}; -use traits; use ty::fast_reject; use ty::relate::TypeRelation; use middle::lang_items; use rustc_data_structures::bitvec::BitVector; use rustc_data_structures::snapshot_vec::{SnapshotVecDelegate, SnapshotVec}; +use std::iter; use std::cell::RefCell; use std::cmp; use std::fmt; @@ -90,6 +90,45 @@ pub struct SelectionContext<'cx, 'gcx: 'cx+'tcx, 'tcx: 'cx> { intercrate: bool, inferred_obligations: SnapshotVec>, + + 
intercrate_ambiguity_causes: Vec, +} + +#[derive(Clone)] +pub enum IntercrateAmbiguityCause { + DownstreamCrate { + trait_desc: String, + self_desc: Option, + }, + UpstreamCrateUpdate { + trait_desc: String, + self_desc: Option, + }, +} + +impl IntercrateAmbiguityCause { + /// Emits notes when the overlap is caused by complex intercrate ambiguities. + /// See #23980 for details. + pub fn add_intercrate_ambiguity_hint<'a, 'tcx>(&self, + err: &mut ::errors::DiagnosticBuilder) { + match self { + &IntercrateAmbiguityCause::DownstreamCrate { ref trait_desc, ref self_desc } => { + let self_desc = if let &Some(ref ty) = self_desc { + format!(" for type `{}`", ty) + } else { "".to_string() }; + err.note(&format!("downstream crates may implement trait `{}`{}", + trait_desc, self_desc)); + } + &IntercrateAmbiguityCause::UpstreamCrateUpdate { ref trait_desc, ref self_desc } => { + let self_desc = if let &Some(ref ty) = self_desc { + format!(" for type `{}`", ty) + } else { "".to_string() }; + err.note(&format!("upstream crates may add new impl of trait `{}`{} \ + in future versions", + trait_desc, self_desc)); + } + } + } } // A stack that walks back up the stack frame. @@ -193,9 +232,12 @@ enum SelectionCandidate<'tcx> { ProjectionCandidate, /// Implementation of a `Fn`-family trait by one of the anonymous types - /// generated for a `||` expression. The ty::ClosureKind informs the - /// confirmation step what ClosureKind obligation to emit. - ClosureCandidate(/* closure */ DefId, ty::ClosureSubsts<'tcx>, ty::ClosureKind), + /// generated for a `||` expression. + ClosureCandidate, + + /// Implementation of a `Generator` trait by one of the anonymous types + /// generated for a generator. + GeneratorCandidate, /// Implementation of a `Fn`-family trait by one of the anonymous /// types generated for a fn pointer type (e.g., `fn(int)->int`) @@ -224,15 +266,12 @@ impl<'a, 'tcx> ty::Lift<'tcx> for SelectionCandidate<'a> { ObjectCandidate => ObjectCandidate, BuiltinObjectCandidate => BuiltinObjectCandidate, BuiltinUnsizeCandidate => BuiltinUnsizeCandidate, + ClosureCandidate => ClosureCandidate, + GeneratorCandidate => GeneratorCandidate, ParamCandidate(ref trait_ref) => { return tcx.lift(trait_ref).map(ParamCandidate); } - ClosureCandidate(def_id, ref substs, kind) => { - return tcx.lift(substs).map(|substs| { - ClosureCandidate(def_id, substs, kind) - }); - } }) } } @@ -380,6 +419,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { freshener: infcx.freshener(), intercrate: false, inferred_obligations: SnapshotVec::new(), + intercrate_ambiguity_causes: Vec::new(), } } @@ -389,6 +429,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { freshener: infcx.freshener(), intercrate: true, inferred_obligations: SnapshotVec::new(), + intercrate_ambiguity_causes: Vec::new(), } } @@ -404,6 +445,10 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { self.infcx } + pub fn intercrate_ambiguity_causes(&self) -> &[IntercrateAmbiguityCause] { + &self.intercrate_ambiguity_causes + } + /// Wraps the inference context's in_snapshot s.t. snapshot handling is only from the selection /// context's self. 
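`add_intercrate_ambiguity_hint` above only attaches one of two note strings to an existing diagnostic. A free-standing sketch of the same wording, runnable outside the compiler; the trait and type names used in `main` are invented examples:

```rust
// Standalone sketch of the note text the two new variants produce; the real
// code writes these strings through a DiagnosticBuilder instead of returning them.
enum IntercrateAmbiguityCause {
    DownstreamCrate { trait_desc: String, self_desc: Option<String> },
    UpstreamCrateUpdate { trait_desc: String, self_desc: Option<String> },
}

impl IntercrateAmbiguityCause {
    fn note(&self) -> String {
        let suffix = |d: &Option<String>| d.as_ref()
            .map(|ty| format!(" for type `{}`", ty))
            .unwrap_or_default();
        match *self {
            IntercrateAmbiguityCause::DownstreamCrate { ref trait_desc, ref self_desc } =>
                format!("downstream crates may implement trait `{}`{}",
                        trait_desc, suffix(self_desc)),
            IntercrateAmbiguityCause::UpstreamCrateUpdate { ref trait_desc, ref self_desc } =>
                format!("upstream crates may add new impl of trait `{}`{} in future versions",
                        trait_desc, suffix(self_desc)),
        }
    }
}

fn main() {
    let cause = IntercrateAmbiguityCause::DownstreamCrate {
        trait_desc: "std::fmt::Display".to_string(),
        self_desc: Some("MyType".to_string()),
    };
    println!("note: {}", cause.note());
}
```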
fn in_snapshot(&mut self, f: F) -> R @@ -687,6 +732,21 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } } } + + ty::Predicate::ConstEvaluatable(def_id, substs) => { + match self.tcx().lift_to_global(&(obligation.param_env, substs)) { + Some((param_env, substs)) => { + match self.tcx().const_eval(param_env.and((def_id, substs))) { + Ok(_) => EvaluatedToOk, + Err(_) => EvaluatedToErr + } + } + None => { + // Inference variables still left in param_env or substs. + EvaluatedToAmbig + } + } + } } } @@ -757,6 +817,22 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { if unbound_input_types && self.intercrate { debug!("evaluate_stack({:?}) --> unbound argument, intercrate --> ambiguous", stack.fresh_trait_ref); + // Heuristics: show the diagnostics when there are no candidates in crate. + if let Ok(candidate_set) = self.assemble_candidates(stack) { + if !candidate_set.ambiguous && candidate_set.vec.is_empty() { + let trait_ref = stack.obligation.predicate.skip_binder().trait_ref; + let self_ty = trait_ref.self_ty(); + let cause = IntercrateAmbiguityCause::DownstreamCrate { + trait_desc: trait_ref.to_string(), + self_desc: if self_ty.has_concrete_skeleton() { + Some(self_ty.to_string()) + } else { + None + }, + }; + self.intercrate_ambiguity_causes.push(cause); + } + } return EvaluatedToAmbig; } if unbound_input_types && @@ -1003,6 +1079,25 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { if !self.is_knowable(stack) { debug!("coherence stage: not knowable"); + // Heuristics: show the diagnostics when there are no candidates in crate. + let candidate_set = self.assemble_candidates(stack)?; + if !candidate_set.ambiguous && candidate_set.vec.is_empty() { + let trait_ref = stack.obligation.predicate.skip_binder().trait_ref; + let self_ty = trait_ref.self_ty(); + let trait_desc = trait_ref.to_string(); + let self_desc = if self_ty.has_concrete_skeleton() { + Some(self_ty.to_string()) + } else { + None + }; + let cause = if !coherence::trait_ref_is_local_or_fundamental(self.tcx(), + trait_ref) { + IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_desc, self_desc } + } else { + IntercrateAmbiguityCause::DownstreamCrate { trait_desc, self_desc } + }; + self.intercrate_ambiguity_causes.push(cause); + } return Ok(None); } @@ -1124,7 +1219,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // ok to skip binder because of the nature of the // trait-ref-is-knowable check, which does not care about // bound regions - let trait_ref = &predicate.skip_binder().trait_ref; + let trait_ref = predicate.skip_binder().trait_ref; coherence::trait_ref_is_knowable(self.tcx(), trait_ref) } @@ -1235,7 +1330,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // and applicable impls. There is a certain set of precedence rules here. let def_id = obligation.predicate.def_id(); - if self.tcx().lang_items.copy_trait() == Some(def_id) { + let lang_items = self.tcx().lang_items(); + if lang_items.copy_trait() == Some(def_id) { debug!("obligation self ty is {:?}", obligation.predicate.0.self_ty()); @@ -1244,25 +1340,26 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { self.assemble_candidates_from_impls(obligation, &mut candidates)?; // For other types, we'll use the builtin rules. 
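The `copy_clone_conditions` rename in the following hunk reflects the comment kept there: every type with builtin `Copy` support also gets builtin `Clone` support, and tuples and arrays of `Clone` types get builtin `Clone` support as well. A tiny stand-alone example of the user-visible behavior that comment describes (whether a given case is resolved through the builtin candidate or a library impl is beside the point here):

```rust
// Tuples of Clone types and arrays of Clone elements are themselves Clone.
#[derive(Clone)]
struct NotCopy(String);

fn main() {
    let t = (NotCopy("a".into()), 1u8);
    let _t2 = t.clone();   // tuple of Clone types is Clone
    let a = [1u32; 4];
    let _a2 = a.clone();   // array of Clone elements (Copy here) is Clone
}
```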
- let copy_conditions = self.copy_conditions(obligation); + let copy_conditions = self.copy_clone_conditions(obligation); self.assemble_builtin_bound_candidates(copy_conditions, &mut candidates)?; - } else if self.tcx().lang_items.sized_trait() == Some(def_id) { + } else if lang_items.sized_trait() == Some(def_id) { // Sized is never implementable by end-users, it is // always automatically computed. let sized_conditions = self.sized_conditions(obligation); self.assemble_builtin_bound_candidates(sized_conditions, &mut candidates)?; - } else if self.tcx().lang_items.unsize_trait() == Some(def_id) { + } else if lang_items.unsize_trait() == Some(def_id) { self.assemble_candidates_for_unsizing(obligation, &mut candidates); } else { - if self.tcx().lang_items.clone_trait() == Some(def_id) { - // Same builtin conditions as `Copy`, i.e. every type which has builtin support - // for `Copy` also has builtin support for `Clone`, + tuples and arrays of `Clone` - // types have builtin support for `Clone`. - let clone_conditions = self.copy_conditions(obligation); - self.assemble_builtin_bound_candidates(clone_conditions, &mut candidates)?; - } - + if lang_items.clone_trait() == Some(def_id) { + // Same builtin conditions as `Copy`, i.e. every type which has builtin support + // for `Copy` also has builtin support for `Clone`, + tuples and arrays of `Clone` + // types have builtin support for `Clone`. + let clone_conditions = self.copy_clone_conditions(obligation); + self.assemble_builtin_bound_candidates(clone_conditions, &mut candidates)?; + } + + self.assemble_generator_candidates(obligation, &mut candidates)?; self.assemble_closure_candidates(obligation, &mut candidates)?; self.assemble_fn_pointer_candidates(obligation, &mut candidates)?; self.assemble_candidates_from_impls(obligation, &mut candidates)?; @@ -1447,6 +1544,37 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { }) } + fn assemble_generator_candidates(&mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>) + -> Result<(),SelectionError<'tcx>> + { + if self.tcx().lang_items().gen_trait() != Some(obligation.predicate.def_id()) { + return Ok(()); + } + + // ok to skip binder because the substs on generator types never + // touch bound regions, they just capture the in-scope + // type/region parameters + let self_ty = *obligation.self_ty().skip_binder(); + match self_ty.sty { + ty::TyGenerator(..) => { + debug!("assemble_generator_candidates: self_ty={:?} obligation={:?}", + self_ty, + obligation); + + candidates.vec.push(GeneratorCandidate); + Ok(()) + } + ty::TyInfer(ty::TyVar(_)) => { + debug!("assemble_generator_candidates: ambiguous self-type"); + candidates.ambiguous = true; + return Ok(()); + } + _ => { return Ok(()); } + } + } + /// Check for the artificial impl that the compiler will create for an obligation like `X : /// FnMut<..>` where `X` is a closure type. 
/// @@ -1458,7 +1586,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { candidates: &mut SelectionCandidateSet<'tcx>) -> Result<(),SelectionError<'tcx>> { - let kind = match self.tcx().lang_items.fn_trait_kind(obligation.predicate.0.def_id()) { + let kind = match self.tcx().lang_items().fn_trait_kind(obligation.predicate.0.def_id()) { Some(k) => k, None => { return Ok(()); } }; @@ -1466,36 +1594,31 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // ok to skip binder because the substs on closure types never // touch bound regions, they just capture the in-scope // type/region parameters - let self_ty = *obligation.self_ty().skip_binder(); - let (closure_def_id, substs) = match self_ty.sty { - ty::TyClosure(id, substs) => (id, substs), + match obligation.self_ty().skip_binder().sty { + ty::TyClosure(closure_def_id, _) => { + debug!("assemble_unboxed_candidates: kind={:?} obligation={:?}", + kind, obligation); + match self.infcx.closure_kind(closure_def_id) { + Some(closure_kind) => { + debug!("assemble_unboxed_candidates: closure_kind = {:?}", closure_kind); + if closure_kind.extends(kind) { + candidates.vec.push(ClosureCandidate); + } + } + None => { + debug!("assemble_unboxed_candidates: closure_kind not yet known"); + candidates.vec.push(ClosureCandidate); + } + }; + Ok(()) + } ty::TyInfer(ty::TyVar(_)) => { debug!("assemble_unboxed_closure_candidates: ambiguous self-type"); candidates.ambiguous = true; return Ok(()); } _ => { return Ok(()); } - }; - - debug!("assemble_unboxed_candidates: self_ty={:?} kind={:?} obligation={:?}", - self_ty, - kind, - obligation); - - match self.infcx.closure_kind(closure_def_id) { - Some(closure_kind) => { - debug!("assemble_unboxed_candidates: closure_kind = {:?}", closure_kind); - if closure_kind.extends(kind) { - candidates.vec.push(ClosureCandidate(closure_def_id, substs, kind)); - } - } - None => { - debug!("assemble_unboxed_candidates: closure_kind not yet known"); - candidates.vec.push(ClosureCandidate(closure_def_id, substs, kind)); - } } - - Ok(()) } /// Implement one of the `Fn()` family for a fn pointer. @@ -1505,7 +1628,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { -> Result<(),SelectionError<'tcx>> { // We provide impl of all fn traits for fn pointers. - if self.tcx().lang_items.fn_trait_kind(obligation.predicate.def_id()).is_none() { + if self.tcx().lang_items().fn_trait_kind(obligation.predicate.def_id()).is_none() { return Ok(()); } @@ -1812,7 +1935,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { when there are other valid candidates"); } ImplCandidate(..) | - ClosureCandidate(..) | + ClosureCandidate | + GeneratorCandidate | FnPointerCandidate | BuiltinObjectCandidate | BuiltinUnsizeCandidate | @@ -1836,7 +1960,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { if other.evaluation == EvaluatedToOk { if let ImplCandidate(victim_def) = victim.candidate { let tcx = self.tcx().global_tcx(); - return traits::specializes(tcx, other_def, victim_def) || + return tcx.specializes((other_def, victim_def)) || tcx.impls_are_allowed_to_overlap(other_def, victim_def); } } @@ -1893,7 +2017,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { ty::TyInfer(ty::IntVar(_)) | ty::TyInfer(ty::FloatVar(_)) | ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) | ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyRawPtr(..) | - ty::TyChar | ty::TyRef(..) | + ty::TyChar | ty::TyRef(..) | ty::TyGenerator(..) | ty::TyArray(..) | ty::TyClosure(..) 
| ty::TyNever | ty::TyError => { // safe for everything @@ -1926,7 +2050,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } } - fn copy_conditions(&mut self, obligation: &TraitObligation<'tcx>) + fn copy_clone_conditions(&mut self, obligation: &TraitObligation<'tcx>) -> BuiltinImplConditions<'tcx> { // NOTE: binder moved to (*) @@ -1944,8 +2068,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { Where(ty::Binder(Vec::new())) } - ty::TyDynamic(..) | ty::TyStr | ty::TySlice(..) | - ty::TyClosure(..) | + ty::TyDynamic(..) | ty::TyStr | ty::TySlice(..) | ty::TyGenerator(..) | ty::TyRef(_, ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => { Never } @@ -1960,6 +2083,22 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { Where(ty::Binder(tys.to_vec())) } + ty::TyClosure(def_id, substs) => { + let trait_id = obligation.predicate.def_id(); + let copy_closures = + Some(trait_id) == self.tcx().lang_items().copy_trait() && + self.tcx().has_copy_closures(def_id.krate); + let clone_closures = + Some(trait_id) == self.tcx().lang_items().clone_trait() && + self.tcx().has_clone_closures(def_id.krate); + + if copy_closures || clone_closures { + Where(ty::Binder(substs.upvar_tys(def_id, self.tcx()).collect())) + } else { + Never + } + } + ty::TyAdt(..) | ty::TyProjection(..) | ty::TyParam(..) | ty::TyAnon(..) => { // Fallback to whatever user-defined impls exist in this case. None @@ -2046,6 +2185,11 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { substs.upvar_tys(def_id, self.tcx()).collect() } + ty::TyGenerator(def_id, ref substs, interior) => { + let witness = iter::once(interior.witness); + substs.upvar_tys(def_id, self.tcx()).chain(witness).collect() + } + // for `PhantomData`, we pass `T` ty::TyAdt(def, substs) if def.is_phantom_data() => { substs.types().collect() @@ -2149,12 +2293,16 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { Ok(VtableImpl(self.confirm_impl_candidate(obligation, impl_def_id))) } - ClosureCandidate(closure_def_id, substs, kind) => { - let vtable_closure = - self.confirm_closure_candidate(obligation, closure_def_id, substs, kind)?; + ClosureCandidate => { + let vtable_closure = self.confirm_closure_candidate(obligation)?; Ok(VtableClosure(vtable_closure)) } + GeneratorCandidate => { + let vtable_generator = self.confirm_generator_candidate(obligation)?; + Ok(VtableGenerator(vtable_generator)) + } + BuiltinObjectCandidate => { // This indicates something like `(Trait+Send) : // Send`. 
In this case, we know that this holds @@ -2229,17 +2377,18 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { debug!("confirm_builtin_candidate({:?}, {:?})", obligation, has_nested); + let lang_items = self.tcx().lang_items(); let obligations = if has_nested { let trait_def = obligation.predicate.def_id(); let conditions = match trait_def { - _ if Some(trait_def) == self.tcx().lang_items.sized_trait() => { + _ if Some(trait_def) == lang_items.sized_trait() => { self.sized_conditions(obligation) } - _ if Some(trait_def) == self.tcx().lang_items.copy_trait() => { - self.copy_conditions(obligation) + _ if Some(trait_def) == lang_items.copy_trait() => { + self.copy_clone_conditions(obligation) } - _ if Some(trait_def) == self.tcx().lang_items.clone_trait() => { - self.copy_conditions(obligation) + _ if Some(trait_def) == lang_items.clone_trait() => { + self.copy_clone_conditions(obligation) } _ => bug!("unexpected builtin trait {:?}", trait_def) }; @@ -2487,23 +2636,84 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { Ok(VtableFnPointerData { fn_ty: self_ty, nested: obligations }) } - fn confirm_closure_candidate(&mut self, - obligation: &TraitObligation<'tcx>, - closure_def_id: DefId, - substs: ty::ClosureSubsts<'tcx>, - kind: ty::ClosureKind) - -> Result>, + fn confirm_generator_candidate(&mut self, + obligation: &TraitObligation<'tcx>) + -> Result>, SelectionError<'tcx>> { - debug!("confirm_closure_candidate({:?},{:?},{:?})", + // ok to skip binder because the substs on generator types never + // touch bound regions, they just capture the in-scope + // type/region parameters + let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder()); + let (closure_def_id, substs) = match self_ty.sty { + ty::TyGenerator(id, substs, _) => (id, substs), + _ => bug!("closure candidate for non-closure {:?}", obligation) + }; + + debug!("confirm_generator_candidate({:?},{:?},{:?})", obligation, closure_def_id, substs); + let trait_ref = + self.generator_trait_ref_unnormalized(obligation, closure_def_id, substs); + let Normalized { + value: trait_ref, + obligations + } = normalize_with_depth(self, + obligation.param_env, + obligation.cause.clone(), + obligation.recursion_depth+1, + &trait_ref); + + debug!("confirm_generator_candidate(closure_def_id={:?}, trait_ref={:?}, obligations={:?})", + closure_def_id, + trait_ref, + obligations); + + self.confirm_poly_trait_refs(obligation.cause.clone(), + obligation.param_env, + obligation.predicate.to_poly_trait_ref(), + trait_ref)?; + + Ok(VtableGeneratorData { + closure_def_id: closure_def_id, + substs: substs.clone(), + nested: obligations + }) + } + + fn confirm_closure_candidate(&mut self, + obligation: &TraitObligation<'tcx>) + -> Result>, + SelectionError<'tcx>> + { + debug!("confirm_closure_candidate({:?})", obligation); + + let kind = match self.tcx().lang_items().fn_trait_kind(obligation.predicate.0.def_id()) { + Some(k) => k, + None => bug!("closure candidate for non-fn trait {:?}", obligation) + }; + + // ok to skip binder because the substs on closure types never + // touch bound regions, they just capture the in-scope + // type/region parameters + let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder()); + let (closure_def_id, substs) = match self_ty.sty { + ty::TyClosure(id, substs) => (id, substs), + _ => bug!("closure candidate for non-closure {:?}", obligation) + }; + + let trait_ref = + self.closure_trait_ref_unnormalized(obligation, closure_def_id, substs); let Normalized { value: trait_ref, 
mut obligations - } = self.closure_trait_ref(obligation, closure_def_id, substs); + } = normalize_with_depth(self, + obligation.param_env, + obligation.cause.clone(), + obligation.recursion_depth+1, + &trait_ref); debug!("confirm_closure_candidate(closure_def_id={:?}, trait_ref={:?}, obligations={:?})", closure_def_id, @@ -2970,22 +3180,25 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { ty::Binder(trait_ref) } - fn closure_trait_ref(&mut self, - obligation: &TraitObligation<'tcx>, - closure_def_id: DefId, - substs: ty::ClosureSubsts<'tcx>) - -> Normalized<'tcx, ty::PolyTraitRef<'tcx>> + fn generator_trait_ref_unnormalized(&mut self, + obligation: &TraitObligation<'tcx>, + closure_def_id: DefId, + substs: ty::ClosureSubsts<'tcx>) + -> ty::PolyTraitRef<'tcx> { - let trait_ref = self.closure_trait_ref_unnormalized( - obligation, closure_def_id, substs); + let gen_sig = self.infcx.generator_sig(closure_def_id).unwrap() + .subst(self.tcx(), substs.substs); + let ty::Binder((trait_ref, ..)) = + self.tcx().generator_trait_ref_and_outputs(obligation.predicate.def_id(), + obligation.predicate.0.self_ty(), // (1) + gen_sig); + // (1) Feels icky to skip the binder here, but OTOH we know + // that the self-type is an generator type and hence is + // in fact unparameterized (or at least does not reference any + // regions bound in the obligation). Still probably some + // refactoring could make this nicer. - // A closure signature can contain associated types which - // must be normalized. - normalize_with_depth(self, - obligation.param_env, - obligation.cause.clone(), - obligation.recursion_depth+1, - &trait_ref) + ty::Binder(trait_ref) } /// Returns the obligations that are implied by instantiating an diff --git a/src/librustc/traits/specialize/mod.rs b/src/librustc/traits/specialize/mod.rs index 7c916e162a..20da4c084f 100644 --- a/src/librustc/traits/specialize/mod.rs +++ b/src/librustc/traits/specialize/mod.rs @@ -25,6 +25,7 @@ use hir::def_id::DefId; use infer::{InferCtxt, InferOk}; use ty::subst::{Subst, Substs}; use traits::{self, Reveal, ObligationCause}; +use traits::select::IntercrateAmbiguityCause; use ty::{self, TyCtxt, TypeFoldable}; use syntax_pos::DUMMY_SP; use std::rc::Rc; @@ -36,6 +37,7 @@ pub struct OverlapError { pub with_impl: DefId, pub trait_desc: String, pub self_desc: Option, + pub intercrate_ambiguity_causes: Vec, } /// Given a subst for the requested impl, translate it to a subst @@ -123,7 +125,7 @@ pub fn find_associated_item<'a, 'tcx>( let trait_def = tcx.trait_def(trait_def_id); let ancestors = trait_def.ancestors(tcx, impl_data.impl_def_id); - match ancestors.defs(tcx, item.name, item.kind).next() { + match ancestors.defs(tcx, item.name, item.kind, trait_def_id).next() { Some(node_item) => { let substs = tcx.infer_ctxt().enter(|infcx| { let param_env = ty::ParamEnv::empty(Reveal::All); @@ -150,15 +152,12 @@ pub fn find_associated_item<'a, 'tcx>( /// Specialization is determined by the sets of types to which the impls apply; /// impl1 specializes impl2 if it applies to a subset of the types impl2 applies /// to. 
-pub fn specializes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - impl1_def_id: DefId, - impl2_def_id: DefId) -> bool { +pub(super) fn specializes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + (impl1_def_id, impl2_def_id): (DefId, DefId)) + -> bool +{ debug!("specializes({:?}, {:?})", impl1_def_id, impl2_def_id); - if let Some(r) = tcx.specializes_cache.borrow().check(impl1_def_id, impl2_def_id) { - return r; - } - // The feature gate should prevent introducing new specializations, but not // taking advantage of upstream ones. if !tcx.sess.features.borrow().specialization && @@ -188,7 +187,7 @@ pub fn specializes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let impl1_trait_ref = tcx.impl_trait_ref(impl1_def_id).unwrap(); // Create a infcx, taking the predicates of impl1 as assumptions: - let result = tcx.infer_ctxt().enter(|infcx| { + tcx.infer_ctxt().enter(|infcx| { // Normalize the trait reference. The WF rules ought to ensure // that this always succeeds. let impl1_trait_ref = @@ -204,10 +203,7 @@ pub fn specializes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // Attempt to prove that impl2 applies, given all of the above. fulfill_implication(&infcx, penv, impl1_trait_ref, impl2_def_id).is_ok() - }); - - tcx.specializes_cache.borrow_mut().insert(impl1_def_id, impl2_def_id, result); - result + }) } /// Attempt to fulfill all obligations of `target_impl` after unification with @@ -343,6 +339,10 @@ pub(super) fn specialization_graph_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx } } + for cause in &overlap.intercrate_ambiguity_causes { + cause.add_intercrate_ambiguity_hint(&mut err); + } + err.emit(); } } else { diff --git a/src/librustc/traits/specialize/specialization_graph.rs b/src/librustc/traits/specialize/specialization_graph.rs index 8b31cb599e..da9dbc0e2c 100644 --- a/src/librustc/traits/specialize/specialization_graph.rs +++ b/src/librustc/traits/specialize/specialization_graph.rs @@ -8,9 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use super::{OverlapError, specializes}; +use super::OverlapError; use hir::def_id::DefId; +use ich::{self, StableHashingContext}; +use rustc_data_structures::stable_hasher::{HashStable, StableHasher, + StableHasherResult}; use traits; use ty::{self, TyCtxt, TypeFoldable}; use ty::fast_reject::{self, SimplifiedType}; @@ -113,17 +116,17 @@ impl<'a, 'gcx, 'tcx> Children { let overlap = traits::overlapping_impls(&infcx, possible_sibling, impl_def_id); - if let Some(impl_header) = overlap { + if let Some(overlap) = overlap { if tcx.impls_are_allowed_to_overlap(impl_def_id, possible_sibling) { return Ok((false, false)); } - let le = specializes(tcx, impl_def_id, possible_sibling); - let ge = specializes(tcx, possible_sibling, impl_def_id); + let le = tcx.specializes((impl_def_id, possible_sibling)); + let ge = tcx.specializes((possible_sibling, impl_def_id)); if le == ge { // overlap, but no specialization; error out - let trait_ref = impl_header.trait_ref.unwrap(); + let trait_ref = overlap.impl_header.trait_ref.unwrap(); let self_ty = trait_ref.self_ty(); Err(OverlapError { with_impl: possible_sibling, @@ -135,7 +138,8 @@ impl<'a, 'gcx, 'tcx> Children { Some(self_ty.to_string()) } else { None - } + }, + intercrate_ambiguity_causes: overlap.intercrate_ambiguity_causes, }) } else { Ok((le, ge)) @@ -342,11 +346,14 @@ impl<'a, 'gcx, 'tcx> Ancestors { /// Search the items from the given ancestors, returning each definition /// with the given name and the given kind. 
#[inline] // FIXME(#35870) Avoid closures being unexported due to impl Trait. - pub fn defs(self, tcx: TyCtxt<'a, 'gcx, 'tcx>, name: Name, kind: ty::AssociatedKind) + pub fn defs(self, tcx: TyCtxt<'a, 'gcx, 'tcx>, trait_item_name: Name, + trait_item_kind: ty::AssociatedKind, trait_def_id: DefId) -> impl Iterator> + 'a { self.flat_map(move |node| { - node.items(tcx).filter(move |item| item.kind == kind && item.name == name) - .map(move |item| NodeItem { node: node, item: item }) + node.items(tcx).filter(move |impl_item| { + impl_item.kind == trait_item_kind && + tcx.hygienic_eq(impl_item.name, trait_item_name, trait_def_id) + }).map(move |item| NodeItem { node: node, item: item }) }) } } @@ -364,3 +371,21 @@ pub fn ancestors(tcx: TyCtxt, current_source: Some(Node::Impl(start_from_impl)), } } + +impl<'gcx> HashStable> for Children { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + let Children { + ref nonblanket_impls, + ref blanket_impls, + } = *self; + + ich::hash_stable_trait_impls(hcx, hasher, blanket_impls, nonblanket_impls); + } +} + +impl_stable_hash_for!(struct self::Graph { + parent, + children +}); diff --git a/src/librustc/traits/structural_impls.rs b/src/librustc/traits/structural_impls.rs index d913c76ec3..19ed03aa14 100644 --- a/src/librustc/traits/structural_impls.rs +++ b/src/librustc/traits/structural_impls.rs @@ -53,6 +53,9 @@ impl<'tcx, N: fmt::Debug> fmt::Debug for traits::Vtable<'tcx, N> { super::VtableClosure(ref d) => write!(f, "{:?}", d), + super::VtableGenerator(ref d) => + write!(f, "{:?}", d), + super::VtableFnPointer(ref d) => write!(f, "VtableFnPointer({:?})", d), @@ -77,6 +80,15 @@ impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableImplData<'tcx, N> { } } +impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableGeneratorData<'tcx, N> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "VtableGenerator(closure_def_id={:?}, substs={:?}, nested={:?})", + self.closure_def_id, + self.substs, + self.nested) + } +} + impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableClosureData<'tcx, N> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "VtableClosure(closure_def_id={:?}, substs={:?}, nested={:?})", @@ -161,6 +173,9 @@ impl<'a, 'tcx> Lift<'tcx> for traits::SelectionError<'a> { super::TraitNotObjectSafe(def_id) => { Some(super::TraitNotObjectSafe(def_id)) } + super::ConstEvalFailure(ref err) => { + tcx.lift(err).map(super::ConstEvalFailure) + } } } } @@ -278,6 +293,19 @@ impl<'a, 'tcx> Lift<'tcx> for traits::Vtable<'a, ()> { }) } traits::VtableDefaultImpl(t) => Some(traits::VtableDefaultImpl(t)), + traits::VtableGenerator(traits::VtableGeneratorData { + closure_def_id, + substs, + nested + }) => { + tcx.lift(&substs).map(|substs| { + traits::VtableGenerator(traits::VtableGeneratorData { + closure_def_id: closure_def_id, + substs: substs, + nested: nested + }) + }) + } traits::VtableClosure(traits::VtableClosureData { closure_def_id, substs, @@ -351,6 +379,20 @@ impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableImplData< } } +impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableGeneratorData<'tcx, N> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + traits::VtableGeneratorData { + closure_def_id: self.closure_def_id, + substs: self.substs.fold_with(folder), + nested: self.nested.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.substs.visit_with(visitor) 
|| self.nested.visit_with(visitor) + } +} + impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableClosureData<'tcx, N> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { traits::VtableClosureData { @@ -422,6 +464,9 @@ impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::Vtable<'tcx, N> match *self { traits::VtableImpl(ref v) => traits::VtableImpl(v.fold_with(folder)), traits::VtableDefaultImpl(ref t) => traits::VtableDefaultImpl(t.fold_with(folder)), + traits::VtableGenerator(ref d) => { + traits::VtableGenerator(d.fold_with(folder)) + } traits::VtableClosure(ref d) => { traits::VtableClosure(d.fold_with(folder)) } @@ -438,6 +483,7 @@ impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::Vtable<'tcx, N> match *self { traits::VtableImpl(ref v) => v.visit_with(visitor), traits::VtableDefaultImpl(ref t) => t.visit_with(visitor), + traits::VtableGenerator(ref d) => d.visit_with(visitor), traits::VtableClosure(ref d) => d.visit_with(visitor), traits::VtableFnPointer(ref d) => d.visit_with(visitor), traits::VtableParam(ref n) => n.visit_with(visitor), diff --git a/src/librustc/traits/trans/mod.rs b/src/librustc/traits/trans/mod.rs index 827a5092c0..947e7117c4 100644 --- a/src/librustc/traits/trans/mod.rs +++ b/src/librustc/traits/trans/mod.rs @@ -19,7 +19,7 @@ use std::cell::RefCell; use std::marker::PhantomData; use syntax::ast; use syntax_pos::Span; -use traits::{FulfillmentContext, Obligation, ObligationCause, Reveal, SelectionContext, Vtable}; +use traits::{FulfillmentContext, Obligation, ObligationCause, SelectionContext, Vtable}; use ty::{self, Ty, TyCtxt}; use ty::subst::{Subst, Substs}; use ty::fold::{TypeFoldable, TypeFolder}; @@ -31,24 +31,25 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { /// (necessarily) resolve all nested obligations on the impl. Note /// that type check should guarantee to us that all nested /// obligations *could be* resolved if we wanted to. + /// Assumes that this is run after the entire crate has been successfully type-checked. pub fn trans_fulfill_obligation(self, span: Span, + param_env: ty::ParamEnv<'tcx>, trait_ref: ty::PolyTraitRef<'tcx>) -> Vtable<'tcx, ()> { // Remove any references to regions; this helps improve caching. let trait_ref = self.erase_regions(&trait_ref); - self.trans_trait_caches.trait_cache.memoize(trait_ref, || { + self.trans_trait_caches.trait_cache.memoize((param_env, trait_ref), || { debug!("trans::fulfill_obligation(trait_ref={:?}, def_id={:?})", - trait_ref, trait_ref.def_id()); + (param_env, trait_ref), trait_ref.def_id()); // Do the initial selection for the obligation. This yields the // shallow result we are looking for -- that is, what specific impl. 
self.infer_ctxt().enter(|infcx| { let mut selcx = SelectionContext::new(&infcx); - let param_env = ty::ParamEnv::empty(Reveal::All); let obligation_cause = ObligationCause::misc(span, ast::DUMMY_NODE_ID); let obligation = Obligation::new(obligation_cause, @@ -120,7 +121,7 @@ impl<'a, 'gcx> AssociatedTypeNormalizer<'a, 'gcx> { } fn fold>(&mut self, value: &T) -> T { - if !value.has_projection_types() { + if !value.has_projections() { value.clone() } else { value.fold_with(self) @@ -134,7 +135,7 @@ impl<'a, 'gcx> TypeFolder<'gcx, 'gcx> for AssociatedTypeNormalizer<'a, 'gcx> { } fn fold_ty(&mut self, ty: Ty<'gcx>) -> Ty<'gcx> { - if !ty.has_projection_types() { + if !ty.has_projections() { ty } else { self.tcx.trans_trait_caches.project_cache.memoize(ty, || { @@ -167,7 +168,7 @@ pub struct TraitSelectionCache<'tcx> { } impl<'tcx> DepTrackingMapConfig for TraitSelectionCache<'tcx> { - type Key = ty::PolyTraitRef<'tcx>; + type Key = (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>); type Value = Vtable<'tcx, ()>; fn to_dep_kind() -> DepKind { DepKind::TraitSelect diff --git a/src/librustc/traits/util.rs b/src/librustc/traits/util.rs index dae0c89690..42e0834e8e 100644 --- a/src/librustc/traits/util.rs +++ b/src/librustc/traits/util.rs @@ -48,6 +48,9 @@ fn anonymize_predicate<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, ty::Predicate::Subtype(ref data) => ty::Predicate::Subtype(tcx.anonymize_late_bound_regions(data)), + + ty::Predicate::ConstEvaluatable(def_id, substs) => + ty::Predicate::ConstEvaluatable(def_id, substs), } } @@ -175,6 +178,10 @@ impl<'cx, 'gcx, 'tcx> Elaborator<'cx, 'gcx, 'tcx> { ty::Predicate::ClosureKind(..) => { // Nothing to elaborate when waiting for a closure's kind to be inferred. } + ty::Predicate::ConstEvaluatable(..) => { + // Currently, we do not elaborate const-evaluatable + // predicates. + } ty::Predicate::RegionOutlives(..) => { // Nothing to elaborate from `'a: 'b`. @@ -513,6 +520,19 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { ty::Binder((trait_ref, sig.skip_binder().output())) } + pub fn generator_trait_ref_and_outputs(self, + fn_trait_def_id: DefId, + self_ty: Ty<'tcx>, + sig: ty::PolyGenSig<'tcx>) + -> ty::Binder<(ty::TraitRef<'tcx>, Ty<'tcx>, Ty<'tcx>)> + { + let trait_ref = ty::TraitRef { + def_id: fn_trait_def_id, + substs: self.mk_substs_trait(self_ty, &[]), + }; + ty::Binder((trait_ref, sig.skip_binder().yield_ty, sig.skip_binder().return_ty)) + } + pub fn impl_is_default(self, node_item_def_id: DefId) -> bool { match self.hir.as_local_node_id(node_item_def_id) { Some(node_id) => { @@ -525,8 +545,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } None => { self.global_tcx() - .sess - .cstore .impl_defaultness(node_item_def_id) .is_default() } diff --git a/src/librustc/ty/README.md b/src/librustc/ty/README.md new file mode 100644 index 0000000000..4f63912a1e --- /dev/null +++ b/src/librustc/ty/README.md @@ -0,0 +1,165 @@ +# Types and the Type Context + +The `ty` module defines how the Rust compiler represents types +internally. It also defines the *typing context* (`tcx` or `TyCtxt`), +which is the central data structure in the compiler. + +## The tcx and how it uses lifetimes + +The `tcx` ("typing context") is the central data structure in the +compiler. It is the context that you use to perform all manner of +queries. 
The struct `TyCtxt` defines a reference to this shared context:
+
+```rust
+tcx: TyCtxt<'a, 'gcx, 'tcx>
+//          --  ----  ----
+//          |   |     |
+//          |   |     innermost arena lifetime (if any)
+//          |   "global arena" lifetime
+//          lifetime of this reference
+```
+
+As you can see, the `TyCtxt` type takes three lifetime parameters.
+These lifetimes are perhaps the most complex thing to understand about
+the tcx. During Rust compilation, we allocate most of our memory in
+**arenas**, which are basically pools of memory that get freed all at
+once. When you see a reference with a lifetime like `'tcx` or `'gcx`,
+you know that it refers to arena-allocated data (or data that lives as
+long as the arenas, anyhow).
+
+We use two distinct levels of arenas. The outer level is the "global
+arena". This arena lasts for the entire compilation: so anything you
+allocate in there is only freed once compilation is basically over
+(actually, when we shift to executing LLVM).
+
+To reduce peak memory usage, when we do type inference, we also use an
+inner level of arena. These arenas get thrown away once type inference
+is over. This is done because type inference generates a lot of
+"throw-away" types that are not particularly interesting after type
+inference completes, so keeping around those allocations would be
+wasteful.
+
+Often, we wish to write code that explicitly asserts that it is not
+taking place during inference. In that case, there is no "local"
+arena, and all the types that you can access are allocated in the
+global arena. To express this, the idea is to use the same lifetime
+for the `'gcx` and `'tcx` parameters of `TyCtxt`. Just to be a touch
+confusing, we tend to use the name `'tcx` in such contexts. Here is an
+example:
+
+```rust
+fn not_in_inference<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) {
+    //                                ----  ----
+    // Using the same lifetime here asserts
+    // that the innermost arena accessible through
+    // this reference *is* the global arena.
+}
+```
+
+In contrast, if we want to write code that can be used during type inference, then you
+need to declare distinct `'gcx` and `'tcx` lifetime parameters:
+
+```rust
+fn maybe_in_inference<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, def_id: DefId) {
+    //                                      ----  ----
+    // Using different lifetimes here means that
+    // the innermost arena *may* be distinct
+    // from the global arena (but doesn't have to be).
+}
+```
+
+### Allocating and working with types
+
+Rust types are represented using the `Ty<'tcx>` defined in the `ty`
+module (not to be confused with the `Ty` struct from [the HIR]). This
+is in fact a simple type alias for a reference with `'tcx` lifetime:
+
+```rust
+pub type Ty<'tcx> = &'tcx TyS<'tcx>;
+```
+
+[the HIR]: ../hir/README.md
+
+You can basically ignore the `TyS` struct -- you will basically never
+access it explicitly. We always pass it by reference using the
+`Ty<'tcx>` alias -- the only exception I think is to define inherent
+methods on types. Instances of `TyS` are only ever allocated in one of
+the rustc arenas (never e.g. on the stack).
+
+One common operation on types is to **match** and see what kinds of
+types they are. This is done by doing `match ty.sty`, sort of like this:
+
+```rust
+fn test_type<'tcx>(ty: Ty<'tcx>) {
+    match ty.sty {
+        ty::TyArray(elem_ty, len) => { ... }
+        ...
+    }
+}
+```
+
+The `sty` field (the origin of this name is unclear to me; perhaps
+structural type?) is of type `TypeVariants<'tcx>`, which is an enum
+defining all of the different kinds of types in the compiler.
+
+> NB: inspecting the `sty` field on types during type inference can be
+> risky, as there may be inference variables and other things to
+> consider, or sometimes types are not yet known and will only become
+> known later.
+
+To allocate a new type, you can use the various `mk_` methods defined
+on the `tcx`. These have names that correspond mostly to the various kinds
+of type variants. For example:
+
+```rust
+let array_ty = tcx.mk_array(elem_ty, len * 2);
+```
+
+These methods all return a `Ty<'tcx>` -- note that the lifetime you
+get back is the lifetime of the innermost arena that this `tcx` has
+access to. In fact, types are always canonicalized and interned (so we
+never allocate exactly the same type twice) and are always allocated
+in the outermost arena where they can be (so, if they do not contain
+any inference variables or other "temporary" types, they will be
+allocated in the global arena). However, the lifetime `'tcx` is always
+a safe approximation, so that is what you get back.
+
+> NB. Because types are interned, it is possible to compare them for
+> equality efficiently using `==` -- however, this is almost never what
+> you want to do unless you happen to be hashing and looking for
+> duplicates. This is because often in Rust there are multiple ways to
+> represent the same type, particularly once inference is involved. If
+> you are going to be testing for type equality, you probably need to
+> start looking into the inference code to do it right.
+
+You can also find various common types in the tcx itself by accessing
+`tcx.types.bool`, `tcx.types.char`, etc. (see `CommonTypes` for more).
+
+### Beyond types: Other kinds of arena-allocated data structures
+
+In addition to types, there are a number of other arena-allocated data
+structures that you can allocate, and which are found in this
+module. Here are a few examples:
+
+- `Substs`, allocated with `mk_substs` -- this will intern a slice of types, often used to
+  specify the values to be substituted for generics (e.g., `HashMap<i32, u32>`
+  would be represented as a slice `&'tcx [tcx.types.i32, tcx.types.u32]`).
+- `TraitRef`, typically passed by value -- a **trait reference**
+  consists of a reference to a trait along with its various type
+  parameters (including `Self`), like `i32: Display` (here, the def-id
+  would reference the `Display` trait, and the substs would contain
+  `i32`).
+- `Predicate` defines something the trait system has to prove (see `traits` module).
+
+### Import conventions
+
+Although there is no hard and fast rule, the `ty` module tends to be used like so:
+
+```rust
+use ty::{self, Ty, TyCtxt};
+```
+
+In particular, since they are so common, the `Ty` and `TyCtxt` types
+are imported directly. Other types are often referenced with an
+explicit `ty::` prefix (e.g., `ty::TraitRef<'tcx>`). But some modules
+choose to import a larger or smaller set of names explicitly.
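+
+To make the last two sections a bit more concrete, here is a small sketch
+(purely illustrative -- the helper name is made up and not part of the
+compiler) that follows the import convention and uses `mk_substs_trait` to
+build a `TraitRef` for a trait that has no type parameters other than `Self`:
+
+```rust
+use hir::def_id::DefId;
+use ty::{self, Ty, TyCtxt};
+
+// Hypothetical helper: build the trait reference `self_ty: Trait`, assuming
+// `trait_def_id` names a trait whose only type parameter is `Self`.
+fn simple_trait_ref<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+                                    trait_def_id: DefId,
+                                    self_ty: Ty<'tcx>)
+                                    -> ty::TraitRef<'tcx> {
+    ty::TraitRef {
+        def_id: trait_def_id,
+        // `mk_substs_trait` interns a `Substs` starting with the self type,
+        // followed by the remaining type parameters (none in this case).
+        substs: tcx.mk_substs_trait(self_ty, &[]),
+    }
+}
+```
+
+For the `i32: Display` example from the list above, the self type passed in
+would just be the common type `tcx.types.i32`.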
diff --git a/src/librustc/ty/adjustment.rs b/src/librustc/ty/adjustment.rs index 5143666077..349d77cfc1 100644 --- a/src/librustc/ty/adjustment.rs +++ b/src/librustc/ty/adjustment.rs @@ -110,8 +110,8 @@ impl<'a, 'gcx, 'tcx> OverloadedDeref<'tcx> { pub fn method_call(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, source: Ty<'tcx>) -> (DefId, &'tcx Substs<'tcx>) { let trait_def_id = match self.mutbl { - hir::MutImmutable => tcx.lang_items.deref_trait(), - hir::MutMutable => tcx.lang_items.deref_mut_trait() + hir::MutImmutable => tcx.lang_items().deref_trait(), + hir::MutMutable => tcx.lang_items().deref_mut_trait() }; let method_def_id = tcx.associated_items(trait_def_id.unwrap()) .find(|m| m.kind == ty::AssociatedKind::Method).unwrap().def_id; diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index 6ce2232eb3..740299b91f 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -11,19 +11,24 @@ //! type context book-keeping use dep_graph::DepGraph; +use dep_graph::{DepNode, DepConstructor}; use errors::DiagnosticBuilder; use session::Session; +use session::config::OutputFilenames; use middle; -use hir::{TraitMap}; -use hir::def::{Def, ExportMap}; -use hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; +use hir::{TraitCandidate, HirId, ItemLocalId}; +use hir::def::{Def, Export}; +use hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE}; use hir::map as hir_map; use hir::map::DefPathHash; use lint::{self, Lint}; -use ich::{self, StableHashingContext, NodeIdHashingMode}; +use ich::{StableHashingContext, NodeIdHashingMode}; +use middle::const_val::ConstVal; +use middle::cstore::{CrateStore, LinkMeta, EncodedMetadataHashes}; +use middle::cstore::EncodedMetadata; use middle::free_region::FreeRegionMap; use middle::lang_items; -use middle::resolve_lifetime; +use middle::resolve_lifetime::{self, ObjectLifetimeDefault}; use middle::stability; use mir::Mir; use mir::transform::Passes; @@ -32,8 +37,7 @@ use ty::ReprOptions; use traits; use ty::{self, Ty, TypeAndMut}; use ty::{TyS, TypeVariants, Slice}; -use ty::{AdtKind, AdtDef, ClosureSubsts, Region}; -use hir::FreevarMap; +use ty::{AdtKind, AdtDef, ClosureSubsts, GeneratorInterior, Region, Const}; use ty::{PolyFnSig, InferTy, ParamTy, ProjectionTy, ExistentialPredicate, Predicate}; use ty::RegionKind; use ty::{TyVar, TyVid, IntVar, IntVid, FloatVar, FloatVid}; @@ -46,11 +50,13 @@ use ty::BindingMode; use util::nodemap::{NodeMap, NodeSet, DefIdSet, ItemLocalMap}; use util::nodemap::{FxHashMap, FxHashSet}; use rustc_data_structures::accumulate_vec::AccumulateVec; -use rustc_data_structures::stable_hasher::{HashStable, StableHasher, - StableHasherResult}; - +use rustc_data_structures::stable_hasher::{HashStable, hash_stable_hashmap, + StableHasher, StableHasherResult, + StableVec}; use arena::{TypedArena, DroplessArena}; +use rustc_const_math::{ConstInt, ConstUsize}; use rustc_data_structures::indexed_vec::IndexVec; +use std::any::Any; use std::borrow::Borrow; use std::cell::{Cell, RefCell}; use std::cmp::Ordering; @@ -60,6 +66,8 @@ use std::mem; use std::ops::Deref; use std::iter; use std::rc::Rc; +use std::sync::mpsc; +use std::sync::Arc; use syntax::abi; use syntax::ast::{self, Name, NodeId}; use syntax::attr; @@ -109,6 +117,7 @@ pub struct CtxtInterners<'tcx> { region: RefCell>>, existential_predicates: RefCell>>>>, predicates: RefCell>>>>, + const_: RefCell>>>, } impl<'gcx: 'tcx, 'tcx> CtxtInterners<'tcx> { @@ -121,6 +130,7 @@ impl<'gcx: 'tcx, 'tcx> CtxtInterners<'tcx> { region: RefCell::new(FxHashSet()), 
existential_predicates: RefCell::new(FxHashSet()), predicates: RefCell::new(FxHashSet()), + const_: RefCell::new(FxHashSet()), } } @@ -305,7 +315,7 @@ impl<'a, V> LocalTableInContextMut<'a, V> { } } -#[derive(RustcEncodable, RustcDecodable)] +#[derive(RustcEncodable, RustcDecodable, Debug)] pub struct TypeckTables<'tcx> { /// The HirId::owner all ItemLocalIds in this table are relative to. pub local_id_root: Option, @@ -327,9 +337,24 @@ pub struct TypeckTables<'tcx> { adjustments: ItemLocalMap>>, - // Stores the actual binding mode for all instances of hir::BindingAnnotation. + /// Stores the actual binding mode for all instances of hir::BindingAnnotation. pat_binding_modes: ItemLocalMap, + /// Stores the types which were implicitly dereferenced in pattern binding modes + /// for later usage in HAIR lowering. For example, + /// + /// ``` + /// match &&Some(5i32) { + /// Some(n) => {}, + /// _ => {}, + /// } + /// ``` + /// leads to a `vec![&&Option, &Option]`. Empty vectors are not stored. + /// + /// See: + /// https://github.com/rust-lang/rfcs/blob/master/text/2005-match-ergonomics.md#definitions + pat_adjustments: ItemLocalMap>>, + /// Borrows pub upvar_capture_map: ty::UpvarCaptureMap<'tcx>, @@ -340,6 +365,10 @@ pub struct TypeckTables<'tcx> { /// that caused the closure to be this kind. closure_kinds: ItemLocalMap<(ty::ClosureKind, Option<(Span, ast::Name)>)>, + generator_sigs: ItemLocalMap>>, + + generator_interiors: ItemLocalMap>, + /// For each fn, records the "liberated" types of its arguments /// and return type. Liberated means that all bound regions /// (including late-bound regions) are replaced with free @@ -380,7 +409,10 @@ impl<'tcx> TypeckTables<'tcx> { node_substs: ItemLocalMap(), adjustments: ItemLocalMap(), pat_binding_modes: ItemLocalMap(), + pat_adjustments: ItemLocalMap(), upvar_capture_map: FxHashMap(), + generator_sigs: ItemLocalMap(), + generator_interiors: ItemLocalMap(), closure_tys: ItemLocalMap(), closure_kinds: ItemLocalMap(), liberated_fn_sigs: ItemLocalMap(), @@ -558,6 +590,21 @@ impl<'tcx> TypeckTables<'tcx> { } } + pub fn pat_adjustments(&self) -> LocalTableInContext>> { + LocalTableInContext { + local_id_root: self.local_id_root, + data: &self.pat_adjustments, + } + } + + pub fn pat_adjustments_mut(&mut self) + -> LocalTableInContextMut>> { + LocalTableInContextMut { + local_id_root: self.local_id_root, + data: &mut self.pat_adjustments, + } + } + pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> ty::UpvarCapture<'tcx> { self.upvar_capture_map[&upvar_id] } @@ -634,11 +681,47 @@ impl<'tcx> TypeckTables<'tcx> { data: &mut self.cast_kinds } } + + pub fn generator_sigs(&self) + -> LocalTableInContext>> + { + LocalTableInContext { + local_id_root: self.local_id_root, + data: &self.generator_sigs, + } + } + + pub fn generator_sigs_mut(&mut self) + -> LocalTableInContextMut>> + { + LocalTableInContextMut { + local_id_root: self.local_id_root, + data: &mut self.generator_sigs, + } + } + + pub fn generator_interiors(&self) + -> LocalTableInContext> + { + LocalTableInContext { + local_id_root: self.local_id_root, + data: &self.generator_interiors, + } + } + + pub fn generator_interiors_mut(&mut self) + -> LocalTableInContextMut> + { + LocalTableInContextMut { + local_id_root: self.local_id_root, + data: &mut self.generator_interiors, + } + } } -impl<'a, 'gcx, 'tcx> HashStable> for TypeckTables<'gcx> { +impl<'gcx> HashStable> for TypeckTables<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, 
hasher: &mut StableHasher) { let ty::TypeckTables { local_id_root, @@ -647,6 +730,7 @@ impl<'a, 'gcx, 'tcx> HashStable> for Typeck ref node_substs, ref adjustments, ref pat_binding_modes, + ref pat_adjustments, ref upvar_capture_map, ref closure_tys, ref closure_kinds, @@ -658,15 +742,18 @@ impl<'a, 'gcx, 'tcx> HashStable> for Typeck ref used_trait_imports, tainted_by_errors, ref free_region_map, + ref generator_sigs, + ref generator_interiors, } = *self; hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { - ich::hash_stable_itemlocalmap(hcx, hasher, type_dependent_defs); - ich::hash_stable_itemlocalmap(hcx, hasher, node_types); - ich::hash_stable_itemlocalmap(hcx, hasher, node_substs); - ich::hash_stable_itemlocalmap(hcx, hasher, adjustments); - ich::hash_stable_itemlocalmap(hcx, hasher, pat_binding_modes); - ich::hash_stable_hashmap(hcx, hasher, upvar_capture_map, |hcx, up_var_id| { + type_dependent_defs.hash_stable(hcx, hasher); + node_types.hash_stable(hcx, hasher); + node_substs.hash_stable(hcx, hasher); + adjustments.hash_stable(hcx, hasher); + pat_binding_modes.hash_stable(hcx, hasher); + pat_adjustments.hash_stable(hcx, hasher); + hash_stable_hashmap(hcx, hasher, upvar_capture_map, |up_var_id, hcx| { let ty::UpvarId { var_id, closure_expr_id @@ -675,27 +762,27 @@ impl<'a, 'gcx, 'tcx> HashStable> for Typeck let local_id_root = local_id_root.expect("trying to hash invalid TypeckTables"); - let var_def_id = DefId { + let var_owner_def_id = DefId { krate: local_id_root.krate, - index: var_id, + index: var_id.owner, }; let closure_def_id = DefId { krate: local_id_root.krate, index: closure_expr_id, }; - (hcx.def_path_hash(var_def_id), hcx.def_path_hash(closure_def_id)) - }); - - ich::hash_stable_itemlocalmap(hcx, hasher, closure_tys); - ich::hash_stable_itemlocalmap(hcx, hasher, closure_kinds); - ich::hash_stable_itemlocalmap(hcx, hasher, liberated_fn_sigs); - ich::hash_stable_itemlocalmap(hcx, hasher, fru_field_types); - ich::hash_stable_itemlocalmap(hcx, hasher, cast_kinds); - - ich::hash_stable_hashset(hcx, hasher, used_trait_imports, |hcx, def_id| { - hcx.def_path_hash(*def_id) + (hcx.def_path_hash(var_owner_def_id), + var_id.local_id, + hcx.def_path_hash(closure_def_id)) }); + closure_tys.hash_stable(hcx, hasher); + closure_kinds.hash_stable(hcx, hasher); + liberated_fn_sigs.hash_stable(hcx, hasher); + fru_field_types.hash_stable(hcx, hasher); + cast_kinds.hash_stable(hcx, hasher); + generator_sigs.hash_stable(hcx, hasher); + generator_interiors.hash_stable(hcx, hasher); + used_trait_imports.hash_stable(hcx, hasher); tainted_by_errors.hash_stable(hcx, hasher); free_region_map.hash_stable(hcx, hasher); }) @@ -740,9 +827,10 @@ impl<'tcx> CommonTypes<'tcx> { } } -/// The data structure to keep track of all the information that typechecker -/// generates so that so that it can be reused and doesn't have to be redone -/// later on. +/// The central data structure of the compiler. It stores references +/// to the various **arenas** and also houses the results of the +/// various **compiler queries** that have been performed. See [the +/// README](README.md) for more deatils. 
#[derive(Copy, Clone)] pub struct TyCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { gcx: &'a GlobalCtxt<'gcx>, @@ -760,9 +848,10 @@ pub struct GlobalCtxt<'tcx> { global_arenas: &'tcx GlobalArenas<'tcx>, global_interners: CtxtInterners<'tcx>, + cstore: &'tcx CrateStore, + pub sess: &'tcx Session, - pub specializes_cache: RefCell, pub trans_trait_caches: traits::trans::TransTraitCaches<'tcx>, @@ -773,12 +862,14 @@ pub struct GlobalCtxt<'tcx> { /// Map indicating what traits are in scope for places where this /// is relevant; generated by resolve. - pub trait_map: TraitMap, + trait_map: FxHashMap>>>>, /// Export map produced by name resolution. - pub export_map: ExportMap, + export_map: FxHashMap>>, - pub named_region_map: resolve_lifetime::NamedRegionMap, + named_region_map: NamedRegionMap, pub hir: hir_map::Map<'tcx>, @@ -793,11 +884,11 @@ pub struct GlobalCtxt<'tcx> { // Records the free variables refrenced by every closure // expression. Do not track deps for this, just recompute it from // scratch every time. - pub freevars: RefCell, + freevars: FxHashMap>>, - pub maybe_unused_trait_imports: NodeSet, + maybe_unused_trait_imports: FxHashSet, - pub maybe_unused_extern_crates: Vec<(NodeId, Span)>, + maybe_unused_extern_crates: Vec<(DefId, Span)>, // Internal cache for metadata decoding. No need to track deps on this. pub rcache: RefCell>>, @@ -807,20 +898,11 @@ pub struct GlobalCtxt<'tcx> { pub inhabitedness_cache: RefCell, DefIdForest>>, - pub lang_items: middle::lang_items::LanguageItems, - - /// Set of used unsafe nodes (functions or blocks). Unsafe nodes not - /// present in this set can be warned about. - pub used_unsafe: RefCell, - /// Set of nodes which mark locals as mutable which end up getting used at /// some point. Local variable definitions not in this set can be warned /// about. pub used_mut_nodes: RefCell, - /// Maps any item's def-id to its stability index. - pub stability: RefCell>, - /// Caches the results of trait selection. This cache is used /// for things that do not have to do with the parameters in scope. pub selection_cache: traits::SelectionCache<'tcx>, @@ -856,6 +938,16 @@ pub struct GlobalCtxt<'tcx> { /// error reporting, and so is lazily initialized and generally /// shouldn't taint the common path (hence the RefCell). pub all_traits: RefCell>>, + + /// A general purpose channel to throw data out the back towards LLVM worker + /// threads. + /// + /// This is intended to only get used during the trans phase of the compiler + /// when satisfying the query for a particular codegen unit. Internally in + /// the query it'll send data along this channel to get processed later. 
+ pub tx_to_llvm_workers: mpsc::Sender>, + + output_filenames: Arc, } impl<'tcx> GlobalCtxt<'tcx> { @@ -869,30 +961,6 @@ impl<'tcx> GlobalCtxt<'tcx> { } impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { - pub fn crate_name(self, cnum: CrateNum) -> Symbol { - if cnum == LOCAL_CRATE { - self.crate_name - } else { - self.sess.cstore.crate_name(cnum) - } - } - - pub fn original_crate_name(self, cnum: CrateNum) -> Symbol { - if cnum == LOCAL_CRATE { - self.crate_name.clone() - } else { - self.sess.cstore.original_crate_name(cnum) - } - } - - pub fn crate_disambiguator(self, cnum: CrateNum) -> Symbol { - if cnum == LOCAL_CRATE { - self.sess.local_crate_disambiguator() - } else { - self.sess.cstore.crate_disambiguator(cnum) - } - } - pub fn alloc_generics(self, generics: ty::Generics) -> &'gcx ty::Generics { self.global_arenas.generics.alloc(generics) } @@ -923,6 +991,32 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.global_arenas.adt_def.alloc(def) } + pub fn alloc_byte_array(self, bytes: &[u8]) -> &'gcx [u8] { + if bytes.is_empty() { + &[] + } else { + self.global_interners.arena.alloc_slice(bytes) + } + } + + pub fn alloc_const_slice(self, values: &[&'tcx ty::Const<'tcx>]) + -> &'tcx [&'tcx ty::Const<'tcx>] { + if values.is_empty() { + &[] + } else { + self.interners.arena.alloc_slice(values) + } + } + + pub fn alloc_name_const_slice(self, values: &[(ast::Name, &'tcx ty::Const<'tcx>)]) + -> &'tcx [(ast::Name, &'tcx ty::Const<'tcx>)] { + if values.is_empty() { + &[] + } else { + self.interners.arena.alloc_slice(values) + } + } + pub fn intern_stability(self, stab: attr::Stability) -> &'gcx attr::Stability { if let Some(st) = self.stability_interner.borrow().get(&stab) { return st; @@ -968,6 +1062,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// value (types, substs, etc.) can only be used while `ty::tls` has a valid /// reference to the context, to allow formatting values that need it. 
pub fn create_and_enter(s: &'tcx Session, + cstore: &'tcx CrateStore, local_providers: ty::maps::Providers<'tcx>, extern_providers: ty::maps::Providers<'tcx>, mir_passes: Rc, @@ -976,9 +1071,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { resolutions: ty::Resolutions, named_region_map: resolve_lifetime::NamedRegionMap, hir: hir_map::Map<'tcx>, - lang_items: middle::lang_items::LanguageItems, - stability: stability::Index<'tcx>, crate_name: &str, + tx: mpsc::Sender>, + output_filenames: &OutputFilenames, f: F) -> R where F: for<'b> FnOnce(TyCtxt<'b, 'tcx, 'tcx>) -> R { @@ -986,16 +1081,15 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { let interners = CtxtInterners::new(arena); let common_types = CommonTypes::new(&interners); let dep_graph = hir.dep_graph.clone(); - let max_cnum = s.cstore.crates().iter().map(|c| c.as_usize()).max().unwrap_or(0); + let max_cnum = cstore.crates_untracked().iter().map(|c| c.as_usize()).max().unwrap_or(0); let mut providers = IndexVec::from_elem_n(extern_providers, max_cnum + 1); providers[LOCAL_CRATE] = local_providers; let def_path_hash_to_def_id = if s.opts.build_dep_graph() { - let upstream_def_path_tables: Vec<(CrateNum, Rc<_>)> = s - .cstore - .crates() + let upstream_def_path_tables: Vec<(CrateNum, Rc<_>)> = cstore + .crates_untracked() .iter() - .map(|&cnum| (cnum, s.cstore.def_path_table(cnum))) + .map(|&cnum| (cnum, cstore.def_path_table(cnum))) .collect(); let def_path_tables = || { @@ -1023,31 +1117,75 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { None }; + let mut trait_map = FxHashMap(); + for (k, v) in resolutions.trait_map { + let hir_id = hir.node_to_hir_id(k); + let map = trait_map.entry(hir_id.owner) + .or_insert_with(|| Rc::new(FxHashMap())); + Rc::get_mut(map).unwrap() + .insert(hir_id.local_id, + Rc::new(StableVec::new(v))); + } + let mut defs = FxHashMap(); + for (k, v) in named_region_map.defs { + let hir_id = hir.node_to_hir_id(k); + let map = defs.entry(hir_id.owner) + .or_insert_with(|| Rc::new(FxHashMap())); + Rc::get_mut(map).unwrap().insert(hir_id.local_id, v); + } + let mut late_bound = FxHashMap(); + for k in named_region_map.late_bound { + let hir_id = hir.node_to_hir_id(k); + let map = late_bound.entry(hir_id.owner) + .or_insert_with(|| Rc::new(FxHashSet())); + Rc::get_mut(map).unwrap().insert(hir_id.local_id); + } + let mut object_lifetime_defaults = FxHashMap(); + for (k, v) in named_region_map.object_lifetime_defaults { + let hir_id = hir.node_to_hir_id(k); + let map = object_lifetime_defaults.entry(hir_id.owner) + .or_insert_with(|| Rc::new(FxHashMap())); + Rc::get_mut(map).unwrap().insert(hir_id.local_id, Rc::new(v)); + } + tls::enter_global(GlobalCtxt { sess: s, + cstore, trans_trait_caches: traits::trans::TransTraitCaches::new(dep_graph.clone()), - specializes_cache: RefCell::new(traits::SpecializesCache::new()), global_arenas: arenas, global_interners: interners, dep_graph: dep_graph.clone(), types: common_types, - named_region_map, - trait_map: resolutions.trait_map, - export_map: resolutions.export_map, + named_region_map: NamedRegionMap { + defs, + late_bound, + object_lifetime_defaults, + }, + trait_map, + export_map: resolutions.export_map.into_iter().map(|(k, v)| { + (k, Rc::new(v)) + }).collect(), + freevars: resolutions.freevars.into_iter().map(|(k, v)| { + (hir.local_def_id(k), Rc::new(v)) + }).collect(), + maybe_unused_trait_imports: + resolutions.maybe_unused_trait_imports + .into_iter() + .map(|id| hir.local_def_id(id)) + .collect(), + maybe_unused_extern_crates: + 
resolutions.maybe_unused_extern_crates + .into_iter() + .map(|(id, sp)| (hir.local_def_id(id), sp)) + .collect(), hir, def_path_hash_to_def_id, maps: maps::Maps::new(providers), mir_passes, - freevars: RefCell::new(resolutions.freevars), - maybe_unused_trait_imports: resolutions.maybe_unused_trait_imports, - maybe_unused_extern_crates: resolutions.maybe_unused_extern_crates, rcache: RefCell::new(FxHashMap()), normalized_cache: RefCell::new(FxHashMap()), inhabitedness_cache: RefCell::new(FxHashMap()), - lang_items, - used_unsafe: RefCell::new(NodeSet()), used_mut_nodes: RefCell::new(NodeSet()), - stability: RefCell::new(stability), selection_cache: traits::SelectionCache::new(), evaluation_cache: traits::EvaluationCache::new(), rvalue_promotable_to_static: RefCell::new(NodeMap()), @@ -1058,6 +1196,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { derive_macros: RefCell::new(NodeMap()), stability_interner: RefCell::new(FxHashSet()), all_traits: RefCell::new(None), + tx_to_llvm_workers: tx, + output_filenames: Arc::new(output_filenames.clone()), }, f) } @@ -1065,6 +1205,129 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { let cname = self.crate_name(LOCAL_CRATE).as_str(); self.sess.consider_optimizing(&cname, msg) } + + pub fn lang_items(self) -> Rc { + self.get_lang_items(LOCAL_CRATE) + } + + pub fn stability(self) -> Rc> { + // FIXME(#42293) we should actually track this, but fails too many tests + // today. + self.dep_graph.with_ignore(|| { + self.stability_index(LOCAL_CRATE) + }) + } + + pub fn crates(self) -> Rc> { + self.all_crate_nums(LOCAL_CRATE) + } + + pub fn def_key(self, id: DefId) -> hir_map::DefKey { + if id.is_local() { + self.hir.def_key(id) + } else { + self.cstore.def_key(id) + } + } + + /// Convert a `DefId` into its fully expanded `DefPath` (every + /// `DefId` is really just an interned def-path). + /// + /// Note that if `id` is not local to this crate, the result will + /// be a non-local `DefPath`. + pub fn def_path(self, id: DefId) -> hir_map::DefPath { + if id.is_local() { + self.hir.def_path(id) + } else { + self.cstore.def_path(id) + } + } + + #[inline] + pub fn def_path_hash(self, def_id: DefId) -> hir_map::DefPathHash { + if def_id.is_local() { + self.hir.definitions().def_path_hash(def_id.index) + } else { + self.cstore.def_path_hash(def_id) + } + } + + pub fn def_path_debug_str(self, def_id: DefId) -> String { + // We are explicitly not going through queries here in order to get + // crate name and disambiguator since this code is called from debug!() + // statements within the query system and we'd run into endless + // recursion otherwise. + let (crate_name, crate_disambiguator) = if def_id.is_local() { + (self.crate_name.clone(), + self.sess.local_crate_disambiguator()) + } else { + (self.cstore.crate_name_untracked(def_id.krate), + self.cstore.crate_disambiguator_untracked(def_id.krate)) + }; + + format!("{}[{}]{}", + crate_name, + // Don't print the whole crate disambiguator. That's just + // annoying in debug output. 
+ &(crate_disambiguator.as_str())[..4], + self.def_path(def_id).to_string_no_crate()) + } + + pub fn metadata_encoding_version(self) -> Vec { + self.cstore.metadata_encoding_version().to_vec() + } + + // Note that this is *untracked* and should only be used within the query + // system if the result is otherwise tracked through queries + pub fn crate_data_as_rc_any(self, cnum: CrateNum) -> Rc { + self.cstore.crate_data_as_rc_any(cnum) + } + + pub fn create_stable_hashing_context(self) -> StableHashingContext<'gcx> { + let krate = self.dep_graph.with_ignore(|| self.gcx.hir.krate()); + + StableHashingContext::new(self.sess, + krate, + self.hir.definitions(), + self.cstore) + } + + // This method makes sure that we have a DepNode and a Fingerprint for + // every upstream crate. It needs to be called once right after the tcx is + // created. + // With full-fledged red/green, the method will probably become unnecessary + // as this will be done on-demand. + pub fn allocate_metadata_dep_nodes(self) { + // We cannot use the query versions of crates() and crate_hash(), since + // those would need the DepNodes that we are allocating here. + for cnum in self.cstore.crates_untracked() { + let dep_node = DepNode::new(self, DepConstructor::CrateMetadata(cnum)); + let crate_hash = self.cstore.crate_hash_untracked(cnum); + self.dep_graph.with_task(dep_node, + self, + crate_hash, + |_, x| x // No transformation needed + ); + } + } + + // This method exercises the `in_scope_traits_map` query for all possible + // values so that we have their fingerprints available in the DepGraph. + // This is only required as long as we still use the old dependency tracking + // which needs to have the fingerprints of all input nodes beforehand. + pub fn precompute_in_scope_traits_hashes(self) { + for &def_index in self.trait_map.keys() { + self.in_scope_traits_map(def_index); + } + } +} + +impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { + pub fn encode_metadata(self, link_meta: &LinkMeta, reachable: &NodeSet) + -> (EncodedMetadata, EncodedMetadataHashes) + { + self.cstore.encode_metadata(self, link_meta, reachable) + } } impl<'gcx: 'tcx, 'tcx> GlobalCtxt<'gcx> { @@ -1099,18 +1362,6 @@ pub trait Lift<'tcx> { fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option; } -impl<'a, 'tcx> Lift<'tcx> for ty::ParamEnv<'a> { - type Lifted = ty::ParamEnv<'tcx>; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option> { - self.caller_bounds.lift_to_tcx(tcx).and_then(|caller_bounds| { - Some(ty::ParamEnv { - reveal: self.reveal, - caller_bounds, - }) - }) - } -} - impl<'a, 'tcx> Lift<'tcx> for Ty<'a> { type Lifted = Ty<'tcx>; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option> { @@ -1126,13 +1377,10 @@ impl<'a, 'tcx> Lift<'tcx> for Ty<'a> { } } -impl<'a, 'tcx> Lift<'tcx> for &'a Substs<'a> { - type Lifted = &'tcx Substs<'tcx>; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<&'tcx Substs<'tcx>> { - if self.len() == 0 { - return Some(Slice::empty()); - } - if tcx.interners.arena.in_arena(&self[..] as *const _) { +impl<'a, 'tcx> Lift<'tcx> for Region<'a> { + type Lifted = Region<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option> { + if tcx.interners.arena.in_arena(*self as *const _) { return Some(unsafe { mem::transmute(*self) }); } // Also try in the global tcx if we're not that. 
@@ -1144,9 +1392,9 @@ impl<'a, 'tcx> Lift<'tcx> for &'a Substs<'a> { } } -impl<'a, 'tcx> Lift<'tcx> for Region<'a> { - type Lifted = Region<'tcx>; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option> { +impl<'a, 'tcx> Lift<'tcx> for &'a Const<'a> { + type Lifted = &'tcx Const<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<&'tcx Const<'tcx>> { if tcx.interners.arena.in_arena(*self as *const _) { return Some(unsafe { mem::transmute(*self) }); } @@ -1159,6 +1407,24 @@ impl<'a, 'tcx> Lift<'tcx> for Region<'a> { } } +impl<'a, 'tcx> Lift<'tcx> for &'a Substs<'a> { + type Lifted = &'tcx Substs<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<&'tcx Substs<'tcx>> { + if self.len() == 0 { + return Some(Slice::empty()); + } + if tcx.interners.arena.in_arena(&self[..] as *const _) { + return Some(unsafe { mem::transmute(*self) }); + } + // Also try in the global tcx if we're not that. + if !tcx.is_global() { + self.lift_to_tcx(tcx.global_tcx()) + } else { + None + } + } +} + impl<'a, 'tcx> Lift<'tcx> for &'a Slice> { type Lifted = &'tcx Slice>; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) @@ -1364,7 +1630,7 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { pub fn print_debug_stats(self) { sty_debug_print!( self, - TyAdt, TyArray, TySlice, TyRawPtr, TyRef, TyFnDef, TyFnPtr, + TyAdt, TyArray, TySlice, TyRawPtr, TyRef, TyFnDef, TyFnPtr, TyGenerator, TyDynamic, TyClosure, TyTuple, TyParam, TyInfer, TyProjection, TyAnon); println!("Substs interner: #{}", self.interners.substs.borrow().len()); @@ -1446,6 +1712,12 @@ impl<'tcx: 'lcx, 'lcx> Borrow<[Predicate<'lcx>]> } } +impl<'tcx: 'lcx, 'lcx> Borrow> for Interned<'tcx, Const<'tcx>> { + fn borrow<'a>(&'a self) -> &'a Const<'lcx> { + &self.0 + } +} + macro_rules! intern_method { ($lt_tcx:tt, $name:ident: $method:ident($alloc:ty, $alloc_method:ident, @@ -1526,7 +1798,8 @@ direct_interners!('tcx, &ty::ReVar(_) | &ty::ReSkolemized(..) => true, _ => false } - }) -> RegionKind + }) -> RegionKind, + const_: mk_const(|c: &Const| keep_local(&c.ty) || keep_local(&c.val)) -> Const<'tcx> ); macro_rules! 
slice_interners { @@ -1643,8 +1916,16 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.mk_imm_ptr(self.mk_nil()) } - pub fn mk_array(self, ty: Ty<'tcx>, n: usize) -> Ty<'tcx> { - self.mk_ty(TyArray(ty, n)) + pub fn mk_array(self, ty: Ty<'tcx>, n: u64) -> Ty<'tcx> { + let n = ConstUsize::new(n, self.sess.target.usize_ty).unwrap(); + self.mk_array_const_usize(ty, n) + } + + pub fn mk_array_const_usize(self, ty: Ty<'tcx>, n: ConstUsize) -> Ty<'tcx> { + self.mk_ty(TyArray(ty, self.mk_const(ty::Const { + val: ConstVal::Integral(ConstInt::Usize(n)), + ty: self.types.usize + }))) } pub fn mk_slice(self, ty: Ty<'tcx>) -> Ty<'tcx> { @@ -1719,6 +2000,14 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.mk_ty(TyClosure(closure_id, closure_substs)) } + pub fn mk_generator(self, + id: DefId, + closure_substs: ClosureSubsts<'tcx>, + interior: GeneratorInterior<'tcx>) + -> Ty<'tcx> { + self.mk_ty(TyGenerator(id, closure_substs, interior)) + } + pub fn mk_var(self, v: TyVid) -> Ty<'tcx> { self.mk_infer(TyVar(v)) } @@ -1898,6 +2187,29 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { let (level, src) = self.lint_level_at_node(lint, id); lint::struct_lint_level(self.sess, lint, level, src, None, msg) } + + pub fn in_scope_traits(self, id: HirId) -> Option>> { + self.in_scope_traits_map(id.owner) + .and_then(|map| map.get(&id.local_id).cloned()) + } + + pub fn named_region(self, id: HirId) -> Option { + self.named_region_map(id.owner) + .and_then(|map| map.get(&id.local_id).cloned()) + } + + pub fn is_late_bound(self, id: HirId) -> bool { + self.is_late_bound_map(id.owner) + .map(|set| set.contains(&id.local_id)) + .unwrap_or(false) + } + + pub fn object_lifetime_defaults(self, id: HirId) + -> Option>> + { + self.object_lifetime_defaults_map(id.owner) + .and_then(|map| map.get(&id.local_id).cloned()) + } } pub trait InternAs { @@ -1943,3 +2255,89 @@ impl InternIteratorElement for Result { Ok(f(&iter.collect::, _>>()?)) } } + +struct NamedRegionMap { + defs: FxHashMap>>, + late_bound: FxHashMap>>, + object_lifetime_defaults: + FxHashMap< + DefIndex, + Rc>>>, + >, +} + +pub fn provide(providers: &mut ty::maps::Providers) { + // FIXME(#44234) - almost all of these queries have no sub-queries and + // therefore no actual inputs, they're just reading tables calculated in + // resolve! Does this work? Unsure! That's what the issue is about + providers.in_scope_traits_map = |tcx, id| tcx.gcx.trait_map.get(&id).cloned(); + providers.module_exports = |tcx, id| tcx.gcx.export_map.get(&id).cloned(); + providers.named_region_map = |tcx, id| tcx.gcx.named_region_map.defs.get(&id).cloned(); + providers.is_late_bound_map = |tcx, id| tcx.gcx.named_region_map.late_bound.get(&id).cloned(); + providers.object_lifetime_defaults_map = |tcx, id| { + tcx.gcx.named_region_map.object_lifetime_defaults.get(&id).cloned() + }; + providers.crate_name = |tcx, id| { + assert_eq!(id, LOCAL_CRATE); + tcx.crate_name + }; + providers.get_lang_items = |tcx, id| { + assert_eq!(id, LOCAL_CRATE); + // FIXME(#42293) Right now we insert a `with_ignore` node in the dep + // graph here to ignore the fact that `get_lang_items` below depends on + // the entire crate. For now this'll prevent false positives of + // recompiling too much when anything changes. + // + // Once red/green incremental compilation lands we should be able to + // remove this because while the crate changes often the lint level map + // will change rarely. 
+ tcx.dep_graph.with_ignore(|| Rc::new(middle::lang_items::collect(tcx))) + }; + providers.freevars = |tcx, id| tcx.gcx.freevars.get(&id).cloned(); + providers.maybe_unused_trait_import = |tcx, id| { + tcx.maybe_unused_trait_imports.contains(&id) + }; + providers.maybe_unused_extern_crates = |tcx, cnum| { + assert_eq!(cnum, LOCAL_CRATE); + Rc::new(tcx.maybe_unused_extern_crates.clone()) + }; + + providers.stability_index = |tcx, cnum| { + assert_eq!(cnum, LOCAL_CRATE); + Rc::new(stability::Index::new(tcx)) + }; + providers.lookup_stability = |tcx, id| { + assert_eq!(id.krate, LOCAL_CRATE); + let id = tcx.hir.definitions().def_index_to_hir_id(id.index); + tcx.stability().local_stability(id) + }; + providers.lookup_deprecation_entry = |tcx, id| { + assert_eq!(id.krate, LOCAL_CRATE); + let id = tcx.hir.definitions().def_index_to_hir_id(id.index); + tcx.stability().local_deprecation_entry(id) + }; + providers.extern_mod_stmt_cnum = |tcx, id| { + let id = tcx.hir.as_local_node_id(id).unwrap(); + tcx.cstore.extern_mod_stmt_cnum_untracked(id) + }; + providers.all_crate_nums = |tcx, cnum| { + assert_eq!(cnum, LOCAL_CRATE); + Rc::new(tcx.cstore.crates_untracked()) + }; + providers.postorder_cnums = |tcx, cnum| { + assert_eq!(cnum, LOCAL_CRATE); + Rc::new(tcx.cstore.postorder_cnums_untracked()) + }; + providers.output_filenames = |tcx, cnum| { + assert_eq!(cnum, LOCAL_CRATE); + tcx.output_filenames.clone() + }; + providers.has_copy_closures = |tcx, cnum| { + assert_eq!(cnum, LOCAL_CRATE); + tcx.sess.features.borrow().copy_closures + }; + providers.has_clone_closures = |tcx, cnum| { + assert_eq!(cnum, LOCAL_CRATE); + tcx.sess.features.borrow().clone_closures + }; +} diff --git a/src/librustc/ty/error.rs b/src/librustc/ty/error.rs index 3442cf0ef6..52a8389bd8 100644 --- a/src/librustc/ty/error.rs +++ b/src/librustc/ty/error.rs @@ -10,6 +10,7 @@ use hir::def_id::DefId; use infer::type_variable; +use middle::const_val::ConstVal; use ty::{self, BoundRegion, DefIdTree, Region, Ty, TyCtxt}; use std::fmt; @@ -18,6 +19,8 @@ use syntax::ast; use errors::DiagnosticBuilder; use syntax_pos::Span; +use rustc_const_math::ConstInt; + use hir; #[derive(Clone, Copy, Debug)] @@ -34,13 +37,13 @@ pub enum TypeError<'tcx> { AbiMismatch(ExpectedFound), Mutability, TupleSize(ExpectedFound), - FixedArraySize(ExpectedFound), + FixedArraySize(ExpectedFound), ArgCount, + RegionsDoesNotOutlive(Region<'tcx>, Region<'tcx>), - RegionsNotSame(Region<'tcx>, Region<'tcx>), - RegionsNoOverlap(Region<'tcx>, Region<'tcx>), - RegionsInsufficientlyPolymorphic(BoundRegion, Region<'tcx>, Option>), - RegionsOverlyPolymorphic(BoundRegion, Region<'tcx>, Option>), + RegionsInsufficientlyPolymorphic(BoundRegion, Region<'tcx>), + RegionsOverlyPolymorphic(BoundRegion, Region<'tcx>), + Sorts(ExpectedFound>), IntMismatch(ExpectedFound), FloatMismatch(ExpectedFound), @@ -110,19 +113,13 @@ impl<'tcx> fmt::Display for TypeError<'tcx> { RegionsDoesNotOutlive(..) => { write!(f, "lifetime mismatch") } - RegionsNotSame(..) => { - write!(f, "lifetimes are not the same") - } - RegionsNoOverlap(..) 
=> { - write!(f, "lifetimes do not intersect") - } - RegionsInsufficientlyPolymorphic(br, _, _) => { + RegionsInsufficientlyPolymorphic(br, _) => { write!(f, "expected bound lifetime parameter{}{}, found concrete lifetime", if br.is_named() { " " } else { "" }, br) } - RegionsOverlyPolymorphic(br, _, _) => { + RegionsOverlyPolymorphic(br, _) => { write!(f, "expected concrete lifetime, found bound lifetime parameter{}{}", if br.is_named() { " " } else { "" }, @@ -185,7 +182,13 @@ impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> { ty::TyTuple(ref tys, _) if tys.is_empty() => self.to_string(), ty::TyAdt(def, _) => format!("{} `{}`", def.descr(), tcx.item_path_str(def.did)), - ty::TyArray(_, n) => format!("array of {} elements", n), + ty::TyArray(_, n) => { + if let ConstVal::Integral(ConstInt::Usize(n)) = n.val { + format!("array of {} elements", n) + } else { + "array".to_string() + } + } ty::TySlice(_) => "slice".to_string(), ty::TyRawPtr(_) => "*-ptr".to_string(), ty::TyRef(region, tymut) => { @@ -213,6 +216,7 @@ impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> { |p| format!("trait {}", tcx.item_path_str(p.def_id()))) } ty::TyClosure(..) => "closure".to_string(), + ty::TyGenerator(..) => "generator".to_string(), ty::TyTuple(..) => "tuple".to_string(), ty::TyInfer(ty::TyVar(_)) => "inferred type".to_string(), ty::TyInfer(ty::IntVar(_)) => "integral variable".to_string(), @@ -242,33 +246,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { use self::TypeError::*; match err.clone() { - RegionsDoesNotOutlive(subregion, superregion) => { - self.note_and_explain_region(db, "", subregion, "..."); - self.note_and_explain_region(db, "...does not necessarily outlive ", - superregion, ""); - } - RegionsNotSame(region1, region2) => { - self.note_and_explain_region(db, "", region1, "..."); - self.note_and_explain_region(db, "...is not the same lifetime as ", - region2, ""); - } - RegionsNoOverlap(region1, region2) => { - self.note_and_explain_region(db, "", region1, "..."); - self.note_and_explain_region(db, "...does not overlap ", - region2, ""); - } - RegionsInsufficientlyPolymorphic(_, conc_region, _) => { - self.note_and_explain_region(db, "concrete lifetime that was found is ", - conc_region, ""); - } - RegionsOverlyPolymorphic(_, &ty::ReVar(_), _) => { - // don't bother to print out the message below for - // inference variables, it's not very illuminating. - } - RegionsOverlyPolymorphic(_, conc_region, _) => { - self.note_and_explain_region(db, "expected concrete lifetime is ", - conc_region, ""); - } Sorts(values) => { let expected_str = values.expected.sort_string(self); let found_str = values.found.sort_string(self); diff --git a/src/librustc/ty/fast_reject.rs b/src/librustc/ty/fast_reject.rs index 68f85ba7d3..490bfe78a9 100644 --- a/src/librustc/ty/fast_reject.rs +++ b/src/librustc/ty/fast_reject.rs @@ -9,28 +9,44 @@ // except according to those terms. 
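// Illustrative, self-contained analogue (not part of the patch) of the
// `TyArray` diagnostic change above: with array lengths now carried as
// evaluated constants, the concrete length may be unavailable, so the message
// falls back to plain "array". `Option<u64>` stands in for the compiler's
// const value here; `describe_array` is a hypothetical helper.
fn describe_array(len: Option<u64>) -> String {
    match len {
        Some(n) => format!("array of {} elements", n),
        None => "array".to_string(),
    }
}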
use hir::def_id::DefId; -use ty::{self, Ty, TyCtxt}; +use ich::StableHashingContext; +use rustc_data_structures::stable_hasher::{StableHasher, StableHasherResult, + HashStable}; +use std::fmt::Debug; +use std::hash::Hash; +use std::mem; use syntax::ast; +use ty::{self, Ty, TyCtxt}; -use self::SimplifiedType::*; +use self::SimplifiedTypeGen::*; -/// See `simplify_type -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -pub enum SimplifiedType { +pub type SimplifiedType = SimplifiedTypeGen; + +/// See `simplify_type` +/// +/// Note that we keep this type generic over the type of identifier it uses +/// because we sometimes need to use SimplifiedTypeGen values as stable sorting +/// keys (in which case we use a DefPathHash as id-type) but in the general case +/// the non-stable but fast to construct DefId-version is the better choice. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub enum SimplifiedTypeGen + where D: Copy + Debug + Ord + Eq + Hash +{ BoolSimplifiedType, CharSimplifiedType, IntSimplifiedType(ast::IntTy), UintSimplifiedType(ast::UintTy), FloatSimplifiedType(ast::FloatTy), - AdtSimplifiedType(DefId), + AdtSimplifiedType(D), StrSimplifiedType, ArraySimplifiedType, PtrSimplifiedType, NeverSimplifiedType, TupleSimplifiedType(usize), - TraitSimplifiedType(DefId), - ClosureSimplifiedType(DefId), - AnonSimplifiedType(DefId), + TraitSimplifiedType(D), + ClosureSimplifiedType(D), + GeneratorSimplifiedType(D), + AnonSimplifiedType(D), FunctionSimplifiedType(usize), ParameterSimplifiedType, } @@ -72,6 +88,9 @@ pub fn simplify_type<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, ty::TyClosure(def_id, _) => { Some(ClosureSimplifiedType(def_id)) } + ty::TyGenerator(def_id, _, _) => { + Some(GeneratorSimplifiedType(def_id)) + } ty::TyNever => Some(NeverSimplifiedType), ty::TyTuple(ref tys, _) => { Some(TupleSimplifiedType(tys.len())) @@ -97,3 +116,62 @@ pub fn simplify_type<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, ty::TyInfer(_) | ty::TyError => None, } } + +impl SimplifiedTypeGen { + pub fn map_def(self, map: F) -> SimplifiedTypeGen + where F: Fn(D) -> U, + U: Copy + Debug + Ord + Eq + Hash, + { + match self { + BoolSimplifiedType => BoolSimplifiedType, + CharSimplifiedType => CharSimplifiedType, + IntSimplifiedType(t) => IntSimplifiedType(t), + UintSimplifiedType(t) => UintSimplifiedType(t), + FloatSimplifiedType(t) => FloatSimplifiedType(t), + AdtSimplifiedType(d) => AdtSimplifiedType(map(d)), + StrSimplifiedType => StrSimplifiedType, + ArraySimplifiedType => ArraySimplifiedType, + PtrSimplifiedType => PtrSimplifiedType, + NeverSimplifiedType => NeverSimplifiedType, + TupleSimplifiedType(n) => TupleSimplifiedType(n), + TraitSimplifiedType(d) => TraitSimplifiedType(map(d)), + ClosureSimplifiedType(d) => ClosureSimplifiedType(map(d)), + GeneratorSimplifiedType(d) => GeneratorSimplifiedType(map(d)), + AnonSimplifiedType(d) => AnonSimplifiedType(map(d)), + FunctionSimplifiedType(n) => FunctionSimplifiedType(n), + ParameterSimplifiedType => ParameterSimplifiedType, + } + } +} + +impl<'gcx, D> HashStable> for SimplifiedTypeGen + where D: Copy + Debug + Ord + Eq + Hash + + HashStable>, +{ + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + mem::discriminant(self).hash_stable(hcx, hasher); + match *self { + BoolSimplifiedType | + CharSimplifiedType | + StrSimplifiedType | + ArraySimplifiedType | + PtrSimplifiedType | + NeverSimplifiedType | + ParameterSimplifiedType => { + // nothing to do + } + IntSimplifiedType(t) => 
t.hash_stable(hcx, hasher), + UintSimplifiedType(t) => t.hash_stable(hcx, hasher), + FloatSimplifiedType(t) => t.hash_stable(hcx, hasher), + AdtSimplifiedType(d) => d.hash_stable(hcx, hasher), + TupleSimplifiedType(n) => n.hash_stable(hcx, hasher), + TraitSimplifiedType(d) => d.hash_stable(hcx, hasher), + ClosureSimplifiedType(d) => d.hash_stable(hcx, hasher), + GeneratorSimplifiedType(d) => d.hash_stable(hcx, hasher), + AnonSimplifiedType(d) => d.hash_stable(hcx, hasher), + FunctionSimplifiedType(n) => n.hash_stable(hcx, hasher), + } + } +} diff --git a/src/librustc/ty/flags.rs b/src/librustc/ty/flags.rs index 31ed61a919..9ece719c76 100644 --- a/src/librustc/ty/flags.rs +++ b/src/librustc/ty/flags.rs @@ -8,6 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use middle::const_val::{ConstVal, ConstAggregate}; use ty::subst::Substs; use ty::{self, Ty, TypeFlags, TypeFoldable}; @@ -85,6 +86,13 @@ impl FlagComputation { } } + &ty::TyGenerator(_, ref substs, ref interior) => { + self.add_flags(TypeFlags::HAS_TY_CLOSURE); + self.add_flags(TypeFlags::HAS_LOCAL_NAMES); + self.add_substs(&substs.substs); + self.add_ty(interior.witness); + } + &ty::TyClosure(_, ref substs) => { self.add_flags(TypeFlags::HAS_TY_CLOSURE); self.add_flags(TypeFlags::HAS_LOCAL_NAMES); @@ -138,7 +146,12 @@ impl FlagComputation { self.add_region(r); } - &ty::TyArray(tt, _) | &ty::TySlice(tt) => { + &ty::TyArray(tt, len) => { + self.add_ty(tt); + self.add_const(len); + } + + &ty::TySlice(tt) => { self.add_ty(tt) } @@ -195,6 +208,40 @@ impl FlagComputation { } } + fn add_const(&mut self, constant: &ty::Const) { + self.add_ty(constant.ty); + match constant.val { + ConstVal::Integral(_) | + ConstVal::Float(_) | + ConstVal::Str(_) | + ConstVal::ByteStr(_) | + ConstVal::Bool(_) | + ConstVal::Char(_) | + ConstVal::Variant(_) => {} + ConstVal::Function(_, substs) => { + self.add_substs(substs); + } + ConstVal::Aggregate(ConstAggregate::Struct(fields)) => { + for &(_, v) in fields { + self.add_const(v); + } + } + ConstVal::Aggregate(ConstAggregate::Tuple(fields)) | + ConstVal::Aggregate(ConstAggregate::Array(fields)) => { + for v in fields { + self.add_const(v); + } + } + ConstVal::Aggregate(ConstAggregate::Repeat(v, _)) => { + self.add_const(v); + } + ConstVal::Unevaluated(_, substs) => { + self.add_flags(TypeFlags::HAS_PROJECTION); + self.add_substs(substs); + } + } + } + fn add_existential_projection(&mut self, projection: &ty::ExistentialProjection) { self.add_substs(projection.substs); self.add_ty(projection.ty); diff --git a/src/librustc/ty/fold.rs b/src/librustc/ty/fold.rs index a1cd92c760..543e8f3e2f 100644 --- a/src/librustc/ty/fold.rs +++ b/src/librustc/ty/fold.rs @@ -39,6 +39,7 @@ //! These methods return true to indicate that the visitor has found what it is looking for //! and does not need to visit anything else. 
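// Self-contained sketch (not part of the patch) of the `map_def` pattern used
// by `SimplifiedTypeGen` above: a key type generic over its identifier, so the
// same shape can carry either the fast-to-build `DefId` form or a stable form
// (e.g. keyed by `DefPathHash`) suitable for sorting. `KeyGen` and `map_id`
// are hypothetical names.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
enum KeyGen<D> {
    Adt(D),
    Tuple(usize),
    Str,
}

impl<D> KeyGen<D> {
    fn map_id<U, F>(self, f: F) -> KeyGen<U>
        where F: Fn(D) -> U
    {
        match self {
            KeyGen::Adt(d) => KeyGen::Adt(f(d)),   // only id-carrying variants change
            KeyGen::Tuple(n) => KeyGen::Tuple(n),
            KeyGen::Str => KeyGen::Str,
        }
    }
}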
+use middle::const_val::ConstVal; use ty::{self, Binder, Ty, TyCtxt, TypeFlags}; use std::fmt; @@ -67,7 +68,7 @@ pub trait TypeFoldable<'tcx>: fmt::Debug + Clone { fn has_type_flags(&self, flags: TypeFlags) -> bool { self.visit_with(&mut HasTypeFlagsVisitor { flags: flags }) } - fn has_projection_types(&self) -> bool { + fn has_projections(&self) -> bool { self.has_type_flags(TypeFlags::HAS_PROJECTION) } fn references_error(&self) -> bool { @@ -139,6 +140,10 @@ pub trait TypeFolder<'gcx: 'tcx, 'tcx> : Sized { fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { r.super_fold_with(self) } + + fn fold_const(&mut self, c: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> { + c.super_fold_with(self) + } } pub trait TypeVisitor<'tcx> : Sized { @@ -153,6 +158,10 @@ pub trait TypeVisitor<'tcx> : Sized { fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool { r.super_visit_with(self) } + + fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool { + c.super_visit_with(self) + } } /////////////////////////////////////////////////////////////////////////// @@ -603,6 +612,17 @@ impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor { debug!("HasTypeFlagsVisitor: r={:?} r.flags={:?} self.flags={:?}", r, flags, self.flags); flags.intersects(self.flags) } + + fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool { + if let ConstVal::Unevaluated(..) = c.val { + let projection_flags = TypeFlags::HAS_NORMALIZABLE_PROJECTION | + TypeFlags::HAS_PROJECTION; + if projection_flags.intersects(self.flags) { + return true; + } + } + c.super_visit_with(self) + } } /// Collects all the late-bound regions it finds into a hash set. diff --git a/src/librustc/ty/inhabitedness/mod.rs b/src/librustc/ty/inhabitedness/mod.rs index 900197f3db..a829814e09 100644 --- a/src/librustc/ty/inhabitedness/mod.rs +++ b/src/librustc/ty/inhabitedness/mod.rs @@ -205,7 +205,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { })) }, TyArray(ty, len) => { - if len == 0 { + if len.val.to_const_int().and_then(|i| i.to_u64()) == Some(0) { DefIdForest::empty() } else { ty.uninhabited_from(visited, tcx) diff --git a/src/librustc/ty/instance.rs b/src/librustc/ty/instance.rs index 7d543f689c..35ab1cec4c 100644 --- a/src/librustc/ty/instance.rs +++ b/src/librustc/ty/instance.rs @@ -9,7 +9,11 @@ // except according to those terms. use hir::def_id::DefId; -use ty::{self, Ty, TypeFoldable, Substs}; +use ty::{self, Ty, TypeFoldable, Substs, TyCtxt}; +use ty::subst::{Kind, Subst}; +use traits; +use syntax::abi::Abi; +use syntax::codemap::DUMMY_SP; use util::ppaux; use std::fmt; @@ -38,7 +42,7 @@ pub enum InstanceDef<'tcx> { /// drop_in_place::; None for empty drop glue. DropGlue(DefId, Option>), - /// Builtin method implementation, e.g. `Clone::clone`. + ///`::clone` shim. 
CloneShim(DefId, Ty<'tcx>), } @@ -57,12 +61,12 @@ impl<'tcx> InstanceDef<'tcx> { } #[inline] - pub fn def_ty<'a>(&self, tcx: ty::TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> { + pub fn def_ty<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> { tcx.type_of(self.def_id()) } #[inline] - pub fn attrs<'a>(&self, tcx: ty::TyCtxt<'a, 'tcx, 'tcx>) -> ty::Attributes<'tcx> { + pub fn attrs<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> ty::Attributes<'tcx> { tcx.get_attrs(self.def_id()) } } @@ -103,7 +107,7 @@ impl<'a, 'b, 'tcx> Instance<'tcx> { Instance { def: InstanceDef::Item(def_id), substs: substs } } - pub fn mono(tcx: ty::TyCtxt<'a, 'tcx, 'b>, def_id: DefId) -> Instance<'tcx> { + pub fn mono(tcx: TyCtxt<'a, 'tcx, 'b>, def_id: DefId) -> Instance<'tcx> { Instance::new(def_id, tcx.global_tcx().empty_substs_for_def_id(def_id)) } @@ -111,4 +115,211 @@ impl<'a, 'b, 'tcx> Instance<'tcx> { pub fn def_id(&self) -> DefId { self.def.def_id() } + + /// Resolve a (def_id, substs) pair to an (optional) instance -- most commonly, + /// this is used to find the precise code that will run for a trait method invocation, + /// if known. + /// + /// Returns `None` if we cannot resolve `Instance` to a specific instance. + /// For example, in a context like this, + /// + /// ``` + /// fn foo(t: T) { ... } + /// ``` + /// + /// trying to resolve `Debug::fmt` applied to `T` will yield `None`, because we do not + /// know what code ought to run. (Note that this setting is also affected by the + /// `RevealMode` in the parameter environment.) + /// + /// Presuming that coherence and type-check have succeeded, if this method is invoked + /// in a monomorphic context (i.e., like during trans), then it is guaranteed to return + /// `Some`. + pub fn resolve(tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + def_id: DefId, + substs: &'tcx Substs<'tcx>) -> Option> { + debug!("resolve(def_id={:?}, substs={:?})", def_id, substs); + let result = if let Some(trait_def_id) = tcx.trait_of_item(def_id) { + debug!(" => associated item, attempting to find impl"); + let item = tcx.associated_item(def_id); + resolve_associated_item(tcx, &item, param_env, trait_def_id, substs) + } else { + let ty = tcx.type_of(def_id); + let item_type = tcx.trans_apply_param_substs(substs, &ty); + + let def = match item_type.sty { + ty::TyFnDef(..) 
if { + let f = item_type.fn_sig(tcx); + f.abi() == Abi::RustIntrinsic || + f.abi() == Abi::PlatformIntrinsic + } => + { + debug!(" => intrinsic"); + ty::InstanceDef::Intrinsic(def_id) + } + _ => { + if Some(def_id) == tcx.lang_items().drop_in_place_fn() { + let ty = substs.type_at(0); + if ty.needs_drop(tcx, ty::ParamEnv::empty(traits::Reveal::All)) { + debug!(" => nontrivial drop glue"); + ty::InstanceDef::DropGlue(def_id, Some(ty)) + } else { + debug!(" => trivial drop glue"); + ty::InstanceDef::DropGlue(def_id, None) + } + } else { + debug!(" => free item"); + ty::InstanceDef::Item(def_id) + } + } + }; + Some(Instance { + def: def, + substs: substs + }) + }; + debug!("resolve(def_id={:?}, substs={:?}) = {:?}", def_id, substs, result); + result + } +} + +fn resolve_closure<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId, + substs: ty::ClosureSubsts<'tcx>, + requested_kind: ty::ClosureKind) +-> Instance<'tcx> +{ + let actual_kind = tcx.closure_kind(def_id); + + match needs_fn_once_adapter_shim(actual_kind, requested_kind) { + Ok(true) => fn_once_adapter_instance(tcx, def_id, substs), + _ => Instance::new(def_id, substs.substs) + } +} + +fn resolve_associated_item<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + trait_item: &ty::AssociatedItem, + param_env: ty::ParamEnv<'tcx>, + trait_id: DefId, + rcvr_substs: &'tcx Substs<'tcx> + ) -> Option> { + let def_id = trait_item.def_id; + debug!("resolve_associated_item(trait_item={:?}, \ + trait_id={:?}, \ + rcvr_substs={:?})", + def_id, trait_id, rcvr_substs); + + let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs); + let vtbl = tcx.trans_fulfill_obligation(DUMMY_SP, param_env, ty::Binder(trait_ref)); + + // Now that we know which impl is being used, we can dispatch to + // the actual function: + match vtbl { + traits::VtableImpl(impl_data) => { + let (def_id, substs) = traits::find_associated_item( + tcx, trait_item, rcvr_substs, &impl_data); + let substs = tcx.erase_regions(&substs); + Some(ty::Instance::new(def_id, substs)) + } + traits::VtableGenerator(closure_data) => { + Some(Instance { + def: ty::InstanceDef::Item(closure_data.closure_def_id), + substs: closure_data.substs.substs + }) + } + traits::VtableClosure(closure_data) => { + let trait_closure_kind = tcx.lang_items().fn_trait_kind(trait_id).unwrap(); + Some(resolve_closure(tcx, closure_data.closure_def_id, closure_data.substs, + trait_closure_kind)) + } + traits::VtableFnPointer(ref data) => { + Some(Instance { + def: ty::InstanceDef::FnPtrShim(trait_item.def_id, data.fn_ty), + substs: rcvr_substs + }) + } + traits::VtableObject(ref data) => { + let index = tcx.get_vtable_index_of_object_method(data, def_id); + Some(Instance { + def: ty::InstanceDef::Virtual(def_id, index), + substs: rcvr_substs + }) + } + traits::VtableBuiltin(..) => { + if let Some(_) = tcx.lang_items().clone_trait() { + Some(Instance { + def: ty::InstanceDef::CloneShim(def_id, trait_ref.self_ty()), + substs: rcvr_substs + }) + } else { + None + } + } + traits::VtableDefaultImpl(..) | traits::VtableParam(..) => None + } +} + +fn needs_fn_once_adapter_shim<'a, 'tcx>(actual_closure_kind: ty::ClosureKind, + trait_closure_kind: ty::ClosureKind) + -> Result +{ + match (actual_closure_kind, trait_closure_kind) { + (ty::ClosureKind::Fn, ty::ClosureKind::Fn) | + (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) | + (ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce) => { + // No adapter needed. 
+ Ok(false) + } + (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => { + // The closure fn `llfn` is a `fn(&self, ...)`. We want a + // `fn(&mut self, ...)`. In fact, at trans time, these are + // basically the same thing, so we can just return llfn. + Ok(false) + } + (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) | + (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => { + // The closure fn `llfn` is a `fn(&self, ...)` or `fn(&mut + // self, ...)`. We want a `fn(self, ...)`. We can produce + // this by doing something like: + // + // fn call_once(self, ...) { call_mut(&self, ...) } + // fn call_once(mut self, ...) { call_mut(&mut self, ...) } + // + // These are both the same at trans time. + Ok(true) + } + (ty::ClosureKind::FnMut, _) | + (ty::ClosureKind::FnOnce, _) => Err(()) + } +} + +fn fn_once_adapter_instance<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + closure_did: DefId, + substs: ty::ClosureSubsts<'tcx>, + ) -> Instance<'tcx> { + debug!("fn_once_adapter_shim({:?}, {:?})", + closure_did, + substs); + let fn_once = tcx.lang_items().fn_once_trait().unwrap(); + let call_once = tcx.associated_items(fn_once) + .find(|it| it.kind == ty::AssociatedKind::Method) + .unwrap().def_id; + let def = ty::InstanceDef::ClosureOnceShim { call_once }; + + let self_ty = tcx.mk_closure_from_closure_substs( + closure_did, substs); + + let sig = tcx.fn_sig(closure_did).subst(tcx, substs.substs); + let sig = tcx.erase_late_bound_regions_and_normalize(&sig); + assert_eq!(sig.inputs().len(), 1); + let substs = tcx.mk_substs([ + Kind::from(self_ty), + Kind::from(sig.inputs()[0]), + ].iter().cloned()); + + debug!("fn_once_adapter_shim: self_ty={:?} sig={:?}", self_ty, sig); + Instance { def, substs } } diff --git a/src/librustc/ty/item_path.rs b/src/librustc/ty/item_path.rs index 5caf513981..a8ccb3e269 100644 --- a/src/librustc/ty/item_path.rs +++ b/src/librustc/ty/item_path.rs @@ -13,6 +13,7 @@ use hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE}; use ty::{self, Ty, TyCtxt}; use syntax::ast; use syntax::symbol::Symbol; +use syntax::symbol::InternedString; use std::cell::Cell; @@ -128,9 +129,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn try_push_visible_item_path(self, buffer: &mut T, external_def_id: DefId) -> bool where T: ItemPathBuffer { - let visible_parent_map = self.sess.cstore.visible_parent_map(self.sess); + let visible_parent_map = self.visible_parent_map(LOCAL_CRATE); - let (mut cur_def, mut cur_path) = (external_def_id, Vec::::new()); + let (mut cur_def, mut cur_path) = (external_def_id, Vec::::new()); loop { // If `cur_def` is a direct or injected extern crate, push the path to the crate // followed by the path to the item within the crate and return. 
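// Illustrative, self-contained demo (not part of the patch) of the adapter
// logic described in `needs_fn_once_adapter_shim` above: a closure that
// implements `Fn` can be used where `FnOnce` is expected, because the
// by-value entry point can simply forward to the by-reference call; the
// generated shim performs exactly that forwarding at the MIR level.
fn consume_once<F>(f: F) -> i32
    where F: FnOnce() -> i32
{
    f()
}

fn adapter_demo() -> i32 {
    let x = 40;
    let add_two = || x + 2;   // captures `x` by reference: implements Fn
    consume_once(add_two)     // called through a FnOnce bound; returns 42
}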
@@ -138,21 +139,21 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { match *self.extern_crate(cur_def) { Some(ref extern_crate) if extern_crate.direct => { self.push_item_path(buffer, extern_crate.def_id); - cur_path.iter().rev().map(|segment| buffer.push(&segment.as_str())).count(); + cur_path.iter().rev().map(|segment| buffer.push(&segment)).count(); return true; } None => { buffer.push(&self.crate_name(cur_def.krate).as_str()); - cur_path.iter().rev().map(|segment| buffer.push(&segment.as_str())).count(); + cur_path.iter().rev().map(|segment| buffer.push(&segment)).count(); return true; } _ => {}, } } - cur_path.push(self.sess.cstore.def_key(cur_def) + cur_path.push(self.def_key(cur_def) .disambiguated_data.data.get_opt_name().unwrap_or_else(|| - Symbol::intern(""))); + Symbol::intern("").as_str())); match visible_parent_map.get(&cur_def) { Some(&def) => cur_def = def, None => return false, @@ -194,7 +195,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { data @ DefPathData::Initializer | data @ DefPathData::MacroDef(..) | data @ DefPathData::ClosureExpr | - data @ DefPathData::Binding(..) | data @ DefPathData::ImplTrait | data @ DefPathData::Typeof | data @ DefPathData::GlobalMetaData(..) => { @@ -345,6 +345,7 @@ pub fn characteristic_def_id_of_type(ty: Ty) -> Option { ty::TyFnDef(def_id, _) | ty::TyClosure(def_id, _) => Some(def_id), + ty::TyGenerator(def_id, _, _) => Some(def_id), ty::TyBool | ty::TyChar | diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 4ee9b2e65a..1709f9ed2d 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -23,8 +23,13 @@ use std::cmp; use std::fmt; use std::i64; use std::iter; +use std::mem; use std::ops::Deref; +use ich::StableHashingContext; +use rustc_data_structures::stable_hasher::{HashStable, StableHasher, + StableHasherResult}; + /// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout) /// for a target, which contains everything needed to compute layouts. pub struct TargetDataLayout { @@ -386,7 +391,7 @@ impl Integer { } } - pub fn to_ty<'a, 'tcx>(&self, tcx: &ty::TyCtxt<'a, 'tcx, 'tcx>, + pub fn to_ty<'a, 'tcx>(&self, tcx: &TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> { match (*self, signed) { (I1, false) => tcx.types.u8, @@ -837,12 +842,22 @@ impl<'a, 'tcx> Struct { // Is this a fixed-size array of something non-zero // with at least one element? - (_, &ty::TyArray(ety, d)) if d > 0 => { - Struct::non_zero_field_paths( - tcx, - param_env, - Some(ety).into_iter(), - None) + (_, &ty::TyArray(ety, mut count)) => { + if count.has_projections() { + count = tcx.normalize_associated_type_in_env(&count, param_env); + if count.has_projections() { + return Err(LayoutError::Unknown(ty)); + } + } + if count.val.to_const_int().unwrap().to_u64().unwrap() != 0 { + Struct::non_zero_field_paths( + tcx, + param_env, + Some(ety).into_iter(), + None) + } else { + Ok(None) + } } (_, &ty::TyProjection(_)) | (_, &ty::TyAnon(..)) => { @@ -1174,12 +1189,17 @@ impl<'a, 'tcx> Layout { } // Arrays and slices. - ty::TyArray(element, count) => { + ty::TyArray(element, mut count) => { + if count.has_projections() { + count = tcx.normalize_associated_type_in_env(&count, param_env); + if count.has_projections() { + return Err(LayoutError::Unknown(ty)); + } + } + let element = element.layout(tcx, param_env)?; let element_size = element.size(dl); - // FIXME(eddyb) Don't use host `usize` for array lengths. 
- let usize_count: usize = count; - let count = usize_count as u64; + let count = count.val.to_const_int().unwrap().to_u64().unwrap(); if element_size.checked_mul(count, dl).is_none() { return Err(LayoutError::SizeOverflow(ty)); } @@ -1226,7 +1246,17 @@ impl<'a, 'tcx> Layout { Univariant { variant: unit, non_zero: false } } - // Tuples and closures. + // Tuples, generators and closures. + ty::TyGenerator(def_id, ref substs, _) => { + let tys = substs.field_tys(def_id, tcx); + let st = Struct::new(dl, + &tys.map(|ty| ty.layout(tcx, param_env)) + .collect::, _>>()?, + &ReprOptions::default(), + StructKind::AlwaysSizedUnivariant, ty)?; + Univariant { variant: st, non_zero: false } + } + ty::TyClosure(def_id, ref substs) => { let tys = substs.upvar_tys(def_id, tcx); let st = Struct::new(dl, @@ -1334,7 +1364,7 @@ impl<'a, 'tcx> Layout { } else { let st = Struct::new(dl, &fields, &def.repr, kind, ty)?; - let non_zero = Some(def.did) == tcx.lang_items.non_zero(); + let non_zero = Some(def.did) == tcx.lang_items().non_zero(); Univariant { variant: st, non_zero: non_zero } }; return success(layout); @@ -2033,7 +2063,7 @@ impl<'a, 'tcx> SizeSkeleton<'tcx> { if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 { return Ok(SizeSkeleton::Pointer { non_zero: non_zero || - Some(def.did) == tcx.lang_items.non_zero(), + Some(def.did) == tcx.lang_items().non_zero(), tail, }); } else { @@ -2240,11 +2270,15 @@ impl<'a, 'tcx> TyLayout<'tcx> { ty::TySlice(element) => element, ty::TyStr => tcx.types.u8, - // Tuples and closures. + // Tuples, generators and closures. ty::TyClosure(def_id, ref substs) => { substs.upvar_tys(def_id, tcx).nth(i).unwrap() } + ty::TyGenerator(def_id, ref substs, _) => { + substs.field_tys(def_id, tcx).nth(i).unwrap() + } + ty::TyTuple(tys, _) => tys[i], // SIMD vector types. 
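// Minimal sketch (not part of the patch) of the overflow check performed by
// the array layout code above, now that the element count comes from an
// evaluated constant: element size times count, with `None` playing the role
// of `LayoutError::SizeOverflow`. `array_size_bytes` is a hypothetical helper.
fn array_size_bytes(element_size: u64, count: u64) -> Option<u64> {
    element_size.checked_mul(count)
}

// e.g. array_size_bytes(8, 4) == Some(32), while
//      array_size_bytes(8, u64::max_value()) == None  (size overflow)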
@@ -2271,3 +2305,128 @@ impl<'a, 'tcx> TyLayout<'tcx> { cx.layout_of(cx.normalize_projections(self.field_type(cx, i))) } } + +impl<'gcx> HashStable> for Layout +{ + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + use ty::layout::Layout::*; + mem::discriminant(self).hash_stable(hcx, hasher); + + match *self { + Scalar { value, non_zero } => { + value.hash_stable(hcx, hasher); + non_zero.hash_stable(hcx, hasher); + } + Vector { element, count } => { + element.hash_stable(hcx, hasher); + count.hash_stable(hcx, hasher); + } + Array { sized, align, primitive_align, element_size, count } => { + sized.hash_stable(hcx, hasher); + align.hash_stable(hcx, hasher); + primitive_align.hash_stable(hcx, hasher); + element_size.hash_stable(hcx, hasher); + count.hash_stable(hcx, hasher); + } + FatPointer { ref metadata, non_zero } => { + metadata.hash_stable(hcx, hasher); + non_zero.hash_stable(hcx, hasher); + } + CEnum { discr, signed, non_zero, min, max } => { + discr.hash_stable(hcx, hasher); + signed.hash_stable(hcx, hasher); + non_zero.hash_stable(hcx, hasher); + min.hash_stable(hcx, hasher); + max.hash_stable(hcx, hasher); + } + Univariant { ref variant, non_zero } => { + variant.hash_stable(hcx, hasher); + non_zero.hash_stable(hcx, hasher); + } + UntaggedUnion { ref variants } => { + variants.hash_stable(hcx, hasher); + } + General { discr, ref variants, size, align, primitive_align } => { + discr.hash_stable(hcx, hasher); + variants.hash_stable(hcx, hasher); + size.hash_stable(hcx, hasher); + align.hash_stable(hcx, hasher); + primitive_align.hash_stable(hcx, hasher); + } + RawNullablePointer { nndiscr, ref value } => { + nndiscr.hash_stable(hcx, hasher); + value.hash_stable(hcx, hasher); + } + StructWrappedNullablePointer { + nndiscr, + ref nonnull, + ref discrfield, + ref discrfield_source + } => { + nndiscr.hash_stable(hcx, hasher); + nonnull.hash_stable(hcx, hasher); + discrfield.hash_stable(hcx, hasher); + discrfield_source.hash_stable(hcx, hasher); + } + } + } +} + +impl_stable_hash_for!(enum ::ty::layout::Integer { + I1, + I8, + I16, + I32, + I64, + I128 +}); + +impl_stable_hash_for!(enum ::ty::layout::Primitive { + Int(integer), + F32, + F64, + Pointer +}); + +impl_stable_hash_for!(struct ::ty::layout::Align { + abi, + pref +}); + +impl_stable_hash_for!(struct ::ty::layout::Size { + raw +}); + +impl<'gcx> HashStable> for LayoutError<'gcx> +{ + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + use ty::layout::LayoutError::*; + mem::discriminant(self).hash_stable(hcx, hasher); + + match *self { + Unknown(t) | + SizeOverflow(t) => t.hash_stable(hcx, hasher) + } + } +} + +impl_stable_hash_for!(struct ::ty::layout::Struct { + align, + primitive_align, + packed, + sized, + offsets, + memory_index, + min_size +}); + +impl_stable_hash_for!(struct ::ty::layout::Union { + align, + primitive_align, + min_size, + packed +}); diff --git a/src/librustc/ty/maps.rs b/src/librustc/ty/maps.rs deleted file mode 100644 index a73202ced6..0000000000 --- a/src/librustc/ty/maps.rs +++ /dev/null @@ -1,1174 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use dep_graph::{DepConstructor, DepNode, DepNodeIndex}; -use errors::{Diagnostic, DiagnosticBuilder}; -use hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; -use hir::def::Def; -use hir; -use lint; -use middle::const_val; -use middle::cstore::{ExternCrate, LinkagePreference}; -use middle::privacy::AccessLevels; -use middle::region::RegionMaps; -use mir; -use mir::transform::{MirSuite, MirPassIndex}; -use session::CompileResult; -use traits::specialization_graph; -use ty::{self, CrateInherentImpls, Ty, TyCtxt}; -use ty::layout::{Layout, LayoutError}; -use ty::item_path; -use ty::steal::Steal; -use ty::subst::Substs; -use ty::fast_reject::SimplifiedType; -use util::nodemap::{DefIdSet, NodeSet}; -use util::common::{profq_msg, ProfileQueriesMsg}; - -use rustc_data_structures::indexed_vec::IndexVec; -use rustc_data_structures::fx::FxHashMap; -use std::cell::{RefCell, RefMut, Cell}; -use std::fmt::Debug; -use std::hash::Hash; -use std::marker::PhantomData; -use std::mem; -use std::collections::BTreeMap; -use std::ops::Deref; -use std::rc::Rc; -use syntax_pos::{Span, DUMMY_SP}; -use syntax::attr; -use syntax::ast; -use syntax::symbol::Symbol; - -pub trait Key: Clone + Hash + Eq + Debug { - fn map_crate(&self) -> CrateNum; - fn default_span(&self, tcx: TyCtxt) -> Span; -} - -impl<'tcx> Key for ty::InstanceDef<'tcx> { - fn map_crate(&self) -> CrateNum { - LOCAL_CRATE - } - - fn default_span(&self, tcx: TyCtxt) -> Span { - tcx.def_span(self.def_id()) - } -} - -impl<'tcx> Key for ty::Instance<'tcx> { - fn map_crate(&self) -> CrateNum { - LOCAL_CRATE - } - - fn default_span(&self, tcx: TyCtxt) -> Span { - tcx.def_span(self.def_id()) - } -} - -impl Key for CrateNum { - fn map_crate(&self) -> CrateNum { - *self - } - fn default_span(&self, _: TyCtxt) -> Span { - DUMMY_SP - } -} - -impl Key for DefId { - fn map_crate(&self) -> CrateNum { - self.krate - } - fn default_span(&self, tcx: TyCtxt) -> Span { - tcx.def_span(*self) - } -} - -impl Key for (DefId, DefId) { - fn map_crate(&self) -> CrateNum { - self.0.krate - } - fn default_span(&self, tcx: TyCtxt) -> Span { - self.1.default_span(tcx) - } -} - -impl Key for (CrateNum, DefId) { - fn map_crate(&self) -> CrateNum { - self.0 - } - fn default_span(&self, tcx: TyCtxt) -> Span { - self.1.default_span(tcx) - } -} - -impl Key for (DefId, SimplifiedType) { - fn map_crate(&self) -> CrateNum { - self.0.krate - } - fn default_span(&self, tcx: TyCtxt) -> Span { - self.0.default_span(tcx) - } -} - -impl<'tcx> Key for (DefId, &'tcx Substs<'tcx>) { - fn map_crate(&self) -> CrateNum { - self.0.krate - } - fn default_span(&self, tcx: TyCtxt) -> Span { - self.0.default_span(tcx) - } -} - -impl Key for (MirSuite, DefId) { - fn map_crate(&self) -> CrateNum { - self.1.map_crate() - } - fn default_span(&self, tcx: TyCtxt) -> Span { - self.1.default_span(tcx) - } -} - -impl Key for (MirSuite, MirPassIndex, DefId) { - fn map_crate(&self) -> CrateNum { - self.2.map_crate() - } - fn default_span(&self, tcx: TyCtxt) -> Span { - self.2.default_span(tcx) - } -} - -impl<'tcx, T: Clone + Hash + Eq + Debug> Key for ty::ParamEnvAnd<'tcx, T> { - fn map_crate(&self) -> CrateNum { - LOCAL_CRATE - } - fn default_span(&self, _: TyCtxt) -> Span { - DUMMY_SP - } -} - -trait Value<'tcx>: Sized { - fn from_cycle_error<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self; -} - -impl<'tcx, T> Value<'tcx> for T { - default fn from_cycle_error<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> T { - tcx.sess.abort_if_errors(); - bug!("Value::from_cycle_error called without errors"); - } -} - -impl<'tcx, T: Default> 
Value<'tcx> for T { - default fn from_cycle_error<'a>(_: TyCtxt<'a, 'tcx, 'tcx>) -> T { - T::default() - } -} - -impl<'tcx> Value<'tcx> for Ty<'tcx> { - fn from_cycle_error<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> { - tcx.types.err - } -} - -impl<'tcx> Value<'tcx> for ty::DtorckConstraint<'tcx> { - fn from_cycle_error<'a>(_: TyCtxt<'a, 'tcx, 'tcx>) -> Self { - Self::empty() - } -} - -impl<'tcx> Value<'tcx> for ty::SymbolName { - fn from_cycle_error<'a>(_: TyCtxt<'a, 'tcx, 'tcx>) -> Self { - ty::SymbolName { name: Symbol::intern("").as_str() } - } -} - -struct QueryMap { - phantom: PhantomData, - map: FxHashMap>, -} - -struct QueryValue { - value: T, - index: DepNodeIndex, - diagnostics: Option>, -} - -struct QueryDiagnostics { - diagnostics: Vec, - emitted_diagnostics: Cell, -} - -impl QueryMap { - fn new() -> QueryMap { - QueryMap { - phantom: PhantomData, - map: FxHashMap(), - } - } -} - -struct CycleError<'a, 'tcx: 'a> { - span: Span, - cycle: RefMut<'a, [(Span, Query<'tcx>)]>, -} - -impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { - fn report_cycle(self, CycleError { span, cycle }: CycleError) - -> DiagnosticBuilder<'a> - { - // Subtle: release the refcell lock before invoking `describe()` - // below by dropping `cycle`. - let stack = cycle.to_vec(); - mem::drop(cycle); - - assert!(!stack.is_empty()); - - // Disable naming impls with types in this path, since that - // sometimes cycles itself, leading to extra cycle errors. - // (And cycle errors around impls tend to occur during the - // collect/coherence phases anyhow.) - item_path::with_forced_impl_filename_line(|| { - let mut err = - struct_span_err!(self.sess, span, E0391, - "unsupported cyclic reference between types/traits detected"); - err.span_label(span, "cyclic reference"); - - err.span_note(stack[0].0, &format!("the cycle begins when {}...", - stack[0].1.describe(self))); - - for &(span, ref query) in &stack[1..] 
{ - err.span_note(span, &format!("...which then requires {}...", - query.describe(self))); - } - - err.note(&format!("...which then again requires {}, completing the cycle.", - stack[0].1.describe(self))); - - return err - }) - } - - fn cycle_check(self, span: Span, query: Query<'gcx>, compute: F) - -> Result> - where F: FnOnce() -> R - { - { - let mut stack = self.maps.query_stack.borrow_mut(); - if let Some((i, _)) = stack.iter().enumerate().rev() - .find(|&(_, &(_, ref q))| *q == query) { - return Err(CycleError { - span, - cycle: RefMut::map(stack, |stack| &mut stack[i..]) - }); - } - stack.push((span, query)); - } - - let result = compute(); - - self.maps.query_stack.borrow_mut().pop(); - - Ok(result) - } -} - -pub trait QueryConfig { - type Key: Eq + Hash + Clone; - type Value; -} - -trait QueryDescription: QueryConfig { - fn describe(tcx: TyCtxt, key: Self::Key) -> String; -} - -impl> QueryDescription for M { - default fn describe(tcx: TyCtxt, def_id: DefId) -> String { - format!("processing `{}`", tcx.item_path_str(def_id)) - } -} - -impl<'tcx> QueryDescription for queries::is_copy_raw<'tcx> { - fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { - format!("computing whether `{}` is `Copy`", env.value) - } -} - -impl<'tcx> QueryDescription for queries::is_sized_raw<'tcx> { - fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { - format!("computing whether `{}` is `Sized`", env.value) - } -} - -impl<'tcx> QueryDescription for queries::is_freeze_raw<'tcx> { - fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { - format!("computing whether `{}` is freeze", env.value) - } -} - -impl<'tcx> QueryDescription for queries::needs_drop_raw<'tcx> { - fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { - format!("computing whether `{}` needs drop", env.value) - } -} - -impl<'tcx> QueryDescription for queries::layout_raw<'tcx> { - fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { - format!("computing layout of `{}`", env.value) - } -} - -impl<'tcx> QueryDescription for queries::super_predicates_of<'tcx> { - fn describe(tcx: TyCtxt, def_id: DefId) -> String { - format!("computing the supertraits of `{}`", - tcx.item_path_str(def_id)) - } -} - -impl<'tcx> QueryDescription for queries::type_param_predicates<'tcx> { - fn describe(tcx: TyCtxt, (_, def_id): (DefId, DefId)) -> String { - let id = tcx.hir.as_local_node_id(def_id).unwrap(); - format!("computing the bounds for type parameter `{}`", - tcx.hir.ty_param_name(id)) - } -} - -impl<'tcx> QueryDescription for queries::coherent_trait<'tcx> { - fn describe(tcx: TyCtxt, (_, def_id): (CrateNum, DefId)) -> String { - format!("coherence checking all impls of trait `{}`", - tcx.item_path_str(def_id)) - } -} - -impl<'tcx> QueryDescription for queries::crate_inherent_impls<'tcx> { - fn describe(_: TyCtxt, k: CrateNum) -> String { - format!("all inherent impls defined in crate `{:?}`", k) - } -} - -impl<'tcx> QueryDescription for queries::crate_inherent_impls_overlap_check<'tcx> { - fn describe(_: TyCtxt, _: CrateNum) -> String { - format!("check for overlap between inherent impls defined in this crate") - } -} - -impl<'tcx> QueryDescription for queries::crate_variances<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("computing the variances for items in this crate") - } -} - -impl<'tcx> QueryDescription for queries::mir_shims<'tcx> { - fn describe(tcx: TyCtxt, def: ty::InstanceDef<'tcx>) -> String { - 
format!("generating MIR shim for `{}`", - tcx.item_path_str(def.def_id())) - } -} - -impl<'tcx> QueryDescription for queries::privacy_access_levels<'tcx> { - fn describe(_: TyCtxt, _: CrateNum) -> String { - format!("privacy access levels") - } -} - -impl<'tcx> QueryDescription for queries::typeck_item_bodies<'tcx> { - fn describe(_: TyCtxt, _: CrateNum) -> String { - format!("type-checking all item bodies") - } -} - -impl<'tcx> QueryDescription for queries::reachable_set<'tcx> { - fn describe(_: TyCtxt, _: CrateNum) -> String { - format!("reachability") - } -} - -impl<'tcx> QueryDescription for queries::const_eval<'tcx> { - fn describe(tcx: TyCtxt, key: ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>) -> String { - format!("const-evaluating `{}`", tcx.item_path_str(key.value.0)) - } -} - -impl<'tcx> QueryDescription for queries::mir_keys<'tcx> { - fn describe(_: TyCtxt, _: CrateNum) -> String { - format!("getting a list of all mir_keys") - } -} - -impl<'tcx> QueryDescription for queries::symbol_name<'tcx> { - fn describe(_tcx: TyCtxt, instance: ty::Instance<'tcx>) -> String { - format!("computing the symbol for `{}`", instance) - } -} - -impl<'tcx> QueryDescription for queries::describe_def<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - bug!("describe_def") - } -} - -impl<'tcx> QueryDescription for queries::def_span<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - bug!("def_span") - } -} - - -impl<'tcx> QueryDescription for queries::stability<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - bug!("stability") - } -} - -impl<'tcx> QueryDescription for queries::deprecation<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - bug!("deprecation") - } -} - -impl<'tcx> QueryDescription for queries::item_attrs<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - bug!("item_attrs") - } -} - -impl<'tcx> QueryDescription for queries::is_exported_symbol<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - bug!("is_exported_symbol") - } -} - -impl<'tcx> QueryDescription for queries::fn_arg_names<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - bug!("fn_arg_names") - } -} - -impl<'tcx> QueryDescription for queries::impl_parent<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - bug!("impl_parent") - } -} - -impl<'tcx> QueryDescription for queries::trait_of_item<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - bug!("trait_of_item") - } -} - -impl<'tcx> QueryDescription for queries::item_body_nested_bodies<'tcx> { - fn describe(tcx: TyCtxt, def_id: DefId) -> String { - format!("nested item bodies of `{}`", tcx.item_path_str(def_id)) - } -} - -impl<'tcx> QueryDescription for queries::const_is_rvalue_promotable_to_static<'tcx> { - fn describe(tcx: TyCtxt, def_id: DefId) -> String { - format!("const checking if rvalue is promotable to static `{}`", - tcx.item_path_str(def_id)) - } -} - -impl<'tcx> QueryDescription for queries::is_mir_available<'tcx> { - fn describe(tcx: TyCtxt, def_id: DefId) -> String { - format!("checking if item is mir available: `{}`", - tcx.item_path_str(def_id)) - } -} - -impl<'tcx> QueryDescription for queries::trait_impls_of<'tcx> { - fn describe(tcx: TyCtxt, def_id: DefId) -> String { - format!("trait impls of `{}`", tcx.item_path_str(def_id)) - } -} - -impl<'tcx> QueryDescription for queries::is_object_safe<'tcx> { - fn describe(tcx: TyCtxt, def_id: DefId) -> String { - format!("determine object safety of trait `{}`", tcx.item_path_str(def_id)) - } -} - -impl<'tcx> QueryDescription for queries::is_const_fn<'tcx> { - fn 
describe(tcx: TyCtxt, def_id: DefId) -> String { - format!("checking if item is const fn: `{}`", tcx.item_path_str(def_id)) - } -} - -impl<'tcx> QueryDescription for queries::dylib_dependency_formats<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - "dylib dependency formats of crate".to_string() - } -} - -impl<'tcx> QueryDescription for queries::is_allocator<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - "checking if the crate is_allocator".to_string() - } -} - -impl<'tcx> QueryDescription for queries::is_panic_runtime<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - "checking if the crate is_panic_runtime".to_string() - } -} - -impl<'tcx> QueryDescription for queries::is_compiler_builtins<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - "checking if the crate is_compiler_builtins".to_string() - } -} - -impl<'tcx> QueryDescription for queries::has_global_allocator<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - "checking if the crate has_global_allocator".to_string() - } -} - -impl<'tcx> QueryDescription for queries::extern_crate<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - "getting crate's ExternCrateData".to_string() - } -} - -impl<'tcx> QueryDescription for queries::lint_levels<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("computing the lint levels for items in this crate") - } -} - -// If enabled, send a message to the profile-queries thread -macro_rules! profq_msg { - ($tcx:expr, $msg:expr) => { - if cfg!(debug_assertions) { - if $tcx.sess.profile_queries() { - profq_msg($msg) - } - } - } -} - -// If enabled, format a key using its debug string, which can be -// expensive to compute (in terms of time). -macro_rules! profq_key { - ($tcx:expr, $key:expr) => { - if cfg!(debug_assertions) { - if $tcx.sess.profile_queries_and_keys() { - Some(format!("{:?}", $key)) - } else { None } - } else { None } - } -} - -macro_rules! define_maps { - (<$tcx:tt> - $($(#[$attr:meta])* - [$($modifiers:tt)*] $name:ident: $node:ident($K:ty) -> $V:ty,)*) => { - define_map_struct! 
{ - tcx: $tcx, - input: ($(([$($modifiers)*] [$($attr)*] [$name]))*) - } - - impl<$tcx> Maps<$tcx> { - pub fn new(providers: IndexVec>) - -> Self { - Maps { - providers, - query_stack: RefCell::new(vec![]), - $($name: RefCell::new(QueryMap::new())),* - } - } - } - - #[allow(bad_style)] - #[derive(Copy, Clone, Debug, PartialEq, Eq)] - pub enum Query<$tcx> { - $($(#[$attr])* $name($K)),* - } - - #[allow(bad_style)] - #[derive(Clone, Debug, PartialEq, Eq)] - pub enum QueryMsg { - $($name(Option)),* - } - - impl<$tcx> Query<$tcx> { - pub fn describe(&self, tcx: TyCtxt) -> String { - match *self { - $(Query::$name(key) => queries::$name::describe(tcx, key)),* - } - } - } - - pub mod queries { - use std::marker::PhantomData; - - $(#[allow(bad_style)] - pub struct $name<$tcx> { - data: PhantomData<&$tcx ()> - })* - } - - $(impl<$tcx> QueryConfig for queries::$name<$tcx> { - type Key = $K; - type Value = $V; - } - - impl<'a, $tcx, 'lcx> queries::$name<$tcx> { - #[allow(unused)] - fn to_dep_node(tcx: TyCtxt<'a, $tcx, 'lcx>, key: &$K) -> DepNode { - use dep_graph::DepConstructor::*; - - DepNode::new(tcx, $node(*key)) - } - - fn try_get_with(tcx: TyCtxt<'a, $tcx, 'lcx>, - mut span: Span, - key: $K, - f: F) - -> Result> - where F: FnOnce(&$V) -> R - { - debug!("ty::queries::{}::try_get_with(key={:?}, span={:?})", - stringify!($name), - key, - span); - - profq_msg!(tcx, - ProfileQueriesMsg::QueryBegin( - span.clone(), - QueryMsg::$name(profq_key!(tcx, key)) - ) - ); - - if let Some(value) = tcx.maps.$name.borrow().map.get(&key) { - if let Some(ref d) = value.diagnostics { - if !d.emitted_diagnostics.get() { - d.emitted_diagnostics.set(true); - let handle = tcx.sess.diagnostic(); - for diagnostic in d.diagnostics.iter() { - DiagnosticBuilder::new_diagnostic(handle, diagnostic.clone()) - .emit(); - } - } - } - profq_msg!(tcx, ProfileQueriesMsg::CacheHit); - tcx.dep_graph.read_index(value.index); - return Ok(f(&value.value)); - } - // else, we are going to run the provider: - profq_msg!(tcx, ProfileQueriesMsg::ProviderBegin); - - // FIXME(eddyb) Get more valid Span's on queries. - // def_span guard is necessary to prevent a recursive loop, - // default_span calls def_span query internally. 
- if span == DUMMY_SP && stringify!($name) != "def_span" { - span = key.default_span(tcx) - } - - let res = tcx.cycle_check(span, Query::$name(key), || { - let dep_node = Self::to_dep_node(tcx, &key); - - tcx.sess.diagnostic().track_diagnostics(|| { - if dep_node.kind.is_anon() { - tcx.dep_graph.with_anon_task(dep_node.kind, || { - let provider = tcx.maps.providers[key.map_crate()].$name; - provider(tcx.global_tcx(), key) - }) - } else { - fn run_provider<'a, 'tcx, 'lcx>(tcx: TyCtxt<'a, 'tcx, 'lcx>, - key: $K) - -> $V { - let provider = tcx.maps.providers[key.map_crate()].$name; - provider(tcx.global_tcx(), key) - } - - tcx.dep_graph.with_task(dep_node, tcx, key, run_provider) - } - }) - })?; - profq_msg!(tcx, ProfileQueriesMsg::ProviderEnd); - let ((result, dep_node_index), diagnostics) = res; - - tcx.dep_graph.read_index(dep_node_index); - - let value = QueryValue { - value: result, - index: dep_node_index, - diagnostics: if diagnostics.len() == 0 { - None - } else { - Some(Box::new(QueryDiagnostics { - diagnostics, - emitted_diagnostics: Cell::new(true), - })) - }, - }; - - Ok(f(&tcx.maps - .$name - .borrow_mut() - .map - .entry(key) - .or_insert(value) - .value)) - } - - pub fn try_get(tcx: TyCtxt<'a, $tcx, 'lcx>, span: Span, key: $K) - -> Result<$V, DiagnosticBuilder<'a>> { - match Self::try_get_with(tcx, span, key, Clone::clone) { - Ok(e) => Ok(e), - Err(e) => Err(tcx.report_cycle(e)), - } - } - - pub fn force(tcx: TyCtxt<'a, $tcx, 'lcx>, span: Span, key: $K) { - // Ignore dependencies, since we not reading the computed value - let _task = tcx.dep_graph.in_ignore(); - - match Self::try_get_with(tcx, span, key, |_| ()) { - Ok(()) => {} - Err(e) => tcx.report_cycle(e).emit(), - } - } - })* - - #[derive(Copy, Clone)] - pub struct TyCtxtAt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { - pub tcx: TyCtxt<'a, 'gcx, 'tcx>, - pub span: Span, - } - - impl<'a, 'gcx, 'tcx> Deref for TyCtxtAt<'a, 'gcx, 'tcx> { - type Target = TyCtxt<'a, 'gcx, 'tcx>; - fn deref(&self) -> &Self::Target { - &self.tcx - } - } - - impl<'a, $tcx, 'lcx> TyCtxt<'a, $tcx, 'lcx> { - /// Return a transparent wrapper for `TyCtxt` which uses - /// `span` as the location of queries performed through it. - pub fn at(self, span: Span) -> TyCtxtAt<'a, $tcx, 'lcx> { - TyCtxtAt { - tcx: self, - span - } - } - - $($(#[$attr])* - pub fn $name(self, key: $K) -> $V { - self.at(DUMMY_SP).$name(key) - })* - } - - impl<'a, $tcx, 'lcx> TyCtxtAt<'a, $tcx, 'lcx> { - $($(#[$attr])* - pub fn $name(self, key: $K) -> $V { - queries::$name::try_get(self.tcx, self.span, key).unwrap_or_else(|mut e| { - e.emit(); - Value::from_cycle_error(self.global_tcx()) - }) - })* - } - - define_provider_struct! { - tcx: $tcx, - input: ($(([$($modifiers)*] [$name] [$K] [$V]))*), - output: () - } - - impl<$tcx> Copy for Providers<$tcx> {} - impl<$tcx> Clone for Providers<$tcx> { - fn clone(&self) -> Self { *self } - } - } -} - -macro_rules! define_map_struct { - // Initial state - (tcx: $tcx:tt, - input: $input:tt) => { - define_map_struct! { - tcx: $tcx, - input: $input, - output: () - } - }; - - // Final output - (tcx: $tcx:tt, - input: (), - output: ($($output:tt)*)) => { - pub struct Maps<$tcx> { - providers: IndexVec>, - query_stack: RefCell)>>, - $($output)* - } - }; - - // Field recognized and ready to shift into the output - (tcx: $tcx:tt, - ready: ([$($pub:tt)*] [$($attr:tt)*] [$name:ident]), - input: $input:tt, - output: ($($output:tt)*)) => { - define_map_struct! 
{ - tcx: $tcx, - input: $input, - output: ($($output)* - $(#[$attr])* $($pub)* $name: RefCell>>,) - } - }; - - // No modifiers left? This is a private item. - (tcx: $tcx:tt, - input: (([] $attrs:tt $name:tt) $($input:tt)*), - output: $output:tt) => { - define_map_struct! { - tcx: $tcx, - ready: ([] $attrs $name), - input: ($($input)*), - output: $output - } - }; - - // Skip other modifiers - (tcx: $tcx:tt, - input: (([$other_modifier:tt $($modifiers:tt)*] $($fields:tt)*) $($input:tt)*), - output: $output:tt) => { - define_map_struct! { - tcx: $tcx, - input: (([$($modifiers)*] $($fields)*) $($input)*), - output: $output - } - }; -} - -macro_rules! define_provider_struct { - // Initial state: - (tcx: $tcx:tt, input: $input:tt) => { - define_provider_struct! { - tcx: $tcx, - input: $input, - output: () - } - }; - - // Final state: - (tcx: $tcx:tt, - input: (), - output: ($(([$name:ident] [$K:ty] [$R:ty]))*)) => { - pub struct Providers<$tcx> { - $(pub $name: for<'a> fn(TyCtxt<'a, $tcx, $tcx>, $K) -> $R,)* - } - - impl<$tcx> Default for Providers<$tcx> { - fn default() -> Self { - $(fn $name<'a, $tcx>(_: TyCtxt<'a, $tcx, $tcx>, key: $K) -> $R { - bug!("tcx.maps.{}({:?}) unsupported by its crate", - stringify!($name), key); - })* - Providers { $($name),* } - } - } - }; - - // Something ready to shift: - (tcx: $tcx:tt, - ready: ($name:tt $K:tt $V:tt), - input: $input:tt, - output: ($($output:tt)*)) => { - define_provider_struct! { - tcx: $tcx, - input: $input, - output: ($($output)* ($name $K $V)) - } - }; - - // Regular queries produce a `V` only. - (tcx: $tcx:tt, - input: (([] $name:tt $K:tt $V:tt) $($input:tt)*), - output: $output:tt) => { - define_provider_struct! { - tcx: $tcx, - ready: ($name $K $V), - input: ($($input)*), - output: $output - } - }; - - // Skip modifiers. - (tcx: $tcx:tt, - input: (([$other_modifier:tt $($modifiers:tt)*] $($fields:tt)*) $($input:tt)*), - output: $output:tt) => { - define_provider_struct! { - tcx: $tcx, - input: (([$($modifiers)*] $($fields)*) $($input)*), - output: $output - } - }; -} - -// Each of these maps also corresponds to a method on a -// `Provider` trait for requesting a value of that type, -// and a method on `Maps` itself for doing that in a -// a way that memoizes and does dep-graph tracking, -// wrapping around the actual chain of providers that -// the driver creates (using several `rustc_*` crates). -define_maps! { <'tcx> - /// Records the type of every item. - [] type_of: TypeOfItem(DefId) -> Ty<'tcx>, - - /// Maps from the def-id of an item (trait/struct/enum/fn) to its - /// associated generics and predicates. - [] generics_of: GenericsOfItem(DefId) -> &'tcx ty::Generics, - [] predicates_of: PredicatesOfItem(DefId) -> ty::GenericPredicates<'tcx>, - - /// Maps from the def-id of a trait to the list of - /// super-predicates. This is a subset of the full list of - /// predicates. We store these in a separate map because we must - /// evaluate them even during type conversion, often before the - /// full predicates are available (note that supertraits have - /// additional acyclicity requirements). - [] super_predicates_of: SuperPredicatesOfItem(DefId) -> ty::GenericPredicates<'tcx>, - - /// To avoid cycles within the predicates of a single item we compute - /// per-type-parameter predicates for resolving `T::AssocTy`. 
- [] type_param_predicates: type_param_predicates((DefId, DefId)) - -> ty::GenericPredicates<'tcx>, - - [] trait_def: TraitDefOfItem(DefId) -> &'tcx ty::TraitDef, - [] adt_def: AdtDefOfItem(DefId) -> &'tcx ty::AdtDef, - [] adt_destructor: AdtDestructor(DefId) -> Option, - [] adt_sized_constraint: SizedConstraint(DefId) -> &'tcx [Ty<'tcx>], - [] adt_dtorck_constraint: DtorckConstraint(DefId) -> ty::DtorckConstraint<'tcx>, - - /// True if this is a const fn - [] is_const_fn: IsConstFn(DefId) -> bool, - - /// True if this is a foreign item (i.e., linked via `extern { ... }`). - [] is_foreign_item: IsForeignItem(DefId) -> bool, - - /// True if this is a default impl (aka impl Foo for ..) - [] is_default_impl: IsDefaultImpl(DefId) -> bool, - - /// Get a map with the variance of every item; use `item_variance` - /// instead. - [] crate_variances: crate_variances(CrateNum) -> Rc, - - /// Maps from def-id of a type or region parameter to its - /// (inferred) variance. - [] variances_of: ItemVariances(DefId) -> Rc>, - - /// Maps from an impl/trait def-id to a list of the def-ids of its items - [] associated_item_def_ids: AssociatedItemDefIds(DefId) -> Rc>, - - /// Maps from a trait item to the trait item "descriptor" - [] associated_item: AssociatedItems(DefId) -> ty::AssociatedItem, - - [] impl_trait_ref: ImplTraitRef(DefId) -> Option>, - [] impl_polarity: ImplPolarity(DefId) -> hir::ImplPolarity, - - /// Maps a DefId of a type to a list of its inherent impls. - /// Contains implementations of methods that are inherent to a type. - /// Methods in these implementations don't need to be exported. - [] inherent_impls: InherentImpls(DefId) -> Rc>, - - /// Set of all the def-ids in this crate that have MIR associated with - /// them. This includes all the body owners, but also things like struct - /// constructors. - [] mir_keys: mir_keys(CrateNum) -> Rc, - - /// Maps DefId's that have an associated Mir to the result - /// of the MIR qualify_consts pass. The actual meaning of - /// the value isn't known except to the pass itself. - [] mir_const_qualif: MirConstQualif(DefId) -> u8, - - /// Fetch the MIR for a given def-id up till the point where it is - /// ready for const evaluation. - /// - /// See the README for the `mir` module for details. - [] mir_const: MirConst(DefId) -> &'tcx Steal>, - - [] mir_validated: MirValidated(DefId) -> &'tcx Steal>, - - /// MIR after our optimization passes have run. This is MIR that is ready - /// for trans. This is also the only query that can fetch non-local MIR, at present. - [] optimized_mir: MirOptimized(DefId) -> &'tcx mir::Mir<'tcx>, - - /// Type of each closure. The def ID is the ID of the - /// expression defining the closure. - [] closure_kind: ClosureKind(DefId) -> ty::ClosureKind, - - /// The signature of functions and closures. - [] fn_sig: FnSignature(DefId) -> ty::PolyFnSig<'tcx>, - - /// Caches CoerceUnsized kinds for impls on custom types. - [] coerce_unsized_info: CoerceUnsizedInfo(DefId) - -> ty::adjustment::CoerceUnsizedInfo, - - [] typeck_item_bodies: typeck_item_bodies_dep_node(CrateNum) -> CompileResult, - - [] typeck_tables_of: TypeckTables(DefId) -> &'tcx ty::TypeckTables<'tcx>, - - [] has_typeck_tables: HasTypeckTables(DefId) -> bool, - - [] coherent_trait: coherent_trait_dep_node((CrateNum, DefId)) -> (), - - [] borrowck: BorrowCheck(DefId) -> (), - // FIXME: shouldn't this return a `Result<(), BorrowckErrors>` instead? - [] mir_borrowck: MirBorrowCheck(DefId) -> (), - - /// Gets a complete map from all types to their inherent impls. 
- /// Not meant to be used directly outside of coherence. - /// (Defined only for LOCAL_CRATE) - [] crate_inherent_impls: crate_inherent_impls_dep_node(CrateNum) -> CrateInherentImpls, - - /// Checks all types in the krate for overlap in their inherent impls. Reports errors. - /// Not meant to be used directly outside of coherence. - /// (Defined only for LOCAL_CRATE) - [] crate_inherent_impls_overlap_check: inherent_impls_overlap_check_dep_node(CrateNum) -> (), - - /// Results of evaluating const items or constants embedded in - /// other items (such as enum variant explicit discriminants). - [] const_eval: const_eval_dep_node(ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>) - -> const_val::EvalResult<'tcx>, - - /// Performs the privacy check and computes "access levels". - [] privacy_access_levels: PrivacyAccessLevels(CrateNum) -> Rc, - - [] reachable_set: reachability_dep_node(CrateNum) -> Rc, - - /// Per-function `RegionMaps`. The `DefId` should be the owner-def-id for the fn body; - /// in the case of closures or "inline" expressions, this will be redirected to the enclosing - /// fn item. - [] region_maps: RegionMaps(DefId) -> Rc, - - [] mir_shims: mir_shim_dep_node(ty::InstanceDef<'tcx>) -> &'tcx mir::Mir<'tcx>, - - [] def_symbol_name: SymbolName(DefId) -> ty::SymbolName, - [] symbol_name: symbol_name_dep_node(ty::Instance<'tcx>) -> ty::SymbolName, - - [] describe_def: DescribeDef(DefId) -> Option, - [] def_span: DefSpan(DefId) -> Span, - [] stability: Stability(DefId) -> Option, - [] deprecation: Deprecation(DefId) -> Option, - [] item_attrs: ItemAttrs(DefId) -> Rc<[ast::Attribute]>, - [] fn_arg_names: FnArgNames(DefId) -> Vec, - [] impl_parent: ImplParent(DefId) -> Option, - [] trait_of_item: TraitOfItem(DefId) -> Option, - [] is_exported_symbol: IsExportedSymbol(DefId) -> bool, - [] item_body_nested_bodies: ItemBodyNestedBodies(DefId) -> Rc>, - [] const_is_rvalue_promotable_to_static: ConstIsRvaluePromotableToStatic(DefId) -> bool, - [] is_mir_available: IsMirAvailable(DefId) -> bool, - - [] trait_impls_of: TraitImpls(DefId) -> Rc, - [] specialization_graph_of: SpecializationGraph(DefId) -> Rc, - [] is_object_safe: ObjectSafety(DefId) -> bool, - - // Get the ParameterEnvironment for a given item; this environment - // will be in "user-facing" mode, meaning that it is suitabe for - // type-checking etc, and it does not normalize specializable - // associated types. This is almost always what you want, - // unless you are doing MIR optimizations, in which case you - // might want to use `reveal_all()` method to change modes. - [] param_env: ParamEnv(DefId) -> ty::ParamEnv<'tcx>, - - // Trait selection queries. These are best used by invoking `ty.moves_by_default()`, - // `ty.is_copy()`, etc, since that will prune the environment where possible. 
- [] is_copy_raw: is_copy_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, - [] is_sized_raw: is_sized_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, - [] is_freeze_raw: is_freeze_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, - [] needs_drop_raw: needs_drop_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, - [] layout_raw: layout_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) - -> Result<&'tcx Layout, LayoutError<'tcx>>, - - [] dylib_dependency_formats: DylibDepFormats(DefId) - -> Rc>, - - [] is_allocator: IsAllocator(DefId) -> bool, - [] is_panic_runtime: IsPanicRuntime(DefId) -> bool, - [] is_compiler_builtins: IsCompilerBuiltins(DefId) -> bool, - [] has_global_allocator: HasGlobalAllocator(DefId) -> bool, - - [] extern_crate: ExternCrate(DefId) -> Rc>, - - [] lint_levels: lint_levels(CrateNum) -> Rc, -} - -fn type_param_predicates<'tcx>((item_id, param_id): (DefId, DefId)) -> DepConstructor<'tcx> { - DepConstructor::TypeParamPredicates { - item_id, - param_id - } -} - -fn coherent_trait_dep_node<'tcx>((_, def_id): (CrateNum, DefId)) -> DepConstructor<'tcx> { - DepConstructor::CoherenceCheckTrait(def_id) -} - -fn crate_inherent_impls_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::Coherence -} - -fn inherent_impls_overlap_check_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::CoherenceInherentImplOverlapCheck -} - -fn reachability_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::Reachability -} - -fn mir_shim_dep_node<'tcx>(instance_def: ty::InstanceDef<'tcx>) -> DepConstructor<'tcx> { - DepConstructor::MirShim { - instance_def - } -} - -fn symbol_name_dep_node<'tcx>(instance: ty::Instance<'tcx>) -> DepConstructor<'tcx> { - DepConstructor::InstanceSymbolName { instance } -} - -fn typeck_item_bodies_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::TypeckBodiesKrate -} - -fn const_eval_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>) - -> DepConstructor<'tcx> { - DepConstructor::ConstEval -} - -fn mir_keys<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::MirKeys -} - -fn crate_variances<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::CrateVariances -} - -fn is_copy_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - DepConstructor::IsCopy -} - -fn is_sized_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - DepConstructor::IsSized -} - -fn is_freeze_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - DepConstructor::IsFreeze -} - -fn needs_drop_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - DepConstructor::NeedsDrop -} - -fn layout_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - DepConstructor::Layout -} - -fn lint_levels<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::LintLevels -} diff --git a/src/librustc/ty/maps/README.md b/src/librustc/ty/maps/README.md new file mode 100644 index 0000000000..8abc68d431 --- /dev/null +++ b/src/librustc/ty/maps/README.md @@ -0,0 +1,302 @@ +# The Rust Compiler Query System + +The Compiler Query System is the key to our new demand-driven +organization. The idea is pretty simple. You have various queries +that compute things about the input -- for example, there is a query +called `type_of(def_id)` that, given the def-id of some item, will +compute the type of that item and return it to you. 
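As a rough mental model for the behavior described in the next few paragraphs (a simplified, hypothetical sketch, not the compiler's actual data structures; `DefId` and `Ty` below are plain stand-ins for the real types), a query can be pictured as a provider function sitting behind a per-query cache:

```rust
use std::collections::HashMap;

// Hypothetical stand-ins for the compiler's real `DefId` and `Ty<'tcx>` types.
type DefId = u32;
type Ty = String;

/// Toy query engine: each result is computed on first request
/// and served from a cache thereafter (memoization).
#[derive(Default)]
struct ToyQueries {
    type_of_cache: HashMap<DefId, Ty>,
}

impl ToyQueries {
    fn type_of(&mut self, def_id: DefId) -> Ty {
        if let Some(ty) = self.type_of_cache.get(&def_id) {
            return ty.clone(); // cache hit: no recomputation
        }
        // Cache miss: run the "provider", then record the result.
        let ty = format!("type-of-item-{}", def_id);
        self.type_of_cache.insert(def_id, ty.clone());
        ty
    }
}

fn main() {
    let mut queries = ToyQueries::default();
    let first = queries.type_of(7);  // computed
    let second = queries.type_of(7); // served from the cache
    assert_eq!(first, second);
}
```

The real system differs in many ways (providers are per-crate, results feed the dependency graph, and so on), but the cache-in-front-of-a-provider shape is the core idea.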
+ +Query execution is **memoized** -- so the first time you invoke a +query, it will go do the computation, but the next time, the result is +returned from a hashtable. Moreover, query execution fits nicely into +**incremental computation**; the idea is roughly that, when you do a +query, the result **may** be returned to you by loading stored data +from disk (but that's a separate topic we won't discuss further here). + +The overall vision is that, eventually, the entire compiler +control-flow will be query driven. There will effectively be one +top-level query ("compile") that will run compilation on a crate; this +will in turn demand information about that crate, starting from the +*end*. For example: + +- This "compile" query might demand to get a list of codegen-units + (i.e., modules that need to be compiled by LLVM). +- But computing the list of codegen-units would invoke some subquery + that returns the list of all modules defined in the Rust source. +- That query in turn would invoke something asking for the HIR. +- This keeps going further and further back until we wind up doing the + actual parsing. + +However, that vision is not fully realized. Still, big chunks of the +compiler (for example, generating MIR) work exactly like this. + +### Invoking queries + +To invoke a query is simple. The tcx ("type context") offers a method +for each defined query. So, for example, to invoke the `type_of` +query, you would just do this: + +```rust +let ty = tcx.type_of(some_def_id); +``` + +### Cycles between queries + +Currently, cycles during query execution should always result in a +compilation error. Typically, they arise because of illegal programs +that contain cyclic references they shouldn't (though sometimes they +arise because of compiler bugs, in which case we need to factor our +queries in a more fine-grained fashion to avoid them). + +However, it is nonetheless often useful to *recover* from a cycle +(after reporting an error, say) and try to soldier on, so as to give a +better user experience. In order to recover from a cycle, you don't +get to use the nice method-call-style syntax. Instead, you invoke +using the `try_get` method, which looks roughly like this: + +```rust +use ty::maps::queries; +... +match queries::type_of::try_get(tcx, DUMMY_SP, self.did) { + Ok(result) => { + // no cycle occurred! You can use `result` + } + Err(err) => { + // A cycle occurred! The error value `err` is a `DiagnosticBuilder`, + // meaning essentially an "in-progress", not-yet-reported error message. + // See below for more details on what to do here. + } +} +``` + +So, if you get back an `Err` from `try_get`, then a cycle *did* occur. This means that +you must ensure that a compiler error message is reported. You can do that in two ways: + +The simplest is to invoke `err.emit()`. This will emit the cycle error to the user. + +However, often cycles happen because of an illegal program, and you +know at that point that an error either already has been reported or +will be reported due to this cycle by some other bit of code. In that +case, you can invoke `err.cancel()` to not emit any error. It is +traditional to then invoke: + +``` +tcx.sess.delay_span_bug(some_span, "some message") +``` + +`delay_span_bug()` is a helper that says: we expect a compilation +error to have happened or to happen in the future; so, if compilation +ultimately succeeds, make an ICE with the message `"some +message"`. This is basically just a precaution in case you are wrong. 
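To make the recovery path concrete, here is a sketch (an illustration only, not code taken from the compiler) that combines the two steps just described -- cancelling the cycle diagnostic and delaying a bug report -- assuming the cycle is known to be a symptom of an error reported elsewhere:

```rust
match queries::type_of::try_get(tcx, DUMMY_SP, self.did) {
    Ok(result) => {
        // No cycle occurred; use `result` as usual.
    }
    Err(mut err) => {
        // Don't report the cycle itself...
        err.cancel();
        // ...but guard against compilation silently succeeding.
        // (`some_span` stands for whatever span best points at the problem.)
        tcx.sess.delay_span_bug(some_span, "cycle in type_of; error reported elsewhere");
    }
}
```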
+
+### How the compiler executes a query
+
+So you may be wondering what happens when you invoke a query
+method. The answer is that, for each query, the compiler maintains a
+cache -- if your query has already been executed, then the answer is
+simple: we clone the return value out of the cache and return it
+(therefore, you should try to ensure that the return types of queries
+are cheaply cloneable; insert an `Rc` if necessary).
+
+#### Providers
+
+If, however, the query is *not* in the cache, then the compiler will
+try to find a suitable **provider**. A provider is a function that has
+been defined and linked into the compiler somewhere that contains the
+code to compute the result of the query.
+
+**Providers are defined per-crate.** The compiler maintains,
+internally, a table of providers for every crate, at least
+conceptually. Right now, there are really two sets: the providers for
+queries about the **local crate** (that is, the one being compiled)
+and providers for queries about **external crates** (that is,
+dependencies of the local crate). Note that what determines the crate
+that a query is targeting is not the *kind* of query, but the *key*.
+For example, when you invoke `tcx.type_of(def_id)`, that could be a
+local query or an external query, depending on what crate the `def_id`
+is referring to (see the `self::keys::Key` trait for more information
+on how that works).
+
+Providers always have the same signature:
+
+```rust
+fn provider<'cx, 'tcx>(tcx: TyCtxt<'cx, 'tcx, 'tcx>,
+                       key: QUERY_KEY)
+                       -> QUERY_RESULT
+{
+    ...
+}
+```
+
+Providers take two arguments: the `tcx` and the query key. Note also
+that they take the *global* tcx (i.e., they use the `'tcx` lifetime
+twice), rather than taking a tcx with some active inference context.
+They return the result of the query.
+
+#### How providers are set up
+
+When the tcx is created, it is given the providers by its creator using
+the `Providers` struct. This struct is generated by the macros here, but it
+is basically a big list of function pointers:
+
+```rust
+struct Providers {
+    type_of: for<'cx, 'tcx> fn(TyCtxt<'cx, 'tcx, 'tcx>, DefId) -> Ty<'tcx>,
+    ...
+}
+```
+
+At present, we have one copy of the struct for local crates, and one
+for external crates, though the plan is that we may eventually have
+one per crate.
+
+These `Provider` structs are ultimately created and populated by
+`librustc_driver`, but it does this by distributing the work
+throughout the other `rustc_*` crates. This is done by invoking
+various `provide` functions. These functions tend to look something
+like this:
+
+```rust
+pub fn provide(providers: &mut Providers) {
+    *providers = Providers {
+        type_of,
+        ..*providers
+    };
+}
+```
+
+That is, they take an `&mut Providers` and mutate it in place. Usually
+we use the formulation above just because it looks nice, but you could
+as well do `providers.type_of = type_of`, which would be equivalent.
+(Here, `type_of` would be a top-level function, defined as we saw
+before.) So, if we wanted to add a provider for some other query,
+let's call it `fubar`, into the crate above, we might modify the `provide()`
+function like so:
+
+```rust
+pub fn provide(providers: &mut Providers) {
+    *providers = Providers {
+        type_of,
+        fubar,
+        ..*providers
+    };
+}
+
+fn fubar<'cx, 'tcx>(tcx: TyCtxt<'cx, 'tcx, 'tcx>, key: DefId) -> Fubar<'tcx> { .. }
+```
+
+NB. Most of the `rustc_*` crates only provide **local
+providers**. Almost all **extern providers** wind up going through the
+`rustc_metadata` crate, which loads the information from the crate
+metadata. But in some cases there are crates that provide queries for
+*both* local and external crates, in which case they define both a
+`provide` and a `provide_extern` function that `rustc_driver` can
+invoke.
+
+### Adding a new kind of query
+
+So suppose you want to add a new kind of query. How do you do so?
+Well, defining a query takes place in two steps:
+
+1. first, you have to specify the query name and arguments; and then,
+2. you have to supply query providers where needed.
+
+To specify the query name and arguments, you simply add an entry
+to the big macro invocation in `mod.rs`. This will probably have changed
+by the time you read this README, but at present it looks something
+like:
+
+```
+define_maps! { <'tcx>
+    /// Records the type of every item.
+    [] fn type_of: TypeOfItem(DefId) -> Ty<'tcx>,
+
+    ...
+}
+```
+
+Each line of the macro defines one query. The name is broken up like this:
+
+```
+[] fn type_of: TypeOfItem(DefId) -> Ty<'tcx>,
+^^    ^^^^^^^  ^^^^^^^^^^ ^^^^^     ^^^^^^^^
+|     |        |          |         |
+|     |        |          |         result type of query
+|     |        |          query key type
+|     |        dep-node constructor
+|     name of query
+query flags
+```
+
+Let's go over them one by one:
+
+- **Query flags:** these are largely unused right now, but the intention
+  is that we'll be able to customize various aspects of how the query is
+  processed.
+- **Name of query:** the name of the query method
+  (`tcx.type_of(..)`). Also used as the name of a struct
+  (`ty::maps::queries::type_of`) that will be generated to represent
+  this query.
+- **Dep-node constructor:** indicates the constructor function that
+  connects this query to incremental compilation. Typically, this is a
+  `DepNode` variant, which can be added by modifying the
+  `define_dep_nodes!` macro invocation in
+  `librustc/dep_graph/dep_node.rs`.
+  - However, sometimes we use a custom function, in which case the
+    name will be in snake case and the function will be defined at the
+    bottom of the file. This is typically used when the query key is
+    not a def-id, or just not the type that the dep-node expects.
+- **Query key type:** the type of the argument to this query.
+  This type must implement the `ty::maps::keys::Key` trait, which
+  defines (for example) how to map it to a crate, and so forth.
+- **Result type of query:** the type produced by this query. This type
+  should (a) not use `RefCell` or other interior mutability and (b) be
+  cheaply cloneable. Interning or using `Rc` or `Arc` is recommended for
+  non-trivial data types.
+  - The one exception to those rules is the `ty::steal::Steal` type,
+    which is used to cheaply modify MIR in place. See the definition
+    of `Steal` for more details. New uses of `Steal` should **not** be
+    added without alerting `@rust-lang/compiler`.
+
+So, to add a query:
+
+- Add an entry to `define_maps!` using the format above.
+- Possibly add a corresponding entry to the dep-node macro.
+- Link the provider by modifying the appropriate `provide` method;
+  or add a new one if needed and ensure that `rustc_driver` is invoking it.
+
+#### Query structs and descriptions
+
+For each kind, the `define_maps` macro will generate a "query struct"
+named after the query. This struct is a kind of place-holder
+describing the query. Each such struct implements the
+`self::config::QueryConfig` trait, which has associated types for the
+key/value of that particular query.
Basically the code generated looks something +like this: + +```rust +// Dummy struct representing a particular kind of query: +pub struct type_of<'tcx> { phantom: PhantomData<&'tcx ()> } + +impl<'tcx> QueryConfig for type_of<'tcx> { + type Key = DefId; + type Value = Ty<'tcx>; +} +``` + +There is an additional trait that you may wish to implement called +`self::config::QueryDescription`. This trait is used during cycle +errors to give a "human readable" name for the query, so that we can +summarize what was happening when the cycle occurred. Implementing +this trait is optional if the query key is `DefId`, but if you *don't* +implement it, you get a pretty generic error ("processing `foo`..."). +You can put new impls into the `config` module. They look something like this: + +```rust +impl<'tcx> QueryDescription for queries::type_of<'tcx> { + fn describe(tcx: TyCtxt, key: DefId) -> String { + format!("computing the type of `{}`", tcx.item_path_str(key)) + } +} +``` + diff --git a/src/librustc/ty/maps/config.rs b/src/librustc/ty/maps/config.rs new file mode 100644 index 0000000000..c8520c5be2 --- /dev/null +++ b/src/librustc/ty/maps/config.rs @@ -0,0 +1,504 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use hir::def_id::{CrateNum, DefId, DefIndex}; +use ty::{self, Ty, TyCtxt}; +use ty::maps::queries; +use ty::subst::Substs; + +use std::hash::Hash; +use syntax_pos::symbol::InternedString; + +/// Query configuration and description traits. 
+ +pub trait QueryConfig { + type Key: Eq + Hash + Clone; + type Value; +} + +pub(super) trait QueryDescription: QueryConfig { + fn describe(tcx: TyCtxt, key: Self::Key) -> String; +} + +impl> QueryDescription for M { + default fn describe(tcx: TyCtxt, def_id: DefId) -> String { + format!("processing `{}`", tcx.item_path_str(def_id)) + } +} + +impl<'tcx> QueryDescription for queries::is_copy_raw<'tcx> { + fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { + format!("computing whether `{}` is `Copy`", env.value) + } +} + +impl<'tcx> QueryDescription for queries::is_sized_raw<'tcx> { + fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { + format!("computing whether `{}` is `Sized`", env.value) + } +} + +impl<'tcx> QueryDescription for queries::is_freeze_raw<'tcx> { + fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { + format!("computing whether `{}` is freeze", env.value) + } +} + +impl<'tcx> QueryDescription for queries::needs_drop_raw<'tcx> { + fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { + format!("computing whether `{}` needs drop", env.value) + } +} + +impl<'tcx> QueryDescription for queries::layout_raw<'tcx> { + fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { + format!("computing layout of `{}`", env.value) + } +} + +impl<'tcx> QueryDescription for queries::super_predicates_of<'tcx> { + fn describe(tcx: TyCtxt, def_id: DefId) -> String { + format!("computing the supertraits of `{}`", + tcx.item_path_str(def_id)) + } +} + +impl<'tcx> QueryDescription for queries::type_param_predicates<'tcx> { + fn describe(tcx: TyCtxt, (_, def_id): (DefId, DefId)) -> String { + let id = tcx.hir.as_local_node_id(def_id).unwrap(); + format!("computing the bounds for type parameter `{}`", + tcx.hir.ty_param_name(id)) + } +} + +impl<'tcx> QueryDescription for queries::coherent_trait<'tcx> { + fn describe(tcx: TyCtxt, (_, def_id): (CrateNum, DefId)) -> String { + format!("coherence checking all impls of trait `{}`", + tcx.item_path_str(def_id)) + } +} + +impl<'tcx> QueryDescription for queries::crate_inherent_impls<'tcx> { + fn describe(_: TyCtxt, k: CrateNum) -> String { + format!("all inherent impls defined in crate `{:?}`", k) + } +} + +impl<'tcx> QueryDescription for queries::crate_inherent_impls_overlap_check<'tcx> { + fn describe(_: TyCtxt, _: CrateNum) -> String { + format!("check for overlap between inherent impls defined in this crate") + } +} + +impl<'tcx> QueryDescription for queries::crate_variances<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("computing the variances for items in this crate") + } +} + +impl<'tcx> QueryDescription for queries::mir_shims<'tcx> { + fn describe(tcx: TyCtxt, def: ty::InstanceDef<'tcx>) -> String { + format!("generating MIR shim for `{}`", + tcx.item_path_str(def.def_id())) + } +} + +impl<'tcx> QueryDescription for queries::privacy_access_levels<'tcx> { + fn describe(_: TyCtxt, _: CrateNum) -> String { + format!("privacy access levels") + } +} + +impl<'tcx> QueryDescription for queries::typeck_item_bodies<'tcx> { + fn describe(_: TyCtxt, _: CrateNum) -> String { + format!("type-checking all item bodies") + } +} + +impl<'tcx> QueryDescription for queries::reachable_set<'tcx> { + fn describe(_: TyCtxt, _: CrateNum) -> String { + format!("reachability") + } +} + +impl<'tcx> QueryDescription for queries::const_eval<'tcx> { + fn describe(tcx: TyCtxt, key: ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>) -> String 
{ + format!("const-evaluating `{}`", tcx.item_path_str(key.value.0)) + } +} + +impl<'tcx> QueryDescription for queries::mir_keys<'tcx> { + fn describe(_: TyCtxt, _: CrateNum) -> String { + format!("getting a list of all mir_keys") + } +} + +impl<'tcx> QueryDescription for queries::symbol_name<'tcx> { + fn describe(_tcx: TyCtxt, instance: ty::Instance<'tcx>) -> String { + format!("computing the symbol for `{}`", instance) + } +} + +impl<'tcx> QueryDescription for queries::describe_def<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + bug!("describe_def") + } +} + +impl<'tcx> QueryDescription for queries::def_span<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + bug!("def_span") + } +} + + +impl<'tcx> QueryDescription for queries::lookup_stability<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + bug!("stability") + } +} + +impl<'tcx> QueryDescription for queries::lookup_deprecation_entry<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + bug!("deprecation") + } +} + +impl<'tcx> QueryDescription for queries::item_attrs<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + bug!("item_attrs") + } +} + +impl<'tcx> QueryDescription for queries::is_exported_symbol<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + bug!("is_exported_symbol") + } +} + +impl<'tcx> QueryDescription for queries::fn_arg_names<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + bug!("fn_arg_names") + } +} + +impl<'tcx> QueryDescription for queries::impl_parent<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + bug!("impl_parent") + } +} + +impl<'tcx> QueryDescription for queries::trait_of_item<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + bug!("trait_of_item") + } +} + +impl<'tcx> QueryDescription for queries::item_body_nested_bodies<'tcx> { + fn describe(tcx: TyCtxt, def_id: DefId) -> String { + format!("nested item bodies of `{}`", tcx.item_path_str(def_id)) + } +} + +impl<'tcx> QueryDescription for queries::const_is_rvalue_promotable_to_static<'tcx> { + fn describe(tcx: TyCtxt, def_id: DefId) -> String { + format!("const checking if rvalue is promotable to static `{}`", + tcx.item_path_str(def_id)) + } +} + +impl<'tcx> QueryDescription for queries::is_mir_available<'tcx> { + fn describe(tcx: TyCtxt, def_id: DefId) -> String { + format!("checking if item is mir available: `{}`", + tcx.item_path_str(def_id)) + } +} + +impl<'tcx> QueryDescription for queries::trait_impls_of<'tcx> { + fn describe(tcx: TyCtxt, def_id: DefId) -> String { + format!("trait impls of `{}`", tcx.item_path_str(def_id)) + } +} + +impl<'tcx> QueryDescription for queries::is_object_safe<'tcx> { + fn describe(tcx: TyCtxt, def_id: DefId) -> String { + format!("determine object safety of trait `{}`", tcx.item_path_str(def_id)) + } +} + +impl<'tcx> QueryDescription for queries::is_const_fn<'tcx> { + fn describe(tcx: TyCtxt, def_id: DefId) -> String { + format!("checking if item is const fn: `{}`", tcx.item_path_str(def_id)) + } +} + +impl<'tcx> QueryDescription for queries::dylib_dependency_formats<'tcx> { + fn describe(_: TyCtxt, _: CrateNum) -> String { + "dylib dependency formats of crate".to_string() + } +} + +impl<'tcx> QueryDescription for queries::is_panic_runtime<'tcx> { + fn describe(_: TyCtxt, _: CrateNum) -> String { + "checking if the crate is_panic_runtime".to_string() + } +} + +impl<'tcx> QueryDescription for queries::is_compiler_builtins<'tcx> { + fn describe(_: TyCtxt, _: CrateNum) -> String { + "checking if the crate is_compiler_builtins".to_string() + } +} + +impl<'tcx> 
QueryDescription for queries::has_global_allocator<'tcx> { + fn describe(_: TyCtxt, _: CrateNum) -> String { + "checking if the crate has_global_allocator".to_string() + } +} + +impl<'tcx> QueryDescription for queries::extern_crate<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + "getting crate's ExternCrateData".to_string() + } +} + +impl<'tcx> QueryDescription for queries::lint_levels<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("computing the lint levels for items in this crate") + } +} + +impl<'tcx> QueryDescription for queries::specializes<'tcx> { + fn describe(_tcx: TyCtxt, _: (DefId, DefId)) -> String { + format!("computing whether impls specialize one another") + } +} + +impl<'tcx> QueryDescription for queries::in_scope_traits_map<'tcx> { + fn describe(_tcx: TyCtxt, _: DefIndex) -> String { + format!("traits in scope at a block") + } +} + +impl<'tcx> QueryDescription for queries::is_no_builtins<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("test whether a crate has #![no_builtins]") + } +} + +impl<'tcx> QueryDescription for queries::panic_strategy<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("query a crate's configured panic strategy") + } +} + +impl<'tcx> QueryDescription for queries::is_profiler_runtime<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("query a crate is #![profiler_runtime]") + } +} + +impl<'tcx> QueryDescription for queries::is_sanitizer_runtime<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("query a crate is #![sanitizer_runtime]") + } +} + +impl<'tcx> QueryDescription for queries::exported_symbol_ids<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("looking up the exported symbols of a crate") + } +} + +impl<'tcx> QueryDescription for queries::native_libraries<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("looking up the native libraries of a linked crate") + } +} + +impl<'tcx> QueryDescription for queries::plugin_registrar_fn<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("looking up the plugin registrar for a crate") + } +} + +impl<'tcx> QueryDescription for queries::derive_registrar_fn<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("looking up the derive registrar for a crate") + } +} + +impl<'tcx> QueryDescription for queries::crate_disambiguator<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("looking up the disambiguator a crate") + } +} + +impl<'tcx> QueryDescription for queries::crate_hash<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("looking up the hash a crate") + } +} + +impl<'tcx> QueryDescription for queries::original_crate_name<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("looking up the original name a crate") + } +} + +impl<'tcx> QueryDescription for queries::implementations_of_trait<'tcx> { + fn describe(_tcx: TyCtxt, _: (CrateNum, DefId)) -> String { + format!("looking up implementations of a trait in a crate") + } +} + +impl<'tcx> QueryDescription for queries::all_trait_implementations<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("looking up all (?) 
trait implementations") + } +} + +impl<'tcx> QueryDescription for queries::link_args<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("looking up link arguments for a crate") + } +} + +impl<'tcx> QueryDescription for queries::named_region_map<'tcx> { + fn describe(_tcx: TyCtxt, _: DefIndex) -> String { + format!("looking up a named region") + } +} + +impl<'tcx> QueryDescription for queries::is_late_bound_map<'tcx> { + fn describe(_tcx: TyCtxt, _: DefIndex) -> String { + format!("testing if a region is late boudn") + } +} + +impl<'tcx> QueryDescription for queries::object_lifetime_defaults_map<'tcx> { + fn describe(_tcx: TyCtxt, _: DefIndex) -> String { + format!("looking up lifetime defaults for a region") + } +} + +impl<'tcx> QueryDescription for queries::dep_kind<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("fetching what a dependency looks like") + } +} + +impl<'tcx> QueryDescription for queries::crate_name<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("fetching what a crate is named") + } +} + +impl<'tcx> QueryDescription for queries::get_lang_items<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("calculating the lang items map") + } +} + +impl<'tcx> QueryDescription for queries::defined_lang_items<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("calculating the lang items defined in a crate") + } +} + +impl<'tcx> QueryDescription for queries::missing_lang_items<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("calculating the missing lang items in a crate") + } +} + +impl<'tcx> QueryDescription for queries::visible_parent_map<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("calculating the visible parent map") + } +} + +impl<'tcx> QueryDescription for queries::missing_extern_crate_item<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("seeing if we're missing an `extern crate` item for this crate") + } +} + +impl<'tcx> QueryDescription for queries::used_crate_source<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("looking at the source for a crate") + } +} + +impl<'tcx> QueryDescription for queries::postorder_cnums<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("generating a postorder list of CrateNums") + } +} + +impl<'tcx> QueryDescription for queries::maybe_unused_extern_crates<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("looking up all possibly unused extern crates") + } +} + +impl<'tcx> QueryDescription for queries::stability_index<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("calculating the stability index for the local crate") + } +} + +impl<'tcx> QueryDescription for queries::all_crate_nums<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("fetching all foreign CrateNum instances") + } +} + +impl<'tcx> QueryDescription for queries::exported_symbols<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("exported_symbols") + } +} + +impl<'tcx> QueryDescription for queries::collect_and_partition_translation_items<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("collect_and_partition_translation_items") + } +} + +impl<'tcx> QueryDescription for queries::codegen_unit<'tcx> { + fn describe(_tcx: TyCtxt, _: InternedString) -> String { + format!("codegen_unit") + } +} + +impl<'tcx> QueryDescription for queries::compile_codegen_unit<'tcx> { + fn 
describe(_tcx: TyCtxt, _: InternedString) -> String { + format!("compile_codegen_unit") + } +} + +impl<'tcx> QueryDescription for queries::output_filenames<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("output_filenames") + } +} + +impl<'tcx> QueryDescription for queries::has_clone_closures<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("seeing if the crate has enabled `Clone` closures") + } +} + +impl<'tcx> QueryDescription for queries::has_copy_closures<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("seeing if the crate has enabled `Copy` closures") + } +} diff --git a/src/librustc/ty/maps/keys.rs b/src/librustc/ty/maps/keys.rs new file mode 100644 index 0000000000..e37cf66979 --- /dev/null +++ b/src/librustc/ty/maps/keys.rs @@ -0,0 +1,162 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Defines the set of legal keys that can be used in queries. + +use hir::def_id::{CrateNum, DefId, LOCAL_CRATE, DefIndex}; +use mir::transform::{MirSuite, MirPassIndex}; +use ty::{self, Ty, TyCtxt}; +use ty::subst::Substs; +use ty::fast_reject::SimplifiedType; + +use std::fmt::Debug; +use std::hash::Hash; +use syntax_pos::{Span, DUMMY_SP}; +use syntax_pos::symbol::InternedString; + +/// The `Key` trait controls what types can legally be used as the key +/// for a query. +pub trait Key: Clone + Hash + Eq + Debug { + /// Given an instance of this key, what crate is it referring to? + /// This is used to find the provider. + fn map_crate(&self) -> CrateNum; + + /// In the event that a cycle occurs, if no explicit span has been + /// given for a query with key `self`, what span should we use? 
+ fn default_span(&self, tcx: TyCtxt) -> Span; +} + +impl<'tcx> Key for ty::InstanceDef<'tcx> { + fn map_crate(&self) -> CrateNum { + LOCAL_CRATE + } + + fn default_span(&self, tcx: TyCtxt) -> Span { + tcx.def_span(self.def_id()) + } +} + +impl<'tcx> Key for ty::Instance<'tcx> { + fn map_crate(&self) -> CrateNum { + LOCAL_CRATE + } + + fn default_span(&self, tcx: TyCtxt) -> Span { + tcx.def_span(self.def_id()) + } +} + +impl Key for CrateNum { + fn map_crate(&self) -> CrateNum { + *self + } + fn default_span(&self, _: TyCtxt) -> Span { + DUMMY_SP + } +} + +impl Key for DefIndex { + fn map_crate(&self) -> CrateNum { + LOCAL_CRATE + } + fn default_span(&self, _tcx: TyCtxt) -> Span { + DUMMY_SP + } +} + +impl Key for DefId { + fn map_crate(&self) -> CrateNum { + self.krate + } + fn default_span(&self, tcx: TyCtxt) -> Span { + tcx.def_span(*self) + } +} + +impl Key for (DefId, DefId) { + fn map_crate(&self) -> CrateNum { + self.0.krate + } + fn default_span(&self, tcx: TyCtxt) -> Span { + self.1.default_span(tcx) + } +} + +impl Key for (CrateNum, DefId) { + fn map_crate(&self) -> CrateNum { + self.0 + } + fn default_span(&self, tcx: TyCtxt) -> Span { + self.1.default_span(tcx) + } +} + +impl Key for (DefId, SimplifiedType) { + fn map_crate(&self) -> CrateNum { + self.0.krate + } + fn default_span(&self, tcx: TyCtxt) -> Span { + self.0.default_span(tcx) + } +} + +impl<'tcx> Key for (DefId, &'tcx Substs<'tcx>) { + fn map_crate(&self) -> CrateNum { + self.0.krate + } + fn default_span(&self, tcx: TyCtxt) -> Span { + self.0.default_span(tcx) + } +} + +impl Key for (MirSuite, DefId) { + fn map_crate(&self) -> CrateNum { + self.1.map_crate() + } + fn default_span(&self, tcx: TyCtxt) -> Span { + self.1.default_span(tcx) + } +} + +impl Key for (MirSuite, MirPassIndex, DefId) { + fn map_crate(&self) -> CrateNum { + self.2.map_crate() + } + fn default_span(&self, tcx: TyCtxt) -> Span { + self.2.default_span(tcx) + } +} + +impl<'tcx> Key for Ty<'tcx> { + fn map_crate(&self) -> CrateNum { + LOCAL_CRATE + } + fn default_span(&self, _: TyCtxt) -> Span { + DUMMY_SP + } +} + +impl<'tcx, T: Key> Key for ty::ParamEnvAnd<'tcx, T> { + fn map_crate(&self) -> CrateNum { + self.value.map_crate() + } + fn default_span(&self, tcx: TyCtxt) -> Span { + self.value.default_span(tcx) + } +} + +impl Key for InternedString { + fn map_crate(&self) -> CrateNum { + LOCAL_CRATE + } + fn default_span(&self, _tcx: TyCtxt) -> Span { + DUMMY_SP + } +} diff --git a/src/librustc/ty/maps/mod.rs b/src/librustc/ty/maps/mod.rs new file mode 100644 index 0000000000..156f8c3dbc --- /dev/null +++ b/src/librustc/ty/maps/mod.rs @@ -0,0 +1,464 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use dep_graph::{DepConstructor, DepNode}; +use errors::DiagnosticBuilder; +use hir::def_id::{CrateNum, DefId, DefIndex}; +use hir::def::{Def, Export}; +use hir::{self, TraitCandidate, ItemLocalId}; +use hir::svh::Svh; +use lint; +use middle::const_val; +use middle::cstore::{ExternCrate, LinkagePreference, NativeLibrary, + ExternBodyNestedBodies}; +use middle::cstore::{NativeLibraryKind, DepKind, CrateSource, ExternConstBody}; +use middle::privacy::AccessLevels; +use middle::reachable::ReachableSet; +use middle::region; +use middle::resolve_lifetime::{Region, ObjectLifetimeDefault}; +use middle::stability::{self, DeprecationEntry}; +use middle::lang_items::{LanguageItems, LangItem}; +use middle::exported_symbols::SymbolExportLevel; +use middle::trans::{CodegenUnit, Stats}; +use mir; +use session::CompileResult; +use session::config::OutputFilenames; +use traits::specialization_graph; +use ty::{self, CrateInherentImpls, Ty, TyCtxt}; +use ty::layout::{Layout, LayoutError}; +use ty::steal::Steal; +use ty::subst::Substs; +use util::nodemap::{DefIdSet, DefIdMap}; +use util::common::{profq_msg, ProfileQueriesMsg}; + +use rustc_data_structures::indexed_set::IdxSetBuf; +use rustc_back::PanicStrategy; +use rustc_data_structures::indexed_vec::IndexVec; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_data_structures::stable_hasher::StableVec; + +use std::ops::Deref; +use std::rc::Rc; +use std::sync::Arc; +use syntax_pos::{Span, DUMMY_SP}; +use syntax_pos::symbol::InternedString; +use syntax::attr; +use syntax::ast; +use syntax::symbol::Symbol; + +#[macro_use] +mod plumbing; +use self::plumbing::*; +pub use self::plumbing::force_from_dep_node; + +mod keys; +pub use self::keys::Key; + +mod values; +use self::values::Value; + +mod config; +pub use self::config::QueryConfig; +use self::config::QueryDescription; + +// Each of these maps also corresponds to a method on a +// `Provider` trait for requesting a value of that type, +// and a method on `Maps` itself for doing that in a +// a way that memoizes and does dep-graph tracking, +// wrapping around the actual chain of providers that +// the driver creates (using several `rustc_*` crates). +define_maps! { <'tcx> + /// Records the type of every item. + [] fn type_of: TypeOfItem(DefId) -> Ty<'tcx>, + + /// Maps from the def-id of an item (trait/struct/enum/fn) to its + /// associated generics and predicates. + [] fn generics_of: GenericsOfItem(DefId) -> &'tcx ty::Generics, + [] fn predicates_of: PredicatesOfItem(DefId) -> ty::GenericPredicates<'tcx>, + + /// Maps from the def-id of a trait to the list of + /// super-predicates. This is a subset of the full list of + /// predicates. We store these in a separate map because we must + /// evaluate them even during type conversion, often before the + /// full predicates are available (note that supertraits have + /// additional acyclicity requirements). + [] fn super_predicates_of: SuperPredicatesOfItem(DefId) -> ty::GenericPredicates<'tcx>, + + /// To avoid cycles within the predicates of a single item we compute + /// per-type-parameter predicates for resolving `T::AssocTy`. 
+ [] fn type_param_predicates: type_param_predicates((DefId, DefId)) + -> ty::GenericPredicates<'tcx>, + + [] fn trait_def: TraitDefOfItem(DefId) -> &'tcx ty::TraitDef, + [] fn adt_def: AdtDefOfItem(DefId) -> &'tcx ty::AdtDef, + [] fn adt_destructor: AdtDestructor(DefId) -> Option, + [] fn adt_sized_constraint: SizedConstraint(DefId) -> &'tcx [Ty<'tcx>], + [] fn adt_dtorck_constraint: DtorckConstraint(DefId) -> ty::DtorckConstraint<'tcx>, + + /// True if this is a const fn + [] fn is_const_fn: IsConstFn(DefId) -> bool, + + /// True if this is a foreign item (i.e., linked via `extern { ... }`). + [] fn is_foreign_item: IsForeignItem(DefId) -> bool, + + /// True if this is a default impl (aka impl Foo for ..) + [] fn is_default_impl: IsDefaultImpl(DefId) -> bool, + + /// Get a map with the variance of every item; use `item_variance` + /// instead. + [] fn crate_variances: crate_variances(CrateNum) -> Rc, + + /// Maps from def-id of a type or region parameter to its + /// (inferred) variance. + [] fn variances_of: ItemVariances(DefId) -> Rc>, + + /// Maps from an impl/trait def-id to a list of the def-ids of its items + [] fn associated_item_def_ids: AssociatedItemDefIds(DefId) -> Rc>, + + /// Maps from a trait item to the trait item "descriptor" + [] fn associated_item: AssociatedItems(DefId) -> ty::AssociatedItem, + + [] fn impl_trait_ref: ImplTraitRef(DefId) -> Option>, + [] fn impl_polarity: ImplPolarity(DefId) -> hir::ImplPolarity, + + /// Maps a DefId of a type to a list of its inherent impls. + /// Contains implementations of methods that are inherent to a type. + /// Methods in these implementations don't need to be exported. + [] fn inherent_impls: InherentImpls(DefId) -> Rc>, + + /// Set of all the def-ids in this crate that have MIR associated with + /// them. This includes all the body owners, but also things like struct + /// constructors. + [] fn mir_keys: mir_keys(CrateNum) -> Rc, + + /// Maps DefId's that have an associated Mir to the result + /// of the MIR qualify_consts pass. The actual meaning of + /// the value isn't known except to the pass itself. + [] fn mir_const_qualif: MirConstQualif(DefId) -> (u8, Rc>), + + /// Fetch the MIR for a given def-id right after it's built - this includes + /// unreachable code. + [] fn mir_built: MirBuilt(DefId) -> &'tcx Steal>, + + /// Fetch the MIR for a given def-id up till the point where it is + /// ready for const evaluation. + /// + /// See the README for the `mir` module for details. + [] fn mir_const: MirConst(DefId) -> &'tcx Steal>, + + [] fn mir_validated: MirValidated(DefId) -> &'tcx Steal>, + + /// MIR after our optimization passes have run. This is MIR that is ready + /// for trans. This is also the only query that can fetch non-local MIR, at present. + [] fn optimized_mir: MirOptimized(DefId) -> &'tcx mir::Mir<'tcx>, + + /// Type of each closure. The def ID is the ID of the + /// expression defining the closure. + [] fn closure_kind: ClosureKind(DefId) -> ty::ClosureKind, + + /// The result of unsafety-checking this def-id. + [] fn unsafety_check_result: UnsafetyCheckResult(DefId) -> mir::UnsafetyCheckResult, + + /// The signature of functions and closures. + [] fn fn_sig: FnSignature(DefId) -> ty::PolyFnSig<'tcx>, + + /// Records the signature of each generator. The def ID is the ID of the + /// expression defining the closure. + [] fn generator_sig: GenSignature(DefId) -> Option>, + + /// Caches CoerceUnsized kinds for impls on custom types. 
+ [] fn coerce_unsized_info: CoerceUnsizedInfo(DefId) + -> ty::adjustment::CoerceUnsizedInfo, + + [] fn typeck_item_bodies: typeck_item_bodies_dep_node(CrateNum) -> CompileResult, + + [] fn typeck_tables_of: TypeckTables(DefId) -> &'tcx ty::TypeckTables<'tcx>, + + [] fn has_typeck_tables: HasTypeckTables(DefId) -> bool, + + [] fn coherent_trait: coherent_trait_dep_node((CrateNum, DefId)) -> (), + + [] fn borrowck: BorrowCheck(DefId) -> (), + // FIXME: shouldn't this return a `Result<(), BorrowckErrors>` instead? + [] fn mir_borrowck: MirBorrowCheck(DefId) -> (), + + /// Gets a complete map from all types to their inherent impls. + /// Not meant to be used directly outside of coherence. + /// (Defined only for LOCAL_CRATE) + [] fn crate_inherent_impls: crate_inherent_impls_dep_node(CrateNum) -> CrateInherentImpls, + + /// Checks all types in the krate for overlap in their inherent impls. Reports errors. + /// Not meant to be used directly outside of coherence. + /// (Defined only for LOCAL_CRATE) + [] fn crate_inherent_impls_overlap_check: inherent_impls_overlap_check_dep_node(CrateNum) -> (), + + /// Results of evaluating const items or constants embedded in + /// other items (such as enum variant explicit discriminants). + [] fn const_eval: const_eval_dep_node(ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>) + -> const_val::EvalResult<'tcx>, + + /// Performs the privacy check and computes "access levels". + [] fn privacy_access_levels: PrivacyAccessLevels(CrateNum) -> Rc, + + [] fn reachable_set: reachability_dep_node(CrateNum) -> ReachableSet, + + /// Per-body `region::ScopeTree`. The `DefId` should be the owner-def-id for the body; + /// in the case of closures, this will be redirected to the enclosing function. + [] fn region_scope_tree: RegionScopeTree(DefId) -> Rc, + + [] fn mir_shims: mir_shim_dep_node(ty::InstanceDef<'tcx>) -> &'tcx mir::Mir<'tcx>, + + [] fn def_symbol_name: SymbolName(DefId) -> ty::SymbolName, + [] fn symbol_name: symbol_name_dep_node(ty::Instance<'tcx>) -> ty::SymbolName, + + [] fn describe_def: DescribeDef(DefId) -> Option, + [] fn def_span: DefSpan(DefId) -> Span, + [] fn lookup_stability: LookupStability(DefId) -> Option<&'tcx attr::Stability>, + [] fn lookup_deprecation_entry: LookupDeprecationEntry(DefId) -> Option, + [] fn item_attrs: ItemAttrs(DefId) -> Rc<[ast::Attribute]>, + [] fn fn_arg_names: FnArgNames(DefId) -> Vec, + [] fn impl_parent: ImplParent(DefId) -> Option, + [] fn trait_of_item: TraitOfItem(DefId) -> Option, + [] fn is_exported_symbol: IsExportedSymbol(DefId) -> bool, + [] fn item_body_nested_bodies: ItemBodyNestedBodies(DefId) -> ExternBodyNestedBodies, + [] fn const_is_rvalue_promotable_to_static: ConstIsRvaluePromotableToStatic(DefId) -> bool, + [] fn is_mir_available: IsMirAvailable(DefId) -> bool, + + [] fn trait_impls_of: TraitImpls(DefId) -> Rc, + [] fn specialization_graph_of: SpecializationGraph(DefId) -> Rc, + [] fn is_object_safe: ObjectSafety(DefId) -> bool, + + // Get the ParameterEnvironment for a given item; this environment + // will be in "user-facing" mode, meaning that it is suitabe for + // type-checking etc, and it does not normalize specializable + // associated types. This is almost always what you want, + // unless you are doing MIR optimizations, in which case you + // might want to use `reveal_all()` method to change modes. + [] fn param_env: ParamEnv(DefId) -> ty::ParamEnv<'tcx>, + + // Trait selection queries. 
These are best used by invoking `ty.moves_by_default()`, + // `ty.is_copy()`, etc, since that will prune the environment where possible. + [] fn is_copy_raw: is_copy_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, + [] fn is_sized_raw: is_sized_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, + [] fn is_freeze_raw: is_freeze_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, + [] fn needs_drop_raw: needs_drop_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, + [] fn layout_raw: layout_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) + -> Result<&'tcx Layout, LayoutError<'tcx>>, + + [] fn dylib_dependency_formats: DylibDepFormats(CrateNum) + -> Rc>, + + [] fn is_panic_runtime: IsPanicRuntime(CrateNum) -> bool, + [] fn is_compiler_builtins: IsCompilerBuiltins(CrateNum) -> bool, + [] fn has_global_allocator: HasGlobalAllocator(CrateNum) -> bool, + [] fn is_sanitizer_runtime: IsSanitizerRuntime(CrateNum) -> bool, + [] fn is_profiler_runtime: IsProfilerRuntime(CrateNum) -> bool, + [] fn panic_strategy: GetPanicStrategy(CrateNum) -> PanicStrategy, + [] fn is_no_builtins: IsNoBuiltins(CrateNum) -> bool, + + [] fn extern_crate: ExternCrate(DefId) -> Rc>, + + [] fn specializes: specializes_node((DefId, DefId)) -> bool, + [] fn in_scope_traits_map: InScopeTraits(DefIndex) + -> Option>>>>, + [] fn module_exports: ModuleExports(DefId) -> Option>>, + [] fn lint_levels: lint_levels_node(CrateNum) -> Rc, + + [] fn impl_defaultness: ImplDefaultness(DefId) -> hir::Defaultness, + [] fn exported_symbol_ids: ExportedSymbolIds(CrateNum) -> Rc, + [] fn native_libraries: NativeLibraries(CrateNum) -> Rc>, + [] fn plugin_registrar_fn: PluginRegistrarFn(CrateNum) -> Option, + [] fn derive_registrar_fn: DeriveRegistrarFn(CrateNum) -> Option, + [] fn crate_disambiguator: CrateDisambiguator(CrateNum) -> Symbol, + [] fn crate_hash: CrateHash(CrateNum) -> Svh, + [] fn original_crate_name: OriginalCrateName(CrateNum) -> Symbol, + + [] fn implementations_of_trait: implementations_of_trait_node((CrateNum, DefId)) + -> Rc>, + [] fn all_trait_implementations: AllTraitImplementations(CrateNum) + -> Rc>, + + [] fn is_dllimport_foreign_item: IsDllimportForeignItem(DefId) -> bool, + [] fn is_statically_included_foreign_item: IsStaticallyIncludedForeignItem(DefId) -> bool, + [] fn native_library_kind: NativeLibraryKind(DefId) + -> Option, + [] fn link_args: link_args_node(CrateNum) -> Rc>, + + [] fn named_region_map: NamedRegion(DefIndex) -> + Option>>, + [] fn is_late_bound_map: IsLateBound(DefIndex) -> + Option>>, + [] fn object_lifetime_defaults_map: ObjectLifetimeDefaults(DefIndex) + -> Option>>>>, + + [] fn visibility: Visibility(DefId) -> ty::Visibility, + [] fn dep_kind: DepKind(CrateNum) -> DepKind, + [] fn crate_name: CrateName(CrateNum) -> Symbol, + [] fn item_children: ItemChildren(DefId) -> Rc>, + [] fn extern_mod_stmt_cnum: ExternModStmtCnum(DefId) -> Option, + + [] fn get_lang_items: get_lang_items_node(CrateNum) -> Rc, + [] fn defined_lang_items: DefinedLangItems(CrateNum) -> Rc>, + [] fn missing_lang_items: MissingLangItems(CrateNum) -> Rc>, + [] fn extern_const_body: ExternConstBody(DefId) -> ExternConstBody<'tcx>, + [] fn visible_parent_map: visible_parent_map_node(CrateNum) + -> Rc>, + [] fn missing_extern_crate_item: MissingExternCrateItem(CrateNum) -> bool, + [] fn used_crate_source: UsedCrateSource(CrateNum) -> Rc, + [] fn postorder_cnums: postorder_cnums_node(CrateNum) -> Rc>, + + [] fn freevars: Freevars(DefId) -> Option>>, + [] fn maybe_unused_trait_import: MaybeUnusedTraitImport(DefId) -> bool, + [] fn 
maybe_unused_extern_crates: maybe_unused_extern_crates_node(CrateNum) + -> Rc>, + + [] fn stability_index: stability_index_node(CrateNum) -> Rc>, + [] fn all_crate_nums: all_crate_nums_node(CrateNum) -> Rc>, + + [] fn exported_symbols: ExportedSymbols(CrateNum) + -> Arc, SymbolExportLevel)>>, + [] fn collect_and_partition_translation_items: + collect_and_partition_translation_items_node(CrateNum) + -> (Arc, Arc>>>), + [] fn export_name: ExportName(DefId) -> Option, + [] fn contains_extern_indicator: ContainsExternIndicator(DefId) -> bool, + [] fn is_translated_function: IsTranslatedFunction(DefId) -> bool, + [] fn codegen_unit: CodegenUnit(InternedString) -> Arc>, + [] fn compile_codegen_unit: CompileCodegenUnit(InternedString) -> Stats, + [] fn output_filenames: output_filenames_node(CrateNum) + -> Arc, + + [] fn has_copy_closures: HasCopyClosures(CrateNum) -> bool, + [] fn has_clone_closures: HasCloneClosures(CrateNum) -> bool, +} + +////////////////////////////////////////////////////////////////////// +// These functions are little shims used to find the dep-node for a +// given query when there is not a *direct* mapping: + +fn type_param_predicates<'tcx>((item_id, param_id): (DefId, DefId)) -> DepConstructor<'tcx> { + DepConstructor::TypeParamPredicates { + item_id, + param_id + } +} + +fn coherent_trait_dep_node<'tcx>((_, def_id): (CrateNum, DefId)) -> DepConstructor<'tcx> { + DepConstructor::CoherenceCheckTrait(def_id) +} + +fn crate_inherent_impls_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::Coherence +} + +fn inherent_impls_overlap_check_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::CoherenceInherentImplOverlapCheck +} + +fn reachability_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::Reachability +} + +fn mir_shim_dep_node<'tcx>(instance_def: ty::InstanceDef<'tcx>) -> DepConstructor<'tcx> { + DepConstructor::MirShim { + instance_def + } +} + +fn symbol_name_dep_node<'tcx>(instance: ty::Instance<'tcx>) -> DepConstructor<'tcx> { + DepConstructor::InstanceSymbolName { instance } +} + +fn typeck_item_bodies_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::TypeckBodiesKrate +} + +fn const_eval_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>) + -> DepConstructor<'tcx> { + DepConstructor::ConstEval { param_env } +} + +fn mir_keys<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::MirKeys +} + +fn crate_variances<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::CrateVariances +} + +fn is_copy_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + DepConstructor::IsCopy { param_env } +} + +fn is_sized_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + DepConstructor::IsSized { param_env } +} + +fn is_freeze_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + DepConstructor::IsFreeze { param_env } +} + +fn needs_drop_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + DepConstructor::NeedsDrop { param_env } +} + +fn layout_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + DepConstructor::Layout { param_env } +} + +fn lint_levels_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::LintLevels +} + +fn specializes_node<'tcx>((a, b): (DefId, DefId)) -> DepConstructor<'tcx> { + DepConstructor::Specializes { impl1: a, impl2: b } +} + +fn 
implementations_of_trait_node<'tcx>((krate, trait_id): (CrateNum, DefId)) + -> DepConstructor<'tcx> +{ + DepConstructor::ImplementationsOfTrait { krate, trait_id } +} + +fn link_args_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::LinkArgs +} + +fn get_lang_items_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::GetLangItems +} + +fn visible_parent_map_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::VisibleParentMap +} + +fn postorder_cnums_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::PostorderCnums +} + +fn maybe_unused_extern_crates_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::MaybeUnusedExternCrates +} + +fn stability_index_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::StabilityIndex +} + +fn all_crate_nums_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::AllCrateNums +} + +fn collect_and_partition_translation_items_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::CollectAndPartitionTranslationItems +} + +fn output_filenames_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::OutputFilenames +} diff --git a/src/librustc/ty/maps/plumbing.rs b/src/librustc/ty/maps/plumbing.rs new file mode 100644 index 0000000000..a0cfa32213 --- /dev/null +++ b/src/librustc/ty/maps/plumbing.rs @@ -0,0 +1,865 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The implementation of the query system itself. Defines the macros +//! that generate the actual methods on tcx which find and execute the +//! provider, manage the caches, and so forth. + +use dep_graph::{DepNodeIndex, DepNode, DepKind}; +use errors::{Diagnostic, DiagnosticBuilder}; +use ty::{TyCtxt}; +use ty::maps::Query; // NB: actually generated by the macros in this file +use ty::maps::config::QueryDescription; +use ty::item_path; + +use rustc_data_structures::fx::{FxHashMap}; +use std::cell::{RefMut, Cell}; +use std::marker::PhantomData; +use std::mem; +use syntax_pos::Span; + +pub(super) struct QueryMap { + phantom: PhantomData, + pub(super) map: FxHashMap>, +} + +pub(super) struct QueryValue { + pub(super) value: T, + pub(super) index: DepNodeIndex, + pub(super) diagnostics: Option>, +} + +impl QueryValue { + pub(super) fn new(value: T, + dep_node_index: DepNodeIndex, + diagnostics: Vec) + -> QueryValue { + QueryValue { + value, + index: dep_node_index, + diagnostics: if diagnostics.len() == 0 { + None + } else { + Some(Box::new(QueryDiagnostics { + diagnostics, + emitted_diagnostics: Cell::new(true), + })) + }, + } + } +} + +pub(super) struct QueryDiagnostics { + pub(super) diagnostics: Vec, + pub(super) emitted_diagnostics: Cell, +} + +impl QueryMap { + pub(super) fn new() -> QueryMap { + QueryMap { + phantom: PhantomData, + map: FxHashMap(), + } + } +} + +pub(super) struct CycleError<'a, 'tcx: 'a> { + span: Span, + cycle: RefMut<'a, [(Span, Query<'tcx>)]>, +} + +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + pub(super) fn report_cycle(self, CycleError { span, cycle }: CycleError) + -> DiagnosticBuilder<'a> + { + // Subtle: release the refcell lock before invoking `describe()` + // below by dropping `cycle`. 
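+        // (Holding the `RefMut` across `describe()` would be risky: describing a
+        // query may itself execute other queries, which re-borrow `query_stack`
+        // via `cycle_check` and would then hit a `RefCell` double-borrow panic.
+        // Copying the stack out and dropping the borrow first avoids that.)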
+ let stack = cycle.to_vec(); + mem::drop(cycle); + + assert!(!stack.is_empty()); + + // Disable naming impls with types in this path, since that + // sometimes cycles itself, leading to extra cycle errors. + // (And cycle errors around impls tend to occur during the + // collect/coherence phases anyhow.) + item_path::with_forced_impl_filename_line(|| { + let mut err = + struct_span_err!(self.sess, span, E0391, + "unsupported cyclic reference between types/traits detected"); + err.span_label(span, "cyclic reference"); + + err.span_note(stack[0].0, &format!("the cycle begins when {}...", + stack[0].1.describe(self))); + + for &(span, ref query) in &stack[1..] { + err.span_note(span, &format!("...which then requires {}...", + query.describe(self))); + } + + err.note(&format!("...which then again requires {}, completing the cycle.", + stack[0].1.describe(self))); + + return err + }) + } + + pub(super) fn cycle_check(self, span: Span, query: Query<'gcx>, compute: F) + -> Result> + where F: FnOnce() -> R + { + { + let mut stack = self.maps.query_stack.borrow_mut(); + if let Some((i, _)) = stack.iter().enumerate().rev() + .find(|&(_, &(_, ref q))| *q == query) { + return Err(CycleError { + span, + cycle: RefMut::map(stack, |stack| &mut stack[i..]) + }); + } + stack.push((span, query)); + } + + let result = compute(); + + self.maps.query_stack.borrow_mut().pop(); + + Ok(result) + } +} + +// If enabled, send a message to the profile-queries thread +macro_rules! profq_msg { + ($tcx:expr, $msg:expr) => { + if cfg!(debug_assertions) { + if $tcx.sess.profile_queries() { + profq_msg($msg) + } + } + } +} + +// If enabled, format a key using its debug string, which can be +// expensive to compute (in terms of time). +macro_rules! profq_key { + ($tcx:expr, $key:expr) => { + if cfg!(debug_assertions) { + if $tcx.sess.profile_queries_and_keys() { + Some(format!("{:?}", $key)) + } else { None } + } else { None } + } +} + +macro_rules! define_maps { + (<$tcx:tt> + $($(#[$attr:meta])* + [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*) => { + + use dep_graph::DepNodeIndex; + use std::cell::RefCell; + + define_map_struct! 
{ + tcx: $tcx, + input: ($(([$($modifiers)*] [$($attr)*] [$name]))*) + } + + impl<$tcx> Maps<$tcx> { + pub fn new(providers: IndexVec>) + -> Self { + Maps { + providers, + query_stack: RefCell::new(vec![]), + $($name: RefCell::new(QueryMap::new())),* + } + } + } + + #[allow(bad_style)] + #[derive(Copy, Clone, Debug, PartialEq, Eq)] + pub enum Query<$tcx> { + $($(#[$attr])* $name($K)),* + } + + #[allow(bad_style)] + #[derive(Clone, Debug, PartialEq, Eq)] + pub enum QueryMsg { + $($name(Option)),* + } + + impl<$tcx> Query<$tcx> { + pub fn describe(&self, tcx: TyCtxt) -> String { + let (r, name) = match *self { + $(Query::$name(key) => { + (queries::$name::describe(tcx, key), stringify!($name)) + })* + }; + if tcx.sess.verbose() { + format!("{} [{}]", r, name) + } else { + r + } + } + } + + pub mod queries { + use std::marker::PhantomData; + + $(#[allow(bad_style)] + pub struct $name<$tcx> { + data: PhantomData<&$tcx ()> + })* + } + + $(impl<$tcx> QueryConfig for queries::$name<$tcx> { + type Key = $K; + type Value = $V; + } + + impl<'a, $tcx, 'lcx> queries::$name<$tcx> { + + #[allow(unused)] + fn to_dep_node(tcx: TyCtxt<'a, $tcx, 'lcx>, key: &$K) -> DepNode { + use dep_graph::DepConstructor::*; + + DepNode::new(tcx, $node(*key)) + } + + fn try_get_with(tcx: TyCtxt<'a, $tcx, 'lcx>, + mut span: Span, + key: $K) + -> Result<$V, CycleError<'a, $tcx>> + { + debug!("ty::queries::{}::try_get_with(key={:?}, span={:?})", + stringify!($name), + key, + span); + + profq_msg!(tcx, + ProfileQueriesMsg::QueryBegin( + span.data(), + QueryMsg::$name(profq_key!(tcx, key)) + ) + ); + + if let Some(value) = tcx.maps.$name.borrow().map.get(&key) { + if let Some(ref d) = value.diagnostics { + if !d.emitted_diagnostics.get() { + d.emitted_diagnostics.set(true); + let handle = tcx.sess.diagnostic(); + for diagnostic in d.diagnostics.iter() { + DiagnosticBuilder::new_diagnostic(handle, diagnostic.clone()) + .emit(); + } + } + } + profq_msg!(tcx, ProfileQueriesMsg::CacheHit); + tcx.dep_graph.read_index(value.index); + return Ok((&value.value).clone()); + } + + // FIXME(eddyb) Get more valid Span's on queries. + // def_span guard is necessary to prevent a recursive loop, + // default_span calls def_span query internally. + if span == DUMMY_SP && stringify!($name) != "def_span" { + span = key.default_span(tcx) + } + + // Fast path for when incr. comp. is off. `to_dep_node` is + // expensive for some DepKinds. 
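+                // With the dep-graph disabled we therefore skip `to_dep_node`
+                // entirely, force the provider under a dummy `Null` node, and
+                // keep only the value, dropping the `DepNodeIndex` it returns.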
+ if !tcx.dep_graph.is_fully_enabled() { + let null_dep_node = DepNode::new_no_params(::dep_graph::DepKind::Null); + return Self::force(tcx, key, span, null_dep_node) + .map(|(v, _)| v); + } + + let dep_node = Self::to_dep_node(tcx, &key); + + if dep_node.kind.is_anon() { + profq_msg!(tcx, ProfileQueriesMsg::ProviderBegin); + + let res = tcx.cycle_check(span, Query::$name(key), || { + tcx.sess.diagnostic().track_diagnostics(|| { + tcx.dep_graph.with_anon_task(dep_node.kind, || { + Self::compute_result(tcx.global_tcx(), key) + }) + }) + })?; + + profq_msg!(tcx, ProfileQueriesMsg::ProviderEnd); + let ((result, dep_node_index), diagnostics) = res; + + tcx.dep_graph.read_index(dep_node_index); + let value = QueryValue::new(result, dep_node_index, diagnostics); + + return Ok((&tcx.maps + .$name + .borrow_mut() + .map + .entry(key) + .or_insert(value) + .value).clone()); + } + + if !dep_node.kind.is_input() { + use dep_graph::DepNodeColor; + if let Some(DepNodeColor::Green(dep_node_index)) = tcx.dep_graph + .node_color(&dep_node) { + profq_msg!(tcx, ProfileQueriesMsg::CacheHit); + tcx.dep_graph.read_index(dep_node_index); + return Self::load_from_disk_and_cache_in_memory(tcx, + key, + span, + dep_node_index) + } + + debug!("ty::queries::{}::try_get_with(key={:?}) - running try_mark_green", + stringify!($name), + key); + + if let Some(dep_node_index) = tcx.dep_graph.try_mark_green(tcx, &dep_node) { + debug_assert!(tcx.dep_graph.is_green(dep_node_index)); + profq_msg!(tcx, ProfileQueriesMsg::CacheHit); + tcx.dep_graph.read_index(dep_node_index); + return Self::load_from_disk_and_cache_in_memory(tcx, + key, + span, + dep_node_index) + } + } + + match Self::force(tcx, key, span, dep_node) { + Ok((result, dep_node_index)) => { + tcx.dep_graph.read_index(dep_node_index); + Ok(result) + } + Err(e) => Err(e) + } + } + + fn compute_result(tcx: TyCtxt<'a, $tcx, 'lcx>, key: $K) -> $V { + let provider = tcx.maps.providers[key.map_crate()].$name; + provider(tcx.global_tcx(), key) + } + + fn load_from_disk_and_cache_in_memory(tcx: TyCtxt<'a, $tcx, 'lcx>, + key: $K, + span: Span, + dep_node_index: DepNodeIndex) + -> Result<$V, CycleError<'a, $tcx>> + { + debug_assert!(tcx.dep_graph.is_green(dep_node_index)); + + // We don't do any caching yet, so recompute + let (result, diagnostics) = tcx.cycle_check(span, Query::$name(key), || { + tcx.sess.diagnostic().track_diagnostics(|| { + // The dep-graph for this computation is already in place + tcx.dep_graph.with_ignore(|| { + Self::compute_result(tcx, key) + }) + }) + })?; + + if tcx.sess.opts.debugging_opts.query_dep_graph { + tcx.dep_graph.mark_loaded_from_cache(dep_node_index, true); + } + + let value = QueryValue::new(result, dep_node_index, diagnostics); + + Ok((&tcx.maps + .$name + .borrow_mut() + .map + .entry(key) + .or_insert(value) + .value).clone()) + } + + fn force(tcx: TyCtxt<'a, $tcx, 'lcx>, + key: $K, + span: Span, + dep_node: DepNode) + -> Result<($V, DepNodeIndex), CycleError<'a, $tcx>> { + debug_assert!(tcx.dep_graph.node_color(&dep_node).is_none()); + + profq_msg!(tcx, ProfileQueriesMsg::ProviderBegin); + let res = tcx.cycle_check(span, Query::$name(key), || { + tcx.sess.diagnostic().track_diagnostics(|| { + tcx.dep_graph.with_task(dep_node, + tcx, + key, + Self::compute_result) + }) + })?; + profq_msg!(tcx, ProfileQueriesMsg::ProviderEnd); + + let ((result, dep_node_index), diagnostics) = res; + + if tcx.sess.opts.debugging_opts.query_dep_graph { + tcx.dep_graph.mark_loaded_from_cache(dep_node_index, false); + } + + let value = 
QueryValue::new(result, dep_node_index, diagnostics); + + Ok(((&tcx.maps + .$name + .borrow_mut() + .map + .entry(key) + .or_insert(value) + .value).clone(), + dep_node_index)) + } + + pub fn try_get(tcx: TyCtxt<'a, $tcx, 'lcx>, span: Span, key: $K) + -> Result<$V, DiagnosticBuilder<'a>> { + match Self::try_get_with(tcx, span, key) { + Ok(e) => Ok(e), + Err(e) => Err(tcx.report_cycle(e)), + } + } + })* + + #[derive(Copy, Clone)] + pub struct TyCtxtAt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + pub tcx: TyCtxt<'a, 'gcx, 'tcx>, + pub span: Span, + } + + impl<'a, 'gcx, 'tcx> Deref for TyCtxtAt<'a, 'gcx, 'tcx> { + type Target = TyCtxt<'a, 'gcx, 'tcx>; + fn deref(&self) -> &Self::Target { + &self.tcx + } + } + + impl<'a, $tcx, 'lcx> TyCtxt<'a, $tcx, 'lcx> { + /// Return a transparent wrapper for `TyCtxt` which uses + /// `span` as the location of queries performed through it. + pub fn at(self, span: Span) -> TyCtxtAt<'a, $tcx, 'lcx> { + TyCtxtAt { + tcx: self, + span + } + } + + $($(#[$attr])* + pub fn $name(self, key: $K) -> $V { + self.at(DUMMY_SP).$name(key) + })* + } + + impl<'a, $tcx, 'lcx> TyCtxtAt<'a, $tcx, 'lcx> { + $($(#[$attr])* + pub fn $name(self, key: $K) -> $V { + queries::$name::try_get(self.tcx, self.span, key).unwrap_or_else(|mut e| { + e.emit(); + Value::from_cycle_error(self.global_tcx()) + }) + })* + } + + define_provider_struct! { + tcx: $tcx, + input: ($(([$($modifiers)*] [$name] [$K] [$V]))*), + output: () + } + + impl<$tcx> Copy for Providers<$tcx> {} + impl<$tcx> Clone for Providers<$tcx> { + fn clone(&self) -> Self { *self } + } + } +} + +macro_rules! define_map_struct { + // Initial state + (tcx: $tcx:tt, + input: $input:tt) => { + define_map_struct! { + tcx: $tcx, + input: $input, + output: () + } + }; + + // Final output + (tcx: $tcx:tt, + input: (), + output: ($($output:tt)*)) => { + pub struct Maps<$tcx> { + providers: IndexVec>, + query_stack: RefCell)>>, + $($output)* + } + }; + + // Field recognized and ready to shift into the output + (tcx: $tcx:tt, + ready: ([$($pub:tt)*] [$($attr:tt)*] [$name:ident]), + input: $input:tt, + output: ($($output:tt)*)) => { + define_map_struct! { + tcx: $tcx, + input: $input, + output: ($($output)* + $(#[$attr])* $($pub)* $name: RefCell>>,) + } + }; + + // No modifiers left? This is a private item. + (tcx: $tcx:tt, + input: (([] $attrs:tt $name:tt) $($input:tt)*), + output: $output:tt) => { + define_map_struct! { + tcx: $tcx, + ready: ([] $attrs $name), + input: ($($input)*), + output: $output + } + }; + + // Skip other modifiers + (tcx: $tcx:tt, + input: (([$other_modifier:tt $($modifiers:tt)*] $($fields:tt)*) $($input:tt)*), + output: $output:tt) => { + define_map_struct! { + tcx: $tcx, + input: (([$($modifiers)*] $($fields)*) $($input)*), + output: $output + } + }; +} + +macro_rules! define_provider_struct { + // Initial state: + (tcx: $tcx:tt, input: $input:tt) => { + define_provider_struct! 
{ + tcx: $tcx, + input: $input, + output: () + } + }; + + // Final state: + (tcx: $tcx:tt, + input: (), + output: ($(([$name:ident] [$K:ty] [$R:ty]))*)) => { + pub struct Providers<$tcx> { + $(pub $name: for<'a> fn(TyCtxt<'a, $tcx, $tcx>, $K) -> $R,)* + } + + impl<$tcx> Default for Providers<$tcx> { + fn default() -> Self { + $(fn $name<'a, $tcx>(_: TyCtxt<'a, $tcx, $tcx>, key: $K) -> $R { + bug!("tcx.maps.{}({:?}) unsupported by its crate", + stringify!($name), key); + })* + Providers { $($name),* } + } + } + }; + + // Something ready to shift: + (tcx: $tcx:tt, + ready: ($name:tt $K:tt $V:tt), + input: $input:tt, + output: ($($output:tt)*)) => { + define_provider_struct! { + tcx: $tcx, + input: $input, + output: ($($output)* ($name $K $V)) + } + }; + + // Regular queries produce a `V` only. + (tcx: $tcx:tt, + input: (([] $name:tt $K:tt $V:tt) $($input:tt)*), + output: $output:tt) => { + define_provider_struct! { + tcx: $tcx, + ready: ($name $K $V), + input: ($($input)*), + output: $output + } + }; + + // Skip modifiers. + (tcx: $tcx:tt, + input: (([$other_modifier:tt $($modifiers:tt)*] $($fields:tt)*) $($input:tt)*), + output: $output:tt) => { + define_provider_struct! { + tcx: $tcx, + input: (([$($modifiers)*] $($fields)*) $($input)*), + output: $output + } + }; +} + +pub fn force_from_dep_node<'a, 'gcx, 'lcx>(tcx: TyCtxt<'a, 'gcx, 'lcx>, + dep_node: &DepNode) + -> bool { + use ty::maps::keys::Key; + use hir::def_id::LOCAL_CRATE; + + // We must avoid ever having to call force_from_dep_node() for a + // DepNode::CodegenUnit: + // Since we cannot reconstruct the query key of a DepNode::CodegenUnit, we + // would always end up having to evaluate the first caller of the + // `codegen_unit` query that *is* reconstructible. This might very well be + // the `compile_codegen_unit` query, thus re-translating the whole CGU just + // to re-trigger calling the `codegen_unit` query with the right key. At + // that point we would already have re-done all the work we are trying to + // avoid doing in the first place. + // The solution is simple: Just explicitly call the `codegen_unit` query for + // each CGU, right after partitioning. This way `try_mark_green` will always + // hit the cache instead of having to go through `force_from_dep_node`. + // This assertion makes sure, we actually keep applying the solution above. + debug_assert!(dep_node.kind != DepKind::CodegenUnit, + "calling force_from_dep_node() on DepKind::CodegenUnit"); + + if !dep_node.kind.can_reconstruct_query_key() { + return false + } + + macro_rules! def_id { + () => { + if let Some(def_id) = dep_node.extract_def_id(tcx) { + def_id + } else { + // return from the whole function + return false + } + } + }; + + macro_rules! krate { + () => { (def_id!()).krate } + }; + + macro_rules! force { + ($query:ident, $key:expr) => { + { + use $crate::util::common::{ProfileQueriesMsg, profq_msg}; + + // FIXME(eddyb) Get more valid Span's on queries. + // def_span guard is necessary to prevent a recursive loop, + // default_span calls def_span query internally. + let span = if stringify!($query) != "def_span" { + $key.default_span(tcx) + } else { + ::syntax_pos::DUMMY_SP + }; + + profq_msg!(tcx, + ProfileQueriesMsg::QueryBegin( + span.data(), + ::ty::maps::QueryMsg::$query(profq_key!(tcx, $key)) + ) + ); + + match ::ty::maps::queries::$query::force(tcx, $key, span, *dep_node) { + Ok(_) => {}, + Err(e) => { + tcx.report_cycle(e).emit(); + } + } + } + } + }; + + // FIXME(#45015): We should try move this boilerplate code into a macro + // somehow. 
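+    // Rough shape of the match below: arms that `bug!` correspond to dep-nodes
+    // that should never reach `force_from_dep_node` (pre-allocated inputs,
+    // anonymous nodes, or keys we cannot reconstruct); arms that return `false`
+    // are dep-nodes that are not queries at all; every other arm rebuilds the
+    // query key with `def_id!()` / `krate!()` / `LOCAL_CRATE` and re-runs the
+    // corresponding query through `force!`.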
+ match dep_node.kind { + // These are inputs that are expected to be pre-allocated and that + // should therefore always be red or green already + DepKind::AllLocalTraitImpls | + DepKind::Krate | + DepKind::CrateMetadata | + DepKind::HirBody | + DepKind::Hir | + + // This are anonymous nodes + DepKind::IsCopy | + DepKind::IsSized | + DepKind::IsFreeze | + DepKind::NeedsDrop | + DepKind::Layout | + DepKind::TraitSelect | + DepKind::ConstEval | + + // We don't have enough information to reconstruct the query key of + // these + DepKind::InstanceSymbolName | + DepKind::MirShim | + DepKind::BorrowCheckKrate | + DepKind::Specializes | + DepKind::ImplementationsOfTrait | + DepKind::TypeParamPredicates | + DepKind::CodegenUnit | + DepKind::CompileCodegenUnit | + + // These are just odd + DepKind::Null | + DepKind::WorkProduct => { + bug!("force_from_dep_node() - Encountered {:?}", dep_node.kind) + } + + // These are not queries + DepKind::CoherenceCheckTrait | + DepKind::ItemVarianceConstraints => { + return false + } + + DepKind::RegionScopeTree => { force!(region_scope_tree, def_id!()); } + + DepKind::Coherence => { force!(crate_inherent_impls, LOCAL_CRATE); } + DepKind::CoherenceInherentImplOverlapCheck => { + force!(crate_inherent_impls_overlap_check, LOCAL_CRATE) + }, + DepKind::PrivacyAccessLevels => { force!(privacy_access_levels, LOCAL_CRATE); } + DepKind::MirBuilt => { force!(mir_built, def_id!()); } + DepKind::MirConstQualif => { force!(mir_const_qualif, def_id!()); } + DepKind::MirConst => { force!(mir_const, def_id!()); } + DepKind::MirValidated => { force!(mir_validated, def_id!()); } + DepKind::MirOptimized => { force!(optimized_mir, def_id!()); } + + DepKind::BorrowCheck => { force!(borrowck, def_id!()); } + DepKind::MirBorrowCheck => { force!(mir_borrowck, def_id!()); } + DepKind::UnsafetyCheckResult => { force!(unsafety_check_result, def_id!()); } + DepKind::Reachability => { force!(reachable_set, LOCAL_CRATE); } + DepKind::MirKeys => { force!(mir_keys, LOCAL_CRATE); } + DepKind::CrateVariances => { force!(crate_variances, LOCAL_CRATE); } + DepKind::AssociatedItems => { force!(associated_item, def_id!()); } + DepKind::TypeOfItem => { force!(type_of, def_id!()); } + DepKind::GenericsOfItem => { force!(generics_of, def_id!()); } + DepKind::PredicatesOfItem => { force!(predicates_of, def_id!()); } + DepKind::SuperPredicatesOfItem => { force!(super_predicates_of, def_id!()); } + DepKind::TraitDefOfItem => { force!(trait_def, def_id!()); } + DepKind::AdtDefOfItem => { force!(adt_def, def_id!()); } + DepKind::IsDefaultImpl => { force!(is_default_impl, def_id!()); } + DepKind::ImplTraitRef => { force!(impl_trait_ref, def_id!()); } + DepKind::ImplPolarity => { force!(impl_polarity, def_id!()); } + DepKind::ClosureKind => { force!(closure_kind, def_id!()); } + DepKind::FnSignature => { force!(fn_sig, def_id!()); } + DepKind::GenSignature => { force!(generator_sig, def_id!()); } + DepKind::CoerceUnsizedInfo => { force!(coerce_unsized_info, def_id!()); } + DepKind::ItemVariances => { force!(variances_of, def_id!()); } + DepKind::IsConstFn => { force!(is_const_fn, def_id!()); } + DepKind::IsForeignItem => { force!(is_foreign_item, def_id!()); } + DepKind::SizedConstraint => { force!(adt_sized_constraint, def_id!()); } + DepKind::DtorckConstraint => { force!(adt_dtorck_constraint, def_id!()); } + DepKind::AdtDestructor => { force!(adt_destructor, def_id!()); } + DepKind::AssociatedItemDefIds => { force!(associated_item_def_ids, def_id!()); } + DepKind::InherentImpls => { 
force!(inherent_impls, def_id!()); } + DepKind::TypeckBodiesKrate => { force!(typeck_item_bodies, LOCAL_CRATE); } + DepKind::TypeckTables => { force!(typeck_tables_of, def_id!()); } + DepKind::HasTypeckTables => { force!(has_typeck_tables, def_id!()); } + DepKind::SymbolName => { force!(def_symbol_name, def_id!()); } + DepKind::SpecializationGraph => { force!(specialization_graph_of, def_id!()); } + DepKind::ObjectSafety => { force!(is_object_safe, def_id!()); } + DepKind::TraitImpls => { force!(trait_impls_of, def_id!()); } + + DepKind::ParamEnv => { force!(param_env, def_id!()); } + DepKind::DescribeDef => { force!(describe_def, def_id!()); } + DepKind::DefSpan => { force!(def_span, def_id!()); } + DepKind::LookupStability => { force!(lookup_stability, def_id!()); } + DepKind::LookupDeprecationEntry => { + force!(lookup_deprecation_entry, def_id!()); + } + DepKind::ItemBodyNestedBodies => { force!(item_body_nested_bodies, def_id!()); } + DepKind::ConstIsRvaluePromotableToStatic => { + force!(const_is_rvalue_promotable_to_static, def_id!()); + } + DepKind::ImplParent => { force!(impl_parent, def_id!()); } + DepKind::TraitOfItem => { force!(trait_of_item, def_id!()); } + DepKind::IsExportedSymbol => { force!(is_exported_symbol, def_id!()); } + DepKind::IsMirAvailable => { force!(is_mir_available, def_id!()); } + DepKind::ItemAttrs => { force!(item_attrs, def_id!()); } + DepKind::FnArgNames => { force!(fn_arg_names, def_id!()); } + DepKind::DylibDepFormats => { force!(dylib_dependency_formats, krate!()); } + DepKind::IsPanicRuntime => { force!(is_panic_runtime, krate!()); } + DepKind::IsCompilerBuiltins => { force!(is_compiler_builtins, krate!()); } + DepKind::HasGlobalAllocator => { force!(has_global_allocator, krate!()); } + DepKind::ExternCrate => { force!(extern_crate, def_id!()); } + DepKind::LintLevels => { force!(lint_levels, LOCAL_CRATE); } + DepKind::InScopeTraits => { force!(in_scope_traits_map, def_id!().index); } + DepKind::ModuleExports => { force!(module_exports, def_id!()); } + DepKind::IsSanitizerRuntime => { force!(is_sanitizer_runtime, krate!()); } + DepKind::IsProfilerRuntime => { force!(is_profiler_runtime, krate!()); } + DepKind::GetPanicStrategy => { force!(panic_strategy, krate!()); } + DepKind::IsNoBuiltins => { force!(is_no_builtins, krate!()); } + DepKind::ImplDefaultness => { force!(impl_defaultness, def_id!()); } + DepKind::ExportedSymbolIds => { force!(exported_symbol_ids, krate!()); } + DepKind::NativeLibraries => { force!(native_libraries, krate!()); } + DepKind::PluginRegistrarFn => { force!(plugin_registrar_fn, krate!()); } + DepKind::DeriveRegistrarFn => { force!(derive_registrar_fn, krate!()); } + DepKind::CrateDisambiguator => { force!(crate_disambiguator, krate!()); } + DepKind::CrateHash => { force!(crate_hash, krate!()); } + DepKind::OriginalCrateName => { force!(original_crate_name, krate!()); } + + DepKind::AllTraitImplementations => { + force!(all_trait_implementations, krate!()); + } + + DepKind::IsDllimportForeignItem => { + force!(is_dllimport_foreign_item, def_id!()); + } + DepKind::IsStaticallyIncludedForeignItem => { + force!(is_statically_included_foreign_item, def_id!()); + } + DepKind::NativeLibraryKind => { force!(native_library_kind, def_id!()); } + DepKind::LinkArgs => { force!(link_args, LOCAL_CRATE); } + + DepKind::NamedRegion => { force!(named_region_map, def_id!().index); } + DepKind::IsLateBound => { force!(is_late_bound_map, def_id!().index); } + DepKind::ObjectLifetimeDefaults => { + force!(object_lifetime_defaults_map, 
def_id!().index); + } + + DepKind::Visibility => { force!(visibility, def_id!()); } + DepKind::DepKind => { force!(dep_kind, krate!()); } + DepKind::CrateName => { force!(crate_name, krate!()); } + DepKind::ItemChildren => { force!(item_children, def_id!()); } + DepKind::ExternModStmtCnum => { force!(extern_mod_stmt_cnum, def_id!()); } + DepKind::GetLangItems => { force!(get_lang_items, LOCAL_CRATE); } + DepKind::DefinedLangItems => { force!(defined_lang_items, krate!()); } + DepKind::MissingLangItems => { force!(missing_lang_items, krate!()); } + DepKind::ExternConstBody => { force!(extern_const_body, def_id!()); } + DepKind::VisibleParentMap => { force!(visible_parent_map, LOCAL_CRATE); } + DepKind::MissingExternCrateItem => { + force!(missing_extern_crate_item, krate!()); + } + DepKind::UsedCrateSource => { force!(used_crate_source, krate!()); } + DepKind::PostorderCnums => { force!(postorder_cnums, LOCAL_CRATE); } + DepKind::HasCloneClosures => { force!(has_clone_closures, krate!()); } + DepKind::HasCopyClosures => { force!(has_copy_closures, krate!()); } + + DepKind::Freevars => { force!(freevars, def_id!()); } + DepKind::MaybeUnusedTraitImport => { + force!(maybe_unused_trait_import, def_id!()); + } + DepKind::MaybeUnusedExternCrates => { force!(maybe_unused_extern_crates, LOCAL_CRATE); } + DepKind::StabilityIndex => { force!(stability_index, LOCAL_CRATE); } + DepKind::AllCrateNums => { force!(all_crate_nums, LOCAL_CRATE); } + DepKind::ExportedSymbols => { force!(exported_symbols, krate!()); } + DepKind::CollectAndPartitionTranslationItems => { + force!(collect_and_partition_translation_items, LOCAL_CRATE); + } + DepKind::ExportName => { force!(export_name, def_id!()); } + DepKind::ContainsExternIndicator => { + force!(contains_extern_indicator, def_id!()); + } + DepKind::IsTranslatedFunction => { force!(is_translated_function, def_id!()); } + DepKind::OutputFilenames => { force!(output_filenames, LOCAL_CRATE); } + } + + true +} diff --git a/src/librustc/ty/maps/values.rs b/src/librustc/ty/maps/values.rs new file mode 100644 index 0000000000..165798d19f --- /dev/null +++ b/src/librustc/ty/maps/values.rs @@ -0,0 +1,49 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
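+//! Cycle recovery for queries: once `report_cycle` has emitted its error, the
+//! caller still has to produce *some* value of the query's result type, and the
+//! `Value` trait below supplies it (`tcx.types.err` for `Ty`, `Default` where
+//! available, and an abort/ICE otherwise). The accessors generated in
+//! `plumbing.rs` use it roughly like this, with `type_of` standing in for an
+//! arbitrary query:
+//!
+//!     queries::type_of::try_get(tcx, span, key).unwrap_or_else(|mut e| {
+//!         e.emit();
+//!         Value::from_cycle_error(tcx.global_tcx())
+//!     })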
+ +use ty::{self, Ty, TyCtxt}; + +use syntax::symbol::Symbol; + +pub(super) trait Value<'tcx>: Sized { + fn from_cycle_error<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self; +} + +impl<'tcx, T> Value<'tcx> for T { + default fn from_cycle_error<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> T { + tcx.sess.abort_if_errors(); + bug!("Value::from_cycle_error called without errors"); + } +} + +impl<'tcx, T: Default> Value<'tcx> for T { + default fn from_cycle_error<'a>(_: TyCtxt<'a, 'tcx, 'tcx>) -> T { + T::default() + } +} + +impl<'tcx> Value<'tcx> for Ty<'tcx> { + fn from_cycle_error<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> { + tcx.types.err + } +} + +impl<'tcx> Value<'tcx> for ty::DtorckConstraint<'tcx> { + fn from_cycle_error<'a>(_: TyCtxt<'a, 'tcx, 'tcx>) -> Self { + Self::empty() + } +} + +impl<'tcx> Value<'tcx> for ty::SymbolName { + fn from_cycle_error<'a>(_: TyCtxt<'a, 'tcx, 'tcx>) -> Self { + ty::SymbolName { name: Symbol::intern("").as_str() } + } +} + diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs index f0a194752c..c4f526d801 100644 --- a/src/librustc/ty/mod.rs +++ b/src/librustc/ty/mod.rs @@ -23,8 +23,8 @@ use middle::const_val::ConstVal; use middle::lang_items::{FnTraitLangItem, FnMutTraitLangItem, FnOnceTraitLangItem}; use middle::privacy::AccessLevels; use middle::resolve_lifetime::ObjectLifetimeDefault; -use middle::region::CodeExtent; use mir::Mir; +use mir::GeneratorLayout; use traits; use ty; use ty::subst::{Subst, Substs}; @@ -59,15 +59,14 @@ use rustc_data_structures::transitive_relation::TransitiveRelation; use hir; pub use self::sty::{Binder, DebruijnIndex}; -pub use self::sty::{FnSig, PolyFnSig}; +pub use self::sty::{FnSig, GenSig, PolyFnSig, PolyGenSig}; pub use self::sty::{InferTy, ParamTy, ProjectionTy, ExistentialPredicate}; -pub use self::sty::{ClosureSubsts, TypeAndMut}; +pub use self::sty::{ClosureSubsts, GeneratorInterior, TypeAndMut}; pub use self::sty::{TraitRef, TypeVariants, PolyTraitRef}; pub use self::sty::{ExistentialTraitRef, PolyExistentialTraitRef}; -pub use self::sty::{ExistentialProjection, PolyExistentialProjection}; +pub use self::sty::{ExistentialProjection, PolyExistentialProjection, Const}; pub use self::sty::{BoundRegion, EarlyBoundRegion, FreeRegion, Region}; pub use self::sty::RegionKind; -pub use self::sty::Issue32330; pub use self::sty::{TyVid, IntVid, FloatVid, RegionVid, SkolemizedRegionVid}; pub use self::sty::BoundRegion::*; pub use self::sty::InferTy::*; @@ -122,7 +121,6 @@ mod sty; #[derive(Clone)] pub struct CrateAnalysis { pub access_levels: Rc, - pub reachable: Rc, pub name: String, pub glob_map: Option, } @@ -400,33 +398,35 @@ pub struct CReaderCacheKey { // check whether the type has various kinds of types in it without // recursing over the type itself. bitflags! 
{ - flags TypeFlags: u32 { - const HAS_PARAMS = 1 << 0, - const HAS_SELF = 1 << 1, - const HAS_TY_INFER = 1 << 2, - const HAS_RE_INFER = 1 << 3, - const HAS_RE_SKOL = 1 << 4, - const HAS_RE_EARLY_BOUND = 1 << 5, - const HAS_FREE_REGIONS = 1 << 6, - const HAS_TY_ERR = 1 << 7, - const HAS_PROJECTION = 1 << 8, - const HAS_TY_CLOSURE = 1 << 9, + pub struct TypeFlags: u32 { + const HAS_PARAMS = 1 << 0; + const HAS_SELF = 1 << 1; + const HAS_TY_INFER = 1 << 2; + const HAS_RE_INFER = 1 << 3; + const HAS_RE_SKOL = 1 << 4; + const HAS_RE_EARLY_BOUND = 1 << 5; + const HAS_FREE_REGIONS = 1 << 6; + const HAS_TY_ERR = 1 << 7; + const HAS_PROJECTION = 1 << 8; + + // FIXME: Rename this to the actual property since it's used for generators too + const HAS_TY_CLOSURE = 1 << 9; // true if there are "names" of types and regions and so forth // that are local to a particular fn - const HAS_LOCAL_NAMES = 1 << 10, + const HAS_LOCAL_NAMES = 1 << 10; // Present if the type belongs in a local type context. // Only set for TyInfer other than Fresh. - const KEEP_IN_LOCAL_TCX = 1 << 11, + const KEEP_IN_LOCAL_TCX = 1 << 11; // Is there a projection that does not involve a bound region? // Currently we can't normalize projections w/ bound regions. - const HAS_NORMALIZABLE_PROJECTION = 1 << 12, + const HAS_NORMALIZABLE_PROJECTION = 1 << 12; const NEEDS_SUBST = TypeFlags::HAS_PARAMS.bits | TypeFlags::HAS_SELF.bits | - TypeFlags::HAS_RE_EARLY_BOUND.bits, + TypeFlags::HAS_RE_EARLY_BOUND.bits; // Flags representing the nominal content of a type, // computed by FlagsComputation. If you add a new nominal @@ -442,7 +442,7 @@ bitflags! { TypeFlags::HAS_PROJECTION.bits | TypeFlags::HAS_TY_CLOSURE.bits | TypeFlags::HAS_LOCAL_NAMES.bits | - TypeFlags::KEEP_IN_LOCAL_TCX.bits, + TypeFlags::KEEP_IN_LOCAL_TCX.bits; } } @@ -500,9 +500,9 @@ impl<'tcx> TyS<'tcx> { } } -impl<'a, 'gcx, 'tcx> HashStable> for ty::TyS<'gcx> { +impl<'gcx> HashStable> for ty::TyS<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let ty::TyS { ref sty, @@ -574,7 +574,7 @@ impl Slice { /// by the upvar) and the id of the closure expression. #[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub struct UpvarId { - pub var_id: DefIndex, + pub var_id: hir::HirId, pub closure_expr_id: DefIndex, } @@ -675,6 +675,8 @@ pub struct TypeParameterDef { /// on generic parameter `T`, asserts data behind the parameter /// `T` won't be accessed during the parent type's `Drop` impl. pub pure_wrt_drop: bool, + + pub synthetic: Option, } #[derive(Copy, Clone, RustcEncodable, RustcDecodable)] @@ -682,7 +684,6 @@ pub struct RegionParameterDef { pub name: Name, pub def_id: DefId, pub index: u32, - pub issue_32330: Option, /// `pure_wrt_drop`, set by the (unsafe) `#[may_dangle]` attribute /// on generic parameter `'a`, asserts data of lifetime `'a` @@ -712,6 +713,13 @@ impl ty::EarlyBoundRegion { /// Information about the formal type/lifetime parameters associated /// with an item or method. Analogous to hir::Generics. +/// +/// Note that in the presence of a `Self` parameter, the ordering here +/// is different from the ordering in a Substs. 
Substs are ordered as +/// Self, *Regions, *Other Type Params, (...child generics) +/// while this struct is ordered as +/// regions = Regions +/// types = [Self, *Other Type Params] #[derive(Clone, Debug, RustcEncodable, RustcDecodable)] pub struct Generics { pub parent: Option, @@ -728,7 +736,7 @@ pub struct Generics { pub has_late_bound_regions: Option, } -impl Generics { +impl<'a, 'gcx, 'tcx> Generics { pub fn parent_count(&self) -> usize { self.parent_regions as usize + self.parent_types as usize } @@ -741,14 +749,52 @@ impl Generics { self.parent_count() + self.own_count() } - pub fn region_param(&self, param: &EarlyBoundRegion) -> &RegionParameterDef { - assert_eq!(self.parent_count(), 0); - &self.regions[param.index as usize - self.has_self as usize] - } - - pub fn type_param(&self, param: &ParamTy) -> &TypeParameterDef { - assert_eq!(self.parent_count(), 0); - &self.types[param.idx as usize - self.has_self as usize - self.regions.len()] + pub fn region_param(&'tcx self, + param: &EarlyBoundRegion, + tcx: TyCtxt<'a, 'gcx, 'tcx>) + -> &'tcx RegionParameterDef + { + if let Some(index) = param.index.checked_sub(self.parent_count() as u32) { + &self.regions[index as usize - self.has_self as usize] + } else { + tcx.generics_of(self.parent.expect("parent_count>0 but no parent?")) + .region_param(param, tcx) + } + } + + /// Returns the `TypeParameterDef` associated with this `ParamTy`. + pub fn type_param(&'tcx self, + param: &ParamTy, + tcx: TyCtxt<'a, 'gcx, 'tcx>) + -> &TypeParameterDef { + if let Some(idx) = param.idx.checked_sub(self.parent_count() as u32) { + // non-Self type parameters are always offset by exactly + // `self.regions.len()`. In the absence of a Self, this is obvious, + // but even in the absence of a `Self` we just have to "compensate" + // for the regions: + // + // For example, for `trait Foo<'a, 'b, T1, T2>`, the + // situation is: + // Substs: + // 0 1 2 3 4 + // Self 'a 'b T1 T2 + // generics.types: + // 0 1 2 + // Self T1 T2 + // And it can be seen that to move from a substs offset to a + // generics offset you just have to offset by the number of regions. + let type_param_offset = self.regions.len(); + if let Some(idx) = (idx as usize).checked_sub(type_param_offset) { + assert!(!(self.has_self && idx == 0)); + &self.types[idx] + } else { + assert!(self.has_self && idx == 0); + &self.types[0] + } + } else { + tcx.generics_of(self.parent.expect("parent_count>0 but no parent?")) + .type_param(param, tcx) + } } } @@ -846,6 +892,9 @@ pub enum Predicate<'tcx> { /// `T1 <: T2` Subtype(PolySubtypePredicate<'tcx>), + + /// Constant initializer must evaluate successfully. + ConstEvaluatable(DefId, &'tcx Substs<'tcx>), } impl<'a, 'gcx, 'tcx> Predicate<'tcx> { @@ -938,6 +987,8 @@ impl<'a, 'gcx, 'tcx> Predicate<'tcx> { Predicate::ObjectSafe(trait_def_id), Predicate::ClosureKind(closure_def_id, kind) => Predicate::ClosureKind(closure_def_id, kind), + Predicate::ConstEvaluatable(def_id, const_substs) => + Predicate::ConstEvaluatable(def_id, const_substs.subst(tcx, substs)), } } } @@ -1120,6 +1171,9 @@ impl<'tcx> Predicate<'tcx> { ty::Predicate::ClosureKind(_closure_def_id, _kind) => { vec![] } + ty::Predicate::ConstEvaluatable(_, substs) => { + substs.types().collect() + } }; // The only reason to collect into a vector here is that I was @@ -1142,7 +1196,8 @@ impl<'tcx> Predicate<'tcx> { Predicate::WellFormed(..) | Predicate::ObjectSafe(..) | Predicate::ClosureKind(..) | - Predicate::TypeOutlives(..) => { + Predicate::TypeOutlives(..) | + Predicate::ConstEvaluatable(..) 
=> { None } } @@ -1243,6 +1298,22 @@ impl<'tcx, T> ParamEnvAnd<'tcx, T> { } } +impl<'gcx, T> HashStable> for ParamEnvAnd<'gcx, T> + where T: HashStable> +{ + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + let ParamEnvAnd { + ref param_env, + ref value + } = *self; + + param_env.hash_stable(hcx, hasher); + value.hash_stable(hcx, hasher); + } +} + #[derive(Copy, Clone, Debug)] pub struct Destructor { /// The def-id of the destructor method @@ -1250,13 +1321,13 @@ pub struct Destructor { } bitflags! { - flags AdtFlags: u32 { - const NO_ADT_FLAGS = 0, - const IS_ENUM = 1 << 0, - const IS_PHANTOM_DATA = 1 << 1, - const IS_FUNDAMENTAL = 1 << 2, - const IS_UNION = 1 << 3, - const IS_BOX = 1 << 4, + pub struct AdtFlags: u32 { + const NO_ADT_FLAGS = 0; + const IS_ENUM = 1 << 0; + const IS_PHANTOM_DATA = 1 << 1; + const IS_FUNDAMENTAL = 1 << 2; + const IS_UNION = 1 << 3; + const IS_BOX = 1 << 4; } } @@ -1326,9 +1397,9 @@ impl<'tcx> serialize::UseSpecializedEncodable for &'tcx AdtDef { impl<'tcx> serialize::UseSpecializedDecodable for &'tcx AdtDef {} -impl<'a, 'gcx, 'tcx> HashStable> for AdtDef { +impl<'gcx> HashStable> for AdtDef { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let ty::AdtDef { did, @@ -1349,18 +1420,18 @@ pub enum AdtKind { Struct, Union, Enum } bitflags! { #[derive(RustcEncodable, RustcDecodable, Default)] - flags ReprFlags: u8 { - const IS_C = 1 << 0, - const IS_PACKED = 1 << 1, - const IS_SIMD = 1 << 2, + pub struct ReprFlags: u8 { + const IS_C = 1 << 0; + const IS_PACKED = 1 << 1; + const IS_SIMD = 1 << 2; // Internal only for now. If true, don't reorder fields. - const IS_LINEAR = 1 << 3, + const IS_LINEAR = 1 << 3; // Any of these flags being set prevent field reordering optimisation. const IS_UNOPTIMISABLE = ReprFlags::IS_C.bits | ReprFlags::IS_PACKED.bits | ReprFlags::IS_SIMD.bits | - ReprFlags::IS_LINEAR.bits, + ReprFlags::IS_LINEAR.bits; } } @@ -1451,10 +1522,10 @@ impl<'a, 'gcx, 'tcx> AdtDef { if attr::contains_name(&attrs, "fundamental") { flags = flags | AdtFlags::IS_FUNDAMENTAL; } - if Some(did) == tcx.lang_items.phantom_data() { + if Some(did) == tcx.lang_items().phantom_data() { flags = flags | AdtFlags::IS_PHANTOM_DATA; } - if Some(did) == tcx.lang_items.owned_box() { + if Some(did) == tcx.lang_items().owned_box() { flags = flags | AdtFlags::IS_BOX; } match kind { @@ -1601,7 +1672,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { if let VariantDiscr::Explicit(expr_did) = v.discr { let substs = Substs::identity_for_item(tcx.global_tcx(), expr_did); match tcx.const_eval(param_env.and((expr_did, substs))) { - Ok(ConstVal::Integral(v)) => { + Ok(&ty::Const { val: ConstVal::Integral(v), .. }) => { discr = v; } err => { @@ -1641,7 +1712,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { ty::VariantDiscr::Explicit(expr_did) => { let substs = Substs::identity_for_item(tcx.global_tcx(), expr_did); match tcx.const_eval(param_env.and((expr_did, substs))) { - Ok(ConstVal::Integral(v)) => { + Ok(&ty::Const { val: ConstVal::Integral(v), .. 
}) => { explicit_value = v; break; } @@ -1665,11 +1736,11 @@ impl<'a, 'gcx, 'tcx> AdtDef { match repr_type { attr::UnsignedInt(ty) => { ConstInt::new_unsigned_truncating(discr, ty, - tcx.sess.target.uint_type) + tcx.sess.target.usize_ty) } attr::SignedInt(ty) => { ConstInt::new_signed_truncating(discr as i128, ty, - tcx.sess.target.int_type) + tcx.sess.target.isize_ty) } } } @@ -1712,7 +1783,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { let result = match ty.sty { TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) | TyRawPtr(..) | TyRef(..) | TyFnDef(..) | TyFnPtr(_) | - TyArray(..) | TyClosure(..) | TyNever => { + TyArray(..) | TyClosure(..) | TyGenerator(..) | TyNever => { vec![] } @@ -1750,7 +1821,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { // we know that `T` is Sized and do not need to check // it on the impl. - let sized_trait = match tcx.lang_items.sized_trait() { + let sized_trait = match tcx.lang_items().sized_trait() { Some(x) => x, _ => return vec![ty] }; @@ -1979,25 +2050,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - pub fn local_var_name_str(self, id: NodeId) -> InternedString { - match self.hir.find(id) { - Some(hir_map::NodeBinding(pat)) => { - match pat.node { - hir::PatKind::Binding(_, _, ref path1, _) => path1.node.as_str(), - _ => { - bug!("Variable id {} maps to {:?}, not local", id, pat); - }, - } - }, - r => bug!("Variable id {} maps to {:?}, not local", id, r), - } - } - - pub fn local_var_name_str_def_index(self, def_index: DefIndex) -> InternedString { - let node_id = self.hir.as_local_node_id(DefId::local(def_index)).unwrap(); - self.local_var_name_str(node_id) - } - pub fn expr_is_lval(self, expr: &hir::Expr) -> bool { match expr.node { hir::ExprPath(hir::QPath::Resolved(_, ref path)) => { @@ -2045,6 +2097,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { hir::ExprBox(..) | hir::ExprAddrOf(..) | hir::ExprBinary(..) | + hir::ExprYield(..) | hir::ExprCast(..) => { false } @@ -2179,43 +2232,13 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - pub fn def_key(self, id: DefId) -> hir_map::DefKey { - if id.is_local() { - self.hir.def_key(id) - } else { - self.sess.cstore.def_key(id) - } - } - - /// Convert a `DefId` into its fully expanded `DefPath` (every - /// `DefId` is really just an interned def-path). - /// - /// Note that if `id` is not local to this crate, the result will - /// be a non-local `DefPath`. - pub fn def_path(self, id: DefId) -> hir_map::DefPath { - if id.is_local() { - self.hir.def_path(id) - } else { - self.sess.cstore.def_path(id) - } - } - - #[inline] - pub fn def_path_hash(self, def_id: DefId) -> hir_map::DefPathHash { - if def_id.is_local() { - self.hir.definitions().def_path_hash(def_id.index) - } else { - self.sess.cstore.def_path_hash(def_id) - } - } - - pub fn item_name(self, id: DefId) -> ast::Name { + pub fn item_name(self, id: DefId) -> InternedString { if let Some(id) = self.hir.as_local_node_id(id) { - self.hir.name(id) + self.hir.name(id).as_str() } else if id.index == CRATE_DEF_INDEX { - self.sess.cstore.original_crate_name(id.krate) + self.original_crate_name(id.krate).as_str() } else { - let def_key = self.sess.cstore.def_key(id); + let def_key = self.def_key(id); // The name of a StructCtor is that of its struct parent. 
if let hir_map::DefPathData::StructCtor = def_key.disambiguated_data.data { self.item_name(DefId { @@ -2277,6 +2300,10 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.trait_def(trait_def_id).has_default_impl } + pub fn generator_layout(self, def_id: DefId) -> &'tcx GeneratorLayout<'tcx> { + self.optimized_mir(def_id).generator_layout.as_ref().unwrap() + } + /// Given the def_id of an impl, return the def_id of the trait it implements. /// If it implements no trait, return `None`. pub fn trait_id_of_impl(self, def_id: DefId) -> Option { @@ -2307,10 +2334,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - pub fn node_scope_region(self, id: NodeId) -> Region<'tcx> { - self.mk_region(ty::ReScope(CodeExtent::Misc(id))) - } - /// Looks up the span of `impl_did` if the impl is local; otherwise returns `Err` /// with the name of the crate containing the impl. pub fn span_of_impl(self, impl_did: DefId) -> Result { @@ -2318,10 +2341,17 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { let node_id = self.hir.as_local_node_id(impl_did).unwrap(); Ok(self.hir.span(node_id)) } else { - Err(self.sess.cstore.crate_name(impl_did.krate)) + Err(self.crate_name(impl_did.krate)) } } + // Hygienically compare a use-site name (`use_name`) for a field or an associated item with its + // supposed definition name (`def_name`). The method also needs `DefId` of the supposed + // definition's parent/scope to perform comparison. + pub fn hygienic_eq(self, use_name: Name, def_name: Name, def_parent_def_id: DefId) -> bool { + self.adjust(use_name, def_parent_def_id, DUMMY_NODE_ID).0 == def_name.to_ident() + } + pub fn adjust(self, name: Name, scope: DefId, block: NodeId) -> (Ident, DefId) { self.adjust_ident(name.to_ident(), scope, block) } @@ -2333,6 +2363,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { }; let scope = match ident.ctxt.adjust(expansion) { Some(macro_def) => self.hir.definitions().macro_def_scope(macro_def), + None if block == DUMMY_NODE_ID => DefId::local(CRATE_DEF_INDEX), // Dummy DefId None => self.hir.get_module_parent(block), }; (ident, scope) @@ -2343,9 +2374,10 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn with_freevars(self, fid: NodeId, f: F) -> T where F: FnOnce(&[hir::Freevar]) -> T, { - match self.freevars.borrow().get(&fid) { + let def_id = self.hir.local_def_id(fid); + match self.freevars(def_id) { None => f(&[]), - Some(d) => f(&d[..]) + Some(d) => f(&d), } } } @@ -2513,8 +2545,21 @@ fn param_env<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, traits::normalize_param_env_or_error(tcx, def_id, unnormalized_env, cause) } +fn crate_disambiguator<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + crate_num: CrateNum) -> Symbol { + assert_eq!(crate_num, LOCAL_CRATE); + tcx.sess.local_crate_disambiguator() +} + +fn original_crate_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + crate_num: CrateNum) -> Symbol { + assert_eq!(crate_num, LOCAL_CRATE); + tcx.crate_name.clone() +} + pub fn provide(providers: &mut ty::maps::Providers) { util::provide(providers); + context::provide(providers); *providers = ty::maps::Providers { associated_item, associated_item_def_ids, @@ -2523,6 +2568,8 @@ pub fn provide(providers: &mut ty::maps::Providers) { def_span, param_env, trait_of_item, + crate_disambiguator, + original_crate_name, trait_impls_of: trait_def::trait_impls_of_provider, ..*providers }; @@ -2599,6 +2646,10 @@ pub struct SymbolName { pub name: InternedString } +impl_stable_hash_for!(struct self::SymbolName { + name +}); + impl Deref for SymbolName { type Target = str; diff --git 
a/src/librustc/ty/outlives.rs b/src/librustc/ty/outlives.rs index ab1b1b3857..657ed40779 100644 --- a/src/librustc/ty/outlives.rs +++ b/src/librustc/ty/outlives.rs @@ -115,6 +115,16 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } + ty::TyGenerator(def_id, ref substs, ref interior) => { + // Same as the closure case + for upvar_ty in substs.upvar_tys(def_id, *self) { + self.compute_components(upvar_ty, out); + } + + // But generators can have additional interior types + self.compute_components(interior.witness, out); + } + // OutlivesTypeParameterEnv -- the actual checking that `X:'a` // is implied by the environment is done in regionck. ty::TyParam(p) => { diff --git a/src/librustc/ty/relate.rs b/src/librustc/ty/relate.rs index c035817d66..309880ba06 100644 --- a/src/librustc/ty/relate.rs +++ b/src/librustc/ty/relate.rs @@ -14,9 +14,12 @@ //! type equality, etc. use hir::def_id::DefId; +use middle::const_val::ConstVal; +use traits::Reveal; use ty::subst::{Kind, Substs}; use ty::{self, Ty, TyCtxt, TypeFoldable}; use ty::error::{ExpectedFound, TypeError}; +use util::common::ErrorReported; use std::rc::Rc; use std::iter; use syntax::abi; @@ -389,6 +392,18 @@ pub fn super_relate_tys<'a, 'gcx, 'tcx, R>(relation: &mut R, Ok(tcx.mk_dynamic(relation.relate(a_obj, b_obj)?, region_bound)) } + (&ty::TyGenerator(a_id, a_substs, a_interior), + &ty::TyGenerator(b_id, b_substs, b_interior)) + if a_id == b_id => + { + // All TyGenerator types with the same id represent + // the (anonymous) type of the same generator expression. So + // all of their regions should be equated. + let substs = relation.relate(&a_substs, &b_substs)?; + let interior = relation.relate(&a_interior, &b_interior)?; + Ok(tcx.mk_generator(a_id, substs, interior)) + } + (&ty::TyClosure(a_id, a_substs), &ty::TyClosure(b_id, b_substs)) if a_id == b_id => @@ -416,10 +431,45 @@ pub fn super_relate_tys<'a, 'gcx, 'tcx, R>(relation: &mut R, (&ty::TyArray(a_t, sz_a), &ty::TyArray(b_t, sz_b)) => { let t = relation.relate(&a_t, &b_t)?; - if sz_a == sz_b { - Ok(tcx.mk_array(t, sz_a)) - } else { - Err(TypeError::FixedArraySize(expected_found(relation, &sz_a, &sz_b))) + assert_eq!(sz_a.ty, tcx.types.usize); + assert_eq!(sz_b.ty, tcx.types.usize); + let to_u64 = |x: &'tcx ty::Const<'tcx>| -> Result { + match x.val { + ConstVal::Integral(x) => Ok(x.to_u64().unwrap()), + ConstVal::Unevaluated(def_id, substs) => { + // FIXME(eddyb) get the right param_env. + let param_env = ty::ParamEnv::empty(Reveal::UserFacing); + match tcx.lift_to_global(&substs) { + Some(substs) => { + match tcx.const_eval(param_env.and((def_id, substs))) { + Ok(&ty::Const { val: ConstVal::Integral(x), .. }) => { + return Ok(x.to_u64().unwrap()); + } + _ => {} + } + } + None => {} + } + tcx.sess.delay_span_bug(tcx.def_span(def_id), + "array length could not be evaluated"); + Err(ErrorReported) + } + _ => bug!("arrays should not have {:?} as length", x) + } + }; + match (to_u64(sz_a), to_u64(sz_b)) { + (Ok(sz_a_u64), Ok(sz_b_u64)) => { + if sz_a_u64 == sz_b_u64 { + Ok(tcx.mk_ty(ty::TyArray(t, sz_a))) + } else { + Err(TypeError::FixedArraySize( + expected_found(relation, &sz_a_u64, &sz_b_u64))) + } + } + // We reported an error or will ICE, so we can return TyError. 
+ (Err(ErrorReported), _) | (_, Err(ErrorReported)) => { + Ok(tcx.types.err) + } } } @@ -512,6 +562,18 @@ impl<'tcx> Relate<'tcx> for ty::ClosureSubsts<'tcx> { } } +impl<'tcx> Relate<'tcx> for ty::GeneratorInterior<'tcx> { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &ty::GeneratorInterior<'tcx>, + b: &ty::GeneratorInterior<'tcx>) + -> RelateResult<'tcx, ty::GeneratorInterior<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + let interior = relation.relate(&a.witness, &b.witness)?; + Ok(ty::GeneratorInterior::new(interior)) + } +} + impl<'tcx> Relate<'tcx> for &'tcx Substs<'tcx> { fn relate<'a, 'gcx, R>(relation: &mut R, a: &&'tcx Substs<'tcx>, diff --git a/src/librustc/ty/structural_impls.rs b/src/librustc/ty/structural_impls.rs index 34be80164d..54d55748c8 100644 --- a/src/librustc/ty/structural_impls.rs +++ b/src/librustc/ty/structural_impls.rs @@ -9,6 +9,7 @@ // except according to those terms. use infer::type_variable; +use middle::const_val::{self, ConstVal, ConstAggregate, ConstEvalErr}; use ty::{self, Lift, Ty, TyCtxt}; use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; use rustc_data_structures::accumulate_vec::AccumulateVec; @@ -29,6 +30,15 @@ impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>> Lift<'tcx> for (A, B) { } } +impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>, C: Lift<'tcx>> Lift<'tcx> for (A, B, C) { + type Lifted = (A::Lifted, B::Lifted, C::Lifted); + fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.0).and_then(|a| { + tcx.lift(&self.1).and_then(|b| tcx.lift(&self.2).map(|c| (a, b, c))) + }) + } +} + impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Option { type Lifted = Option; fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { @@ -49,6 +59,13 @@ impl<'tcx, T: Lift<'tcx>, E: Lift<'tcx>> Lift<'tcx> for Result { } } +impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Box { + type Lifted = Box; + fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { + tcx.lift(&**self).map(Box::new) + } +} + impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for [T] { type Lifted = Vec; fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { @@ -200,6 +217,11 @@ impl<'a, 'tcx> Lift<'tcx> for ty::Predicate<'a> { ty::Predicate::ObjectSafe(trait_def_id) => { Some(ty::Predicate::ObjectSafe(trait_def_id)) } + ty::Predicate::ConstEvaluatable(def_id, substs) => { + tcx.lift(&substs).map(|substs| { + ty::Predicate::ConstEvaluatable(def_id, substs) + }) + } } } } @@ -211,6 +233,32 @@ impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::Binder { } } +impl<'a, 'tcx> Lift<'tcx> for ty::ParamEnv<'a> { + type Lifted = ty::ParamEnv<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.caller_bounds).map(|caller_bounds| { + ty::ParamEnv { + reveal: self.reveal, + caller_bounds, + } + }) + } +} + +impl<'a, 'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::ParamEnvAnd<'a, T> { + type Lifted = ty::ParamEnvAnd<'tcx, T::Lifted>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.param_env).and_then(|param_env| { + tcx.lift(&self.value).map(|value| { + ty::ParamEnvAnd { + param_env, + value, + } + }) + }) + } +} + impl<'a, 'tcx> Lift<'tcx> for ty::ClosureSubsts<'a> { type Lifted = ty::ClosureSubsts<'tcx>; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { @@ -220,6 +268,15 @@ impl<'a, 'tcx> Lift<'tcx> for ty::ClosureSubsts<'a> { } } +impl<'a, 'tcx> Lift<'tcx> for ty::GeneratorInterior<'a> { + type Lifted = ty::GeneratorInterior<'tcx>; + 
fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.witness).map(|witness| { + ty::GeneratorInterior { witness } + }) + } +} + impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::Adjustment<'a> { type Lifted = ty::adjustment::Adjustment<'tcx>; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { @@ -283,6 +340,19 @@ impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::AutoBorrow<'a> { } } +impl<'a, 'tcx> Lift<'tcx> for ty::GenSig<'a> { + type Lifted = ty::GenSig<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&(self.yield_ty, self.return_ty)) + .map(|(yield_ty, return_ty)| { + ty::GenSig { + yield_ty, + return_ty, + } + }) + } +} + impl<'a, 'tcx> Lift<'tcx> for ty::FnSig<'a> { type Lifted = ty::FnSig<'tcx>; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { @@ -340,19 +410,11 @@ impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> { RegionsDoesNotOutlive(a, b) => { return tcx.lift(&(a, b)).map(|(a, b)| RegionsDoesNotOutlive(a, b)) } - RegionsNotSame(a, b) => { - return tcx.lift(&(a, b)).map(|(a, b)| RegionsNotSame(a, b)) + RegionsInsufficientlyPolymorphic(a, b) => { + return tcx.lift(&b).map(|b| RegionsInsufficientlyPolymorphic(a, b)) } - RegionsNoOverlap(a, b) => { - return tcx.lift(&(a, b)).map(|(a, b)| RegionsNoOverlap(a, b)) - } - RegionsInsufficientlyPolymorphic(a, b, ref c) => { - let c = c.clone(); - return tcx.lift(&b).map(|b| RegionsInsufficientlyPolymorphic(a, b, c)) - } - RegionsOverlyPolymorphic(a, b, ref c) => { - let c = c.clone(); - return tcx.lift(&b).map(|b| RegionsOverlyPolymorphic(a, b, c)) + RegionsOverlyPolymorphic(a, b) => { + return tcx.lift(&b).map(|b| RegionsOverlyPolymorphic(a, b)) } IntMismatch(x) => IntMismatch(x), FloatMismatch(x) => FloatMismatch(x), @@ -371,6 +433,64 @@ impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> { } } +impl<'a, 'tcx> Lift<'tcx> for ConstEvalErr<'a> { + type Lifted = ConstEvalErr<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.kind).map(|kind| { + ConstEvalErr { + span: self.span, + kind, + } + }) + } +} + +impl<'a, 'tcx> Lift<'tcx> for const_val::ErrKind<'a> { + type Lifted = const_val::ErrKind<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + use middle::const_val::ErrKind::*; + + Some(match *self { + CannotCast => CannotCast, + MissingStructField => MissingStructField, + NonConstPath => NonConstPath, + UnimplementedConstVal(s) => UnimplementedConstVal(s), + ExpectedConstTuple => ExpectedConstTuple, + ExpectedConstStruct => ExpectedConstStruct, + IndexedNonVec => IndexedNonVec, + IndexNotUsize => IndexNotUsize, + IndexOutOfBounds { len, index } => IndexOutOfBounds { len, index }, + MiscBinaryOp => MiscBinaryOp, + MiscCatchAll => MiscCatchAll, + IndexOpFeatureGated => IndexOpFeatureGated, + Math(ref e) => Math(e.clone()), + + LayoutError(ref e) => { + return tcx.lift(e).map(LayoutError) + } + ErroneousReferencedConstant(ref e) => { + return tcx.lift(e).map(ErroneousReferencedConstant) + } + + TypeckError => TypeckError, + }) + } +} + +impl<'a, 'tcx> Lift<'tcx> for ty::layout::LayoutError<'a> { + type Lifted = ty::layout::LayoutError<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + match *self { + ty::layout::LayoutError::Unknown(ref ty) => { + tcx.lift(ty).map(ty::layout::LayoutError::Unknown) + } + ty::layout::LayoutError::SizeOverflow(ref ty) => { + 
tcx.lift(ty).map(ty::layout::LayoutError::SizeOverflow) + } + } + } +} + /////////////////////////////////////////////////////////////////////////// // TypeFoldable implementations. // @@ -385,6 +505,13 @@ impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> { macro_rules! CopyImpls { ($($ty:ty),+) => { $( + impl<'tcx> Lift<'tcx> for $ty { + type Lifted = Self; + fn lift_to_tcx<'a, 'gcx>(&self, _: TyCtxt<'a, 'gcx, 'tcx>) -> Option { + Some(*self) + } + } + impl<'tcx> TypeFoldable<'tcx> for $ty { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, _: &mut F) -> $ty { *self @@ -398,7 +525,7 @@ macro_rules! CopyImpls { } } -CopyImpls! { (), hir::Unsafety, abi::Abi, hir::def_id::DefId } +CopyImpls! { (), hir::Unsafety, abi::Abi, hir::def_id::DefId, ::mir::Local } impl<'tcx, T:TypeFoldable<'tcx>, U:TypeFoldable<'tcx>> TypeFoldable<'tcx> for (T, U) { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> (T, U) { @@ -528,7 +655,7 @@ impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { let sty = match self.sty { ty::TyRawPtr(tm) => ty::TyRawPtr(tm.fold_with(folder)), - ty::TyArray(typ, sz) => ty::TyArray(typ.fold_with(folder), sz), + ty::TyArray(typ, sz) => ty::TyArray(typ.fold_with(folder), sz.fold_with(folder)), ty::TySlice(typ) => ty::TySlice(typ.fold_with(folder)), ty::TyAdt(tid, substs) => ty::TyAdt(tid, substs.fold_with(folder)), ty::TyDynamic(ref trait_ty, ref region) => @@ -541,6 +668,9 @@ impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { ty::TyRef(ref r, tm) => { ty::TyRef(r.fold_with(folder), tm.fold_with(folder)) } + ty::TyGenerator(did, substs, interior) => { + ty::TyGenerator(did, substs.fold_with(folder), interior.fold_with(folder)) + } ty::TyClosure(did, substs) => ty::TyClosure(did, substs.fold_with(folder)), ty::TyProjection(ref data) => ty::TyProjection(data.fold_with(folder)), ty::TyAnon(did, substs) => ty::TyAnon(did, substs.fold_with(folder)), @@ -563,7 +693,7 @@ impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { fn super_visit_with>(&self, visitor: &mut V) -> bool { match self.sty { ty::TyRawPtr(ref tm) => tm.visit_with(visitor), - ty::TyArray(typ, _sz) => typ.visit_with(visitor), + ty::TyArray(typ, sz) => typ.visit_with(visitor) || sz.visit_with(visitor), ty::TySlice(typ) => typ.visit_with(visitor), ty::TyAdt(_, substs) => substs.visit_with(visitor), ty::TyDynamic(ref trait_ty, ref reg) => @@ -572,6 +702,9 @@ impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { ty::TyFnDef(_, substs) => substs.visit_with(visitor), ty::TyFnPtr(ref f) => f.visit_with(visitor), ty::TyRef(r, ref tm) => r.visit_with(visitor) || tm.visit_with(visitor), + ty::TyGenerator(_did, ref substs, ref interior) => { + substs.visit_with(visitor) || interior.visit_with(visitor) + } ty::TyClosure(_did, ref substs) => substs.visit_with(visitor), ty::TyProjection(ref data) => data.visit_with(visitor), ty::TyAnon(_, ref substs) => substs.visit_with(visitor), @@ -596,6 +729,20 @@ impl<'tcx> TypeFoldable<'tcx> for ty::TypeAndMut<'tcx> { } } +impl<'tcx> TypeFoldable<'tcx> for ty::GenSig<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::GenSig { + yield_ty: self.yield_ty.fold_with(folder), + return_ty: self.return_ty.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.yield_ty.visit_with(visitor) || + self.return_ty.visit_with(visitor) + } +} + impl<'tcx> TypeFoldable<'tcx> for ty::FnSig<'tcx> { fn 
super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { let inputs_and_output = self.inputs_and_output.fold_with(folder); @@ -686,6 +833,16 @@ impl<'tcx> TypeFoldable<'tcx> for ty::ClosureSubsts<'tcx> { } } +impl<'tcx> TypeFoldable<'tcx> for ty::GeneratorInterior<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::GeneratorInterior::new(self.witness.fold_with(folder)) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.witness.visit_with(visitor) + } +} + impl<'tcx> TypeFoldable<'tcx> for ty::adjustment::Adjustment<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { ty::adjustment::Adjustment { @@ -812,6 +969,8 @@ impl<'tcx> TypeFoldable<'tcx> for ty::Predicate<'tcx> { ty::Predicate::ClosureKind(closure_def_id, kind), ty::Predicate::ObjectSafe(trait_def_id) => ty::Predicate::ObjectSafe(trait_def_id), + ty::Predicate::ConstEvaluatable(def_id, substs) => + ty::Predicate::ConstEvaluatable(def_id, substs.fold_with(folder)), } } @@ -826,6 +985,7 @@ impl<'tcx> TypeFoldable<'tcx> for ty::Predicate<'tcx> { ty::Predicate::WellFormed(data) => data.visit_with(visitor), ty::Predicate::ClosureKind(_closure_def_id, _kind) => false, ty::Predicate::ObjectSafe(_trait_def_id) => false, + ty::Predicate::ConstEvaluatable(_def_id, substs) => substs.visit_with(visitor), } } } @@ -998,19 +1158,11 @@ impl<'tcx> TypeFoldable<'tcx> for ty::error::TypeError<'tcx> { RegionsDoesNotOutlive(a, b) => { RegionsDoesNotOutlive(a.fold_with(folder), b.fold_with(folder)) }, - RegionsNotSame(a, b) => { - RegionsNotSame(a.fold_with(folder), b.fold_with(folder)) - }, - RegionsNoOverlap(a, b) => { - RegionsNoOverlap(a.fold_with(folder), b.fold_with(folder)) + RegionsInsufficientlyPolymorphic(a, b) => { + RegionsInsufficientlyPolymorphic(a, b.fold_with(folder)) }, - RegionsInsufficientlyPolymorphic(a, b, ref c) => { - let c = c.clone(); - RegionsInsufficientlyPolymorphic(a, b.fold_with(folder), c) - }, - RegionsOverlyPolymorphic(a, b, ref c) => { - let c = c.clone(); - RegionsOverlyPolymorphic(a, b.fold_with(folder), c) + RegionsOverlyPolymorphic(a, b) => { + RegionsOverlyPolymorphic(a, b.fold_with(folder)) }, IntMismatch(x) => IntMismatch(x), FloatMismatch(x) => FloatMismatch(x), @@ -1031,13 +1183,11 @@ impl<'tcx> TypeFoldable<'tcx> for ty::error::TypeError<'tcx> { match *self { UnsafetyMismatch(x) => x.visit_with(visitor), AbiMismatch(x) => x.visit_with(visitor), - RegionsDoesNotOutlive(a, b) | - RegionsNotSame(a, b) | - RegionsNoOverlap(a, b) => { + RegionsDoesNotOutlive(a, b) => { a.visit_with(visitor) || b.visit_with(visitor) }, - RegionsInsufficientlyPolymorphic(_, b, _) | - RegionsOverlyPolymorphic(_, b, _) => { + RegionsInsufficientlyPolymorphic(_, b) | + RegionsOverlyPolymorphic(_, b) => { b.visit_with(visitor) }, Sorts(x) => x.visit_with(visitor), @@ -1058,3 +1208,107 @@ impl<'tcx> TypeFoldable<'tcx> for ty::error::TypeError<'tcx> { } } } + +impl<'tcx> TypeFoldable<'tcx> for ConstVal<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + match *self { + ConstVal::Integral(i) => ConstVal::Integral(i), + ConstVal::Float(f) => ConstVal::Float(f), + ConstVal::Str(s) => ConstVal::Str(s), + ConstVal::ByteStr(b) => ConstVal::ByteStr(b), + ConstVal::Bool(b) => ConstVal::Bool(b), + ConstVal::Char(c) => ConstVal::Char(c), + ConstVal::Variant(def_id) => ConstVal::Variant(def_id), + ConstVal::Function(def_id, substs) => { + 
ConstVal::Function(def_id, substs.fold_with(folder)) + } + ConstVal::Aggregate(ConstAggregate::Struct(fields)) => { + let new_fields: Vec<_> = fields.iter().map(|&(name, v)| { + (name, v.fold_with(folder)) + }).collect(); + let fields = if new_fields == fields { + fields + } else { + folder.tcx().alloc_name_const_slice(&new_fields) + }; + ConstVal::Aggregate(ConstAggregate::Struct(fields)) + } + ConstVal::Aggregate(ConstAggregate::Tuple(fields)) => { + let new_fields: Vec<_> = fields.iter().map(|v| { + v.fold_with(folder) + }).collect(); + let fields = if new_fields == fields { + fields + } else { + folder.tcx().alloc_const_slice(&new_fields) + }; + ConstVal::Aggregate(ConstAggregate::Tuple(fields)) + } + ConstVal::Aggregate(ConstAggregate::Array(fields)) => { + let new_fields: Vec<_> = fields.iter().map(|v| { + v.fold_with(folder) + }).collect(); + let fields = if new_fields == fields { + fields + } else { + folder.tcx().alloc_const_slice(&new_fields) + }; + ConstVal::Aggregate(ConstAggregate::Array(fields)) + } + ConstVal::Aggregate(ConstAggregate::Repeat(v, count)) => { + let v = v.fold_with(folder); + ConstVal::Aggregate(ConstAggregate::Repeat(v, count)) + } + ConstVal::Unevaluated(def_id, substs) => { + ConstVal::Unevaluated(def_id, substs.fold_with(folder)) + } + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + match *self { + ConstVal::Integral(_) | + ConstVal::Float(_) | + ConstVal::Str(_) | + ConstVal::ByteStr(_) | + ConstVal::Bool(_) | + ConstVal::Char(_) | + ConstVal::Variant(_) => false, + ConstVal::Function(_, substs) => substs.visit_with(visitor), + ConstVal::Aggregate(ConstAggregate::Struct(fields)) => { + fields.iter().any(|&(_, v)| v.visit_with(visitor)) + } + ConstVal::Aggregate(ConstAggregate::Tuple(fields)) | + ConstVal::Aggregate(ConstAggregate::Array(fields)) => { + fields.iter().any(|v| v.visit_with(visitor)) + } + ConstVal::Aggregate(ConstAggregate::Repeat(v, _)) => { + v.visit_with(visitor) + } + ConstVal::Unevaluated(_, substs) => substs.visit_with(visitor), + } + } +} + +impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Const<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + let ty = self.ty.fold_with(folder); + let val = self.val.fold_with(folder); + folder.tcx().mk_const(ty::Const { + ty, + val + }) + } + + fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + folder.fold_const(*self) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.ty.visit_with(visitor) || self.val.visit_with(visitor) + } + + fn visit_with>(&self, visitor: &mut V) -> bool { + visitor.visit_const(self) + } +} diff --git a/src/librustc/ty/sty.rs b/src/librustc/ty/sty.rs index 2e4be870e9..10e1286465 100644 --- a/src/librustc/ty/sty.rs +++ b/src/librustc/ty/sty.rs @@ -12,13 +12,13 @@ use hir::def_id::DefId; +use middle::const_val::ConstVal; use middle::region; use ty::subst::{Substs, Subst}; use ty::{self, AdtDef, TypeFlags, Ty, TyCtxt, TypeFoldable}; use ty::{Slice, TyS}; use ty::subst::Kind; -use std::fmt; use std::iter; use std::cmp::Ordering; use syntax::abi; @@ -77,20 +77,6 @@ impl BoundRegion { } } -/// When a region changed from late-bound to early-bound when #32330 -/// was fixed, its `RegionParameterDef` will have one of these -/// structures that we can use to give nicer errors. 
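The aggregate cases above fold every field but only re-intern the slice when something actually changed. A small sketch of that allocation-avoiding pattern follows, with a plain `Cow` standing in for arena interning (the real code calls `alloc_const_slice`/`alloc_name_const_slice`).

```rust
use std::borrow::Cow;

// Fold every element, but keep the original slice if nothing changed.
fn fold_fields<'a>(fields: &'a [u32], fold_one: &dyn Fn(u32) -> u32) -> Cow<'a, [u32]> {
    let new_fields: Vec<u32> = fields.iter().map(|&v| fold_one(v)).collect();
    if new_fields == fields {
        // Nothing changed: reuse the existing (already interned) slice.
        Cow::Borrowed(fields)
    } else {
        // Something changed: this is where rustc would intern a fresh slice.
        Cow::Owned(new_fields)
    }
}

fn main() {
    let fields = [1u32, 2, 3];
    assert!(matches!(fold_fields(&fields, &|v: u32| v), Cow::Borrowed(_)));
    assert!(matches!(fold_fields(&fields, &|v: u32| v + 1), Cow::Owned(_)));
}
```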
-#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, - RustcEncodable, RustcDecodable)] -pub struct Issue32330 { - /// fn where is region declared - pub fn_def_id: DefId, - - /// name of region; duplicates the info in BrNamed but convenient - /// to have it here, and this code is only temporary - pub region_name: ast::Name, -} - /// NB: If you change this, you'll probably want to change the corresponding /// AST structure in libsyntax/ast.rs as well. #[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] @@ -123,7 +109,7 @@ pub enum TypeVariants<'tcx> { TyStr, /// An array with the given length. Written as `[T; n]`. - TyArray(Ty<'tcx>, usize), + TyArray(Ty<'tcx>, &'tcx ty::Const<'tcx>), /// The pointee of an array slice. Written as `[T]`. TySlice(Ty<'tcx>), @@ -149,6 +135,10 @@ pub enum TypeVariants<'tcx> { /// `|a| a`. TyClosure(DefId, ClosureSubsts<'tcx>), + /// The anonymous type of a generator. Used to represent the type of + /// `|a| yield a`. + TyGenerator(DefId, ClosureSubsts<'tcx>, GeneratorInterior<'tcx>), + /// The never type `!` TyNever, @@ -223,8 +213,8 @@ pub enum TypeVariants<'tcx> { /// as extra type parameters? The reason for this design is that the /// upvar types can reference lifetimes that are internal to the /// creating function. In my example above, for example, the lifetime -/// `'b` represents the extent of the closure itself; this is some -/// subset of `foo`, probably just the extent of the call to the to +/// `'b` represents the scope of the closure itself; this is some +/// subset of `foo`, probably just the scope of the call to the to /// `do()`. If we just had the lifetime/type parameters from the /// enclosing function, we couldn't name this lifetime `'b`. Note that /// there can also be lifetimes in the types of the upvars themselves, @@ -275,6 +265,51 @@ impl<'a, 'gcx, 'acx, 'tcx> ClosureSubsts<'tcx> { } } +impl<'a, 'gcx, 'tcx> ClosureSubsts<'tcx> { + /// This returns the types of the MIR locals which had to be stored across suspension points. + /// It is calculated in rustc_mir::transform::generator::StateTransform. + /// All the types here must be in the tuple in GeneratorInterior. + pub fn state_tys(self, def_id: DefId, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> + impl Iterator> + 'a + { + let state = tcx.generator_layout(def_id).fields.iter(); + state.map(move |d| d.ty.subst(tcx, self.substs)) + } + + /// This is the types of all the fields stored in a generator. + /// It includes the upvars, state types and the state discriminant which is u32. + pub fn field_tys(self, def_id: DefId, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> + impl Iterator> + 'a + { + let upvars = self.upvar_tys(def_id, tcx); + let state = self.state_tys(def_id, tcx); + upvars.chain(iter::once(tcx.types.u32)).chain(state) + } +} + +/// This describes the types that can be contained in a generator. +/// It will be a type variable initially and unified in the last stages of typeck of a body. +/// It contains a tuple of all the types that could end up on a generator frame. +/// The state transformation MIR pass may only produce layouts which mention types in this tuple. +/// Upvars are not counted here. 
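As described in `field_tys` above, a generator frame is laid out as its captured upvars, then a `u32` state discriminant, then the locals saved across suspension points. The toy illustration below shows only that ordering, with type names as plain strings rather than real `Ty` values.

```rust
// Illustrative sketch of the `field_tys` layout: upvars, then the state
// discriminant, then the saved state types.
fn generator_field_tys<'a>(
    upvars: &'a [&'static str],
    state: &'a [&'static str],
) -> impl Iterator<Item = &'static str> + 'a {
    upvars
        .iter()
        .cloned()
        .chain(std::iter::once("u32")) // the state discriminant
        .chain(state.iter().cloned())
}

fn main() {
    let fields: Vec<_> =
        generator_field_tys(&["&mut Counter"], &["String", "usize"]).collect();
    assert_eq!(fields, ["&mut Counter", "u32", "String", "usize"]);
}
```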
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +pub struct GeneratorInterior<'tcx> { + pub witness: Ty<'tcx>, +} + +impl<'tcx> GeneratorInterior<'tcx> { + pub fn new(witness: Ty<'tcx>) -> GeneratorInterior<'tcx> { + GeneratorInterior { witness } + } + + pub fn as_slice(&self) -> &'tcx Slice> { + match self.witness.sty { + ty::TyTuple(s, _) => s, + _ => bug!(), + } + } +} + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub enum ExistentialPredicate<'tcx> { /// e.g. Iterator @@ -541,12 +576,6 @@ impl Binder { } } -impl fmt::Debug for TypeFlags { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:x}", self.bits) - } -} - /// Represents the projection of an associated type. In explicit UFCS /// form this would be written `>::N`. #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] @@ -567,9 +596,10 @@ impl<'a, 'tcx> ProjectionTy<'tcx> { pub fn from_ref_and_name( tcx: TyCtxt, trait_ref: ty::TraitRef<'tcx>, item_name: Name ) -> ProjectionTy<'tcx> { - let item_def_id = tcx.associated_items(trait_ref.def_id).find( - |item| item.name == item_name && item.kind == ty::AssociatedKind::Type - ).unwrap().def_id; + let item_def_id = tcx.associated_items(trait_ref.def_id).find(|item| { + item.kind == ty::AssociatedKind::Type && + tcx.hygienic_eq(item_name, item.name, trait_ref.def_id) + }).unwrap().def_id; ProjectionTy { substs: trait_ref.substs, @@ -593,6 +623,22 @@ impl<'a, 'tcx> ProjectionTy<'tcx> { } } +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct GenSig<'tcx> { + pub yield_ty: Ty<'tcx>, + pub return_ty: Ty<'tcx>, +} + +pub type PolyGenSig<'tcx> = Binder>; + +impl<'tcx> PolyGenSig<'tcx> { + pub fn yield_ty(&self) -> ty::Binder> { + self.map_bound_ref(|sig| sig.yield_ty) + } + pub fn return_ty(&self) -> ty::Binder> { + self.map_bound_ref(|sig| sig.return_ty) + } +} /// Signature of a function type, which I have arbitrarily /// decided to use to refer to the input/output types. @@ -794,10 +840,10 @@ pub enum RegionKind { /// region parameters. ReFree(FreeRegion), - /// A concrete region naming some statically determined extent + /// A concrete region naming some statically determined scope /// (e.g. an expression or sequence of statements) within the /// current function. - ReScope(region::CodeExtent), + ReScope(region::Scope), /// Static data that has an "infinite" lifetime. Top in the region lattice. ReStatic, @@ -990,19 +1036,6 @@ impl RegionKind { flags } - - // This method returns whether the given Region is Named - pub fn is_named_region(&self) -> bool { - match *self { - ty::ReFree(ref free_region) => { - match free_region.bound_region { - ty::BrNamed(..) => true, - _ => false, - } - } - _ => false, - } - } } /// Type utilities @@ -1393,7 +1426,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { TyAdt(_, substs) | TyAnon(_, substs) => { substs.regions().collect() } - TyClosure(_, ref substs) => { + TyClosure(_, ref substs) | TyGenerator(_, ref substs, _) => { substs.substs.regions().collect() } TyProjection(ref data) => { @@ -1420,3 +1453,14 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } } + +/// Typed constant value. +#[derive(Copy, Clone, Debug, Hash, RustcEncodable, RustcDecodable, Eq, PartialEq)] +pub struct Const<'tcx> { + pub ty: Ty<'tcx>, + + // FIXME(eddyb) Replace this with a miri value. 
+ pub val: ConstVal<'tcx>, +} + +impl<'tcx> serialize::UseSpecializedDecodable for &'tcx Const<'tcx> {} diff --git a/src/librustc/ty/trait_def.rs b/src/librustc/ty/trait_def.rs index 9990472c6b..e0b05c2ba3 100644 --- a/src/librustc/ty/trait_def.rs +++ b/src/librustc/ty/trait_def.rs @@ -11,13 +11,15 @@ use hir; use hir::def_id::DefId; use hir::map::DefPathHash; +use ich::{self, StableHashingContext}; use traits::specialization_graph; use ty::fast_reject; use ty::fold::TypeFoldable; use ty::{Ty, TyCtxt}; use rustc_data_structures::fx::FxHashMap; - +use rustc_data_structures::stable_hasher::{HashStable, StableHasher, + StableHasherResult}; use std::rc::Rc; /// A trait's definition with type information. @@ -141,13 +143,16 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub(super) fn trait_impls_of_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_id: DefId) -> Rc { - let remote_impls = if trait_id.is_local() { - // Traits defined in the current crate can't have impls in upstream - // crates, so we don't bother querying the cstore. - Vec::new() - } else { - tcx.sess.cstore.implementations_of_trait(Some(trait_id)) - }; + let mut remote_impls = Vec::new(); + + // Traits defined in the current crate can't have impls in upstream + // crates, so we don't bother querying the cstore. + if !trait_id.is_local() { + for &cnum in tcx.crates().iter() { + let impls = tcx.implementations_of_trait((cnum, trait_id)); + remote_impls.extend(impls.iter().cloned()); + } + } let mut blanket_impls = Vec::new(); let mut non_blanket_impls = FxHashMap(); @@ -180,3 +185,16 @@ pub(super) fn trait_impls_of_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, non_blanket_impls: non_blanket_impls, }) } + +impl<'gcx> HashStable> for TraitImpls { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + let TraitImpls { + ref blanket_impls, + ref non_blanket_impls, + } = *self; + + ich::hash_stable_trait_impls(hcx, hasher, blanket_impls, non_blanket_impls); + } +} diff --git a/src/librustc/ty/util.rs b/src/librustc/ty/util.rs index bbbb8611f9..c8037ce081 100644 --- a/src/librustc/ty/util.rs +++ b/src/librustc/ty/util.rs @@ -12,7 +12,8 @@ use hir::def_id::{DefId, LOCAL_CRATE}; use hir::map::DefPathData; -use ich::{StableHashingContext, NodeIdHashingMode}; +use ich::NodeIdHashingMode; +use middle::const_val::ConstVal; use traits::{self, Reveal}; use ty::{self, Ty, TyCtxt, TypeFoldable}; use ty::fold::TypeVisitor; @@ -27,6 +28,7 @@ use rustc_data_structures::stable_hasher::{StableHasher, StableHasherResult, HashStable}; use rustc_data_structures::fx::FxHashMap; use std::cmp; +use std::iter; use std::hash::Hash; use std::intrinsics; use syntax::ast::{self, Name}; @@ -52,7 +54,7 @@ macro_rules! typed_literal { SignedInt(ast::IntTy::I32) => ConstInt::I32($lit), SignedInt(ast::IntTy::I64) => ConstInt::I64($lit), SignedInt(ast::IntTy::I128) => ConstInt::I128($lit), - SignedInt(ast::IntTy::Is) => match $tcx.sess.target.int_type { + SignedInt(ast::IntTy::Is) => match $tcx.sess.target.isize_ty { ast::IntTy::I16 => ConstInt::Isize(ConstIsize::Is16($lit)), ast::IntTy::I32 => ConstInt::Isize(ConstIsize::Is32($lit)), ast::IntTy::I64 => ConstInt::Isize(ConstIsize::Is64($lit)), @@ -63,7 +65,7 @@ macro_rules! 
typed_literal { UnsignedInt(ast::UintTy::U32) => ConstInt::U32($lit), UnsignedInt(ast::UintTy::U64) => ConstInt::U64($lit), UnsignedInt(ast::UintTy::U128) => ConstInt::U128($lit), - UnsignedInt(ast::UintTy::Us) => match $tcx.sess.target.uint_type { + UnsignedInt(ast::UintTy::Us) => match $tcx.sess.target.usize_ty { ast::UintTy::U16 => ConstInt::Usize(ConstUsize::Us16($lit)), ast::UintTy::U32 => ConstInt::Usize(ConstUsize::Us32($lit)), ast::UintTy::U64 => ConstInt::Usize(ConstUsize::Us64($lit)), @@ -212,7 +214,7 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { /// context it's calculated within. This is used by the `type_id` intrinsic. pub fn type_id_hash(self, ty: Ty<'tcx>) -> u64 { let mut hasher = StableHasher::new(); - let mut hcx = StableHashingContext::new(self); + let mut hcx = self.create_stable_hashing_context(); // We want the type_id be independent of the types free regions, so we // erase them. The erase_regions() call will also anonymize bound @@ -387,7 +389,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { ty::Predicate::WellFormed(..) | ty::Predicate::ObjectSafe(..) | ty::Predicate::ClosureKind(..) | - ty::Predicate::RegionOutlives(..) => { + ty::Predicate::RegionOutlives(..) | + ty::Predicate::ConstEvaluatable(..) => { None } ty::Predicate::TypeOutlives(ty::Binder(ty::OutlivesPredicate(t, r))) => { @@ -417,7 +420,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { adt_did: DefId, validate: &mut FnMut(Self, DefId) -> Result<(), ErrorReported> ) -> Option { - let drop_trait = if let Some(def_id) = self.lang_items.drop_trait() { + let drop_trait = if let Some(def_id) = self.lang_items().drop_trait() { def_id } else { return None; @@ -512,11 +515,11 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { let result = item_substs.iter().zip(impl_substs.iter()) .filter(|&(_, &k)| { if let Some(&ty::RegionKind::ReEarlyBound(ref ebr)) = k.as_region() { - !impl_generics.region_param(ebr).pure_wrt_drop + !impl_generics.region_param(ebr, self).pure_wrt_drop } else if let Some(&ty::TyS { sty: ty::TypeVariants::TyParam(ref pt), .. }) = k.as_type() { - !impl_generics.type_param(pt).pure_wrt_drop + !impl_generics.type_param(pt, self).pure_wrt_drop } else { // not a type or region param - this should be reported // as an error. 
@@ -573,6 +576,12 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { }).collect() } + ty::TyGenerator(def_id, substs, interior) => { + substs.upvar_tys(def_id, self).chain(iter::once(interior.witness)).map(|ty| { + self.dtorck_constraint_for_ty(span, for_ty, depth+1, ty) + }).collect() + } + ty::TyAdt(def, substs) => { let ty::DtorckConstraint { dtorck_types, outlives @@ -631,7 +640,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } pub fn const_usize(&self, val: u16) -> ConstInt { - match self.sess.target.uint_type { + match self.sess.target.usize_ty { ast::UintTy::U16 => ConstInt::Usize(ConstUsize::Us16(val as u16)), ast::UintTy::U32 => ConstInt::Usize(ConstUsize::Us32(val as u32)), ast::UintTy::U64 => ConstInt::Usize(ConstUsize::Us64(val as u64)), @@ -690,10 +699,18 @@ impl<'a, 'gcx, 'tcx, W> TypeVisitor<'tcx> for TypeIdHasher<'a, 'gcx, 'tcx, W> TyInt(i) => self.hash(i), TyUint(u) => self.hash(u), TyFloat(f) => self.hash(f), - TyArray(_, n) => self.hash(n), + TyArray(_, n) => { + self.hash_discriminant_u8(&n.val); + match n.val { + ConstVal::Integral(x) => self.hash(x.to_u64().unwrap()), + ConstVal::Unevaluated(def_id, _) => self.def_id(def_id), + _ => bug!("arrays should not have {:?} as length", n) + } + } TyRawPtr(m) | TyRef(_, m) => self.hash(m.mutbl), TyClosure(def_id, _) | + TyGenerator(def_id, _, _) | TyAnon(def_id, _) | TyFnDef(def_id, _) => self.def_id(def_id), TyAdt(d, _) => self.def_id(d.did), @@ -1120,6 +1137,11 @@ fn needs_drop_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty::TyClosure(def_id, ref substs) => substs.upvar_tys(def_id, tcx).any(needs_drop), + // Pessimistically assume that all generators will require destructors + // as we don't know if a destructor is a noop or not until after the MIR + // state transformation pass + ty::TyGenerator(..) => true, + ty::TyTuple(ref tys, _) => tys.iter().cloned().any(needs_drop), // unions don't have destructors regardless of the child types diff --git a/src/librustc/ty/walk.rs b/src/librustc/ty/walk.rs index a7f0bafe9b..df07844cce 100644 --- a/src/librustc/ty/walk.rs +++ b/src/librustc/ty/walk.rs @@ -11,6 +11,7 @@ //! An iterator over the type substructure. //! WARNING: this does not keep track of the region depth. 
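The `TypeIdHasher` change above hashes a discriminant tag for the array-length constant before hashing either the integer value or the `DefId`, so the two cases cannot collide. A standalone sketch of that tagging scheme, using std's `DefaultHasher` in place of the compiler's stable hasher and a numeric id in place of a `DefId`:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

enum Length {
    Known(u64),
    Unevaluated(u64), // stand-in for a DefId
}

fn hash_length(h: &mut DefaultHasher, len: &Length) {
    match len {
        Length::Known(n) => {
            0u8.hash(h); // discriminant tag for the evaluated case
            n.hash(h);
        }
        Length::Unevaluated(def_id) => {
            1u8.hash(h); // different tag for the unevaluated case
            def_id.hash(h);
        }
    }
}

fn main() {
    let mut a = DefaultHasher::new();
    let mut b = DefaultHasher::new();
    hash_length(&mut a, &Length::Known(7));
    hash_length(&mut b, &Length::Unevaluated(7));
    // Same payload, different discriminant: the hashes differ.
    assert_ne!(a.finish(), b.finish());
}
```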
+use middle::const_val::{ConstVal, ConstAggregate}; use ty::{self, Ty}; use rustc_data_structures::small_vec::SmallVec; use rustc_data_structures::accumulate_vec::IntoIter as AccIntoIter; @@ -83,7 +84,11 @@ fn push_subtypes<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent_ty: Ty<'tcx>) { ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | ty::TyStr | ty::TyInfer(_) | ty::TyParam(_) | ty::TyNever | ty::TyError => { } - ty::TyArray(ty, _) | ty::TySlice(ty) => { + ty::TyArray(ty, len) => { + push_const(stack, len); + stack.push(ty); + } + ty::TySlice(ty) => { stack.push(ty); } ty::TyRawPtr(ref mt) | ty::TyRef(_, ref mt) => { @@ -112,19 +117,52 @@ fn push_subtypes<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent_ty: Ty<'tcx>) { ty::TyClosure(_, ref substs) => { stack.extend(substs.substs.types().rev()); } + ty::TyGenerator(_, ref substs, ref interior) => { + stack.extend(substs.substs.types().rev()); + stack.push(interior.witness); + } ty::TyTuple(ts, _) => { stack.extend(ts.iter().cloned().rev()); } ty::TyFnDef(_, substs) => { stack.extend(substs.types().rev()); } - ty::TyFnPtr(ft) => { - push_sig_subtypes(stack, ft); + ty::TyFnPtr(sig) => { + stack.push(sig.skip_binder().output()); + stack.extend(sig.skip_binder().inputs().iter().cloned().rev()); } } } -fn push_sig_subtypes<'tcx>(stack: &mut TypeWalkerStack<'tcx>, sig: ty::PolyFnSig<'tcx>) { - stack.push(sig.skip_binder().output()); - stack.extend(sig.skip_binder().inputs().iter().cloned().rev()); +fn push_const<'tcx>(stack: &mut TypeWalkerStack<'tcx>, constant: &'tcx ty::Const<'tcx>) { + match constant.val { + ConstVal::Integral(_) | + ConstVal::Float(_) | + ConstVal::Str(_) | + ConstVal::ByteStr(_) | + ConstVal::Bool(_) | + ConstVal::Char(_) | + ConstVal::Variant(_) => {} + ConstVal::Function(_, substs) => { + stack.extend(substs.types().rev()); + } + ConstVal::Aggregate(ConstAggregate::Struct(fields)) => { + for &(_, v) in fields.iter().rev() { + push_const(stack, v); + } + } + ConstVal::Aggregate(ConstAggregate::Tuple(fields)) | + ConstVal::Aggregate(ConstAggregate::Array(fields)) => { + for v in fields.iter().rev() { + push_const(stack, v); + } + } + ConstVal::Aggregate(ConstAggregate::Repeat(v, _)) => { + push_const(stack, v); + } + ConstVal::Unevaluated(_, substs) => { + stack.extend(substs.types().rev()); + } + } + stack.push(constant.ty); } diff --git a/src/librustc/ty/wf.rs b/src/librustc/ty/wf.rs index 6d9e648452..41e27fca3f 100644 --- a/src/librustc/ty/wf.rs +++ b/src/librustc/ty/wf.rs @@ -9,6 +9,7 @@ // except according to those terms. use hir::def_id::DefId; +use middle::const_val::{ConstVal, ConstAggregate}; use infer::InferCtxt; use ty::subst::Substs; use traits; @@ -101,6 +102,14 @@ pub fn predicate_obligations<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, wf.compute(data.skip_binder().a); // (*) wf.compute(data.skip_binder().b); // (*) } + ty::Predicate::ConstEvaluatable(def_id, substs) => { + let obligations = wf.nominal_obligations(def_id, substs); + wf.out.extend(obligations); + + for ty in substs.types() { + wf.compute(ty); + } + } } wf.normalize() @@ -207,6 +216,46 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { } } + /// Pushes the obligations required for a constant value to be WF + /// into `self.out`. 
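`push_subtypes`/`push_const` above drive a stack-based walk, pushing children in reverse so popping yields them in source order. The miniature below reproduces that worklist pattern in isolation; the three-variant `Ty` is illustrative only.

```rust
#[derive(Clone, Debug, PartialEq)]
enum Ty {
    Bool,
    Array(Box<Ty>, usize),
    Tuple(Vec<Ty>),
}

// Push the children of `parent` onto the stack, reversed so that popping
// visits them left to right.
fn push_subtypes(stack: &mut Vec<Ty>, parent: &Ty) {
    match parent {
        Ty::Bool => {}
        Ty::Array(elem, _len) => stack.push((**elem).clone()),
        Ty::Tuple(tys) => stack.extend(tys.iter().cloned().rev()),
    }
}

fn walk(root: &Ty) -> Vec<Ty> {
    let mut stack = vec![root.clone()];
    let mut visited = Vec::new();
    while let Some(ty) = stack.pop() {
        push_subtypes(&mut stack, &ty);
        visited.push(ty);
    }
    visited
}

fn main() {
    let ty = Ty::Tuple(vec![Ty::Bool, Ty::Array(Box::new(Ty::Bool), 4)]);
    // The tuple is visited first, then its elements in source order.
    assert_eq!(walk(&ty).len(), 4);
}
```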
+ fn compute_const(&mut self, constant: &'tcx ty::Const<'tcx>) { + self.require_sized(constant.ty, traits::ConstSized); + match constant.val { + ConstVal::Integral(_) | + ConstVal::Float(_) | + ConstVal::Str(_) | + ConstVal::ByteStr(_) | + ConstVal::Bool(_) | + ConstVal::Char(_) | + ConstVal::Variant(_) | + ConstVal::Function(..) => {} + ConstVal::Aggregate(ConstAggregate::Struct(fields)) => { + for &(_, v) in fields { + self.compute_const(v); + } + } + ConstVal::Aggregate(ConstAggregate::Tuple(fields)) | + ConstVal::Aggregate(ConstAggregate::Array(fields)) => { + for v in fields { + self.compute_const(v); + } + } + ConstVal::Aggregate(ConstAggregate::Repeat(v, _)) => { + self.compute_const(v); + } + ConstVal::Unevaluated(def_id, substs) => { + let obligations = self.nominal_obligations(def_id, substs); + self.out.extend(obligations); + + let predicate = ty::Predicate::ConstEvaluatable(def_id, substs); + let cause = self.cause(traits::MiscObligation); + self.out.push(traits::Obligation::new(cause, + self.param_env, + predicate)); + } + } + } + fn require_sized(&mut self, subty: Ty<'tcx>, cause: traits::ObligationCauseCode<'tcx>) { if !subty.has_escaping_regions() { let cause = self.cause(cause); @@ -239,9 +288,14 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { // WfScalar, WfParameter, etc } - ty::TySlice(subty) | - ty::TyArray(subty, _) => { + ty::TySlice(subty) => { + self.require_sized(subty, traits::SliceOrArrayElem); + } + + ty::TyArray(subty, len) => { self.require_sized(subty, traits::SliceOrArrayElem); + assert_eq!(len.ty, self.infcx.tcx.types.usize); + self.compute_const(len); } ty::TyTuple(ref tys, _) => { @@ -281,8 +335,8 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { } } - ty::TyClosure(..) => { - // the types in a closure are always the types of + ty::TyGenerator(..) | ty::TyClosure(..) => { + // the types in a closure or generator are always the types of // local variables (or possibly references to local // variables), we'll walk those. // diff --git a/src/librustc/util/common.rs b/src/librustc/util/common.rs index 618a4ed331..9e566d2b90 100644 --- a/src/librustc/util/common.rs +++ b/src/librustc/util/common.rs @@ -20,7 +20,7 @@ use std::path::Path; use std::time::{Duration, Instant}; use std::sync::mpsc::{Sender}; -use syntax_pos::{Span}; +use syntax_pos::{SpanData}; use ty::maps::{QueryMsg}; use dep_graph::{DepNode}; @@ -61,7 +61,8 @@ pub enum ProfileQueriesMsg { /// end a task TaskEnd, /// begin a new query - QueryBegin(Span, QueryMsg), + /// can't use `Span` because queries are sent to other thread + QueryBegin(SpanData, QueryMsg), /// query is satisfied by using an already-known value for the given key CacheHit, /// query requires running a provider; providers may nest, permitting queries to nest. diff --git a/src/librustc/util/ppaux.rs b/src/librustc/util/ppaux.rs index 184fd75135..214973e308 100644 --- a/src/librustc/util/ppaux.rs +++ b/src/librustc/util/ppaux.rs @@ -8,16 +8,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
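`compute_const` above walks a constant value and, for anything still unevaluated, records a `ConstEvaluatable` obligation. A rough standalone sketch of that traversal follows, with obligation strings and a toy `ConstValue` tree standing in for the real predicate machinery.

```rust
enum ConstValue {
    Int(i64),
    Tuple(Vec<ConstValue>),
    Unevaluated(u32), // stand-in for (DefId, Substs)
}

// Collect the well-formedness obligations implied by a constant value.
fn compute_const(value: &ConstValue, out: &mut Vec<String>) {
    match value {
        ConstValue::Int(_) => {}
        ConstValue::Tuple(fields) => {
            for field in fields {
                compute_const(field, out);
            }
        }
        ConstValue::Unevaluated(id) => {
            // The constant must be evaluatable for the enclosing item to be WF.
            out.push(format!("ConstEvaluatable({})", id));
        }
    }
}

fn main() {
    let value = ConstValue::Tuple(vec![ConstValue::Int(1), ConstValue::Unevaluated(42)]);
    let mut obligations = Vec::new();
    compute_const(&value, &mut obligations);
    assert_eq!(obligations, ["ConstEvaluatable(42)"]);
}
```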
-use hir::BodyId; use hir::def_id::DefId; use hir::map::definitions::DefPathData; -use middle::region::{CodeExtent, BlockRemainder}; +use middle::const_val::ConstVal; +use middle::region::{self, BlockRemainder}; use ty::subst::{self, Subst}; use ty::{BrAnon, BrEnv, BrFresh, BrNamed}; use ty::{TyBool, TyChar, TyAdt}; use ty::{TyError, TyStr, TyArray, TySlice, TyFloat, TyFnDef, TyFnPtr}; use ty::{TyParam, TyRawPtr, TyRef, TyNever, TyTuple}; -use ty::{TyClosure, TyProjection, TyAnon}; +use ty::{TyClosure, TyGenerator, TyProjection, TyAnon}; use ty::{TyDynamic, TyInt, TyUint, TyInfer}; use ty::{self, Ty, TyCtxt, TypeFoldable}; @@ -25,6 +25,8 @@ use std::cell::Cell; use std::fmt; use std::usize; +use rustc_const_math::ConstInt; +use rustc_data_structures::indexed_vec::Idx; use syntax::abi::Abi; use syntax::ast::CRATE_NODE_ID; use syntax::symbol::Symbol; @@ -160,7 +162,7 @@ pub fn parameterized(f: &mut fmt::Formatter, } write!(f, "{}", tcx.item_path_str(path_def_id))?; - Ok(tcx.lang_items.fn_trait_kind(path_def_id)) + Ok(tcx.lang_items().fn_trait_kind(path_def_id)) })?; if !verbose && fn_trait_kind.is_some() && projections.len() == 1 { @@ -429,6 +431,9 @@ impl<'tcx> fmt::Debug for ty::Predicate<'tcx> { ty::Predicate::ClosureKind(closure_def_id, kind) => { write!(f, "ClosureKind({:?}, {:?})", closure_def_id, kind) } + ty::Predicate::ConstEvaluatable(def_id, substs) => { + write!(f, "ConstEvaluatable({:?}, {:?})", def_id, substs) + } } } } @@ -525,18 +530,18 @@ impl fmt::Display for ty::RegionKind { ty::ReSkolemized(_, br) => { write!(f, "{}", br) } - ty::ReScope(code_extent) if identify_regions() => { - match code_extent { - CodeExtent::Misc(node_id) => - write!(f, "'{}mce", node_id.as_u32()), - CodeExtent::CallSiteScope(BodyId { node_id }) => - write!(f, "'{}cce", node_id.as_u32()), - CodeExtent::ParameterScope(BodyId { node_id }) => - write!(f, "'{}pce", node_id.as_u32()), - CodeExtent::DestructionScope(node_id) => - write!(f, "'{}dce", node_id.as_u32()), - CodeExtent::Remainder(BlockRemainder { block, first_statement_index }) => - write!(f, "'{}_{}rce", block, first_statement_index), + ty::ReScope(scope) if identify_regions() => { + match scope.data() { + region::ScopeData::Node(id) => + write!(f, "'{}s", id.as_usize()), + region::ScopeData::CallSite(id) => + write!(f, "'{}cs", id.as_usize()), + region::ScopeData::Arguments(id) => + write!(f, "'{}as", id.as_usize()), + region::ScopeData::Destruction(id) => + write!(f, "'{}ds", id.as_usize()), + region::ScopeData::Remainder(BlockRemainder { block, first_statement_index }) => + write!(f, "'{}_{}rs", block.as_usize(), first_statement_index.index()), } } ty::ReVar(region_vid) if identify_regions() => { @@ -672,6 +677,12 @@ impl<'tcx> fmt::Display for ty::Binder> { } } +impl<'tcx> fmt::Display for ty::Binder> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + ty::tls::with(|tcx| in_binder(f, tcx, self, tcx.lift(self))) + } +} + impl<'tcx> fmt::Display for ty::Binder> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ty::tls::with(|tcx| in_binder(f, tcx, self, tcx.lift(self))) @@ -715,6 +726,12 @@ impl<'tcx> fmt::Display for ty::TraitRef<'tcx> { } } +impl<'tcx> fmt::Display for ty::GeneratorInterior<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.witness.fmt(f) + } +} + impl<'tcx> fmt::Display for ty::TypeVariants<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { @@ -797,7 +814,7 @@ impl<'tcx> fmt::Display for ty::TypeVariants<'tcx> { for predicate in bounds.predicates { if let 
Some(trait_ref) = predicate.to_opt_poly_trait_ref() { // Don't print +Sized, but rather +?Sized if absent. - if Some(trait_ref.def_id()) == tcx.lang_items.sized_trait() { + if Some(trait_ref.def_id()) == tcx.lang_items().sized_trait() { is_sized = true; continue; } @@ -813,6 +830,39 @@ impl<'tcx> fmt::Display for ty::TypeVariants<'tcx> { }) } TyStr => write!(f, "str"), + TyGenerator(did, substs, interior) => ty::tls::with(|tcx| { + let upvar_tys = substs.upvar_tys(did, tcx); + write!(f, "[generator")?; + + if let Some(node_id) = tcx.hir.as_local_node_id(did) { + write!(f, "@{:?}", tcx.hir.span(node_id))?; + let mut sep = " "; + tcx.with_freevars(node_id, |freevars| { + for (freevar, upvar_ty) in freevars.iter().zip(upvar_tys) { + write!(f, + "{}{}:{}", + sep, + tcx.hir.name(freevar.var_id()), + upvar_ty)?; + sep = ", "; + } + Ok(()) + })? + } else { + // cross-crate closure types should only be + // visible in trans bug reports, I imagine. + write!(f, "@{:?}", did)?; + let mut sep = " "; + for (index, upvar_ty) in upvar_tys.enumerate() { + write!(f, "{}{}:{}", sep, index, upvar_ty)?; + sep = ", "; + } + } + + write!(f, " {}", interior)?; + + write!(f, "]") + }), TyClosure(did, substs) => ty::tls::with(|tcx| { let upvar_tys = substs.upvar_tys(did, tcx); write!(f, "[closure")?; @@ -826,12 +876,10 @@ impl<'tcx> fmt::Display for ty::TypeVariants<'tcx> { let mut sep = " "; tcx.with_freevars(node_id, |freevars| { for (freevar, upvar_ty) in freevars.iter().zip(upvar_tys) { - let def_id = freevar.def.def_id(); - let node_id = tcx.hir.as_local_node_id(def_id).unwrap(); write!(f, "{}{}:{}", sep, - tcx.local_var_name_str(node_id), + tcx.hir.name(freevar.var_id()), upvar_ty)?; sep = ", "; } @@ -850,7 +898,21 @@ impl<'tcx> fmt::Display for ty::TypeVariants<'tcx> { write!(f, "]") }), - TyArray(ty, sz) => write!(f, "[{}; {}]", ty, sz), + TyArray(ty, sz) => { + write!(f, "[{}; ", ty)?; + match sz.val { + ConstVal::Integral(ConstInt::Usize(sz)) => { + write!(f, "{}", sz)?; + } + ConstVal::Unevaluated(_def_id, substs) => { + write!(f, "", &substs[..])?; + } + _ => { + write!(f, "{:?}", sz)?; + } + } + write!(f, "]") + } TySlice(ty) => write!(f, "[{}]", ty) } } @@ -866,7 +928,7 @@ impl fmt::Debug for ty::UpvarId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "UpvarId({:?};`{}`;{:?})", self.var_id, - ty::tls::with(|tcx| tcx.local_var_name_str_def_index(self.var_id)), + ty::tls::with(|tcx| tcx.hir.name(tcx.hir.hir_to_node_id(self.var_id))), self.closure_expr_id) } } @@ -999,6 +1061,11 @@ impl<'tcx> fmt::Display for ty::Predicate<'tcx> { write!(f, "the closure `{}` implements the trait `{}`", tcx.item_path_str(closure_def_id), kind) }), + ty::Predicate::ConstEvaluatable(def_id, substs) => { + write!(f, "the constant `")?; + parameterized(f, substs, def_id, &[])?; + write!(f, "` can be evaluated") + } } } } diff --git a/src/librustc_allocator/expand.rs b/src/librustc_allocator/expand.rs index f99c6a29ff..eafb4c5c80 100644 --- a/src/librustc_allocator/expand.rs +++ b/src/librustc_allocator/expand.rs @@ -82,10 +82,7 @@ impl<'a> Folder for ExpandAllocatorDirectives<'a> { allow_internal_unsafe: false, } }); - let span = Span { - ctxt: SyntaxContext::empty().apply_mark(mark), - ..item.span - }; + let span = item.span.with_ctxt(SyntaxContext::empty().apply_mark(mark)); let ecfg = ExpansionConfig::default(name.to_string()); let mut f = AllocFnFactory { span, diff --git a/src/librustc_apfloat/Cargo.toml b/src/librustc_apfloat/Cargo.toml index b8f8488e30..735b74f156 100644 --- 
a/src/librustc_apfloat/Cargo.toml +++ b/src/librustc_apfloat/Cargo.toml @@ -8,4 +8,5 @@ name = "rustc_apfloat" path = "lib.rs" [dependencies] -rustc_bitflags = { path = "../librustc_bitflags" } +bitflags = "1.0" +rustc_cratesio_shim = { path = "../librustc_cratesio_shim" } diff --git a/src/librustc_apfloat/lib.rs b/src/librustc_apfloat/lib.rs index d4a0206576..9e3e622e25 100644 --- a/src/librustc_apfloat/lib.rs +++ b/src/librustc_apfloat/lib.rs @@ -45,18 +45,25 @@ #![deny(warnings)] #![forbid(unsafe_code)] -#![feature(const_fn)] #![feature(i128_type)] #![feature(slice_patterns)] #![feature(try_from)] +#![cfg_attr(stage0, feature(const_fn))] +#![cfg_attr(not(stage0), feature(const_min_value))] +#![cfg_attr(not(stage0), feature(const_max_value))] + +// See librustc_cratesio_shim/Cargo.toml for a comment explaining this. +#[allow(unused_extern_crates)] +extern crate rustc_cratesio_shim; + #[macro_use] -extern crate rustc_bitflags; +extern crate bitflags; use std::cmp::Ordering; use std::fmt; use std::ops::{Neg, Add, Sub, Mul, Div, Rem}; -use std::ops::{AddAssign, SubAssign, MulAssign, DivAssign, RemAssign, BitOrAssign}; +use std::ops::{AddAssign, SubAssign, MulAssign, DivAssign, RemAssign}; use std::str::FromStr; bitflags! { @@ -64,20 +71,13 @@ bitflags! { /// /// UNDERFLOW or OVERFLOW are always returned or-ed with INEXACT. #[must_use] - #[derive(Debug)] - flags Status: u8 { - const OK = 0x00, - const INVALID_OP = 0x01, - const DIV_BY_ZERO = 0x02, - const OVERFLOW = 0x04, - const UNDERFLOW = 0x08, - const INEXACT = 0x10 - } -} - -impl BitOrAssign for Status { - fn bitor_assign(&mut self, rhs: Self) { - *self = *self | rhs; + pub struct Status: u8 { + const OK = 0x00; + const INVALID_OP = 0x01; + const DIV_BY_ZERO = 0x02; + const OVERFLOW = 0x04; + const UNDERFLOW = 0x08; + const INEXACT = 0x10; } } diff --git a/src/librustc_back/README.md b/src/librustc_back/README.md new file mode 100644 index 0000000000..bd99c687bb --- /dev/null +++ b/src/librustc_back/README.md @@ -0,0 +1,6 @@ +NB: This crate is part of the Rust compiler. For an overview of the +compiler as a whole, see +[the README.md file found in `librustc`](../librustc/README.md). + +`librustc_back` contains some very low-level details that are +specific to different LLVM targets and so forth. 
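The `librustc_apfloat` hunk migrates from `rustc_bitflags` to the bitflags 1.0 declaration style, which also makes the hand-written `BitOrAssign` impl unnecessary. A minimal example of the new syntax, assuming `bitflags = "1.0"` is declared as a dependency:

```rust
#[macro_use]
extern crate bitflags;

bitflags! {
    // Struct-style declaration with `;`-terminated constants, as in the patch.
    #[must_use]
    pub struct Status: u8 {
        const OK          = 0x00;
        const INVALID_OP  = 0x01;
        const DIV_BY_ZERO = 0x02;
        const OVERFLOW    = 0x04;
        const UNDERFLOW   = 0x08;
        const INEXACT     = 0x10;
    }
}

fn main() {
    let mut status = Status::OK;
    status |= Status::OVERFLOW; // provided by the generated BitOrAssign impl
    status |= Status::INEXACT;
    assert!(status.contains(Status::OVERFLOW | Status::INEXACT));
}
```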
diff --git a/src/librustc_back/target/aarch64_apple_ios.rs b/src/librustc_back/target/aarch64_apple_ios.rs index 802a8c77db..cff6eb534b 100644 --- a/src/librustc_back/target/aarch64_apple_ios.rs +++ b/src/librustc_back/target/aarch64_apple_ios.rs @@ -18,6 +18,7 @@ pub fn target() -> TargetResult { llvm_target: "arm64-apple-ios".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".to_string(), arch: "aarch64".to_string(), target_os: "ios".to_string(), diff --git a/src/librustc_back/target/aarch64_linux_android.rs b/src/librustc_back/target/aarch64_linux_android.rs index 7d8610b4a3..2c0d6a55ed 100644 --- a/src/librustc_back/target/aarch64_linux_android.rs +++ b/src/librustc_back/target/aarch64_linux_android.rs @@ -24,6 +24,7 @@ pub fn target() -> TargetResult { llvm_target: "aarch64-linux-android".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(), arch: "aarch64".to_string(), target_os: "android".to_string(), diff --git a/src/librustc_back/target/aarch64_unknown_freebsd.rs b/src/librustc_back/target/aarch64_unknown_freebsd.rs index c5427a13e4..1ce8d600c0 100644 --- a/src/librustc_back/target/aarch64_unknown_freebsd.rs +++ b/src/librustc_back/target/aarch64_unknown_freebsd.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: "aarch64-unknown-freebsd".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(), arch: "aarch64".to_string(), target_os: "freebsd".to_string(), diff --git a/src/librustc_back/target/aarch64_unknown_fuchsia.rs b/src/librustc_back/target/aarch64_unknown_fuchsia.rs index 5d680504a0..73cd9c9270 100644 --- a/src/librustc_back/target/aarch64_unknown_fuchsia.rs +++ b/src/librustc_back/target/aarch64_unknown_fuchsia.rs @@ -19,6 +19,7 @@ pub fn target() -> TargetResult { llvm_target: "aarch64-unknown-fuchsia".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(), arch: "aarch64".to_string(), target_os: "fuchsia".to_string(), diff --git a/src/librustc_back/target/aarch64_unknown_linux_gnu.rs b/src/librustc_back/target/aarch64_unknown_linux_gnu.rs index 7c2c45a284..5c9c9a0c55 100644 --- a/src/librustc_back/target/aarch64_unknown_linux_gnu.rs +++ b/src/librustc_back/target/aarch64_unknown_linux_gnu.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: "aarch64-unknown-linux-gnu".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), target_env: "gnu".to_string(), data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(), arch: "aarch64".to_string(), diff --git a/src/librustc_back/target/aarch64_unknown_linux_musl.rs b/src/librustc_back/target/aarch64_unknown_linux_musl.rs new file mode 100644 index 0000000000..d39ad97bbc --- /dev/null +++ b/src/librustc_back/target/aarch64_unknown_linux_musl.rs @@ -0,0 +1,37 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use LinkerFlavor; +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + let mut base = super::linux_musl_base::opts(); + base.max_atomic_width = Some(128); + + // see #36994 + base.exe_allocation_crate = None; + + Ok(Target { + llvm_target: "aarch64-unknown-linux-musl".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), + target_env: "musl".to_string(), + data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(), + arch: "aarch64".to_string(), + target_os: "linux".to_string(), + target_vendor: "unknown".to_string(), + linker_flavor: LinkerFlavor::Gcc, + options: TargetOptions { + abi_blacklist: super::arm_base::abi_blacklist(), + .. base + }, + }) +} diff --git a/src/librustc_back/target/arm_linux_androideabi.rs b/src/librustc_back/target/arm_linux_androideabi.rs index 6bfe90af2c..ba21b1df03 100644 --- a/src/librustc_back/target/arm_linux_androideabi.rs +++ b/src/librustc_back/target/arm_linux_androideabi.rs @@ -14,13 +14,14 @@ use target::{Target, TargetOptions, TargetResult}; pub fn target() -> TargetResult { let mut base = super::android_base::opts(); // https://developer.android.com/ndk/guides/abis.html#armeabi - base.features = "+v5te".to_string(); + base.features = "+strict-align,+v5te".to_string(); base.max_atomic_width = Some(64); Ok(Target { llvm_target: "arm-linux-androideabi".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), arch: "arm".to_string(), target_os: "android".to_string(), diff --git a/src/librustc_back/target/arm_unknown_linux_gnueabi.rs b/src/librustc_back/target/arm_unknown_linux_gnueabi.rs index 165d34fe6c..e630376a67 100644 --- a/src/librustc_back/target/arm_unknown_linux_gnueabi.rs +++ b/src/librustc_back/target/arm_unknown_linux_gnueabi.rs @@ -18,6 +18,7 @@ pub fn target() -> TargetResult { llvm_target: "arm-unknown-linux-gnueabi".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), arch: "arm".to_string(), target_os: "linux".to_string(), @@ -26,7 +27,7 @@ pub fn target() -> TargetResult { linker_flavor: LinkerFlavor::Gcc, options: TargetOptions { - features: "+v6".to_string(), + features: "+strict-align,+v6".to_string(), abi_blacklist: super::arm_base::abi_blacklist(), .. 
base }, diff --git a/src/librustc_back/target/arm_unknown_linux_gnueabihf.rs b/src/librustc_back/target/arm_unknown_linux_gnueabihf.rs index 731021d979..178a948b2b 100644 --- a/src/librustc_back/target/arm_unknown_linux_gnueabihf.rs +++ b/src/librustc_back/target/arm_unknown_linux_gnueabihf.rs @@ -18,6 +18,7 @@ pub fn target() -> TargetResult { llvm_target: "arm-unknown-linux-gnueabihf".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), arch: "arm".to_string(), target_os: "linux".to_string(), @@ -26,7 +27,7 @@ pub fn target() -> TargetResult { linker_flavor: LinkerFlavor::Gcc, options: TargetOptions { - features: "+v6,+vfp2".to_string(), + features: "+strict-align,+v6,+vfp2".to_string(), abi_blacklist: super::arm_base::abi_blacklist(), .. base } diff --git a/src/librustc_back/target/arm_unknown_linux_musleabi.rs b/src/librustc_back/target/arm_unknown_linux_musleabi.rs index f81bcd78b0..29720ec5ef 100644 --- a/src/librustc_back/target/arm_unknown_linux_musleabi.rs +++ b/src/librustc_back/target/arm_unknown_linux_musleabi.rs @@ -16,7 +16,7 @@ pub fn target() -> TargetResult { // Most of these settings are copied from the arm_unknown_linux_gnueabi // target. - base.features = "+v6".to_string(); + base.features = "+strict-align,+v6".to_string(); base.max_atomic_width = Some(64); Ok(Target { // It's important we use "gnueabi" and not "musleabi" here. LLVM uses it @@ -25,6 +25,7 @@ pub fn target() -> TargetResult { llvm_target: "arm-unknown-linux-gnueabi".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), arch: "arm".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/arm_unknown_linux_musleabihf.rs b/src/librustc_back/target/arm_unknown_linux_musleabihf.rs index 6c47678ede..fc8313877f 100644 --- a/src/librustc_back/target/arm_unknown_linux_musleabihf.rs +++ b/src/librustc_back/target/arm_unknown_linux_musleabihf.rs @@ -16,7 +16,7 @@ pub fn target() -> TargetResult { // Most of these settings are copied from the arm_unknown_linux_gnueabihf // target. - base.features = "+v6,+vfp2".to_string(); + base.features = "+strict-align,+v6,+vfp2".to_string(); base.max_atomic_width = Some(64); Ok(Target { // It's important we use "gnueabihf" and not "musleabihf" here. 
LLVM @@ -25,6 +25,7 @@ pub fn target() -> TargetResult { llvm_target: "arm-unknown-linux-gnueabihf".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), arch: "arm".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/armv5te_unknown_linux_gnueabi.rs b/src/librustc_back/target/armv5te_unknown_linux_gnueabi.rs index ef00c9a327..97397ca496 100644 --- a/src/librustc_back/target/armv5te_unknown_linux_gnueabi.rs +++ b/src/librustc_back/target/armv5te_unknown_linux_gnueabi.rs @@ -17,6 +17,7 @@ pub fn target() -> TargetResult { llvm_target: "armv5te-unknown-linux-gnueabi".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), arch: "arm".to_string(), target_os: "linux".to_string(), @@ -26,8 +27,12 @@ pub fn target() -> TargetResult { options: TargetOptions { features: "+soft-float,+strict-align".to_string(), - // No atomic instructions on ARMv5 - max_atomic_width: Some(0), + + // Atomic operations provided when linked with libgcc. + // FIXME: If the following PR is merged, the atomic operations would be + // provided by compiler-builtins instead with no change of behavior: + // https://github.com/rust-lang-nursery/compiler-builtins/pull/115/files + max_atomic_width: Some(32), abi_blacklist: super::arm_base::abi_blacklist(), .. base } diff --git a/src/librustc_back/target/armv7_apple_ios.rs b/src/librustc_back/target/armv7_apple_ios.rs index 4d87458283..67d3d12fb5 100644 --- a/src/librustc_back/target/armv7_apple_ios.rs +++ b/src/librustc_back/target/armv7_apple_ios.rs @@ -18,6 +18,7 @@ pub fn target() -> TargetResult { llvm_target: "armv7-apple-ios".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".to_string(), arch: "arm".to_string(), target_os: "ios".to_string(), diff --git a/src/librustc_back/target/armv7_linux_androideabi.rs b/src/librustc_back/target/armv7_linux_androideabi.rs index b49b1d1c21..9e3eec13ab 100644 --- a/src/librustc_back/target/armv7_linux_androideabi.rs +++ b/src/librustc_back/target/armv7_linux_androideabi.rs @@ -16,7 +16,7 @@ use target::{Target, TargetOptions, TargetResult}; pub fn target() -> TargetResult { let mut base = super::android_base::opts(); - base.features = "+v7,+thumb2,+vfp3,+d16,-neon".to_string(); + base.features = "+v7,+thumb-mode,+thumb2,+vfp3,+d16,-neon".to_string(); base.max_atomic_width = Some(64); base.pre_link_args .get_mut(&LinkerFlavor::Gcc).unwrap().push("-march=armv7-a".to_string()); @@ -25,6 +25,7 @@ pub fn target() -> TargetResult { llvm_target: "armv7-none-linux-android".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), arch: "arm".to_string(), target_os: "android".to_string(), diff --git a/src/librustc_back/target/armv7_unknown_linux_gnueabihf.rs b/src/librustc_back/target/armv7_unknown_linux_gnueabihf.rs index d3a6a68449..569c721473 100644 --- a/src/librustc_back/target/armv7_unknown_linux_gnueabihf.rs +++ b/src/librustc_back/target/armv7_unknown_linux_gnueabihf.rs @@ -17,6 +17,7 @@ pub fn target() -> 
TargetResult { llvm_target: "armv7-unknown-linux-gnueabihf".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), arch: "arm".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/armv7_unknown_linux_musleabihf.rs b/src/librustc_back/target/armv7_unknown_linux_musleabihf.rs index 5086cd44f7..a36e26c0b7 100644 --- a/src/librustc_back/target/armv7_unknown_linux_musleabihf.rs +++ b/src/librustc_back/target/armv7_unknown_linux_musleabihf.rs @@ -26,6 +26,7 @@ pub fn target() -> TargetResult { llvm_target: "armv7-unknown-linux-gnueabihf".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), arch: "arm".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/armv7s_apple_ios.rs b/src/librustc_back/target/armv7s_apple_ios.rs index 96c89a7ed3..e4cc89ab21 100644 --- a/src/librustc_back/target/armv7s_apple_ios.rs +++ b/src/librustc_back/target/armv7s_apple_ios.rs @@ -18,6 +18,7 @@ pub fn target() -> TargetResult { llvm_target: "armv7s-apple-ios".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".to_string(), arch: "arm".to_string(), target_os: "ios".to_string(), diff --git a/src/librustc_back/target/asmjs_unknown_emscripten.rs b/src/librustc_back/target/asmjs_unknown_emscripten.rs index b884d4e541..033e840f20 100644 --- a/src/librustc_back/target/asmjs_unknown_emscripten.rs +++ b/src/librustc_back/target/asmjs_unknown_emscripten.rs @@ -38,6 +38,7 @@ pub fn target() -> Result { llvm_target: "asmjs-unknown-emscripten".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), target_os: "emscripten".to_string(), target_env: "".to_string(), target_vendor: "unknown".to_string(), diff --git a/src/librustc_back/target/i386_apple_ios.rs b/src/librustc_back/target/i386_apple_ios.rs index 0e4e690002..82eae1a31a 100644 --- a/src/librustc_back/target/i386_apple_ios.rs +++ b/src/librustc_back/target/i386_apple_ios.rs @@ -18,6 +18,7 @@ pub fn target() -> TargetResult { llvm_target: "i386-apple-ios".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128".to_string(), arch: "x86".to_string(), target_os: "ios".to_string(), diff --git a/src/librustc_back/target/i686_apple_darwin.rs b/src/librustc_back/target/i686_apple_darwin.rs index 8c931f1841..14937f9aa5 100644 --- a/src/librustc_back/target/i686_apple_darwin.rs +++ b/src/librustc_back/target/i686_apple_darwin.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: "i686-apple-darwin".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128".to_string(), arch: "x86".to_string(), target_os: "macos".to_string(), diff --git a/src/librustc_back/target/i686_linux_android.rs b/src/librustc_back/target/i686_linux_android.rs index 565fbe37bf..bf27bce79a 100644 --- a/src/librustc_back/target/i686_linux_android.rs +++ 
b/src/librustc_back/target/i686_linux_android.rs @@ -28,6 +28,7 @@ pub fn target() -> TargetResult { llvm_target: "i686-linux-android".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(), arch: "x86".to_string(), target_os: "android".to_string(), diff --git a/src/librustc_back/target/i686_pc_windows_gnu.rs b/src/librustc_back/target/i686_pc_windows_gnu.rs index 4a736a93be..5f20a620b6 100644 --- a/src/librustc_back/target/i686_pc_windows_gnu.rs +++ b/src/librustc_back/target/i686_pc_windows_gnu.rs @@ -26,6 +26,7 @@ pub fn target() -> TargetResult { llvm_target: "i686-pc-windows-gnu".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32".to_string(), arch: "x86".to_string(), target_os: "windows".to_string(), diff --git a/src/librustc_back/target/i686_pc_windows_msvc.rs b/src/librustc_back/target/i686_pc_windows_msvc.rs index 17fe306804..48cee04457 100644 --- a/src/librustc_back/target/i686_pc_windows_msvc.rs +++ b/src/librustc_back/target/i686_pc_windows_msvc.rs @@ -30,6 +30,7 @@ pub fn target() -> TargetResult { llvm_target: "i686-pc-windows-msvc".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32".to_string(), arch: "x86".to_string(), target_os: "windows".to_string(), diff --git a/src/librustc_back/target/i686_unknown_dragonfly.rs b/src/librustc_back/target/i686_unknown_dragonfly.rs index 9eda49a370..891127b9d3 100644 --- a/src/librustc_back/target/i686_unknown_dragonfly.rs +++ b/src/librustc_back/target/i686_unknown_dragonfly.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: "i686-unknown-dragonfly".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(), arch: "x86".to_string(), target_os: "dragonfly".to_string(), diff --git a/src/librustc_back/target/i686_unknown_freebsd.rs b/src/librustc_back/target/i686_unknown_freebsd.rs index 041f3070c9..076acb8ed3 100644 --- a/src/librustc_back/target/i686_unknown_freebsd.rs +++ b/src/librustc_back/target/i686_unknown_freebsd.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: "i686-unknown-freebsd".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(), arch: "x86".to_string(), target_os: "freebsd".to_string(), diff --git a/src/librustc_back/target/i686_unknown_haiku.rs b/src/librustc_back/target/i686_unknown_haiku.rs index f21c2f8c77..02a15d6445 100644 --- a/src/librustc_back/target/i686_unknown_haiku.rs +++ b/src/librustc_back/target/i686_unknown_haiku.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: "i686-unknown-haiku".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(), arch: "x86".to_string(), target_os: "haiku".to_string(), diff --git a/src/librustc_back/target/i686_unknown_linux_gnu.rs 
b/src/librustc_back/target/i686_unknown_linux_gnu.rs index f7b916816b..b509e019c7 100644 --- a/src/librustc_back/target/i686_unknown_linux_gnu.rs +++ b/src/librustc_back/target/i686_unknown_linux_gnu.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: "i686-unknown-linux-gnu".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(), arch: "x86".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/i686_unknown_linux_musl.rs b/src/librustc_back/target/i686_unknown_linux_musl.rs index 00567d70fd..99c0d4c817 100644 --- a/src/librustc_back/target/i686_unknown_linux_musl.rs +++ b/src/librustc_back/target/i686_unknown_linux_musl.rs @@ -37,6 +37,7 @@ pub fn target() -> TargetResult { llvm_target: "i686-unknown-linux-musl".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(), arch: "x86".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/i686_unknown_netbsd.rs b/src/librustc_back/target/i686_unknown_netbsd.rs index 7a9de52956..dd21c20510 100644 --- a/src/librustc_back/target/i686_unknown_netbsd.rs +++ b/src/librustc_back/target/i686_unknown_netbsd.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: "i686-unknown-netbsdelf".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(), arch: "x86".to_string(), target_os: "netbsd".to_string(), diff --git a/src/librustc_back/target/i686_unknown_openbsd.rs b/src/librustc_back/target/i686_unknown_openbsd.rs index b19bdbe049..8daa5fcb88 100644 --- a/src/librustc_back/target/i686_unknown_openbsd.rs +++ b/src/librustc_back/target/i686_unknown_openbsd.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: "i686-unknown-openbsd".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(), arch: "x86".to_string(), target_os: "openbsd".to_string(), diff --git a/src/librustc_back/target/le32_unknown_nacl.rs b/src/librustc_back/target/le32_unknown_nacl.rs deleted file mode 100644 index 51eeae50e2..0000000000 --- a/src/librustc_back/target/le32_unknown_nacl.rs +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
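Note: the hunks above and below add a new `target_c_int_width` field to every built-in target specification, and the `mod.rs` hunk later in this patch makes `"target-c-int-width"` a required key when a custom target JSON is loaded (`get_req_field("target-c-int-width")`). A rough, hypothetical sketch of where the field sits in a target definition; field names follow the hunks, while the module context, base options and concrete values are placeholders rather than part of this patch:

    // Hypothetical, trimmed-down target definition showing the new required field.
    // Real definitions live in src/librustc_back/target/*.rs as patched above.
    pub fn target() -> TargetResult {
        let base = super::linux_base::opts();
        Ok(Target {
            llvm_target: "i686-unknown-linux-gnu".to_string(),
            target_endian: "little".to_string(),
            target_pointer_width: "32".to_string(),
            // New in this release: the width of C's `int` on the target;
            // "16" for msp430-none-elf, "32" for every other target touched here.
            target_c_int_width: "32".to_string(),
            data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
            arch: "x86".to_string(),
            target_os: "linux".to_string(),
            target_env: "gnu".to_string(),
            target_vendor: "unknown".to_string(),
            linker_flavor: LinkerFlavor::Gcc,
            options: base,
        })
    }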
- -use LinkerFlavor; -use super::{LinkArgs, Target, TargetOptions, TargetResult}; - -pub fn target() -> TargetResult { - let mut pre_link_args = LinkArgs::new(); - pre_link_args.insert(LinkerFlavor::Gcc, - vec!["--pnacl-exceptions=sjlj".to_string(), - "--target=le32-unknown-nacl".to_string(), - "-Wl,--start-group".to_string()]); - let mut post_link_args = LinkArgs::new(); - post_link_args.insert(LinkerFlavor::Gcc, - vec!["-Wl,--end-group".to_string()]); - - let opts = TargetOptions { - linker: "pnacl-clang".to_string(), - ar: "pnacl-ar".to_string(), - - pre_link_args, - post_link_args, - dynamic_linking: false, - executables: true, - exe_suffix: ".pexe".to_string(), - linker_is_gnu: true, - allow_asm: false, - max_atomic_width: Some(32), - .. Default::default() - }; - Ok(Target { - llvm_target: "le32-unknown-nacl".to_string(), - target_endian: "little".to_string(), - target_pointer_width: "32".to_string(), - target_os: "nacl".to_string(), - target_env: "newlib".to_string(), - target_vendor: "unknown".to_string(), - data_layout: "e-i64:64:64-p:32:32:32-v128:32:32".to_string(), - arch: "le32".to_string(), - linker_flavor: LinkerFlavor::Gcc, - options: opts, - }) -} diff --git a/src/librustc_back/target/mips64_unknown_linux_gnuabi64.rs b/src/librustc_back/target/mips64_unknown_linux_gnuabi64.rs index 2d77902046..5c3cf31b3e 100644 --- a/src/librustc_back/target/mips64_unknown_linux_gnuabi64.rs +++ b/src/librustc_back/target/mips64_unknown_linux_gnuabi64.rs @@ -16,6 +16,7 @@ pub fn target() -> TargetResult { llvm_target: "mips64-unknown-linux-gnuabi64".to_string(), target_endian: "big".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(), arch: "mips64".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/mips64el_unknown_linux_gnuabi64.rs b/src/librustc_back/target/mips64el_unknown_linux_gnuabi64.rs index c26780b9e6..96988388e8 100644 --- a/src/librustc_back/target/mips64el_unknown_linux_gnuabi64.rs +++ b/src/librustc_back/target/mips64el_unknown_linux_gnuabi64.rs @@ -16,6 +16,7 @@ pub fn target() -> TargetResult { llvm_target: "mips64el-unknown-linux-gnuabi64".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(), arch: "mips64".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/mips_unknown_linux_gnu.rs b/src/librustc_back/target/mips_unknown_linux_gnu.rs index 24649851d7..5a43e1c4c7 100644 --- a/src/librustc_back/target/mips_unknown_linux_gnu.rs +++ b/src/librustc_back/target/mips_unknown_linux_gnu.rs @@ -16,6 +16,7 @@ pub fn target() -> TargetResult { llvm_target: "mips-unknown-linux-gnu".to_string(), target_endian: "big".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), arch: "mips".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/mips_unknown_linux_musl.rs b/src/librustc_back/target/mips_unknown_linux_musl.rs index 6303722945..3f6b984272 100644 --- a/src/librustc_back/target/mips_unknown_linux_musl.rs +++ b/src/librustc_back/target/mips_unknown_linux_musl.rs @@ -16,6 +16,7 @@ pub fn target() -> TargetResult { llvm_target: "mips-unknown-linux-musl".to_string(), target_endian: "big".to_string(), target_pointer_width: 
"32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), arch: "mips".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/mips_unknown_linux_uclibc.rs b/src/librustc_back/target/mips_unknown_linux_uclibc.rs index 1a7a56a977..c851cab069 100644 --- a/src/librustc_back/target/mips_unknown_linux_uclibc.rs +++ b/src/librustc_back/target/mips_unknown_linux_uclibc.rs @@ -16,6 +16,7 @@ pub fn target() -> TargetResult { llvm_target: "mips-unknown-linux-uclibc".to_string(), target_endian: "big".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), arch: "mips".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/mipsel_unknown_linux_gnu.rs b/src/librustc_back/target/mipsel_unknown_linux_gnu.rs index cbf8339993..2c38444d05 100644 --- a/src/librustc_back/target/mipsel_unknown_linux_gnu.rs +++ b/src/librustc_back/target/mipsel_unknown_linux_gnu.rs @@ -16,6 +16,7 @@ pub fn target() -> TargetResult { llvm_target: "mipsel-unknown-linux-gnu".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), arch: "mips".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/mipsel_unknown_linux_musl.rs b/src/librustc_back/target/mipsel_unknown_linux_musl.rs index b367bce75a..464f0bfe48 100644 --- a/src/librustc_back/target/mipsel_unknown_linux_musl.rs +++ b/src/librustc_back/target/mipsel_unknown_linux_musl.rs @@ -16,6 +16,7 @@ pub fn target() -> TargetResult { llvm_target: "mipsel-unknown-linux-musl".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), arch: "mips".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/mipsel_unknown_linux_uclibc.rs b/src/librustc_back/target/mipsel_unknown_linux_uclibc.rs index 686dfbe987..5d2ba54876 100644 --- a/src/librustc_back/target/mipsel_unknown_linux_uclibc.rs +++ b/src/librustc_back/target/mipsel_unknown_linux_uclibc.rs @@ -16,6 +16,7 @@ pub fn target() -> TargetResult { llvm_target: "mipsel-unknown-linux-uclibc".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), arch: "mips".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/mod.rs b/src/librustc_back/target/mod.rs index 130e1b695d..039e015365 100644 --- a/src/librustc_back/target/mod.rs +++ b/src/librustc_back/target/mod.rs @@ -153,6 +153,7 @@ supported_targets! { ("armv7-unknown-linux-gnueabihf", armv7_unknown_linux_gnueabihf), ("armv7-unknown-linux-musleabihf", armv7_unknown_linux_musleabihf), ("aarch64-unknown-linux-gnu", aarch64_unknown_linux_gnu), + ("aarch64-unknown-linux-musl", aarch64_unknown_linux_musl), ("x86_64-unknown-linux-musl", x86_64_unknown_linux_musl), ("i686-unknown-linux-musl", i686_unknown_linux_musl), ("mips-unknown-linux-musl", mips_unknown_linux_musl), @@ -214,7 +215,6 @@ supported_targets! 
{ ("i686-pc-windows-msvc", i686_pc_windows_msvc), ("i586-pc-windows-msvc", i586_pc_windows_msvc), - ("le32-unknown-nacl", le32_unknown_nacl), ("asmjs-unknown-emscripten", asmjs_unknown_emscripten), ("wasm32-unknown-emscripten", wasm32_unknown_emscripten), ("wasm32-experimental-emscripten", wasm32_experimental_emscripten), @@ -238,6 +238,8 @@ pub struct Target { pub target_endian: String, /// String to use as the `target_pointer_width` `cfg` variable. pub target_pointer_width: String, + /// Width of c_int type + pub target_c_int_width: String, /// OS name to use for conditional compilation. pub target_os: String, /// Environment name to use for conditional compilation. @@ -425,6 +427,9 @@ pub struct TargetOptions { /// Whether or not stack probes (__rust_probestack) are enabled pub stack_probes: bool, + + /// The minimum alignment for global symbols. + pub min_global_align: Option, } impl Default for TargetOptions { @@ -486,6 +491,7 @@ impl Default for TargetOptions { crt_static_default: false, crt_static_respected: false, stack_probes: false, + min_global_align: None, } } } @@ -551,6 +557,7 @@ impl Target { llvm_target: get_req_field("llvm-target")?, target_endian: get_req_field("target-endian")?, target_pointer_width: get_req_field("target-pointer-width")?, + target_c_int_width: get_req_field("target-c-int-width")?, data_layout: get_req_field("data-layout")?, arch: get_req_field("arch")?, target_os: get_req_field("os")?, @@ -724,6 +731,7 @@ impl Target { key!(crt_static_default, bool); key!(crt_static_respected, bool); key!(stack_probes, bool); + key!(min_global_align, Option); if let Some(array) = obj.find("abi-blacklist").and_then(Json::as_array) { for name in array.iter().filter_map(|abi| abi.as_string()) { @@ -854,6 +862,7 @@ impl ToJson for Target { target_val!(llvm_target); target_val!(target_endian); target_val!(target_pointer_width); + target_val!(target_c_int_width); target_val!(arch); target_val!(target_os, "os"); target_val!(target_env, "env"); @@ -914,6 +923,7 @@ impl ToJson for Target { target_option_val!(crt_static_default); target_option_val!(crt_static_respected); target_option_val!(stack_probes); + target_option_val!(min_global_align); if default.abi_blacklist != self.options.abi_blacklist { d.insert("abi-blacklist".to_string(), self.options.abi_blacklist.iter() diff --git a/src/librustc_back/target/msp430_none_elf.rs b/src/librustc_back/target/msp430_none_elf.rs index 588a8bde79..9227a96e75 100644 --- a/src/librustc_back/target/msp430_none_elf.rs +++ b/src/librustc_back/target/msp430_none_elf.rs @@ -16,6 +16,7 @@ pub fn target() -> TargetResult { llvm_target: "msp430-none-elf".to_string(), target_endian: "little".to_string(), target_pointer_width: "16".to_string(), + target_c_int_width: "16".to_string(), data_layout: "e-m:e-p:16:16-i32:16-i64:16-f32:16-f64:16-a:8-n8:16-S16".to_string(), arch: "msp430".to_string(), target_os: "none".to_string(), diff --git a/src/librustc_back/target/powerpc64_unknown_linux_gnu.rs b/src/librustc_back/target/powerpc64_unknown_linux_gnu.rs index 7b038ac007..1f119c7204 100644 --- a/src/librustc_back/target/powerpc64_unknown_linux_gnu.rs +++ b/src/librustc_back/target/powerpc64_unknown_linux_gnu.rs @@ -28,6 +28,7 @@ pub fn target() -> TargetResult { llvm_target: "powerpc64-unknown-linux-gnu".to_string(), target_endian: "big".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "E-m:e-i64:64-n32:64".to_string(), arch: "powerpc64".to_string(), target_os: "linux".to_string(), diff --git 
a/src/librustc_back/target/powerpc64le_unknown_linux_gnu.rs b/src/librustc_back/target/powerpc64le_unknown_linux_gnu.rs index 5b50b96837..13c59785d4 100644 --- a/src/librustc_back/target/powerpc64le_unknown_linux_gnu.rs +++ b/src/librustc_back/target/powerpc64le_unknown_linux_gnu.rs @@ -24,6 +24,7 @@ pub fn target() -> TargetResult { llvm_target: "powerpc64le-unknown-linux-gnu".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-i64:64-n32:64".to_string(), arch: "powerpc64".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/powerpc_unknown_linux_gnu.rs b/src/librustc_back/target/powerpc_unknown_linux_gnu.rs index 8d4ad5f0b4..1797126b31 100644 --- a/src/librustc_back/target/powerpc_unknown_linux_gnu.rs +++ b/src/librustc_back/target/powerpc_unknown_linux_gnu.rs @@ -23,6 +23,7 @@ pub fn target() -> TargetResult { llvm_target: "powerpc-unknown-linux-gnu".to_string(), target_endian: "big".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "E-m:e-p:32:32-i64:64-n32".to_string(), arch: "powerpc".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/s390x_unknown_linux_gnu.rs b/src/librustc_back/target/s390x_unknown_linux_gnu.rs index 78a6bb7933..d96379547f 100644 --- a/src/librustc_back/target/s390x_unknown_linux_gnu.rs +++ b/src/librustc_back/target/s390x_unknown_linux_gnu.rs @@ -22,11 +22,13 @@ pub fn target() -> TargetResult { base.max_atomic_width = Some(64); // see #36994 base.exe_allocation_crate = None; + base.min_global_align = Some(16); Ok(Target { llvm_target: "s390x-unknown-linux-gnu".to_string(), target_endian: "big".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64".to_string(), arch: "s390x".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/sparc64_unknown_linux_gnu.rs b/src/librustc_back/target/sparc64_unknown_linux_gnu.rs index 7f710ad402..aed40e9df4 100644 --- a/src/librustc_back/target/sparc64_unknown_linux_gnu.rs +++ b/src/librustc_back/target/sparc64_unknown_linux_gnu.rs @@ -21,6 +21,7 @@ pub fn target() -> TargetResult { llvm_target: "sparc64-unknown-linux-gnu".to_string(), target_endian: "big".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "E-m:e-i64:64-n32:64-S128".to_string(), arch: "sparc64".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/sparc64_unknown_netbsd.rs b/src/librustc_back/target/sparc64_unknown_netbsd.rs index bc65a17ce6..483c879094 100644 --- a/src/librustc_back/target/sparc64_unknown_netbsd.rs +++ b/src/librustc_back/target/sparc64_unknown_netbsd.rs @@ -21,6 +21,7 @@ pub fn target() -> TargetResult { llvm_target: "sparc64-unknown-netbsd".to_string(), target_endian: "big".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "E-m:e-i64:64-n32:64-S128".to_string(), arch: "sparc64".to_string(), target_os: "netbsd".to_string(), diff --git a/src/librustc_back/target/sparcv9_sun_solaris.rs b/src/librustc_back/target/sparcv9_sun_solaris.rs index 122b38968a..1d9027275d 100644 --- a/src/librustc_back/target/sparcv9_sun_solaris.rs +++ b/src/librustc_back/target/sparcv9_sun_solaris.rs @@ -17,11 +17,13 @@ pub fn target() -> TargetResult { // llvm calls this "v9" base.cpu = 
"v9".to_string(); base.max_atomic_width = Some(64); + base.exe_allocation_crate = None; Ok(Target { llvm_target: "sparcv9-sun-solaris".to_string(), target_endian: "big".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "E-m:e-i64:64-n32:64-S128".to_string(), // Use "sparc64" instead of "sparcv9" here, since the former is already // used widely in the source base. If we ever needed ABI diff --git a/src/librustc_back/target/thumbv6m_none_eabi.rs b/src/librustc_back/target/thumbv6m_none_eabi.rs index 08bf145e55..d164fbf9d9 100644 --- a/src/librustc_back/target/thumbv6m_none_eabi.rs +++ b/src/librustc_back/target/thumbv6m_none_eabi.rs @@ -18,6 +18,7 @@ pub fn target() -> TargetResult { llvm_target: "thumbv6m-none-eabi".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), arch: "arm".to_string(), target_os: "none".to_string(), diff --git a/src/librustc_back/target/thumbv7em_none_eabi.rs b/src/librustc_back/target/thumbv7em_none_eabi.rs index 13f9cc5f65..7e66ddf7b0 100644 --- a/src/librustc_back/target/thumbv7em_none_eabi.rs +++ b/src/librustc_back/target/thumbv7em_none_eabi.rs @@ -27,6 +27,7 @@ pub fn target() -> TargetResult { llvm_target: "thumbv7em-none-eabi".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), arch: "arm".to_string(), target_os: "none".to_string(), diff --git a/src/librustc_back/target/thumbv7em_none_eabihf.rs b/src/librustc_back/target/thumbv7em_none_eabihf.rs index 929b6db6fb..31835de36d 100644 --- a/src/librustc_back/target/thumbv7em_none_eabihf.rs +++ b/src/librustc_back/target/thumbv7em_none_eabihf.rs @@ -26,6 +26,7 @@ pub fn target() -> TargetResult { llvm_target: "thumbv7em-none-eabihf".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), arch: "arm".to_string(), target_os: "none".to_string(), diff --git a/src/librustc_back/target/thumbv7m_none_eabi.rs b/src/librustc_back/target/thumbv7m_none_eabi.rs index 8d46e7cb90..8f16ae4ea1 100644 --- a/src/librustc_back/target/thumbv7m_none_eabi.rs +++ b/src/librustc_back/target/thumbv7m_none_eabi.rs @@ -18,6 +18,7 @@ pub fn target() -> TargetResult { llvm_target: "thumbv7m-none-eabi".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), arch: "arm".to_string(), target_os: "none".to_string(), diff --git a/src/librustc_back/target/wasm32_experimental_emscripten.rs b/src/librustc_back/target/wasm32_experimental_emscripten.rs index 42ab194040..71668444d9 100644 --- a/src/librustc_back/target/wasm32_experimental_emscripten.rs +++ b/src/librustc_back/target/wasm32_experimental_emscripten.rs @@ -46,6 +46,7 @@ pub fn target() -> Result { llvm_target: "wasm32-unknown-unknown".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), target_os: "emscripten".to_string(), target_env: "".to_string(), target_vendor: "unknown".to_string(), diff --git a/src/librustc_back/target/wasm32_unknown_emscripten.rs 
b/src/librustc_back/target/wasm32_unknown_emscripten.rs index a0a2699d8f..e6584addf4 100644 --- a/src/librustc_back/target/wasm32_unknown_emscripten.rs +++ b/src/librustc_back/target/wasm32_unknown_emscripten.rs @@ -42,6 +42,7 @@ pub fn target() -> Result { llvm_target: "asmjs-unknown-emscripten".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), target_os: "emscripten".to_string(), target_env: "".to_string(), target_vendor: "unknown".to_string(), diff --git a/src/librustc_back/target/x86_64_apple_darwin.rs b/src/librustc_back/target/x86_64_apple_darwin.rs index 8ac7667900..71ac360eb9 100644 --- a/src/librustc_back/target/x86_64_apple_darwin.rs +++ b/src/librustc_back/target/x86_64_apple_darwin.rs @@ -23,6 +23,7 @@ pub fn target() -> TargetResult { llvm_target: "x86_64-apple-darwin".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:o-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "macos".to_string(), diff --git a/src/librustc_back/target/x86_64_apple_ios.rs b/src/librustc_back/target/x86_64_apple_ios.rs index 61a71da216..eed99e3784 100644 --- a/src/librustc_back/target/x86_64_apple_ios.rs +++ b/src/librustc_back/target/x86_64_apple_ios.rs @@ -18,6 +18,7 @@ pub fn target() -> TargetResult { llvm_target: "x86_64-apple-ios".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:o-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "ios".to_string(), diff --git a/src/librustc_back/target/x86_64_linux_android.rs b/src/librustc_back/target/x86_64_linux_android.rs index 158e2b1360..2aae739415 100644 --- a/src/librustc_back/target/x86_64_linux_android.rs +++ b/src/librustc_back/target/x86_64_linux_android.rs @@ -24,6 +24,7 @@ pub fn target() -> TargetResult { llvm_target: "x86_64-linux-android".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "android".to_string(), diff --git a/src/librustc_back/target/x86_64_pc_windows_gnu.rs b/src/librustc_back/target/x86_64_pc_windows_gnu.rs index 10e88d88ee..70062d1363 100644 --- a/src/librustc_back/target/x86_64_pc_windows_gnu.rs +++ b/src/librustc_back/target/x86_64_pc_windows_gnu.rs @@ -21,6 +21,7 @@ pub fn target() -> TargetResult { llvm_target: "x86_64-pc-windows-gnu".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:w-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "windows".to_string(), diff --git a/src/librustc_back/target/x86_64_pc_windows_msvc.rs b/src/librustc_back/target/x86_64_pc_windows_msvc.rs index 7eb673d8b3..813d0f1bad 100644 --- a/src/librustc_back/target/x86_64_pc_windows_msvc.rs +++ b/src/librustc_back/target/x86_64_pc_windows_msvc.rs @@ -21,6 +21,7 @@ pub fn target() -> TargetResult { llvm_target: "x86_64-pc-windows-msvc".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:w-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "windows".to_string(), diff --git 
a/src/librustc_back/target/x86_64_rumprun_netbsd.rs b/src/librustc_back/target/x86_64_rumprun_netbsd.rs index c7e5edde63..ab5a6f71eb 100644 --- a/src/librustc_back/target/x86_64_rumprun_netbsd.rs +++ b/src/librustc_back/target/x86_64_rumprun_netbsd.rs @@ -31,6 +31,7 @@ pub fn target() -> TargetResult { llvm_target: "x86_64-rumprun-netbsd".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "netbsd".to_string(), diff --git a/src/librustc_back/target/x86_64_sun_solaris.rs b/src/librustc_back/target/x86_64_sun_solaris.rs index 38a38ed68b..d554138539 100644 --- a/src/librustc_back/target/x86_64_sun_solaris.rs +++ b/src/librustc_back/target/x86_64_sun_solaris.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: "x86_64-pc-solaris".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "solaris".to_string(), diff --git a/src/librustc_back/target/x86_64_unknown_bitrig.rs b/src/librustc_back/target/x86_64_unknown_bitrig.rs index cf4b019dce..1ea985d70e 100644 --- a/src/librustc_back/target/x86_64_unknown_bitrig.rs +++ b/src/librustc_back/target/x86_64_unknown_bitrig.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: "x86_64-unknown-bitrig".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "bitrig".to_string(), diff --git a/src/librustc_back/target/x86_64_unknown_dragonfly.rs b/src/librustc_back/target/x86_64_unknown_dragonfly.rs index 8885d89c6f..56e4685fed 100644 --- a/src/librustc_back/target/x86_64_unknown_dragonfly.rs +++ b/src/librustc_back/target/x86_64_unknown_dragonfly.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: "x86_64-unknown-dragonfly".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "dragonfly".to_string(), diff --git a/src/librustc_back/target/x86_64_unknown_freebsd.rs b/src/librustc_back/target/x86_64_unknown_freebsd.rs index 95870f2be5..3d26592530 100644 --- a/src/librustc_back/target/x86_64_unknown_freebsd.rs +++ b/src/librustc_back/target/x86_64_unknown_freebsd.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: "x86_64-unknown-freebsd".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "freebsd".to_string(), diff --git a/src/librustc_back/target/x86_64_unknown_fuchsia.rs b/src/librustc_back/target/x86_64_unknown_fuchsia.rs index 1aebb88559..6e97d53cfa 100644 --- a/src/librustc_back/target/x86_64_unknown_fuchsia.rs +++ b/src/librustc_back/target/x86_64_unknown_fuchsia.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: "x86_64-unknown-fuchsia".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: 
"e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "fuchsia".to_string(), diff --git a/src/librustc_back/target/x86_64_unknown_haiku.rs b/src/librustc_back/target/x86_64_unknown_haiku.rs index 3794a516ec..f42c480e7a 100644 --- a/src/librustc_back/target/x86_64_unknown_haiku.rs +++ b/src/librustc_back/target/x86_64_unknown_haiku.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: "x86_64-unknown-haiku".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "haiku".to_string(), diff --git a/src/librustc_back/target/x86_64_unknown_l4re_uclibc.rs b/src/librustc_back/target/x86_64_unknown_l4re_uclibc.rs index 99d3171e1c..6e849f19cf 100644 --- a/src/librustc_back/target/x86_64_unknown_l4re_uclibc.rs +++ b/src/librustc_back/target/x86_64_unknown_l4re_uclibc.rs @@ -20,6 +20,7 @@ pub fn target() -> TargetResult { llvm_target: "x86_64-unknown-l4re-uclibc".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "l4re".to_string(), diff --git a/src/librustc_back/target/x86_64_unknown_linux_gnu.rs b/src/librustc_back/target/x86_64_unknown_linux_gnu.rs index d2135f8a0b..cfe80c9673 100644 --- a/src/librustc_back/target/x86_64_unknown_linux_gnu.rs +++ b/src/librustc_back/target/x86_64_unknown_linux_gnu.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: "x86_64-unknown-linux-gnu".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/x86_64_unknown_linux_musl.rs b/src/librustc_back/target/x86_64_unknown_linux_musl.rs index 7d542b4d3c..7e304748e3 100644 --- a/src/librustc_back/target/x86_64_unknown_linux_musl.rs +++ b/src/librustc_back/target/x86_64_unknown_linux_musl.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: "x86_64-unknown-linux-musl".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "linux".to_string(), diff --git a/src/librustc_back/target/x86_64_unknown_netbsd.rs b/src/librustc_back/target/x86_64_unknown_netbsd.rs index 5d49fcbd64..7afb446f5d 100644 --- a/src/librustc_back/target/x86_64_unknown_netbsd.rs +++ b/src/librustc_back/target/x86_64_unknown_netbsd.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: "x86_64-unknown-netbsd".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "netbsd".to_string(), diff --git a/src/librustc_back/target/x86_64_unknown_openbsd.rs b/src/librustc_back/target/x86_64_unknown_openbsd.rs index aa289fb577..e4bbdbec4c 100644 --- a/src/librustc_back/target/x86_64_unknown_openbsd.rs +++ b/src/librustc_back/target/x86_64_unknown_openbsd.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: 
"x86_64-unknown-openbsd".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "openbsd".to_string(), diff --git a/src/librustc_back/target/x86_64_unknown_redox.rs b/src/librustc_back/target/x86_64_unknown_redox.rs index 8d2a7afeea..401fa4ca2c 100644 --- a/src/librustc_back/target/x86_64_unknown_redox.rs +++ b/src/librustc_back/target/x86_64_unknown_redox.rs @@ -22,6 +22,7 @@ pub fn target() -> TargetResult { llvm_target: "x86_64-unknown-redox".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + target_c_int_width: "32".to_string(), data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "redox".to_string(), diff --git a/src/librustc_bitflags/Cargo.toml b/src/librustc_bitflags/Cargo.toml deleted file mode 100644 index d82a72994c..0000000000 --- a/src/librustc_bitflags/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -authors = ["The Rust Project Developers"] -name = "rustc_bitflags" -version = "0.0.0" - -[lib] -name = "rustc_bitflags" -path = "lib.rs" -doctest = false diff --git a/src/librustc_bitflags/lib.rs b/src/librustc_bitflags/lib.rs deleted file mode 100644 index eb47144d1f..0000000000 --- a/src/librustc_bitflags/lib.rs +++ /dev/null @@ -1,494 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - - -#![no_std] -#![deny(warnings)] - -//! A typesafe bitmask flag generator. - -#[cfg(test)] -#[macro_use] -extern crate std; - -/// The `bitflags!` macro generates a `struct` that holds a set of C-style -/// bitmask flags. It is useful for creating typesafe wrappers for C APIs. -/// -/// The flags should only be defined for integer types, otherwise unexpected -/// type errors may occur at compile time. -/// -/// # Examples -/// -/// ```{.rust} -/// #![feature(rustc_private)] -/// #[macro_use] extern crate rustc_bitflags; -/// -/// bitflags! { -/// flags Flags: u32 { -/// const FLAG_A = 0b00000001, -/// const FLAG_B = 0b00000010, -/// const FLAG_C = 0b00000100, -/// const FLAG_ABC = Flags::FLAG_A.bits -/// | Flags::FLAG_B.bits -/// | Flags::FLAG_C.bits, -/// } -/// } -/// -/// fn main() { -/// let e1 = Flags::FLAG_A | Flags::FLAG_C; -/// let e2 = Flags::FLAG_B | Flags::FLAG_C; -/// assert!((e1 | e2) == Flags::FLAG_ABC); // union -/// assert!((e1 & e2) == Flags::FLAG_C); // intersection -/// assert!((e1 - e2) == Flags::FLAG_A); // set difference -/// assert!(!e2 == Flags::FLAG_A); // set complement -/// } -/// ``` -/// -/// The generated `struct`s can also be extended with type and trait implementations: -/// -/// ```{.rust} -/// #![feature(rustc_private)] -/// #[macro_use] extern crate rustc_bitflags; -/// -/// use std::fmt; -/// -/// bitflags! { -/// flags Flags: u32 { -/// const FLAG_A = 0b00000001, -/// const FLAG_B = 0b00000010, -/// } -/// } -/// -/// impl Flags { -/// pub fn clear(&mut self) { -/// self.bits = 0; // The `bits` field can be accessed from within the -/// // same module where the `bitflags!` macro was invoked. 
-/// } -/// } -/// -/// impl fmt::Debug for Flags { -/// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { -/// write!(f, "hi!") -/// } -/// } -/// -/// fn main() { -/// let mut flags = Flags::FLAG_A | Flags::FLAG_B; -/// flags.clear(); -/// assert!(flags.is_empty()); -/// assert_eq!(format!("{:?}", flags), "hi!"); -/// } -/// ``` -/// -/// # Attributes -/// -/// Attributes can be attached to the generated `struct` by placing them -/// before the `flags` keyword. -/// -/// # Derived traits -/// -/// The `PartialEq` and `Clone` traits are automatically derived for the `struct` using -/// the `deriving` attribute. Additional traits can be derived by providing an -/// explicit `deriving` attribute on `flags`. -/// -/// # Operators -/// -/// The following operator traits are implemented for the generated `struct`: -/// -/// - `BitOr`: union -/// - `BitAnd`: intersection -/// - `BitXor`: toggle -/// - `Sub`: set difference -/// - `Not`: set complement -/// -/// # Methods -/// -/// The following methods are defined for the generated `struct`: -/// -/// - `empty`: an empty set of flags -/// - `all`: the set of all flags -/// - `bits`: the raw value of the flags currently stored -/// - `from_bits`: convert from underlying bit representation, unless that -/// representation contains bits that do not correspond to a flag -/// - `from_bits_truncate`: convert from underlying bit representation, dropping -/// any bits that do not correspond to flags -/// - `is_empty`: `true` if no flags are currently stored -/// - `is_all`: `true` if all flags are currently set -/// - `intersects`: `true` if there are flags common to both `self` and `other` -/// - `contains`: `true` all of the flags in `other` are contained within `self` -/// - `insert`: inserts the specified flags in-place -/// - `remove`: removes the specified flags in-place -/// - `toggle`: the specified flags will be inserted if not present, and removed -/// if they are. -#[macro_export] -macro_rules! bitflags { - ($(#[$attr:meta])* flags $BitFlags:ident: $T:ty { - $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr),+ - }) => { - #[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)] - $(#[$attr])* - pub struct $BitFlags { - bits: $T, - } - - impl $BitFlags { - $($(#[$Flag_attr])* pub const $Flag: $BitFlags = $BitFlags { bits: $value };)+ - - /// Returns an empty set of flags. - #[inline] - pub fn empty() -> $BitFlags { - $BitFlags { bits: 0 } - } - - /// Returns the set containing all flags. - #[inline] - pub fn all() -> $BitFlags { - $BitFlags { bits: $($value)|+ } - } - - /// Returns the raw value of the flags currently stored. - #[inline] - pub fn bits(&self) -> $T { - self.bits - } - - /// Convert from underlying bit representation, unless that - /// representation contains bits that do not correspond to a flag. - #[inline] - pub fn from_bits(bits: $T) -> ::std::option::Option<$BitFlags> { - if (bits & !$BitFlags::all().bits()) != 0 { - ::std::option::Option::None - } else { - ::std::option::Option::Some($BitFlags { bits: bits }) - } - } - - /// Convert from underlying bit representation, dropping any bits - /// that do not correspond to flags. - #[inline] - pub fn from_bits_truncate(bits: $T) -> $BitFlags { - $BitFlags { bits: bits } & $BitFlags::all() - } - - /// Returns `true` if no flags are currently stored. - #[inline] - pub fn is_empty(&self) -> bool { - *self == $BitFlags::empty() - } - - /// Returns `true` if all flags are currently set. 
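For context on the `librustc_bitflags` removal in progress here: the deleted macro expands each `flags` declaration into a plain struct with a private `bits` field, associated constants and set-style operators, as the deleted lines around this point show. A hand-written, self-contained approximation of that expansion (not the removed macro itself; names come from the doc examples in the deleted lib.rs):

    // Rough hand-expansion of what the deleted `bitflags!` macro generated for
    // `flags Flags: u32 { const FLAG_A = 0b01, const FLAG_B = 0b10 }`.
    #[derive(Copy, Clone, PartialEq, Eq)]
    pub struct Flags { bits: u32 }

    impl Flags {
        pub const FLAG_A: Flags = Flags { bits: 0b01 };
        pub const FLAG_B: Flags = Flags { bits: 0b10 };

        pub fn empty() -> Flags { Flags { bits: 0 } }
        pub fn bits(&self) -> u32 { self.bits }
        pub fn contains(&self, other: Flags) -> bool { (self.bits & other.bits) == other.bits }
    }

    impl ::std::ops::BitOr for Flags {
        type Output = Flags;
        fn bitor(self, other: Flags) -> Flags { Flags { bits: self.bits | other.bits } }
    }

    fn main() {
        let e = Flags::FLAG_A | Flags::FLAG_B;
        assert!(e.contains(Flags::FLAG_A));
        assert_eq!(e.bits(), 0b11);
    }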
- #[inline] - pub fn is_all(&self) -> bool { - *self == $BitFlags::all() - } - - /// Returns `true` if there are flags common to both `self` and `other`. - #[inline] - pub fn intersects(&self, other: $BitFlags) -> bool { - !(*self & other).is_empty() - } - - /// Returns `true` if all of the flags in `other` are contained within `self`. - #[inline] - pub fn contains(&self, other: $BitFlags) -> bool { - (*self & other) == other - } - - /// Inserts the specified flags in-place. - #[inline] - pub fn insert(&mut self, other: $BitFlags) { - self.bits |= other.bits; - } - - /// Removes the specified flags in-place. - #[inline] - pub fn remove(&mut self, other: $BitFlags) { - self.bits &= !other.bits; - } - - /// Toggles the specified flags in-place. - #[inline] - pub fn toggle(&mut self, other: $BitFlags) { - self.bits ^= other.bits; - } - } - - impl ::std::ops::BitOr for $BitFlags { - type Output = $BitFlags; - - /// Returns the union of the two sets of flags. - #[inline] - fn bitor(self, other: $BitFlags) -> $BitFlags { - $BitFlags { bits: self.bits | other.bits } - } - } - - impl ::std::ops::BitXor for $BitFlags { - type Output = $BitFlags; - - /// Returns the left flags, but with all the right flags toggled. - #[inline] - fn bitxor(self, other: $BitFlags) -> $BitFlags { - $BitFlags { bits: self.bits ^ other.bits } - } - } - - impl ::std::ops::BitAnd for $BitFlags { - type Output = $BitFlags; - - /// Returns the intersection between the two sets of flags. - #[inline] - fn bitand(self, other: $BitFlags) -> $BitFlags { - $BitFlags { bits: self.bits & other.bits } - } - } - - impl ::std::ops::Sub for $BitFlags { - type Output = $BitFlags; - - /// Returns the set difference of the two sets of flags. - #[inline] - fn sub(self, other: $BitFlags) -> $BitFlags { - $BitFlags { bits: self.bits & !other.bits } - } - } - - impl ::std::ops::Not for $BitFlags { - type Output = $BitFlags; - - /// Returns the complement of this set of flags. - #[inline] - fn not(self) -> $BitFlags { - $BitFlags { bits: !self.bits } & $BitFlags::all() - } - } - }; - ($(#[$attr:meta])* flags $BitFlags:ident: $T:ty { - $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr),+, - }) => { - bitflags! { - $(#[$attr])* - flags $BitFlags: $T { - $($(#[$Flag_attr])* const $Flag = $value),+ - } - } - }; -} - -#[cfg(test)] -#[allow(non_upper_case_globals)] -mod tests { - use std::hash::{Hash, Hasher}; - use std::collections::hash_map::DefaultHasher; - use std::option::Option::{None, Some}; - - bitflags! { - #[doc = "> The first principle is that you must not fool yourself — and"] - #[doc = "> you are the easiest person to fool."] - #[doc = "> "] - #[doc = "> - Richard Feynman"] - flags Flags: u32 { - const FlagA = 0b00000001, - #[doc = " macros are way better at generating code than trans is"] - const FlagB = 0b00000010, - const FlagC = 0b00000100, - #[doc = "* cmr bed"] - #[doc = "* strcat table"] - #[doc = " wait what?"] - const FlagABC = Flags::FlagA.bits - | Flags::FlagB.bits - | Flags::FlagC.bits, - } - } - - bitflags! 
{ - flags AnotherSetOfFlags: i8 { - const AnotherFlag = -1, - } - } - - #[test] - fn test_bits() { - assert_eq!(Flags::empty().bits(), 0b00000000); - assert_eq!(Flags::FlagA.bits(), 0b00000001); - assert_eq!(Flags::FlagABC.bits(), 0b00000111); - - assert_eq!(AnotherSetOfFlags::empty().bits(), 0b00); - assert_eq!(AnotherSetOfFlags::AnotherFlag.bits(), !0); - } - - #[test] - fn test_from_bits() { - assert!(Flags::from_bits(0) == Some(Flags::empty())); - assert!(Flags::from_bits(0b1) == Some(Flags::FlagA)); - assert!(Flags::from_bits(0b10) == Some(Flags::FlagB)); - assert!(Flags::from_bits(0b11) == Some(Flags::FlagA | Flags::FlagB)); - assert!(Flags::from_bits(0b1000) == None); - - assert!(AnotherSetOfFlags::from_bits(!0) == Some(AnotherSetOfFlags::AnotherFlag)); - } - - #[test] - fn test_from_bits_truncate() { - assert!(Flags::from_bits_truncate(0) == Flags::empty()); - assert!(Flags::from_bits_truncate(0b1) == Flags::FlagA); - assert!(Flags::from_bits_truncate(0b10) == Flags::FlagB); - assert!(Flags::from_bits_truncate(0b11) == (Flags::FlagA | Flags::FlagB)); - assert!(Flags::from_bits_truncate(0b1000) == Flags::empty()); - assert!(Flags::from_bits_truncate(0b1001) == Flags::FlagA); - - assert!(AnotherSetOfFlags::from_bits_truncate(0) == AnotherSetOfFlags::empty()); - } - - #[test] - fn test_is_empty() { - assert!(Flags::empty().is_empty()); - assert!(!Flags::FlagA.is_empty()); - assert!(!Flags::FlagABC.is_empty()); - - assert!(!AnotherSetOfFlags::AnotherFlag.is_empty()); - } - - #[test] - fn test_is_all() { - assert!(Flags::all().is_all()); - assert!(!Flags::FlagA.is_all()); - assert!(Flags::FlagABC.is_all()); - - assert!(AnotherSetOfFlags::AnotherFlag.is_all()); - } - - #[test] - fn test_two_empties_do_not_intersect() { - let e1 = Flags::empty(); - let e2 = Flags::empty(); - assert!(!e1.intersects(e2)); - - assert!(AnotherSetOfFlags::AnotherFlag.intersects(AnotherSetOfFlags::AnotherFlag)); - } - - #[test] - fn test_empty_does_not_intersect_with_full() { - let e1 = Flags::empty(); - let e2 = Flags::FlagABC; - assert!(!e1.intersects(e2)); - } - - #[test] - fn test_disjoint_intersects() { - let e1 = Flags::FlagA; - let e2 = Flags::FlagB; - assert!(!e1.intersects(e2)); - } - - #[test] - fn test_overlapping_intersects() { - let e1 = Flags::FlagA; - let e2 = Flags::FlagA | Flags::FlagB; - assert!(e1.intersects(e2)); - } - - #[test] - fn test_contains() { - let e1 = Flags::FlagA; - let e2 = Flags::FlagA | Flags::FlagB; - assert!(!e1.contains(e2)); - assert!(e2.contains(e1)); - assert!(Flags::FlagABC.contains(e2)); - - assert!(AnotherSetOfFlags::AnotherFlag.contains(AnotherSetOfFlags::AnotherFlag)); - } - - #[test] - fn test_insert() { - let mut e1 = Flags::FlagA; - let e2 = Flags::FlagA | Flags::FlagB; - e1.insert(e2); - assert!(e1 == e2); - - let mut e3 = AnotherSetOfFlags::empty(); - e3.insert(AnotherSetOfFlags::AnotherFlag); - assert!(e3 == AnotherSetOfFlags::AnotherFlag); - } - - #[test] - fn test_remove() { - let mut e1 = Flags::FlagA | Flags::FlagB; - let e2 = Flags::FlagA | Flags::FlagC; - e1.remove(e2); - assert!(e1 == Flags::FlagB); - - let mut e3 = AnotherSetOfFlags::AnotherFlag; - e3.remove(AnotherSetOfFlags::AnotherFlag); - assert!(e3 == AnotherSetOfFlags::empty()); - } - - #[test] - fn test_operators() { - let e1 = Flags::FlagA | Flags::FlagC; - let e2 = Flags::FlagB | Flags::FlagC; - assert!((e1 | e2) == Flags::FlagABC); // union - assert!((e1 & e2) == Flags::FlagC); // intersection - assert!((e1 - e2) == Flags::FlagA); // set difference - assert!(!e2 == Flags::FlagA); // set 
complement - assert!(e1 ^ e2 == Flags::FlagA | Flags::FlagB); // toggle - let mut e3 = e1; - e3.toggle(e2); - assert!(e3 == Flags::FlagA | Flags::FlagB); - - let mut m4 = AnotherSetOfFlags::empty(); - m4.toggle(AnotherSetOfFlags::empty()); - assert!(m4 == AnotherSetOfFlags::empty()); - } - - #[test] - fn test_lt() { - let mut a = Flags::empty(); - let mut b = Flags::empty(); - - assert!(!(a < b) && !(b < a)); - b = Flags::FlagB; - assert!(a < b); - a = Flags::FlagC; - assert!(!(a < b) && b < a); - b = Flags::FlagC | Flags::FlagB; - assert!(a < b); - } - - #[test] - fn test_ord() { - let mut a = Flags::empty(); - let mut b = Flags::empty(); - - assert!(a <= b && a >= b); - a = Flags::FlagA; - assert!(a > b && a >= b); - assert!(b < a && b <= a); - b = Flags::FlagB; - assert!(b > a && b >= a); - assert!(a < b && a <= b); - } - - #[test] - fn test_hash() { - let mut x = Flags::empty(); - let mut y = Flags::empty(); - assert!(hash(&x) == hash(&y)); - x = Flags::all(); - y = Flags::FlagABC; - assert!(hash(&x) == hash(&y)); - } - - fn hash(t: &T) -> u64 { - let mut s = DefaultHasher::new(); - t.hash(&mut s); - s.finish() - } -} diff --git a/src/librustc_borrowck/borrowck/check_loans.rs b/src/librustc_borrowck/borrowck/check_loans.rs index 4058f3198a..fea662e21f 100644 --- a/src/librustc_borrowck/borrowck/check_loans.rs +++ b/src/librustc_borrowck/borrowck/check_loans.rs @@ -103,7 +103,8 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { debug!("consume(consume_id={}, cmt={:?}, mode={:?})", consume_id, cmt, mode); - self.consume_common(consume_id, consume_span, cmt, mode); + let hir_id = self.tcx().hir.node_to_hir_id(consume_id); + self.consume_common(hir_id.local_id, consume_span, cmt, mode); } fn matched_pat(&mut self, @@ -120,7 +121,7 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { cmt, mode); - self.consume_common(consume_pat.id, consume_pat.span, cmt, mode); + self.consume_common(consume_pat.hir_id.local_id, consume_pat.span, cmt, mode); } fn borrow(&mut self, @@ -136,15 +137,16 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { borrow_id, cmt, loan_region, bk, loan_cause); + let hir_id = self.tcx().hir.node_to_hir_id(borrow_id); if let Some(lp) = opt_loan_path(&cmt) { let moved_value_use_kind = match loan_cause { euv::ClosureCapture(_) => MovedInCapture, _ => MovedInUse, }; - self.check_if_path_is_moved(borrow_id, borrow_span, moved_value_use_kind, &lp); + self.check_if_path_is_moved(hir_id.local_id, borrow_span, moved_value_use_kind, &lp); } - self.check_for_conflicting_loans(borrow_id); + self.check_for_conflicting_loans(hir_id.local_id); } fn mutate(&mut self, @@ -163,7 +165,8 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { // have to be *FULLY* initialized, but we still // must be careful lest it contains derefs of // pointers. - self.check_if_assigned_path_is_moved(assignee_cmt.id, + let hir_id = self.tcx().hir.node_to_hir_id(assignee_cmt.id); + self.check_if_assigned_path_is_moved(hir_id.local_id, assignment_span, MovedInUse, &lp); @@ -172,14 +175,16 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { // In a case like `path += 1`, then path must be // fully initialized, since we will read it before // we write it. 
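The check_loans.rs hunks in this region move borrowck bookkeeping from `ast::NodeId` to the HIR-relative `hir::ItemLocalId`. The conversion pattern, restated from the hunks with comments (rustc-internal types, shown as a fragment rather than standalone code):

    // As in the `consume`/`borrow`/`mutate` hunks: translate the AST node id into
    // its HirId and index the borrowck data structures by the item-local part.
    let hir_id = self.tcx().hir.node_to_hir_id(consume_id);        // ast::NodeId -> hir::HirId
    self.consume_common(hir_id.local_id, consume_span, cmt, mode); // now takes hir::ItemLocalId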
- self.check_if_path_is_moved(assignee_cmt.id, + let hir_id = self.tcx().hir.node_to_hir_id(assignee_cmt.id); + self.check_if_path_is_moved(hir_id.local_id, assignment_span, MovedInUse, &lp); } } } - self.check_assignment(assignment_id, assignment_span, assignee_cmt); + self.check_assignment(self.tcx().hir.node_to_hir_id(assignment_id).local_id, + assignment_span, assignee_cmt); } fn decl_without_init(&mut self, _id: ast::NodeId, _span: Span) { } @@ -201,7 +206,7 @@ pub fn check_loans<'a, 'b, 'c, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, all_loans, param_env, }; - euv::ExprUseVisitor::new(&mut clcx, bccx.tcx, param_env, &bccx.region_maps, bccx.tables) + euv::ExprUseVisitor::new(&mut clcx, bccx.tcx, param_env, &bccx.region_scope_tree, bccx.tables) .consume_body(body); } @@ -220,7 +225,7 @@ fn compatible_borrow_kinds(borrow_kind1: ty::BorrowKind, impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { self.bccx.tcx } - pub fn each_issued_loan(&self, node: ast::NodeId, mut op: F) -> bool where + pub fn each_issued_loan(&self, node: hir::ItemLocalId, mut op: F) -> bool where F: FnMut(&Loan<'tcx>) -> bool, { //! Iterates over each loan that has been issued @@ -235,14 +240,14 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { }) } - pub fn each_in_scope_loan(&self, scope: region::CodeExtent, mut op: F) -> bool where + pub fn each_in_scope_loan(&self, scope: region::Scope, mut op: F) -> bool where F: FnMut(&Loan<'tcx>) -> bool, { //! Like `each_issued_loan()`, but only considers loans that are //! currently in scope. - self.each_issued_loan(scope.node_id(), |loan| { - if self.bccx.region_maps.is_subscope_of(scope, loan.kill_scope) { + self.each_issued_loan(scope.item_local_id(), |loan| { + if self.bccx.region_scope_tree.is_subscope_of(scope, loan.kill_scope) { op(loan) } else { true @@ -251,7 +256,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { } fn each_in_scope_loan_affecting_path(&self, - scope: region::CodeExtent, + scope: region::Scope, loan_path: &LoanPath<'tcx>, mut op: F) -> bool where @@ -325,7 +330,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { return true; } - pub fn loans_generated_by(&self, node: ast::NodeId) -> Vec { + pub fn loans_generated_by(&self, node: hir::ItemLocalId) -> Vec { //! Returns a vector of the loans that are generated as //! we enter `node`. @@ -337,7 +342,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { return result; } - pub fn check_for_conflicting_loans(&self, node: ast::NodeId) { + pub fn check_for_conflicting_loans(&self, node: hir::ItemLocalId) { //! Checks to see whether any of the loans that are issued //! on entrance to `node` conflict with loans that have already been //! issued when we enter `node` (for example, we do not @@ -381,7 +386,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { new_loan); // Should only be called for loans that are in scope at the same time. - assert!(self.bccx.region_maps.scopes_intersect(old_loan.kill_scope, + assert!(self.bccx.region_scope_tree.scopes_intersect(old_loan.kill_scope, new_loan.kill_scope)); self.report_error_if_loan_conflicts_with_restriction( @@ -462,105 +467,32 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { // 3. Where does old loan expire. 
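These hunks also replace `region::CodeExtent` with `region::Scope` and `bccx.region_maps` with `bccx.region_scope_tree`; a scope's span is now obtained from the scope tree rather than from the HIR map. A condensed before/after, restated from the hunks (the closure body is a placeholder):

    // Before (removed lines):
    //   let previous_end_span =
    //       self.tcx().hir.span(old_loan.kill_scope.node_id()).end_point();
    //   self.each_in_scope_loan_affecting_path(
    //       region::CodeExtent::Misc(expr_id), use_path, |loan| { ... });
    // After (added lines):
    let previous_end_span =
        old_loan.kill_scope.span(self.tcx(), &self.bccx.region_scope_tree).end_point();
    self.each_in_scope_loan_affecting_path(region::Scope::Node(expr_id), use_path, |loan| {
        // `expr_id` is a hir::ItemLocalId here; loan kill scopes are region::Scope
        // values resolved against the per-body region_scope_tree.
        true
    });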
let previous_end_span = - self.tcx().hir.span(old_loan.kill_scope.node_id()).end_point(); + old_loan.kill_scope.span(self.tcx(), &self.bccx.region_scope_tree).end_point(); let mut err = match (new_loan.kind, old_loan.kind) { - (ty::MutBorrow, ty::MutBorrow) => { - let mut err = self.bccx.cannot_mutably_borrow_multiply( - new_loan.span, &nl, &new_loan_msg, Origin::Ast); - - if new_loan.span == old_loan.span { - // Both borrows are happening in the same place - // Meaning the borrow is occurring in a loop - err.span_label( - new_loan.span, - format!("mutable borrow starts here in previous \ - iteration of loop{}", new_loan_msg)); - err.span_label( - previous_end_span, - "mutable borrow ends here"); - err - } else { - err.span_label( - old_loan.span, - format!("first mutable borrow occurs here{}", old_loan_msg)); - err.span_label( - new_loan.span, - format!("second mutable borrow occurs here{}", new_loan_msg)); - err.span_label( - previous_end_span, - "first borrow ends here"); - err - } - } - - (ty::UniqueImmBorrow, ty::UniqueImmBorrow) => { - let mut err = self.bccx.cannot_uniquely_borrow_by_two_closures( - new_loan.span, &nl, Origin::Ast); - err.span_label( - old_loan.span, - "first closure is constructed here"); - err.span_label( - new_loan.span, - "second closure is constructed here"); - err.span_label( - previous_end_span, - "borrow from first closure ends here"); - err - } - - (ty::UniqueImmBorrow, _) => { - let mut err = self.bccx.cannot_uniquely_borrow_by_one_closure( - new_loan.span, &nl, &ol_pronoun, &old_loan_msg, Origin::Ast); - err.span_label( - new_loan.span, - format!("closure construction occurs here{}", new_loan_msg)); - err.span_label( - old_loan.span, - format!("borrow occurs here{}", old_loan_msg)); - err.span_label( - previous_end_span, - "borrow ends here"); - err - } - + (ty::MutBorrow, ty::MutBorrow) => + self.bccx.cannot_mutably_borrow_multiply( + new_loan.span, &nl, &new_loan_msg, old_loan.span, &old_loan_msg, + previous_end_span, Origin::Ast), + (ty::UniqueImmBorrow, ty::UniqueImmBorrow) => + self.bccx.cannot_uniquely_borrow_by_two_closures( + new_loan.span, &nl, old_loan.span, previous_end_span, Origin::Ast), + (ty::UniqueImmBorrow, _) => + self.bccx.cannot_uniquely_borrow_by_one_closure( + new_loan.span, &nl, &new_loan_msg, + old_loan.span, &ol_pronoun, &old_loan_msg, previous_end_span, Origin::Ast), (_, ty::UniqueImmBorrow) => { let new_loan_str = &new_loan.kind.to_user_str(); - let mut err = self.bccx.cannot_reborrow_already_uniquely_borrowed( - new_loan.span, &nl, &new_loan_msg, new_loan_str, Origin::Ast); - err.span_label( - new_loan.span, - format!("borrow occurs here{}", new_loan_msg)); - err.span_label( - old_loan.span, - format!("closure construction occurs here{}", old_loan_msg)); - err.span_label( - previous_end_span, - "borrow from closure ends here"); - err + self.bccx.cannot_reborrow_already_uniquely_borrowed( + new_loan.span, &nl, &new_loan_msg, new_loan_str, + old_loan.span, &old_loan_msg, previous_end_span, Origin::Ast) } - - (..) => { - let mut err = self.bccx.cannot_reborrow_already_borrowed( + (..) 
=> + self.bccx.cannot_reborrow_already_borrowed( new_loan.span, &nl, &new_loan_msg, &new_loan.kind.to_user_str(), - &ol_pronoun, &old_loan.kind.to_user_str(), &old_loan_msg, Origin::Ast); - err.span_label( - new_loan.span, - format!("{} borrow occurs here{}", - new_loan.kind.to_user_str(), - new_loan_msg)); - err.span_label( - old_loan.span, - format!("{} borrow occurs here{}", - old_loan.kind.to_user_str(), - old_loan_msg)); - err.span_label( - previous_end_span, - format!("{} borrow ends here", - old_loan.kind.to_user_str())); - err - } + old_loan.span, &ol_pronoun, &old_loan.kind.to_user_str(), &old_loan_msg, + previous_end_span, Origin::Ast) }; match new_loan.cause { @@ -590,7 +522,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { } fn consume_common(&self, - id: ast::NodeId, + id: hir::ItemLocalId, span: Span, cmt: mc::cmt<'tcx>, mode: euv::ConsumeMode) { @@ -628,28 +560,24 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { } fn check_for_copy_of_frozen_path(&self, - id: ast::NodeId, + id: hir::ItemLocalId, span: Span, copy_path: &LoanPath<'tcx>) { match self.analyze_restrictions_on_use(id, copy_path, ty::ImmBorrow) { UseOk => { } UseWhileBorrowed(loan_path, loan_span) => { let desc = self.bccx.loan_path_to_string(copy_path); - self.bccx.cannot_use_when_mutably_borrowed(span, &desc, Origin::Ast) - .span_label(loan_span, - format!("borrow of `{}` occurs here", - &self.bccx.loan_path_to_string(&loan_path)) - ) - .span_label(span, - format!("use of borrowed `{}`", - &self.bccx.loan_path_to_string(&loan_path))) + self.bccx.cannot_use_when_mutably_borrowed( + span, &desc, + loan_span, &self.bccx.loan_path_to_string(&loan_path), + Origin::Ast) .emit(); } } } fn check_for_move_of_borrowed_path(&self, - id: ast::NodeId, + id: hir::ItemLocalId, span: Span, move_path: &LoanPath<'tcx>, move_kind: move_data::MoveKind) { @@ -699,18 +627,17 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { } pub fn analyze_restrictions_on_use(&self, - expr_id: ast::NodeId, + expr_id: hir::ItemLocalId, use_path: &LoanPath<'tcx>, borrow_kind: ty::BorrowKind) -> UseError<'tcx> { - debug!("analyze_restrictions_on_use(expr_id={}, use_path={:?})", - self.tcx().hir.node_to_string(expr_id), - use_path); + debug!("analyze_restrictions_on_use(expr_id={:?}, use_path={:?})", + expr_id, use_path); let mut ret = UseOk; self.each_in_scope_loan_affecting_path( - region::CodeExtent::Misc(expr_id), use_path, |loan| { + region::Scope::Node(expr_id), use_path, |loan| { if !compatible_borrow_kinds(loan.kind, borrow_kind) { ret = UseWhileBorrowed(loan.loan_path.clone(), loan.span); false @@ -725,11 +652,11 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { /// Reports an error if `expr` (which should be a path) /// is using a moved/uninitialized value fn check_if_path_is_moved(&self, - id: ast::NodeId, + id: hir::ItemLocalId, span: Span, use_kind: MovedValueUseKind, lp: &Rc>) { - debug!("check_if_path_is_moved(id={}, use_kind={:?}, lp={:?})", + debug!("check_if_path_is_moved(id={:?}, use_kind={:?}, lp={:?})", id, use_kind, lp); // FIXME (22079): if you find yourself tempted to cut and paste @@ -772,7 +699,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { /// (*p).x = 22; // not ok, p is uninitialized, can't deref /// ``` fn check_if_assigned_path_is_moved(&self, - id: ast::NodeId, + id: hir::ItemLocalId, span: Span, use_kind: MovedValueUseKind, lp: &Rc>) @@ -822,14 +749,14 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { } fn check_assignment(&self, - assignment_id: ast::NodeId, + assignment_id: hir::ItemLocalId, assignment_span: Span, assignee_cmt: mc::cmt<'tcx>) { 
debug!("check_assignment(assignee_cmt={:?})", assignee_cmt); // Check that we don't invalidate any outstanding loans if let Some(loan_path) = opt_loan_path(&assignee_cmt) { - let scope = region::CodeExtent::Misc(assignment_id); + let scope = region::Scope::Node(assignment_id); self.each_in_scope_loan_affecting_path(scope, &loan_path, |loan| { self.report_illegal_mutation(assignment_span, &loan_path, loan); false @@ -861,13 +788,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { loan_path: &LoanPath<'tcx>, loan: &Loan) { self.bccx.cannot_assign_to_borrowed( - span, &self.bccx.loan_path_to_string(loan_path), Origin::Ast) - .span_label(loan.span, - format!("borrow of `{}` occurs here", - self.bccx.loan_path_to_string(loan_path))) - .span_label(span, - format!("assignment to borrowed `{}` occurs here", - self.bccx.loan_path_to_string(loan_path))) + span, loan.span, &self.bccx.loan_path_to_string(loan_path), Origin::Ast) .emit(); } } diff --git a/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs b/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs index 7878762788..465457f5ab 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs @@ -27,7 +27,7 @@ use rustc::hir::*; use rustc::hir::map::Node::*; struct GatherMoveInfo<'tcx> { - id: ast::NodeId, + id: hir::ItemLocalId, kind: MoveKind, cmt: mc::cmt<'tcx>, span_path_opt: Option> @@ -79,13 +79,14 @@ pub fn gather_decl<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, var_id: ast::NodeId, var_ty: Ty<'tcx>) { let loan_path = Rc::new(LoanPath::new(LpVar(var_id), var_ty)); - move_data.add_move(bccx.tcx, loan_path, var_id, Declared); + let hir_id = bccx.tcx.hir.node_to_hir_id(var_id); + move_data.add_move(bccx.tcx, loan_path, hir_id.local_id, Declared); } pub fn gather_move_from_expr<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, move_data: &MoveData<'tcx>, move_error_collector: &mut MoveErrorCollector<'tcx>, - move_expr_id: ast::NodeId, + move_expr_id: hir::ItemLocalId, cmt: mc::cmt<'tcx>, move_reason: euv::MoveReason) { let kind = match move_reason { @@ -118,7 +119,7 @@ pub fn gather_move_from_pat<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, _ => None, }; let move_info = GatherMoveInfo { - id: move_pat.id, + id: move_pat.hir_id.local_id, kind: MovePat, cmt, span_path_opt: pat_span_path_opt, @@ -135,7 +136,7 @@ fn gather_move<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, move_data: &MoveData<'tcx>, move_error_collector: &mut MoveErrorCollector<'tcx>, move_info: GatherMoveInfo<'tcx>) { - debug!("gather_move(move_id={}, cmt={:?})", + debug!("gather_move(move_id={:?}, cmt={:?})", move_info.id, move_info.cmt); let potentially_illegal_move = @@ -161,10 +162,10 @@ fn gather_move<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, pub fn gather_assignment<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, move_data: &MoveData<'tcx>, - assignment_id: ast::NodeId, + assignment_id: hir::ItemLocalId, assignment_span: Span, assignee_loan_path: Rc>, - assignee_id: ast::NodeId, + assignee_id: hir::ItemLocalId, mode: euv::MutateMode) { move_data.add_assignment(bccx.tcx, assignee_loan_path, diff --git a/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs b/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs index 22de3c7591..66aaafb77f 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs @@ -24,7 +24,7 @@ use syntax_pos::Span; type R = Result<(),()>; pub fn guarantee_lifetime<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, - item_scope: 
region::CodeExtent, + item_scope: region::Scope, span: Span, cause: euv::LoanCause, cmt: mc::cmt<'tcx>, @@ -52,7 +52,7 @@ struct GuaranteeLifetimeContext<'a, 'tcx: 'a> { bccx: &'a BorrowckCtxt<'a, 'tcx>, // the scope of the function body for the enclosing item - item_scope: region::CodeExtent, + item_scope: region::Scope, span: Span, cause: euv::LoanCause, @@ -115,8 +115,9 @@ impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> { self.bccx.tcx.mk_region(ty::ReScope(self.item_scope)) } Categorization::Local(local_id) => { + let hir_id = self.bccx.tcx.hir.node_to_hir_id(local_id); self.bccx.tcx.mk_region(ty::ReScope( - self.bccx.region_maps.var_scope(local_id))) + self.bccx.region_scope_tree.var_scope(hir_id.local_id))) } Categorization::StaticItem | Categorization::Deref(_, mc::UnsafePtr(..)) => { diff --git a/src/librustc_borrowck/borrowck/gather_loans/mod.rs b/src/librustc_borrowck/borrowck/gather_loans/mod.rs index 00ebf5de44..a58b62ba2a 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/mod.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/mod.rs @@ -43,14 +43,13 @@ pub fn gather_loans_in_fn<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, let mut glcx = GatherLoanCtxt { bccx, all_loans: Vec::new(), - item_ub: region::CodeExtent::Misc(body.node_id), - move_data: MoveData::new(), + item_ub: region::Scope::Node(bccx.tcx.hir.body(body).value.hir_id.local_id), + move_data: MoveData::default(), move_error_collector: move_error::MoveErrorCollector::new(), }; - let body = glcx.bccx.tcx.hir.body(body); - euv::ExprUseVisitor::new(&mut glcx, bccx.tcx, param_env, &bccx.region_maps, bccx.tables) - .consume_body(body); + euv::ExprUseVisitor::new(&mut glcx, bccx.tcx, param_env, &bccx.region_scope_tree, bccx.tables) + .consume_body(bccx.body); glcx.report_potential_errors(); let GatherLoanCtxt { all_loans, move_data, .. } = glcx; @@ -64,7 +63,7 @@ struct GatherLoanCtxt<'a, 'tcx: 'a> { all_loans: Vec>, /// `item_ub` is used as an upper-bound on the lifetime whenever we /// ask for the scope of an expression categorized as an upvar. - item_ub: region::CodeExtent, + item_ub: region::Scope, } impl<'a, 'tcx> euv::Delegate<'tcx> for GatherLoanCtxt<'a, 'tcx> { @@ -80,7 +79,7 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for GatherLoanCtxt<'a, 'tcx> { euv::Move(move_reason) => { gather_moves::gather_move_from_expr( self.bccx, &self.move_data, &mut self.move_error_collector, - consume_id, cmt, move_reason); + self.bccx.tcx.hir.node_to_hir_id(consume_id).local_id, cmt, move_reason); } euv::Copy => { } } @@ -127,8 +126,8 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for GatherLoanCtxt<'a, 'tcx> { bk={:?}, loan_cause={:?})", borrow_id, cmt, loan_region, bk, loan_cause); - - self.guarantee_valid(borrow_id, + let hir_id = self.bccx.tcx.hir.node_to_hir_id(borrow_id); + self.guarantee_valid(hir_id.local_id, borrow_span, cmt, bk, @@ -274,8 +273,12 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { self.mark_loan_path_as_mutated(&lp); } gather_moves::gather_assignment(self.bccx, &self.move_data, - assignment_id, assignment_span, - lp, cmt.id, mode); + self.bccx.tcx.hir.node_to_hir_id(assignment_id) + .local_id, + assignment_span, + lp, + self.bccx.tcx.hir.node_to_hir_id(cmt.id).local_id, + mode); } None => { // This can occur with e.g. `*foo() = 5`. In such @@ -289,13 +292,13 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { /// reports an error. This may entail taking out loans, which will be added to the /// `req_loan_map`. 
fn guarantee_valid(&mut self, - borrow_id: ast::NodeId, + borrow_id: hir::ItemLocalId, borrow_span: Span, cmt: mc::cmt<'tcx>, req_kind: ty::BorrowKind, loan_region: ty::Region<'tcx>, cause: euv::LoanCause) { - debug!("guarantee_valid(borrow_id={}, cmt={:?}, \ + debug!("guarantee_valid(borrow_id={:?}, cmt={:?}, \ req_mutbl={:?}, loan_region={:?})", borrow_id, cmt, @@ -348,11 +351,11 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { ty::ReScope(scope) => scope, ty::ReEarlyBound(ref br) => { - self.bccx.region_maps.early_free_extent(self.tcx(), br) + self.bccx.region_scope_tree.early_free_scope(self.tcx(), br) } ty::ReFree(ref fr) => { - self.bccx.region_maps.free_extent(self.tcx(), fr) + self.bccx.region_scope_tree.free_scope(self.tcx(), fr) } ty::ReStatic => self.item_ub, @@ -370,7 +373,7 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { }; debug!("loan_scope = {:?}", loan_scope); - let borrow_scope = region::CodeExtent::Misc(borrow_id); + let borrow_scope = region::Scope::Node(borrow_id); let gen_scope = self.compute_gen_scope(borrow_scope, loan_scope); debug!("gen_scope = {:?}", gen_scope); @@ -394,7 +397,7 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { } }; - debug!("guarantee_valid(borrow_id={}), loan={:?}", + debug!("guarantee_valid(borrow_id={:?}), loan={:?}", borrow_id, loan); // let loan_path = loan.loan_path; @@ -444,7 +447,7 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { None } LpUpvar(ty::UpvarId{ var_id, closure_expr_id: _ }) => { - let local_id = self.tcx().hir.def_index_to_node_id(var_id); + let local_id = self.tcx().hir.hir_to_node_id(var_id); self.tcx().used_mut_nodes.borrow_mut().insert(local_id); None } @@ -470,23 +473,23 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { } pub fn compute_gen_scope(&self, - borrow_scope: region::CodeExtent, - loan_scope: region::CodeExtent) - -> region::CodeExtent { + borrow_scope: region::Scope, + loan_scope: region::Scope) + -> region::Scope { //! Determine when to introduce the loan. Typically the loan //! is introduced at the point of the borrow, but in some cases, //! notably method arguments, the loan may be introduced only //! later, once it comes into scope. - if self.bccx.region_maps.is_subscope_of(borrow_scope, loan_scope) { + if self.bccx.region_scope_tree.is_subscope_of(borrow_scope, loan_scope) { borrow_scope } else { loan_scope } } - pub fn compute_kill_scope(&self, loan_scope: region::CodeExtent, lp: &LoanPath<'tcx>) - -> region::CodeExtent { + pub fn compute_kill_scope(&self, loan_scope: region::Scope, lp: &LoanPath<'tcx>) + -> region::Scope { //! Determine when the loan restrictions go out of scope. //! This is either when the lifetime expires or when the //! local variable which roots the loan-path goes out of scope, @@ -509,10 +512,10 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { //! do not require restrictions and hence do not cause a loan. 
let lexical_scope = lp.kill_scope(self.bccx); - if self.bccx.region_maps.is_subscope_of(lexical_scope, loan_scope) { + if self.bccx.region_scope_tree.is_subscope_of(lexical_scope, loan_scope) { lexical_scope } else { - assert!(self.bccx.region_maps.is_subscope_of(loan_scope, lexical_scope)); + assert!(self.bccx.region_scope_tree.is_subscope_of(loan_scope, lexical_scope)); loan_scope } } diff --git a/src/librustc_borrowck/borrowck/gather_loans/move_error.rs b/src/librustc_borrowck/borrowck/gather_loans/move_error.rs index 57b92eb8f8..1f2b917bdb 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/move_error.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/move_error.rs @@ -14,6 +14,7 @@ use rustc::middle::mem_categorization::Categorization; use rustc::middle::mem_categorization::NoteClosureEnv; use rustc::middle::mem_categorization::InteriorOffsetKind as Kind; use rustc::ty; +use rustc_mir::util::borrowck_errors::{BorrowckErrors, Origin}; use syntax::ast; use syntax_pos; use errors::DiagnosticBuilder; @@ -93,7 +94,7 @@ fn report_move_errors<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, errors: &Vec(errors: &Vec>) } // (keep in sync with gather_moves::check_and_get_illegal_move_origin ) -fn report_cannot_move_out_of<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, +fn report_cannot_move_out_of<'a, 'tcx>(bccx: &'a BorrowckCtxt<'a, 'tcx>, move_from: mc::cmt<'tcx>) -> DiagnosticBuilder<'a> { match move_from.cat { @@ -142,43 +143,21 @@ fn report_cannot_move_out_of<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, Categorization::Deref(_, mc::Implicit(..)) | Categorization::Deref(_, mc::UnsafePtr(..)) | Categorization::StaticItem => { - let mut err = struct_span_err!(bccx, move_from.span, E0507, - "cannot move out of {}", - move_from.descriptive_string(bccx.tcx)); - err.span_label( - move_from.span, - format!("cannot move out of {}", move_from.descriptive_string(bccx.tcx)) - ); - err + bccx.cannot_move_out_of( + move_from.span, &move_from.descriptive_string(bccx.tcx), Origin::Ast) } - Categorization::Interior(ref b, mc::InteriorElement(ik)) => { - let type_name = match (&b.ty.sty, ik) { - (&ty::TyArray(_, _), Kind::Index) => "array", - (&ty::TySlice(_), _) => "slice", - _ => { - span_bug!(move_from.span, "this path should not cause illegal move"); - }, - }; - let mut err = struct_span_err!(bccx, move_from.span, E0508, - "cannot move out of type `{}`, \ - a non-copy {}", - b.ty, type_name); - err.span_label(move_from.span, "cannot move out of here"); - err + bccx.cannot_move_out_of_interior_noncopy( + move_from.span, b.ty, ik == Kind::Index, Origin::Ast) } Categorization::Downcast(ref b, _) | Categorization::Interior(ref b, mc::InteriorField(_)) => { match b.ty.sty { ty::TyAdt(def, _) if def.has_dtor(bccx.tcx) => { - let mut err = struct_span_err!(bccx, move_from.span, E0509, - "cannot move out of type `{}`, \ - which implements the `Drop` trait", - b.ty); - err.span_label(move_from.span, "cannot move out of here"); - err - }, + bccx.cannot_move_out_of_interior_of_drop( + move_from.span, b.ty, Origin::Ast) + } _ => { span_bug!(move_from.span, "this path should not cause illegal move"); } diff --git a/src/librustc_borrowck/borrowck/mod.rs b/src/librustc_borrowck/borrowck/mod.rs index 38336655f2..a3f1340d42 100644 --- a/src/librustc_borrowck/borrowck/mod.rs +++ b/src/librustc_borrowck/borrowck/mod.rs @@ -32,11 +32,10 @@ use rustc::middle::expr_use_visitor as euv; use rustc::middle::mem_categorization as mc; use rustc::middle::mem_categorization::Categorization; use 
rustc::middle::mem_categorization::ImmutabilityBlame; -use rustc::middle::region::{self, RegionMaps}; +use rustc::middle::region; use rustc::middle::free_region::RegionRelations; -use rustc::ty::{self, TyCtxt}; +use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::maps::Providers; - use rustc_mir::util::borrowck_errors::{BorrowckErrors, Origin}; use std::fmt; @@ -99,10 +98,9 @@ fn borrowck<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, owner_def_id: DefId) { let body_id = tcx.hir.body_owned_by(owner_id); let tables = tcx.typeck_tables_of(owner_def_id); - let region_maps = tcx.region_maps(owner_def_id); - let bccx = &mut BorrowckCtxt { tcx, tables, region_maps, owner_def_id }; - - let body = bccx.tcx.hir.body(body_id); + let region_scope_tree = tcx.region_scope_tree(owner_def_id); + let body = tcx.hir.body(body_id); + let bccx = &mut BorrowckCtxt { tcx, tables, region_scope_tree, owner_def_id, body }; // Eventually, borrowck will always read the MIR, but at the // moment we do not. So, for now, we always force MIR to be @@ -139,10 +137,9 @@ fn build_borrowck_dataflow_data<'a, 'c, 'tcx, F>(this: &mut BorrowckCtxt<'a, 'tc { // Check the body of fn items. let tcx = this.tcx; - let body = tcx.hir.body(body_id); let id_range = { let mut visitor = intravisit::IdRangeComputingVisitor::new(&tcx.hir); - visitor.visit_body(body); + visitor.visit_body(this.body); visitor.result() }; let (all_loans, move_data) = @@ -163,24 +160,25 @@ fn build_borrowck_dataflow_data<'a, 'c, 'tcx, F>(this: &mut BorrowckCtxt<'a, 'tc let mut loan_dfcx = DataFlowContext::new(this.tcx, "borrowck", - Some(body), + Some(this.body), cfg, LoanDataFlowOperator, id_range, all_loans.len()); for (loan_idx, loan) in all_loans.iter().enumerate() { - loan_dfcx.add_gen(loan.gen_scope.node_id(), loan_idx); + loan_dfcx.add_gen(loan.gen_scope.item_local_id(), loan_idx); loan_dfcx.add_kill(KillFrom::ScopeEnd, - loan.kill_scope.node_id(), loan_idx); + loan.kill_scope.item_local_id(), + loan_idx); } loan_dfcx.add_kills_from_flow_exits(cfg); - loan_dfcx.propagate(cfg, body); + loan_dfcx.propagate(cfg, this.body); let flowed_moves = move_data::FlowedMoveData::new(move_data, this, cfg, id_range, - body); + this.body); Some(AnalysisData { all_loans, loans: loan_dfcx, @@ -198,8 +196,9 @@ pub fn build_borrowck_dataflow_data_for_fn<'a, 'tcx>( let owner_id = tcx.hir.body_owner(body_id); let owner_def_id = tcx.hir.local_def_id(owner_id); let tables = tcx.typeck_tables_of(owner_def_id); - let region_maps = tcx.region_maps(owner_def_id); - let mut bccx = BorrowckCtxt { tcx, tables, region_maps, owner_def_id }; + let region_scope_tree = tcx.region_scope_tree(owner_def_id); + let body = tcx.hir.body(body_id); + let mut bccx = BorrowckCtxt { tcx, tables, region_scope_tree, owner_def_id, body }; let dataflow_data = build_borrowck_dataflow_data(&mut bccx, true, body_id, |_| cfg); (bccx, dataflow_data.unwrap()) @@ -215,9 +214,11 @@ pub struct BorrowckCtxt<'a, 'tcx: 'a> { // Some in `borrowck_fn` and cleared later tables: &'a ty::TypeckTables<'tcx>, - region_maps: Rc, + region_scope_tree: Rc, owner_def_id: DefId, + + body: &'tcx hir::Body, } impl<'b, 'tcx: 'b> BorrowckErrors for BorrowckCtxt<'b, 'tcx> { @@ -254,13 +255,13 @@ pub struct Loan<'tcx> { /// cases, notably method arguments, the loan may be introduced /// only later, once it comes into scope. See also /// `GatherLoanCtxt::compute_gen_scope`. - gen_scope: region::CodeExtent, + gen_scope: region::Scope, /// kill_scope indicates when the loan goes out of scope. 
This is /// either when the lifetime expires or when the local variable /// which roots the loan-path goes out of scope, whichever happens /// faster. See also `GatherLoanCtxt::compute_kill_scope`. - kill_scope: region::CodeExtent, + kill_scope: region::Scope, span: Span, cause: euv::LoanCause, } @@ -274,7 +275,7 @@ impl<'tcx> Loan<'tcx> { #[derive(Eq)] pub struct LoanPath<'tcx> { kind: LoanPathKind<'tcx>, - ty: ty::Ty<'tcx>, + ty: Ty<'tcx>, } impl<'tcx> PartialEq for LoanPath<'tcx> { @@ -298,11 +299,11 @@ pub enum LoanPathKind<'tcx> { } impl<'tcx> LoanPath<'tcx> { - fn new(kind: LoanPathKind<'tcx>, ty: ty::Ty<'tcx>) -> LoanPath<'tcx> { + fn new(kind: LoanPathKind<'tcx>, ty: Ty<'tcx>) -> LoanPath<'tcx> { LoanPath { kind: kind, ty: ty } } - fn to_type(&self) -> ty::Ty<'tcx> { self.ty } + fn to_type(&self) -> Ty<'tcx> { self.ty } } // FIXME (pnkfelix): See discussion here @@ -349,7 +350,7 @@ fn closure_to_block(closure_id: DefIndex, let closure_id = tcx.hir.def_index_to_node_id(closure_id); match tcx.hir.get(closure_id) { hir_map::NodeExpr(expr) => match expr.node { - hir::ExprClosure(.., body_id, _) => { + hir::ExprClosure(.., body_id, _, _) => { body_id.node_id } _ => { @@ -361,12 +362,16 @@ fn closure_to_block(closure_id: DefIndex, } impl<'a, 'tcx> LoanPath<'tcx> { - pub fn kill_scope(&self, bccx: &BorrowckCtxt<'a, 'tcx>) -> region::CodeExtent { + pub fn kill_scope(&self, bccx: &BorrowckCtxt<'a, 'tcx>) -> region::Scope { match self.kind { - LpVar(local_id) => bccx.region_maps.var_scope(local_id), + LpVar(local_id) => { + let hir_id = bccx.tcx.hir.node_to_hir_id(local_id); + bccx.region_scope_tree.var_scope(hir_id.local_id) + } LpUpvar(upvar_id) => { let block_id = closure_to_block(upvar_id.closure_expr_id, bccx.tcx); - region::CodeExtent::Misc(block_id) + let hir_id = bccx.tcx.hir.node_to_hir_id(block_id); + region::Scope::Node(hir_id.local_id) } LpDowncast(ref base, _) | LpExtend(ref base, ..) => base.kill_scope(bccx), @@ -530,7 +535,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { { let region_rels = RegionRelations::new(self.tcx, self.owner_def_id, - &self.region_maps, + &self.region_scope_tree, &self.tables.free_region_map); region_rels.is_subregion_of(r_sub, r_sup) } @@ -549,9 +554,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { _ => { } } - let mut db = self.bckerr_to_diag(&err); - self.note_and_explain_bckerr(&mut db, err); - db.emit(); + self.report_bckerr(&err); } pub fn report_use_of_moved_value(&self, @@ -641,19 +644,22 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { // Get type of value and span where it was previously // moved. 
+ let node_id = self.tcx.hir.hir_to_node_id(hir::HirId { + owner: self.body.value.hir_id.owner, + local_id: the_move.id + }); let (move_span, move_note) = match the_move.kind { move_data::Declared => { unreachable!(); } move_data::MoveExpr | - move_data::MovePat => - (self.tcx.hir.span(the_move.id), ""), + move_data::MovePat => (self.tcx.hir.span(node_id), ""), move_data::Captured => - (match self.tcx.hir.expect_expr(the_move.id).node { - hir::ExprClosure(.., fn_decl_span) => fn_decl_span, - ref r => bug!("Captured({}) maps to non-closure: {:?}", + (match self.tcx.hir.expect_expr(node_id).node { + hir::ExprClosure(.., fn_decl_span, _) => fn_decl_span, + ref r => bug!("Captured({:?}) maps to non-closure: {:?}", the_move.id, r), }, " (into closure)"), }; @@ -722,8 +728,12 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { self.tcx.sess.struct_span_err_with_code(s, msg, code) } - fn bckerr_to_diag(&self, err: &BckError<'tcx>) -> DiagnosticBuilder<'a> { - let span = err.span.clone(); + pub fn span_err_with_code>(&self, s: S, msg: &str, code: &str) { + self.tcx.sess.span_err_with_code(s, msg, code); + } + + fn report_bckerr(&self, err: &BckError<'tcx>) { + let error_span = err.span.clone(); match err.code { err_mutbl => { @@ -747,12 +757,12 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } }; - match err.cause { + let mut db = match err.cause { MutabilityViolation => { - struct_span_err!(self.tcx.sess, span, E0594, "cannot assign to {}", descr) + self.cannot_assign(error_span, &descr, Origin::Ast) } BorrowViolation(euv::ClosureCapture(_)) => { - struct_span_err!(self.tcx.sess, span, E0595, + struct_span_err!(self.tcx.sess, error_span, E0595, "closure cannot assign to {}", descr) } BorrowViolation(euv::OverloadedOperator) | @@ -762,30 +772,238 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { BorrowViolation(euv::AutoUnsafe) | BorrowViolation(euv::ForLoop) | BorrowViolation(euv::MatchDiscriminant) => { - struct_span_err!(self.tcx.sess, span, E0596, + struct_span_err!(self.tcx.sess, error_span, E0596, "cannot borrow {} as mutable", descr) } BorrowViolation(euv::ClosureInvocation) => { span_bug!(err.span, "err_mutbl with a closure invocation"); } - } + }; + + self.note_and_explain_mutbl_error(&mut db, &err, &error_span); + self.note_immutability_blame(&mut db, err.cmt.immutability_blame()); + db.emit(); } - err_out_of_scope(..) => { + err_out_of_scope(super_scope, sub_scope, cause) => { let msg = match opt_loan_path(&err.cmt) { None => "borrowed value".to_string(), Some(lp) => { format!("`{}`", self.loan_path_to_string(&lp)) } }; - struct_span_err!(self.tcx.sess, span, E0597, "{} does not live long enough", msg) + + // When you have a borrow that lives across a yield, + // that reference winds up captured in the generator + // type. Regionck then constraints it to live as long + // as the generator itself. If that borrow is borrowing + // data owned by the generator, this winds up resulting in + // an `err_out_of_scope` error: + // + // ``` + // { + // let g = || { + // let a = &3; // this borrow is forced to ... -+ + // yield (); // | + // println!("{}", a); // | + // }; // | + // } <----------------------... live until here --------+ + // ``` + // + // To detect this case, we look for cases where the + // `super_scope` (lifetime of the value) is within the + // body, but the `sub_scope` is not. 
+ debug!("err_out_of_scope: self.body.is_generator = {:?}", + self.body.is_generator); + let maybe_borrow_across_yield = if self.body.is_generator { + let body_scope = region::Scope::Node(self.body.value.hir_id.local_id); + debug!("err_out_of_scope: body_scope = {:?}", body_scope); + debug!("err_out_of_scope: super_scope = {:?}", super_scope); + debug!("err_out_of_scope: sub_scope = {:?}", sub_scope); + match (super_scope, sub_scope) { + (&ty::RegionKind::ReScope(value_scope), + &ty::RegionKind::ReScope(loan_scope)) => { + if { + // value_scope <= body_scope && + self.region_scope_tree.is_subscope_of(value_scope, body_scope) && + // body_scope <= loan_scope + self.region_scope_tree.is_subscope_of(body_scope, loan_scope) + } { + // We now know that this is a case + // that fits the bill described above: + // a borrow of something whose scope + // is within the generator, but the + // borrow is for a scope outside the + // generator. + // + // Now look within the scope of the of + // the value being borrowed (in the + // example above, that would be the + // block remainder that starts with + // `let a`) for a yield. We can cite + // that for the user. + self.region_scope_tree.yield_in_scope(value_scope) + } else { + None + } + } + _ => None, + } + } else { + None + }; + + if let Some((yield_span, _)) = maybe_borrow_across_yield { + debug!("err_out_of_scope: opt_yield_span = {:?}", yield_span); + struct_span_err!(self.tcx.sess, + error_span, + E0626, + "borrow may still be in use when generator yields") + .span_label(yield_span, "possible yield occurs here") + .emit(); + return; + } + + let mut db = struct_span_err!(self.tcx.sess, + error_span, + E0597, + "{} does not live long enough", + msg); + + let (value_kind, value_msg) = match err.cmt.cat { + mc::Categorization::Rvalue(..) => + ("temporary value", "temporary value created here"), + _ => + ("borrowed value", "borrow occurs here") + }; + + let is_closure = match cause { + euv::ClosureCapture(s) => { + // The primary span starts out as the closure creation point. + // Change the primary span here to highlight the use of the variable + // in the closure, because it seems more natural. Highlight + // closure creation point as a secondary span. 
+ match db.span.primary_span() { + Some(primary) => { + db.span = MultiSpan::from_span(s); + db.span_label(primary, "capture occurs here"); + db.span_label(s, "does not live long enough"); + true + } + None => false + } + } + _ => { + db.span_label(error_span, "does not live long enough"); + false + } + }; + + let sub_span = self.region_end_span(sub_scope); + let super_span = self.region_end_span(super_scope); + + match (sub_span, super_span) { + (Some(s1), Some(s2)) if s1 == s2 => { + if !is_closure { + db.span = MultiSpan::from_span(s1); + db.span_label(error_span, value_msg); + let msg = match opt_loan_path(&err.cmt) { + None => value_kind.to_string(), + Some(lp) => { + format!("`{}`", self.loan_path_to_string(&lp)) + } + }; + db.span_label(s1, + format!("{} dropped here while still borrowed", msg)); + } else { + db.span_label(s1, format!("{} dropped before borrower", value_kind)); + } + db.note("values in a scope are dropped in the opposite order \ + they are created"); + } + (Some(s1), Some(s2)) if !is_closure => { + db.span = MultiSpan::from_span(s2); + db.span_label(error_span, value_msg); + let msg = match opt_loan_path(&err.cmt) { + None => value_kind.to_string(), + Some(lp) => { + format!("`{}`", self.loan_path_to_string(&lp)) + } + }; + db.span_label(s2, format!("{} dropped here while still borrowed", msg)); + db.span_label(s1, format!("{} needs to live until here", value_kind)); + } + _ => { + match sub_span { + Some(s) => { + db.span_label(s, format!("{} needs to live until here", + value_kind)); + } + None => { + self.tcx.note_and_explain_region( + &self.region_scope_tree, + &mut db, + "borrowed value must be valid for ", + sub_scope, + "..."); + } + } + match super_span { + Some(s) => { + db.span_label(s, format!("{} only lives until here", value_kind)); + } + None => { + self.tcx.note_and_explain_region( + &self.region_scope_tree, + &mut db, + "...but borrowed value is only valid for ", + super_scope, + ""); + } + } + } + } + + if let ty::ReScope(scope) = *super_scope { + let node_id = scope.node_id(self.tcx, &self.region_scope_tree); + match self.tcx.hir.find(node_id) { + Some(hir_map::NodeStmt(_)) => { + db.note("consider using a `let` binding to increase its lifetime"); + } + _ => {} + } + } + + db.emit(); } - err_borrowed_pointer_too_short(..) 
=> { + err_borrowed_pointer_too_short(loan_scope, ptr_scope) => { let descr = self.cmt_to_path_or_string(&err.cmt); - struct_span_err!(self.tcx.sess, span, E0598, - "lifetime of {} is too short to guarantee \ - its contents can be safely reborrowed", - descr) + let mut db = struct_span_err!(self.tcx.sess, error_span, E0598, + "lifetime of {} is too short to guarantee \ + its contents can be safely reborrowed", + descr); + + let descr = match opt_loan_path(&err.cmt) { + Some(lp) => { + format!("`{}`", self.loan_path_to_string(&lp)) + } + None => self.cmt_to_string(&err.cmt), + }; + self.tcx.note_and_explain_region( + &self.region_scope_tree, + &mut db, + &format!("{} would have to be valid for ", + descr), + loan_scope, + "..."); + self.tcx.note_and_explain_region( + &self.region_scope_tree, + &mut db, + &format!("...but {} is only valid for ", descr), + ptr_scope, + ""); + + db.emit(); } } } @@ -1035,146 +1253,12 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { fn region_end_span(&self, region: ty::Region<'tcx>) -> Option { match *region { ty::ReScope(scope) => { - match scope.span(&self.tcx.hir) { - Some(s) => { - Some(s.end_point()) - } - None => { - None - } - } + Some(scope.span(self.tcx, &self.region_scope_tree).end_point()) } _ => None } } - fn note_and_explain_bckerr(&self, db: &mut DiagnosticBuilder, err: BckError<'tcx>) { - let error_span = err.span.clone(); - match err.code { - err_mutbl => { - self.note_and_explain_mutbl_error(db, &err, &error_span); - self.note_immutability_blame(db, err.cmt.immutability_blame()); - } - err_out_of_scope(super_scope, sub_scope, cause) => { - let (value_kind, value_msg) = match err.cmt.cat { - mc::Categorization::Rvalue(..) => - ("temporary value", "temporary value created here"), - _ => - ("borrowed value", "borrow occurs here") - }; - - let is_closure = match cause { - euv::ClosureCapture(s) => { - // The primary span starts out as the closure creation point. - // Change the primary span here to highlight the use of the variable - // in the closure, because it seems more natural. Highlight - // closure creation point as a secondary span. 
- match db.span.primary_span() { - Some(primary) => { - db.span = MultiSpan::from_span(s); - db.span_label(primary, "capture occurs here"); - db.span_label(s, "does not live long enough"); - true - } - None => false - } - } - _ => { - db.span_label(error_span, "does not live long enough"); - false - } - }; - - let sub_span = self.region_end_span(sub_scope); - let super_span = self.region_end_span(super_scope); - - match (sub_span, super_span) { - (Some(s1), Some(s2)) if s1 == s2 => { - if !is_closure { - db.span = MultiSpan::from_span(s1); - db.span_label(error_span, value_msg); - let msg = match opt_loan_path(&err.cmt) { - None => value_kind.to_string(), - Some(lp) => { - format!("`{}`", self.loan_path_to_string(&lp)) - } - }; - db.span_label(s1, - format!("{} dropped here while still borrowed", msg)); - } else { - db.span_label(s1, format!("{} dropped before borrower", value_kind)); - } - db.note("values in a scope are dropped in the opposite order \ - they are created"); - } - (Some(s1), Some(s2)) if !is_closure => { - db.span = MultiSpan::from_span(s2); - db.span_label(error_span, value_msg); - let msg = match opt_loan_path(&err.cmt) { - None => value_kind.to_string(), - Some(lp) => { - format!("`{}`", self.loan_path_to_string(&lp)) - } - }; - db.span_label(s2, format!("{} dropped here while still borrowed", msg)); - db.span_label(s1, format!("{} needs to live until here", value_kind)); - } - _ => { - match sub_span { - Some(s) => { - db.span_label(s, format!("{} needs to live until here", - value_kind)); - } - None => { - self.tcx.note_and_explain_region( - db, - "borrowed value must be valid for ", - sub_scope, - "..."); - } - } - match super_span { - Some(s) => { - db.span_label(s, format!("{} only lives until here", value_kind)); - } - None => { - self.tcx.note_and_explain_region( - db, - "...but borrowed value is only valid for ", - super_scope, - ""); - } - } - } - } - - if let Some(_) = statement_scope_span(self.tcx, super_scope) { - db.note("consider using a `let` binding to increase its lifetime"); - } - } - - err_borrowed_pointer_too_short(loan_scope, ptr_scope) => { - let descr = match opt_loan_path(&err.cmt) { - Some(lp) => { - format!("`{}`", self.loan_path_to_string(&lp)) - } - None => self.cmt_to_string(&err.cmt), - }; - self.tcx.note_and_explain_region( - db, - &format!("{} would have to be valid for ", - descr), - loan_scope, - "..."); - self.tcx.note_and_explain_region( - db, - &format!("...but {} is only valid for ", descr), - ptr_scope, - ""); - } - } - } - fn note_and_explain_mutbl_error(&self, db: &mut DiagnosticBuilder, err: &BckError<'tcx>, error_span: &Span) { match err.cmt.note { @@ -1223,10 +1307,10 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { out: &mut String) { match loan_path.kind { LpUpvar(ty::UpvarId { var_id: id, closure_expr_id: _ }) => { - out.push_str(&self.tcx.local_var_name_str_def_index(id)); + out.push_str(&self.tcx.hir.name(self.tcx.hir.hir_to_node_id(id)).as_str()); } LpVar(id) => { - out.push_str(&self.tcx.local_var_name_str(id)); + out.push_str(&self.tcx.hir.name(id).as_str()); } LpDowncast(ref lp_base, variant_def_id) => { @@ -1306,18 +1390,6 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } } -fn statement_scope_span(tcx: TyCtxt, region: ty::Region) -> Option { - match *region { - ty::ReScope(scope) => { - match tcx.hir.find(scope.node_id()) { - Some(hir_map::NodeStmt(stmt)) => Some(stmt.span), - _ => None - } - } - _ => None - } -} - impl BitwiseOperator for LoanDataFlowOperator { #[inline] fn join(&self, succ: usize, pred: usize) -> usize { @@ 
-1363,7 +1435,7 @@ impl<'tcx> fmt::Debug for LoanPath<'tcx> { LpUpvar(ty::UpvarId{ var_id, closure_expr_id }) => { let s = ty::tls::with(|tcx| { - let var_node_id = tcx.hir.def_index_to_node_id(var_id); + let var_node_id = tcx.hir.hir_to_node_id(var_id); tcx.hir.node_to_string(var_node_id) }); write!(f, "$({} captured by id={:?})", s, closure_expr_id) @@ -1398,7 +1470,7 @@ impl<'tcx> fmt::Display for LoanPath<'tcx> { LpUpvar(ty::UpvarId{ var_id, closure_expr_id: _ }) => { let s = ty::tls::with(|tcx| { - let var_node_id = tcx.hir.def_index_to_node_id(var_id); + let var_node_id = tcx.hir.hir_to_node_id(var_id); tcx.hir.node_to_string(var_node_id) }); write!(f, "$({} captured by closure)", s) diff --git a/src/librustc_borrowck/borrowck/move_data.rs b/src/librustc_borrowck/borrowck/move_data.rs index 217bd6e6ca..7915eccbf7 100644 --- a/src/librustc_borrowck/borrowck/move_data.rs +++ b/src/librustc_borrowck/borrowck/move_data.rs @@ -23,16 +23,16 @@ use rustc::middle::expr_use_visitor as euv; use rustc::middle::expr_use_visitor::MutateMode; use rustc::middle::mem_categorization as mc; use rustc::ty::{self, TyCtxt}; -use rustc::util::nodemap::{FxHashMap, NodeSet}; +use rustc::util::nodemap::{FxHashMap, FxHashSet}; use std::cell::RefCell; use std::rc::Rc; use std::usize; -use syntax::ast; use syntax_pos::Span; use rustc::hir; use rustc::hir::intravisit::IdRange; +#[derive(Default)] pub struct MoveData<'tcx> { /// Move paths. See section "Move paths" in `README.md`. pub paths: RefCell<Vec<MovePath<'tcx>>>, @@ -54,7 +54,7 @@ pub struct MoveData<'tcx> { pub path_assignments: RefCell<Vec<Assignment>>, /// Assignments to a variable or path, like `x = foo`, but not `x += foo`. - pub assignee_ids: RefCell<NodeSet>, + pub assignee_ids: RefCell<FxHashSet<hir::ItemLocalId>>, } pub struct FlowedMoveData<'a, 'tcx: 'a> { @@ -133,7 +133,7 @@ pub struct Move { pub path: MovePathIndex, /// id of node that is doing the move. - pub id: ast::NodeId, + pub id: hir::ItemLocalId, /// Kind of move, for error messages. pub kind: MoveKind, @@ -148,13 +148,13 @@ pub struct Assignment { pub path: MovePathIndex, /// id where assignment occurs - pub id: ast::NodeId, + pub id: hir::ItemLocalId, /// span of node where assignment occurs pub span: Span, /// id for l-value expression on lhs of assignment - pub assignee_id: ast::NodeId, + pub assignee_id: hir::ItemLocalId, } #[derive(Clone, Copy)] @@ -189,17 +189,6 @@ fn loan_path_is_precise(loan_path: &LoanPath) -> bool { } impl<'a, 'tcx> MoveData<'tcx> { - pub fn new() -> MoveData<'tcx> { - MoveData { - paths: RefCell::new(Vec::new()), - path_map: RefCell::new(FxHashMap()), - moves: RefCell::new(Vec::new()), - path_assignments: RefCell::new(Vec::new()), - var_assignments: RefCell::new(Vec::new()), - assignee_ids: RefCell::new(NodeSet()), - } - } - /// return true if there are no trackable assignments or moves /// in this move data - that means that there is nothing that /// could cause a borrow error. @@ -345,7 +334,7 @@ impl<'a, 'tcx> MoveData<'tcx> { /// Adds a new move entry for a move of `lp` that occurs at location `id` with kind `kind`. pub fn add_move(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, orig_lp: Rc<LoanPath<'tcx>>, - id: ast::NodeId, + id: hir::ItemLocalId, kind: MoveKind) { // Moving one union field automatically moves all its fields. Also move siblings of // all parent union fields, moves do not propagate upwards automatically.
@@ -373,9 +362,9 @@ impl<'a, 'tcx> MoveData<'tcx> { fn add_move_helper(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, lp: Rc<LoanPath<'tcx>>, - id: ast::NodeId, + id: hir::ItemLocalId, kind: MoveKind) { - debug!("add_move(lp={:?}, id={}, kind={:?})", + debug!("add_move(lp={:?}, id={:?}, kind={:?})", lp, id, kind); @@ -398,9 +387,9 @@ impl<'a, 'tcx> MoveData<'tcx> { /// `span`. pub fn add_assignment(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, lp: Rc<LoanPath<'tcx>>, - assign_id: ast::NodeId, + assign_id: hir::ItemLocalId, span: Span, - assignee_id: ast::NodeId, + assignee_id: hir::ItemLocalId, mode: euv::MutateMode) { // Assigning to one union field automatically assigns to all its fields. if let LpExtend(ref base_lp, mutbl, LpInterior(opt_variant_id, interior)) = lp.kind { @@ -429,11 +418,11 @@ impl<'a, 'tcx> MoveData<'tcx> { fn add_assignment_helper(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, lp: Rc<LoanPath<'tcx>>, - assign_id: ast::NodeId, + assign_id: hir::ItemLocalId, span: Span, - assignee_id: ast::NodeId, + assignee_id: hir::ItemLocalId, mode: euv::MutateMode) { - debug!("add_assignment(lp={:?}, assign_id={}, assignee_id={}", + debug!("add_assignment(lp={:?}, assign_id={:?}, assignee_id={:?}", lp, assign_id, assignee_id); let path_index = self.move_path(tcx, lp.clone()); @@ -496,7 +485,7 @@ impl<'a, 'tcx> MoveData<'tcx> { LpVar(..) | LpUpvar(..) | LpDowncast(..) => { let kill_scope = path.loan_path.kill_scope(bccx); let path = *self.path_map.borrow().get(&path.loan_path).unwrap(); - self.kill_moves(path, kill_scope.node_id(), + self.kill_moves(path, kill_scope.item_local_id(), KillFrom::ScopeEnd, dfcx_moves); } LpExtend(..) => {} @@ -511,7 +500,7 @@ impl<'a, 'tcx> MoveData<'tcx> { LpVar(..) | LpUpvar(..) | LpDowncast(..) => { let kill_scope = lp.kill_scope(bccx); dfcx_assign.add_kill(KillFrom::ScopeEnd, - kill_scope.node_id(), + kill_scope.item_local_id(), assignment_index); } LpExtend(..) => { @@ -579,7 +568,7 @@ impl<'a, 'tcx> MoveData<'tcx> { fn kill_moves(&self, path: MovePathIndex, - kill_id: ast::NodeId, + kill_id: hir::ItemLocalId, kill_kind: KillFrom, dfcx_moves: &mut MoveDataFlow) { // We can only perform kills for paths that refer to a unique location, @@ -589,7 +578,7 @@ impl<'a, 'tcx> MoveData<'tcx> { let loan_path = self.path_loan_path(path); if loan_path_is_precise(&loan_path) { self.each_applicable_move(path, |move_index| { - debug!("kill_moves add_kill {:?} kill_id={} move_index={}", + debug!("kill_moves add_kill {:?} kill_id={:?} move_index={}", kill_kind, kill_id, move_index.get()); dfcx_moves.add_kill(kill_kind, kill_id, move_index.get()); true @@ -642,7 +631,7 @@ impl<'a, 'tcx> FlowedMoveData<'a, 'tcx> { } pub fn kind_of_move_of_path(&self, - id: ast::NodeId, + id: hir::ItemLocalId, loan_path: &Rc<LoanPath<'tcx>>) -> Option<MoveKind> { //! Returns the kind of a move of `loan_path` by `id`, if one exists. @@ -667,7 +656,7 @@ impl<'a, 'tcx> FlowedMoveData<'a, 'tcx> { /// have occurred on entry to `id` without an intervening assignment. In other words, any moves /// that would invalidate a reference to `loan_path` at location `id`. pub fn each_move_of<F>(&self, - id: ast::NodeId, + id: hir::ItemLocalId, loan_path: &Rc<LoanPath<'tcx>>, mut f: F) -> bool where @@ -724,7 +713,7 @@ impl<'a, 'tcx> FlowedMoveData<'a, 'tcx> { /// Iterates through every assignment to `loan_path` that may have occurred on entry to `id`. /// `loan_path` must be a single variable.
pub fn each_assignment_of(&self, - id: ast::NodeId, + id: hir::ItemLocalId, loan_path: &Rc>, mut f: F) -> bool where diff --git a/src/librustc_borrowck/diagnostics.rs b/src/librustc_borrowck/diagnostics.rs index 1f1fc4cc65..031dbcb1eb 100644 --- a/src/librustc_borrowck/diagnostics.rs +++ b/src/librustc_borrowck/diagnostics.rs @@ -317,272 +317,6 @@ fn main() { ``` "##, -E0507: r##" -You tried to move out of a value which was borrowed. Erroneous code example: - -```compile_fail,E0507 -use std::cell::RefCell; - -struct TheDarkKnight; - -impl TheDarkKnight { - fn nothing_is_true(self) {} -} - -fn main() { - let x = RefCell::new(TheDarkKnight); - - x.borrow().nothing_is_true(); // error: cannot move out of borrowed content -} -``` - -Here, the `nothing_is_true` method takes the ownership of `self`. However, -`self` cannot be moved because `.borrow()` only provides an `&TheDarkKnight`, -which is a borrow of the content owned by the `RefCell`. To fix this error, -you have three choices: - -* Try to avoid moving the variable. -* Somehow reclaim the ownership. -* Implement the `Copy` trait on the type. - -Examples: - -``` -use std::cell::RefCell; - -struct TheDarkKnight; - -impl TheDarkKnight { - fn nothing_is_true(&self) {} // First case, we don't take ownership -} - -fn main() { - let x = RefCell::new(TheDarkKnight); - - x.borrow().nothing_is_true(); // ok! -} -``` - -Or: - -``` -use std::cell::RefCell; - -struct TheDarkKnight; - -impl TheDarkKnight { - fn nothing_is_true(self) {} -} - -fn main() { - let x = RefCell::new(TheDarkKnight); - let x = x.into_inner(); // we get back ownership - - x.nothing_is_true(); // ok! -} -``` - -Or: - -``` -use std::cell::RefCell; - -#[derive(Clone, Copy)] // we implement the Copy trait -struct TheDarkKnight; - -impl TheDarkKnight { - fn nothing_is_true(self) {} -} - -fn main() { - let x = RefCell::new(TheDarkKnight); - - x.borrow().nothing_is_true(); // ok! -} -``` - -Moving a member out of a mutably borrowed struct will also cause E0507 error: - -```compile_fail,E0507 -struct TheDarkKnight; - -impl TheDarkKnight { - fn nothing_is_true(self) {} -} - -struct Batcave { - knight: TheDarkKnight -} - -fn main() { - let mut cave = Batcave { - knight: TheDarkKnight - }; - let borrowed = &mut cave; - - borrowed.knight.nothing_is_true(); // E0507 -} -``` - -It is fine only if you put something back. `mem::replace` can be used for that: - -``` -# struct TheDarkKnight; -# impl TheDarkKnight { fn nothing_is_true(self) {} } -# struct Batcave { knight: TheDarkKnight } -use std::mem; - -let mut cave = Batcave { - knight: TheDarkKnight -}; -let borrowed = &mut cave; - -mem::replace(&mut borrowed.knight, TheDarkKnight).nothing_is_true(); // ok! -``` - -You can find more information about borrowing in the rust-book: -http://doc.rust-lang.org/book/first-edition/references-and-borrowing.html -"##, - -E0508: r##" -A value was moved out of a non-copy fixed-size array. - -Example of erroneous code: - -```compile_fail,E0508 -struct NonCopy; - -fn main() { - let array = [NonCopy; 1]; - let _value = array[0]; // error: cannot move out of type `[NonCopy; 1]`, - // a non-copy fixed-size array -} -``` - -The first element was moved out of the array, but this is not -possible because `NonCopy` does not implement the `Copy` trait. - -Consider borrowing the element instead of moving it: - -``` -struct NonCopy; - -fn main() { - let array = [NonCopy; 1]; - let _value = &array[0]; // Borrowing is allowed, unlike moving. 
-} -``` - -Alternatively, if your type implements `Clone` and you need to own the value, -consider borrowing and then cloning: - -``` -#[derive(Clone)] -struct NonCopy; - -fn main() { - let array = [NonCopy; 1]; - // Now you can clone the array element. - let _value = array[0].clone(); -} -``` -"##, - -E0509: r##" -This error occurs when an attempt is made to move out of a value whose type -implements the `Drop` trait. - -Example of erroneous code: - -```compile_fail,E0509 -struct FancyNum { - num: usize -} - -struct DropStruct { - fancy: FancyNum -} - -impl Drop for DropStruct { - fn drop(&mut self) { - // Destruct DropStruct, possibly using FancyNum - } -} - -fn main() { - let drop_struct = DropStruct{fancy: FancyNum{num: 5}}; - let fancy_field = drop_struct.fancy; // Error E0509 - println!("Fancy: {}", fancy_field.num); - // implicit call to `drop_struct.drop()` as drop_struct goes out of scope -} -``` - -Here, we tried to move a field out of a struct of type `DropStruct` which -implements the `Drop` trait. However, a struct cannot be dropped if one or -more of its fields have been moved. - -Structs implementing the `Drop` trait have an implicit destructor that gets -called when they go out of scope. This destructor may use the fields of the -struct, so moving out of the struct could make it impossible to run the -destructor. Therefore, we must think of all values whose type implements the -`Drop` trait as single units whose fields cannot be moved. - -This error can be fixed by creating a reference to the fields of a struct, -enum, or tuple using the `ref` keyword: - -``` -struct FancyNum { - num: usize -} - -struct DropStruct { - fancy: FancyNum -} - -impl Drop for DropStruct { - fn drop(&mut self) { - // Destruct DropStruct, possibly using FancyNum - } -} - -fn main() { - let drop_struct = DropStruct{fancy: FancyNum{num: 5}}; - let ref fancy_field = drop_struct.fancy; // No more errors! - println!("Fancy: {}", fancy_field.num); - // implicit call to `drop_struct.drop()` as drop_struct goes out of scope -} -``` - -Note that this technique can also be used in the arms of a match expression: - -``` -struct FancyNum { - num: usize -} - -enum DropEnum { - Fancy(FancyNum) -} - -impl Drop for DropEnum { - fn drop(&mut self) { - // Destruct DropEnum, possibly using FancyNum - } -} - -fn main() { - // Creates and enum of type `DropEnum`, which implements `Drop` - let drop_enum = DropEnum::Fancy(FancyNum{num: 10}); - match drop_enum { - // Creates a reference to the inside of `DropEnum::Fancy` - DropEnum::Fancy(ref fancy_field) => // No error! - println!("It was fancy-- {}!", fancy_field.num), - } - // implicit call to `drop_enum.drop()` as drop_enum goes out of scope -} -``` -"##, - E0595: r##" Closures cannot mutate immutable captured variables. @@ -652,10 +386,95 @@ x.x = Some(&y); ``` "##, +E0626: r##" +This error occurs because a borrow in a generator persists across a +yield point. + +```compile_fail,E0626 +# #![feature(generators, generator_trait)] +# use std::ops::Generator; +let mut b = || { + let a = &String::new(); // <-- This borrow... + yield (); // ...is still in scope here, when the yield occurs. + println!("{}", a); +}; +b.resume(); +``` + +At present, it is not permitted to have a yield that occurs while a +borrow is still in scope. To resolve this error, the borrow must +either be "contained" to a smaller scope that does not overlap the +yield or else eliminated in another way. 
So, for example, we might +resolve the previous example by removing the borrow and just storing +the integer by value: + +``` +# #![feature(generators, generator_trait)] +# use std::ops::Generator; +let mut b = || { + let a = 3; + yield (); + println!("{}", a); +}; +b.resume(); +``` + +This is a very simple case, of course. In more complex cases, we may +wish to have more than one reference to the value that was borrowed -- +in those cases, something like the `Rc` or `Arc` types may be useful. + +This error also frequently arises with iteration: + +```compile_fail,E0626 +# #![feature(generators, generator_trait)] +# use std::ops::Generator; +let mut b = || { + let v = vec![1,2,3]; + for &x in &v { // <-- borrow of `v` is still in scope... + yield x; // ...when this yield occurs. + } +}; +b.resume(); +``` + +Such cases can sometimes be resolved by iterating "by value" (or using +`into_iter()`) to avoid borrowing: + +``` +# #![feature(generators, generator_trait)] +# use std::ops::Generator; +let mut b = || { + let v = vec![1,2,3]; + for x in v { // <-- Take ownership of the values instead! + yield x; // <-- Now yield is OK. + } +}; +b.resume(); +``` + +If taking ownership is not an option, using indices can work too: + +``` +# #![feature(generators, generator_trait)] +# use std::ops::Generator; +let mut b = || { + let v = vec![1,2,3]; + let len = v.len(); // (*) + for i in 0..len { + let x = v[i]; // (*) + yield x; // <-- Now yield is OK. + } +}; +b.resume(); + +// (*) -- Unfortunately, these temporaries are currently required. +// See . +``` +"##, + } register_diagnostics! { // E0385, // {} in an aliasable location - E0594, // cannot assign to {} E0598, // lifetime of {} is too short to guarantee its contents can be... } diff --git a/src/librustc_borrowck/graphviz.rs b/src/librustc_borrowck/graphviz.rs index e3a2bfa392..22867ba5b5 100644 --- a/src/librustc_borrowck/graphviz.rs +++ b/src/librustc_borrowck/graphviz.rs @@ -52,7 +52,7 @@ pub struct DataflowLabeller<'a, 'tcx: 'a> { impl<'a, 'tcx> DataflowLabeller<'a, 'tcx> { fn dataflow_for(&self, e: EntryOrExit, n: &Node<'a>) -> String { let id = n.1.data.id(); - debug!("dataflow_for({:?}, id={}) {:?}", e, id, self.variants); + debug!("dataflow_for({:?}, id={:?}) {:?}", e, id, self.variants); let mut sets = "".to_string(); let mut seen_one = false; for &variant in &self.variants { diff --git a/src/librustc_const_eval/_match.rs b/src/librustc_const_eval/_match.rs index bc9aa9665c..b836b71e74 100644 --- a/src/librustc_const_eval/_match.rs +++ b/src/librustc_const_eval/_match.rs @@ -182,13 +182,16 @@ impl<'a, 'tcx> MatchCheckCtxt<'a, 'tcx> { self.byte_array_map.entry(pat).or_insert_with(|| { match pat.kind { box PatternKind::Constant { - value: ConstVal::ByteStr(ref data) + value: &ty::Const { val: ConstVal::ByteStr(b), .. } } => { - data.iter().map(|c| &*pattern_arena.alloc(Pattern { + b.data.iter().map(|&b| &*pattern_arena.alloc(Pattern { ty: tcx.types.u8, span: pat.span, kind: box PatternKind::Constant { - value: ConstVal::Integral(ConstInt::U8(*c)) + value: tcx.mk_const(ty::Const { + val: ConstVal::Integral(ConstInt::U8(b)), + ty: tcx.types.u8 + }) } })).collect() } @@ -228,11 +231,11 @@ pub enum Constructor<'tcx> { /// Enum variants. Variant(DefId), /// Literal values. - ConstantValue(ConstVal<'tcx>), + ConstantValue(&'tcx ty::Const<'tcx>), /// Ranges of literal values (`2...5` and `2..5`). 
- ConstantRange(ConstVal<'tcx>, ConstVal<'tcx>, RangeEnd), + ConstantRange(&'tcx ty::Const<'tcx>, &'tcx ty::Const<'tcx>, RangeEnd), /// Array patterns of length n. - Slice(usize), + Slice(u64), } impl<'tcx> Constructor<'tcx> { @@ -273,7 +276,7 @@ pub enum WitnessPreference { #[derive(Copy, Clone, Debug)] struct PatternContext<'tcx> { ty: Ty<'tcx>, - max_slice_length: usize, + max_slice_length: u64, } /// A stack of patterns in reverse order of construction @@ -327,8 +330,8 @@ impl<'tcx> Witness<'tcx> { { let arity = constructor_arity(cx, ctor, ty); let pat = { - let len = self.0.len(); - let mut pats = self.0.drain(len-arity..).rev(); + let len = self.0.len() as u64; + let mut pats = self.0.drain((len-arity) as usize..).rev(); match ty.sty { ty::TyAdt(..) | @@ -370,7 +373,7 @@ impl<'tcx> Witness<'tcx> { _ => { match *ctor { - ConstantValue(ref v) => PatternKind::Constant { value: v.clone() }, + ConstantValue(value) => PatternKind::Constant { value }, _ => PatternKind::Wild, } } @@ -404,8 +407,24 @@ fn all_constructors<'a, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>, { debug!("all_constructors({:?})", pcx.ty); match pcx.ty.sty { - ty::TyBool => - [true, false].iter().map(|b| ConstantValue(ConstVal::Bool(*b))).collect(), + ty::TyBool => { + [true, false].iter().map(|&b| { + ConstantValue(cx.tcx.mk_const(ty::Const { + val: ConstVal::Bool(b), + ty: cx.tcx.types.bool + })) + }).collect() + } + ty::TyArray(ref sub_ty, len) if len.val.to_const_int().is_some() => { + let len = len.val.to_const_int().unwrap().to_u64().unwrap(); + if len != 0 && cx.is_uninhabited(sub_ty) { + vec![] + } else { + vec![Slice(len)] + } + } + // Treat arrays of a constant but unknown length like slices. + ty::TyArray(ref sub_ty, _) | ty::TySlice(ref sub_ty) => { if cx.is_uninhabited(sub_ty) { vec![Slice(0)] @@ -413,13 +432,6 @@ fn all_constructors<'a, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>, (0..pcx.max_slice_length+1).map(|length| Slice(length)).collect() } } - ty::TyArray(ref sub_ty, length) => { - if length > 0 && cx.is_uninhabited(sub_ty) { - vec![] - } else { - vec![Slice(length)] - } - } ty::TyAdt(def, substs) if def.is_enum() && def.variants.len() != 1 => { def.variants.iter() .filter(|v| !cx.is_variant_uninhabited(v, substs)) @@ -438,7 +450,7 @@ fn all_constructors<'a, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>, fn max_slice_length<'p, 'a: 'p, 'tcx: 'a, I>( _cx: &mut MatchCheckCtxt<'a, 'tcx>, - patterns: I) -> usize + patterns: I) -> u64 where I: Iterator> { // The exhaustiveness-checking paper does not include any details on @@ -511,16 +523,16 @@ fn max_slice_length<'p, 'a: 'p, 'tcx: 'a, I>( for row in patterns { match *row.kind { - PatternKind::Constant { value: ConstVal::ByteStr(ref data) } => { - max_fixed_len = cmp::max(max_fixed_len, data.len()); + PatternKind::Constant { value: &ty::Const { val: ConstVal::ByteStr(b), .. 
} } => { + max_fixed_len = cmp::max(max_fixed_len, b.data.len() as u64); } PatternKind::Slice { ref prefix, slice: None, ref suffix } => { - let fixed_len = prefix.len() + suffix.len(); + let fixed_len = prefix.len() as u64 + suffix.len() as u64; max_fixed_len = cmp::max(max_fixed_len, fixed_len); } PatternKind::Slice { ref prefix, slice: Some(_), ref suffix } => { - max_prefix_len = cmp::max(max_prefix_len, prefix.len()); - max_suffix_len = cmp::max(max_suffix_len, suffix.len()); + max_prefix_len = cmp::max(max_prefix_len, prefix.len() as u64); + max_suffix_len = cmp::max(max_suffix_len, suffix.len() as u64); } _ => {} } @@ -715,16 +727,18 @@ fn pat_constructors<'tcx>(_cx: &mut MatchCheckCtxt, Some(vec![Single]), PatternKind::Variant { adt_def, variant_index, .. } => Some(vec![Variant(adt_def.variants[variant_index].did)]), - PatternKind::Constant { ref value } => - Some(vec![ConstantValue(value.clone())]), - PatternKind::Range { ref lo, ref hi, ref end } => - Some(vec![ConstantRange(lo.clone(), hi.clone(), end.clone())]), + PatternKind::Constant { value } => + Some(vec![ConstantValue(value)]), + PatternKind::Range { lo, hi, end } => + Some(vec![ConstantRange(lo, hi, end)]), PatternKind::Array { .. } => match pcx.ty.sty { - ty::TyArray(_, length) => Some(vec![Slice(length)]), + ty::TyArray(_, length) => Some(vec![ + Slice(length.val.to_const_int().unwrap().to_u64().unwrap()) + ]), _ => span_bug!(pat.span, "bad ty {:?} for array pattern", pcx.ty) }, PatternKind::Slice { ref prefix, ref slice, ref suffix } => { - let pat_len = prefix.len() + suffix.len(); + let pat_len = prefix.len() as u64 + suffix.len() as u64; if slice.is_some() { Some((pat_len..pcx.max_slice_length+1).map(Slice).collect()) } else { @@ -739,10 +753,10 @@ fn pat_constructors<'tcx>(_cx: &mut MatchCheckCtxt, /// /// For instance, a tuple pattern (_, 42, Some([])) has the arity of 3. /// A struct pattern's arity is the number of fields it contains, etc. -fn constructor_arity(_cx: &MatchCheckCtxt, ctor: &Constructor, ty: Ty) -> usize { +fn constructor_arity(_cx: &MatchCheckCtxt, ctor: &Constructor, ty: Ty) -> u64 { debug!("constructor_arity({:?}, {:?})", ctor, ty); match ty.sty { - ty::TyTuple(ref fs, _) => fs.len(), + ty::TyTuple(ref fs, _) => fs.len() as u64, ty::TySlice(..) | ty::TyArray(..) => match *ctor { Slice(length) => length, ConstantValue(_) => 0, @@ -750,7 +764,7 @@ fn constructor_arity(_cx: &MatchCheckCtxt, ctor: &Constructor, ty: Ty) -> usize }, ty::TyRef(..) => 1, ty::TyAdt(adt, _) => { - adt.variants[ctor.variant_index_for_adt(adt)].fields.len() + adt.variants[ctor.variant_index_for_adt(adt)].fields.len() as u64 } _ => 0 } @@ -768,7 +782,7 @@ fn constructor_sub_pattern_tys<'a, 'tcx: 'a>(cx: &MatchCheckCtxt<'a, 'tcx>, match ty.sty { ty::TyTuple(ref fs, _) => fs.into_iter().map(|t| *t).collect(), ty::TySlice(ty) | ty::TyArray(ty, _) => match *ctor { - Slice(length) => repeat(ty).take(length).collect(), + Slice(length) => (0..length).map(|_| ty).collect(), ConstantValue(_) => vec![], _ => bug!("bad slice pattern {:?} {:?}", ctor, ty) }, @@ -806,7 +820,7 @@ fn slice_pat_covered_by_constructor(_tcx: TyCtxt, _span: Span, suffix: &[Pattern]) -> Result { let data = match *ctor { - ConstantValue(ConstVal::ByteStr(ref data)) => data, + ConstantValue(&ty::Const { val: ConstVal::ByteStr(b), .. 
}) => b.data, _ => bug!() }; @@ -820,7 +834,7 @@ fn slice_pat_covered_by_constructor(_tcx: TyCtxt, _span: Span, data[data.len()-suffix.len()..].iter().zip(suffix)) { match pat.kind { - box PatternKind::Constant { ref value } => match *value { + box PatternKind::Constant { value } => match value.val { ConstVal::Integral(ConstInt::U8(u)) => { if u != *ch { return Ok(false); @@ -843,23 +857,23 @@ fn constructor_covered_by_range(tcx: TyCtxt, span: Span, let cmp_from = |c_from| Ok(compare_const_vals(tcx, span, c_from, from)? != Ordering::Less); let cmp_to = |c_to| compare_const_vals(tcx, span, c_to, to); match *ctor { - ConstantValue(ref value) => { - let to = cmp_to(value)?; + ConstantValue(value) => { + let to = cmp_to(&value.val)?; let end = (to == Ordering::Less) || (end == RangeEnd::Included && to == Ordering::Equal); - Ok(cmp_from(value)? && end) + Ok(cmp_from(&value.val)? && end) }, - ConstantRange(ref from, ref to, RangeEnd::Included) => { - let to = cmp_to(to)?; + ConstantRange(from, to, RangeEnd::Included) => { + let to = cmp_to(&to.val)?; let end = (to == Ordering::Less) || (end == RangeEnd::Included && to == Ordering::Equal); - Ok(cmp_from(from)? && end) + Ok(cmp_from(&from.val)? && end) }, - ConstantRange(ref from, ref to, RangeEnd::Excluded) => { - let to = cmp_to(to)?; + ConstantRange(from, to, RangeEnd::Excluded) => { + let to = cmp_to(&to.val)?; let end = (to == Ordering::Less) || (end == RangeEnd::Excluded && to == Ordering::Equal); - Ok(cmp_from(from)? && end) + Ok(cmp_from(&from.val)? && end) } Single => Ok(true), _ => bug!(), @@ -919,11 +933,11 @@ fn specialize<'p, 'a: 'p, 'tcx: 'a>( Some(vec![subpattern]) } - PatternKind::Constant { ref value } => { + PatternKind::Constant { value } => { match *constructor { - Slice(..) => match *value { - ConstVal::ByteStr(ref data) => { - if wild_patterns.len() == data.len() { + Slice(..) 
=> match value.val { + ConstVal::ByteStr(b) => { + if wild_patterns.len() == b.data.len() { Some(cx.lower_byte_str_pattern(pat)) } else { None @@ -934,7 +948,7 @@ fn specialize<'p, 'a: 'p, 'tcx: 'a>( }, _ => { match constructor_covered_by_range( - cx.tcx, pat.span, constructor, value, value, RangeEnd::Included + cx.tcx, pat.span, constructor, &value.val, &value.val, RangeEnd::Included ) { Ok(true) => Some(vec![]), Ok(false) => None, @@ -944,9 +958,9 @@ fn specialize<'p, 'a: 'p, 'tcx: 'a>( } } - PatternKind::Range { ref lo, ref hi, ref end } => { + PatternKind::Range { lo, hi, ref end } => { match constructor_covered_by_range( - cx.tcx, pat.span, constructor, lo, hi, end.clone() + cx.tcx, pat.span, constructor, &lo.val, &hi.val, end.clone() ) { Ok(true) => Some(vec![]), Ok(false) => None, diff --git a/src/librustc_const_eval/check_match.rs b/src/librustc_const_eval/check_match.rs index 2bed1950af..0339969f2b 100644 --- a/src/librustc_const_eval/check_match.rs +++ b/src/librustc_const_eval/check_match.rs @@ -18,7 +18,7 @@ use rustc::middle::expr_use_visitor::{ConsumeMode, Delegate, ExprUseVisitor}; use rustc::middle::expr_use_visitor::{LoanCause, MutateMode}; use rustc::middle::expr_use_visitor as euv; use rustc::middle::mem_categorization::{cmt}; -use rustc::middle::region::RegionMaps; +use rustc::middle::region; use rustc::session::Session; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::subst::Substs; @@ -51,7 +51,7 @@ impl<'a, 'tcx> Visitor<'tcx> for OuterVisitor<'a, 'tcx> { MatchVisitor { tcx: self.tcx, tables: self.tcx.body_tables(b), - region_maps: &self.tcx.region_maps(def_id), + region_scope_tree: &self.tcx.region_scope_tree(def_id), param_env: self.tcx.param_env(def_id), identity_substs: Substs::identity_for_item(self.tcx, def_id), }.visit_body(self.tcx.hir.body(b)); @@ -72,7 +72,7 @@ struct MatchVisitor<'a, 'tcx: 'a> { tables: &'a ty::TypeckTables<'tcx>, param_env: ty::ParamEnv<'tcx>, identity_substs: &'tcx Substs<'tcx>, - region_maps: &'a RegionMaps, + region_scope_tree: &'a region::ScopeTree, } impl<'a, 'tcx> Visitor<'tcx> for MatchVisitor<'a, 'tcx> { @@ -140,7 +140,7 @@ impl<'a, 'tcx> MatchVisitor<'a, 'tcx> { fn check_match( &self, scrut: &hir::Expr, - arms: &[hir::Arm], + arms: &'tcx [hir::Arm], source: hir::MatchSource) { for arm in arms { @@ -231,7 +231,7 @@ impl<'a, 'tcx> MatchVisitor<'a, 'tcx> { } } - fn check_irrefutable(&self, pat: &Pat, origin: &str) { + fn check_irrefutable(&self, pat: &'tcx Pat, origin: &str) { let module = self.tcx.hir.get_module_parent(pat.id); MatchCheckCtxt::create_and_enter(self.tcx, module, |ref mut cx| { let mut patcx = PatternContext::new(self.tcx, @@ -526,7 +526,7 @@ fn check_for_mutation_in_guard(cx: &MatchVisitor, guard: &hir::Expr) { let mut checker = MutationChecker { cx, }; - ExprUseVisitor::new(&mut checker, cx.tcx, cx.param_env, cx.region_maps, cx.tables) + ExprUseVisitor::new(&mut checker, cx.tcx, cx.param_env, cx.region_scope_tree, cx.tables) .walk_expr(guard); } diff --git a/src/librustc_const_eval/diagnostics.rs b/src/librustc_const_eval/diagnostics.rs index 56d08184a0..d01b3c45f7 100644 --- a/src/librustc_const_eval/diagnostics.rs +++ b/src/librustc_const_eval/diagnostics.rs @@ -565,7 +565,7 @@ See also https://github.com/rust-lang/rust/issues/14587 register_diagnostics! 
{ - E0298, // cannot compare constants +// E0298, // cannot compare constants // E0299, // mismatched types between arms // E0471, // constant evaluation error (in pattern) } diff --git a/src/librustc_const_eval/eval.rs b/src/librustc_const_eval/eval.rs index c7def0b834..7520c6ac65 100644 --- a/src/librustc_const_eval/eval.rs +++ b/src/librustc_const_eval/eval.rs @@ -9,8 +9,9 @@ // except according to those terms. use rustc::middle::const_val::ConstVal::*; +use rustc::middle::const_val::ConstAggregate::*; use rustc::middle::const_val::ErrKind::*; -use rustc::middle::const_val::{ConstVal, ConstEvalErr, EvalResult, ErrKind}; +use rustc::middle::const_val::{ByteArray, ConstVal, ConstEvalErr, EvalResult, ErrKind}; use rustc::hir::map as hir_map; use rustc::hir::map::blocks::FnLikeNode; @@ -22,7 +23,7 @@ use rustc::ty::maps::Providers; use rustc::ty::util::IntTypeExt; use rustc::ty::subst::{Substs, Subst}; use rustc::util::common::ErrorReported; -use rustc::util::nodemap::DefIdMap; +use rustc::util::nodemap::NodeMap; use syntax::abi::Abi; use syntax::ast; @@ -33,7 +34,6 @@ use syntax_pos::Span; use std::cmp::Ordering; use rustc_const_math::*; - macro_rules! signal { ($e:expr, $exn:expr) => { return Err(ConstEvalErr { span: $e.span, kind: $exn }) @@ -88,7 +88,7 @@ pub struct ConstContext<'a, 'tcx: 'a> { tables: &'a ty::TypeckTables<'tcx>, param_env: ty::ParamEnv<'tcx>, substs: &'tcx Substs<'tcx>, - fn_args: Option>> + fn_args: Option>> } impl<'a, 'tcx> ConstContext<'a, 'tcx> { @@ -107,7 +107,7 @@ impl<'a, 'tcx> ConstContext<'a, 'tcx> { /// Evaluate a constant expression in a context where the expression isn't /// guaranteed to be evaluable. - pub fn eval(&self, e: &Expr) -> EvalResult<'tcx> { + pub fn eval(&self, e: &'tcx Expr) -> EvalResult<'tcx> { if self.tables.tainted_by_errors { signal!(e, TypeckError); } @@ -118,9 +118,10 @@ impl<'a, 'tcx> ConstContext<'a, 'tcx> { type CastResult<'tcx> = Result, ErrKind<'tcx>>; fn eval_const_expr_partial<'a, 'tcx>(cx: &ConstContext<'a, 'tcx>, - e: &Expr) -> EvalResult<'tcx> { + e: &'tcx Expr) -> EvalResult<'tcx> { let tcx = cx.tcx; - let ety = cx.tables.expr_ty(e).subst(tcx, cx.substs); + let ty = cx.tables.expr_ty(e).subst(tcx, cx.substs); + let mk_const = |val| tcx.mk_const(ty::Const { val, ty }); let result = match e.node { hir::ExprUnary(hir::UnNeg, ref inner) => { @@ -133,57 +134,66 @@ fn eval_const_expr_partial<'a, 'tcx>(cx: &ConstContext<'a, 'tcx>, const I32_OVERFLOW: u128 = i32::min_value() as u32 as u128; const I64_OVERFLOW: u128 = i64::min_value() as u64 as u128; const I128_OVERFLOW: u128 = i128::min_value() as u128; - match (&lit.node, &ety.sty) { + let negated = match (&lit.node, &ty.sty) { (&LitKind::Int(I8_OVERFLOW, _), &ty::TyInt(IntTy::I8)) | (&LitKind::Int(I8_OVERFLOW, Signed(IntTy::I8)), _) => { - return Ok(Integral(I8(i8::min_value()))) + Some(I8(i8::min_value())) }, (&LitKind::Int(I16_OVERFLOW, _), &ty::TyInt(IntTy::I16)) | (&LitKind::Int(I16_OVERFLOW, Signed(IntTy::I16)), _) => { - return Ok(Integral(I16(i16::min_value()))) + Some(I16(i16::min_value())) }, (&LitKind::Int(I32_OVERFLOW, _), &ty::TyInt(IntTy::I32)) | (&LitKind::Int(I32_OVERFLOW, Signed(IntTy::I32)), _) => { - return Ok(Integral(I32(i32::min_value()))) + Some(I32(i32::min_value())) }, (&LitKind::Int(I64_OVERFLOW, _), &ty::TyInt(IntTy::I64)) | (&LitKind::Int(I64_OVERFLOW, Signed(IntTy::I64)), _) => { - return Ok(Integral(I64(i64::min_value()))) + Some(I64(i64::min_value())) }, (&LitKind::Int(I128_OVERFLOW, _), &ty::TyInt(IntTy::I128)) | (&LitKind::Int(I128_OVERFLOW, 
Signed(IntTy::I128)), _) => { - return Ok(Integral(I128(i128::min_value()))) + Some(I128(i128::min_value())) }, (&LitKind::Int(n, _), &ty::TyInt(IntTy::Is)) | (&LitKind::Int(n, Signed(IntTy::Is)), _) => { - match tcx.sess.target.int_type { + match tcx.sess.target.isize_ty { IntTy::I16 => if n == I16_OVERFLOW { - return Ok(Integral(Isize(Is16(i16::min_value())))); + Some(Isize(Is16(i16::min_value()))) + } else { + None }, IntTy::I32 => if n == I32_OVERFLOW { - return Ok(Integral(Isize(Is32(i32::min_value())))); + Some(Isize(Is32(i32::min_value()))) + } else { + None }, IntTy::I64 => if n == I64_OVERFLOW { - return Ok(Integral(Isize(Is64(i64::min_value())))); + Some(Isize(Is64(i64::min_value()))) + } else { + None }, _ => span_bug!(e.span, "typeck error") } }, - _ => {}, + _ => None + }; + if let Some(i) = negated { + return Ok(mk_const(Integral(i))); } } - match cx.eval(inner)? { + mk_const(match cx.eval(inner)?.val { Float(f) => Float(-f), Integral(i) => Integral(math!(e, -i)), - const_val => signal!(e, NegateOn(const_val)), - } + _ => signal!(e, TypeckError) + }) } hir::ExprUnary(hir::UnNot, ref inner) => { - match cx.eval(inner)? { + mk_const(match cx.eval(inner)?.val { Integral(i) => Integral(math!(e, !i)), Bool(b) => Bool(!b), - const_val => signal!(e, NotOn(const_val)), - } + _ => signal!(e, TypeckError) + }) } hir::ExprUnary(hir::UnDeref, _) => signal!(e, UnimplementedConstVal("deref operation")), hir::ExprBinary(op, ref a, ref b) => { @@ -191,7 +201,7 @@ fn eval_const_expr_partial<'a, 'tcx>(cx: &ConstContext<'a, 'tcx>, // gives us a type through a type-suffix, cast or const def type // we need to re-eval the other value of the BinOp if it was // not inferred - match (cx.eval(a)?, cx.eval(b)?) { + mk_const(match (cx.eval(a)?.val, cx.eval(b)?.val) { (Float(a), Float(b)) => { use std::cmp::Ordering::*; match op.node { @@ -260,16 +270,16 @@ fn eval_const_expr_partial<'a, 'tcx>(cx: &ConstContext<'a, 'tcx>, } _ => signal!(e, MiscBinaryOp), - } + }) } hir::ExprCast(ref base, _) => { let base_val = cx.eval(base)?; let base_ty = cx.tables.expr_ty(base).subst(tcx, cx.substs); - if ety == base_ty { + if ty == base_ty { base_val } else { - match cast_const(tcx, base_val, ety) { - Ok(val) => val, + match cast_const(tcx, base_val.val, ty) { + Ok(val) => mk_const(val), Err(kind) => signal!(e, kind), } } @@ -291,52 +301,53 @@ fn eval_const_expr_partial<'a, 'tcx>(cx: &ConstContext<'a, 'tcx>, } }, Def::VariantCtor(variant_def, CtorKind::Const) => { - Variant(variant_def) + mk_const(Variant(variant_def)) } Def::VariantCtor(_, CtorKind::Fn) => { signal!(e, UnimplementedConstVal("enum variants")); } Def::StructCtor(_, CtorKind::Const) => { - ConstVal::Struct(Default::default()) + mk_const(Aggregate(Struct(&[]))) } Def::StructCtor(_, CtorKind::Fn) => { signal!(e, UnimplementedConstVal("tuple struct constructors")) } - Def::Local(def_id) => { - debug!("Def::Local({:?}): {:?}", def_id, cx.fn_args); - if let Some(val) = cx.fn_args.as_ref().and_then(|args| args.get(&def_id)) { - val.clone() + Def::Local(id) => { + debug!("Def::Local({:?}): {:?}", id, cx.fn_args); + if let Some(&val) = cx.fn_args.as_ref().and_then(|args| args.get(&id)) { + val } else { signal!(e, NonConstPath); } }, - Def::Method(id) | Def::Fn(id) => Function(id, substs), + Def::Method(id) | Def::Fn(id) => mk_const(Function(id, substs)), Def::Err => span_bug!(e.span, "typeck error"), _ => signal!(e, NonConstPath), } } hir::ExprCall(ref callee, ref args) => { - let (def_id, substs) = match cx.eval(callee)? 
{ + let (def_id, substs) = match cx.eval(callee)?.val { Function(def_id, substs) => (def_id, substs), _ => signal!(e, TypeckError), }; if tcx.fn_sig(def_id).abi() == Abi::RustIntrinsic { let layout_of = |ty: Ty<'tcx>| { - ty.layout(tcx, cx.param_env).map_err(|err| { + let ty = tcx.erase_regions(&ty); + tcx.at(e.span).layout_raw(cx.param_env.reveal_all().and(ty)).map_err(|err| { ConstEvalErr { span: e.span, kind: LayoutError(err) } }) }; - match &tcx.item_name(def_id).as_str()[..] { + match &tcx.item_name(def_id)[..] { "size_of" => { - let size = layout_of(substs.type_at(0))?.size(tcx); - return Ok(Integral(Usize(ConstUsize::new(size.bytes(), - tcx.sess.target.uint_type).unwrap()))); + let size = layout_of(substs.type_at(0))?.size(tcx).bytes(); + return Ok(mk_const(Integral(Usize(ConstUsize::new(size, + tcx.sess.target.usize_ty).unwrap())))); } "min_align_of" => { - let align = layout_of(substs.type_at(0))?.align(tcx); - return Ok(Integral(Usize(ConstUsize::new(align.abi(), - tcx.sess.target.uint_type).unwrap()))); + let align = layout_of(substs.type_at(0))?.align(tcx).abi(); + return Ok(mk_const(Integral(Usize(ConstUsize::new(align, + tcx.sess.target.usize_ty).unwrap())))); } _ => signal!(e, TypeckError) } @@ -354,24 +365,24 @@ fn eval_const_expr_partial<'a, 'tcx>(cx: &ConstContext<'a, 'tcx>, } } else { if tcx.is_const_fn(def_id) { - tcx.sess.cstore.item_body(tcx, def_id) + tcx.extern_const_body(def_id).body } else { signal!(e, TypeckError) } }; - let arg_defs = body.arguments.iter().map(|arg| match arg.pat.node { - hir::PatKind::Binding(_, def_id, _, _) => Some(def_id), + let arg_ids = body.arguments.iter().map(|arg| match arg.pat.node { + hir::PatKind::Binding(_, canonical_id, _, _) => Some(canonical_id), _ => None }).collect::>(); - assert_eq!(arg_defs.len(), args.len()); + assert_eq!(arg_ids.len(), args.len()); - let mut call_args = DefIdMap(); - for (arg, arg_expr) in arg_defs.into_iter().zip(args.iter()) { + let mut call_args = NodeMap(); + for (arg, arg_expr) in arg_ids.into_iter().zip(args.iter()) { let arg_val = cx.eval(arg_expr)?; debug!("const call arg: {:?}", arg); - if let Some(def_id) = arg { - assert!(call_args.insert(def_id, arg_val).is_none()); + if let Some(id) = arg { + assert!(call_args.insert(id, arg_val).is_none()); } } debug!("const call({:?})", call_args); @@ -384,84 +395,83 @@ fn eval_const_expr_partial<'a, 'tcx>(cx: &ConstContext<'a, 'tcx>, }; callee_cx.eval(&body.value)? }, - hir::ExprLit(ref lit) => match lit_to_const(&lit.node, tcx, ety) { - Ok(val) => val, + hir::ExprLit(ref lit) => match lit_to_const(&lit.node, tcx, ty) { + Ok(val) => mk_const(val), Err(err) => signal!(e, err), }, hir::ExprBlock(ref block) => { match block.expr { Some(ref expr) => cx.eval(expr)?, - None => Tuple(vec![]), + None => mk_const(Aggregate(Tuple(&[]))), } } hir::ExprType(ref e, _) => cx.eval(e)?, hir::ExprTup(ref fields) => { - Tuple(fields.iter().map(|e| cx.eval(e)).collect::>()?) + let values = fields.iter().map(|e| cx.eval(e)).collect::, _>>()?; + mk_const(Aggregate(Tuple(tcx.alloc_const_slice(&values)))) } hir::ExprStruct(_, ref fields, _) => { - Struct(fields.iter().map(|f| { + mk_const(Aggregate(Struct(tcx.alloc_name_const_slice(&fields.iter().map(|f| { cx.eval(&f.expr).map(|v| (f.name.node, v)) - }).collect::>()?) + }).collect::, _>>()?)))) } hir::ExprIndex(ref arr, ref idx) => { if !tcx.sess.features.borrow().const_indexing { signal!(e, IndexOpFeatureGated); } let arr = cx.eval(arr)?; - let idx = match cx.eval(idx)? 
{ - Integral(Usize(i)) => i.as_u64(tcx.sess.target.uint_type), + let idx = match cx.eval(idx)?.val { + Integral(Usize(i)) => i.as_u64(), _ => signal!(idx, IndexNotUsize), }; assert_eq!(idx as usize as u64, idx); - match arr { - Array(ref v) => { - if let Some(elem) = v.get(idx as usize) { - elem.clone() + match arr.val { + Aggregate(Array(v)) => { + if let Some(&elem) = v.get(idx as usize) { + elem } else { let n = v.len() as u64; - assert_eq!(n as usize as u64, n); signal!(e, IndexOutOfBounds { len: n, index: idx }) } } - Repeat(.., n) if idx >= n => { + Aggregate(Repeat(.., n)) if idx >= n => { signal!(e, IndexOutOfBounds { len: n, index: idx }) } - Repeat(ref elem, _) => (**elem).clone(), + Aggregate(Repeat(elem, _)) => elem, - ByteStr(ref data) if idx >= data.len() as u64 => { - signal!(e, IndexOutOfBounds { len: data.len() as u64, index: idx }) + ByteStr(b) if idx >= b.data.len() as u64 => { + signal!(e, IndexOutOfBounds { len: b.data.len() as u64, index: idx }) } - ByteStr(data) => { - Integral(U8(data[idx as usize])) + ByteStr(b) => { + mk_const(Integral(U8(b.data[idx as usize]))) }, _ => signal!(e, IndexedNonVec), } } hir::ExprArray(ref v) => { - Array(v.iter().map(|e| cx.eval(e)).collect::>()?) + let values = v.iter().map(|e| cx.eval(e)).collect::, _>>()?; + mk_const(Aggregate(Array(tcx.alloc_const_slice(&values)))) } hir::ExprRepeat(ref elem, _) => { - let n = match ety.sty { - ty::TyArray(_, n) => n as u64, + let n = match ty.sty { + ty::TyArray(_, n) => n.val.to_const_int().unwrap().to_u64().unwrap(), _ => span_bug!(e.span, "typeck error") }; - Repeat(Box::new(cx.eval(elem)?), n) + mk_const(Aggregate(Repeat(cx.eval(elem)?, n))) }, hir::ExprTupField(ref base, index) => { - let c = cx.eval(base)?; - if let Tuple(ref fields) = c { - fields[index.node].clone() + if let Aggregate(Tuple(fields)) = cx.eval(base)?.val { + fields[index.node] } else { signal!(base, ExpectedConstTuple); } } hir::ExprField(ref base, field_name) => { - let c = cx.eval(base)?; - if let Struct(ref fields) = c { - if let Some(f) = fields.get(&field_name.node) { - f.clone() + if let Aggregate(Struct(fields)) = cx.eval(base)?.val { + if let Some(&(_, f)) = fields.iter().find(|&&(name, _)| name == field_name.node) { + f } else { signal!(e, MissingStructField); } @@ -551,7 +561,7 @@ fn cast_const_int<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty::TyInt(ast::IntTy::I64) => Ok(Integral(I64(v as i128 as i64))), ty::TyInt(ast::IntTy::I128) => Ok(Integral(I128(v as i128))), ty::TyInt(ast::IntTy::Is) => { - Ok(Integral(Isize(ConstIsize::new_truncating(v as i128, tcx.sess.target.int_type)))) + Ok(Integral(Isize(ConstIsize::new_truncating(v as i128, tcx.sess.target.isize_ty)))) }, ty::TyUint(ast::UintTy::U8) => Ok(Integral(U8(v as u8))), ty::TyUint(ast::UintTy::U16) => Ok(Integral(U16(v as u16))), @@ -559,7 +569,7 @@ fn cast_const_int<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty::TyUint(ast::UintTy::U64) => Ok(Integral(U64(v as u64))), ty::TyUint(ast::UintTy::U128) => Ok(Integral(U128(v as u128))), ty::TyUint(ast::UintTy::Us) => { - Ok(Integral(Usize(ConstUsize::new_truncating(v, tcx.sess.target.uint_type)))) + Ok(Integral(Usize(ConstUsize::new_truncating(v, tcx.sess.target.usize_ty)))) }, ty::TyFloat(fty) => { if let Some(i) = val.to_u128() { @@ -625,7 +635,14 @@ fn cast_const<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, Err(ErrKind::UnimplementedConstVal("casting a bytestr to a raw ptr")) }, ty::TyRef(_, ty::TypeAndMut { ref ty, mutbl: hir::MutImmutable }) => match ty.sty { - ty::TyArray(ty, n) if ty == tcx.types.u8 && n == b.len() 
=> Ok(ByteStr(b)), + ty::TyArray(ty, n) => { + let n = n.val.to_const_int().unwrap().to_u64().unwrap(); + if ty == tcx.types.u8 && n == b.data.len() as u64 { + Ok(val) + } else { + Err(CannotCast) + } + } ty::TySlice(_) => { Err(ErrKind::UnimplementedConstVal("casting a bytestr to slice")) }, @@ -645,7 +662,7 @@ fn cast_const<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } -fn lit_to_const<'a, 'tcx>(lit: &ast::LitKind, +fn lit_to_const<'a, 'tcx>(lit: &'tcx ast::LitKind, tcx: TyCtxt<'a, 'tcx, 'tcx>, mut ty: Ty<'tcx>) -> Result, ErrKind<'tcx>> { @@ -660,19 +677,19 @@ fn lit_to_const<'a, 'tcx>(lit: &ast::LitKind, match *lit { LitKind::Str(ref s, _) => Ok(Str(s.as_str())), - LitKind::ByteStr(ref data) => Ok(ByteStr(data.clone())), + LitKind::ByteStr(ref data) => Ok(ByteStr(ByteArray { data })), LitKind::Byte(n) => Ok(Integral(U8(n))), LitKind::Int(n, hint) => { match (&ty.sty, hint) { (&ty::TyInt(ity), _) | (_, Signed(ity)) => { Ok(Integral(ConstInt::new_signed_truncating(n as i128, - ity, tcx.sess.target.int_type))) + ity, tcx.sess.target.isize_ty))) } (&ty::TyUint(uty), _) | (_, Unsigned(uty)) => { Ok(Integral(ConstInt::new_unsigned_truncating(n as u128, - uty, tcx.sess.target.uint_type))) + uty, tcx.sess.target.usize_ty))) } _ => bug!() } @@ -708,8 +725,8 @@ pub fn compare_const_vals(tcx: TyCtxt, span: Span, a: &ConstVal, b: &ConstVal) (&Float(a), &Float(b)) => a.try_cmp(b).ok(), (&Str(ref a), &Str(ref b)) => Some(a.cmp(b)), (&Bool(a), &Bool(b)) => Some(a.cmp(&b)), - (&ByteStr(ref a), &ByteStr(ref b)) => Some(a.cmp(b)), - (&Char(a), &Char(ref b)) => Some(a.cmp(b)), + (&ByteStr(a), &ByteStr(b)) => Some(a.data.cmp(b.data)), + (&Char(a), &Char(b)) => Some(a.cmp(&b)), _ => None, }; @@ -717,10 +734,8 @@ pub fn compare_const_vals(tcx: TyCtxt, span: Span, a: &ConstVal, b: &ConstVal) Some(result) => Ok(result), None => { // FIXME: can this ever be reached? 
- span_err!(tcx.sess, span, E0298, - "type mismatch comparing {} and {}", - a.description(), - b.description()); + tcx.sess.delay_span_bug(span, + &format!("type mismatch comparing {:?} and {:?}", a, b)); Err(ErrorReported) } } @@ -729,8 +744,8 @@ pub fn compare_const_vals(tcx: TyCtxt, span: Span, a: &ConstVal, b: &ConstVal) impl<'a, 'tcx> ConstContext<'a, 'tcx> { pub fn compare_lit_exprs(&self, span: Span, - a: &Expr, - b: &Expr) -> Result { + a: &'tcx Expr, + b: &'tcx Expr) -> Result { let tcx = self.tcx; let a = match self.eval(a) { Ok(a) => a, @@ -746,7 +761,7 @@ impl<'a, 'tcx> ConstContext<'a, 'tcx> { return Err(ErrorReported); } }; - compare_const_vals(tcx, span, &a, &b) + compare_const_vals(tcx, span, &a.val, &b.val) } } @@ -774,7 +789,7 @@ fn const_eval<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, tcx.mir_const_qualif(def_id); tcx.hir.body(tcx.hir.body_owned_by(id)) } else { - tcx.sess.cstore.item_body(tcx, def_id) + tcx.extern_const_body(def_id).body }; ConstContext::new(tcx, key.param_env.and(substs), tables).eval(&body.value) } diff --git a/src/librustc_const_eval/lib.rs b/src/librustc_const_eval/lib.rs index 9fedee80d4..0c3606cab1 100644 --- a/src/librustc_const_eval/lib.rs +++ b/src/librustc_const_eval/lib.rs @@ -23,9 +23,11 @@ #![feature(slice_patterns)] #![feature(box_patterns)] #![feature(box_syntax)] -#![feature(const_fn)] #![feature(i128_type)] +#![cfg_attr(stage0, feature(const_fn))] +#![cfg_attr(not(stage0), feature(const_min_value))] + extern crate arena; #[macro_use] extern crate syntax; #[macro_use] extern crate log; diff --git a/src/librustc_const_eval/pattern.rs b/src/librustc_const_eval/pattern.rs index ba79f775ef..d7a16e9d2f 100644 --- a/src/librustc_const_eval/pattern.rs +++ b/src/librustc_const_eval/pattern.rs @@ -83,12 +83,12 @@ pub enum PatternKind<'tcx> { }, Constant { - value: ConstVal<'tcx>, + value: &'tcx ty::Const<'tcx>, }, Range { - lo: ConstVal<'tcx>, - hi: ConstVal<'tcx>, + lo: &'tcx ty::Const<'tcx>, + hi: &'tcx ty::Const<'tcx>, end: RangeEnd, }, @@ -112,15 +112,13 @@ fn print_const_val(value: &ConstVal, f: &mut fmt::Formatter) -> fmt::Result { ConstVal::Float(ref x) => write!(f, "{}", x), ConstVal::Integral(ref i) => write!(f, "{}", i), ConstVal::Str(ref s) => write!(f, "{:?}", &s[..]), - ConstVal::ByteStr(ref b) => write!(f, "{:?}", &b[..]), + ConstVal::ByteStr(b) => write!(f, "{:?}", b.data), ConstVal::Bool(b) => write!(f, "{:?}", b), ConstVal::Char(c) => write!(f, "{:?}", c), ConstVal::Variant(_) | - ConstVal::Struct(_) | - ConstVal::Tuple(_) | ConstVal::Function(..) | - ConstVal::Array(..) | - ConstVal::Repeat(..) => bug!("{:?} not printable in a pattern", value) + ConstVal::Aggregate(_) | + ConstVal::Unevaluated(..) 
=> bug!("{:?} not printable in a pattern", value) } } @@ -230,16 +228,16 @@ impl<'tcx> fmt::Display for Pattern<'tcx> { } write!(f, "{}", subpattern) } - PatternKind::Constant { ref value } => { - print_const_val(value, f) + PatternKind::Constant { value } => { + print_const_val(&value.val, f) } - PatternKind::Range { ref lo, ref hi, ref end } => { - print_const_val(lo, f)?; - match *end { + PatternKind::Range { lo, hi, end } => { + print_const_val(&lo.val, f)?; + match end { RangeEnd::Included => write!(f, "...")?, RangeEnd::Excluded => write!(f, "..")?, } - print_const_val(hi, f) + print_const_val(&hi.val, f) } PatternKind::Slice { ref prefix, ref slice, ref suffix } | PatternKind::Array { ref prefix, ref slice, ref suffix } => { @@ -278,7 +276,7 @@ impl<'a, 'tcx> Pattern<'tcx> { pub fn from_hir(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env_and_substs: ty::ParamEnvAnd<'tcx, &'tcx Substs<'tcx>>, tables: &'a ty::TypeckTables<'tcx>, - pat: &hir::Pat) -> Self { + pat: &'tcx hir::Pat) -> Self { let mut pcx = PatternContext::new(tcx, param_env_and_substs, tables); let result = pcx.lower_pattern(pat); if !pcx.errors.is_empty() { @@ -302,7 +300,45 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { } } - pub fn lower_pattern(&mut self, pat: &hir::Pat) -> Pattern<'tcx> { + pub fn lower_pattern(&mut self, pat: &'tcx hir::Pat) -> Pattern<'tcx> { + // When implicit dereferences have been inserted in this pattern, the unadjusted lowered + // pattern has the type that results *after* dereferencing. For example, in this code: + // + // ``` + // match &&Some(0i32) { + // Some(n) => { ... }, + // _ => { ... }, + // } + // ``` + // + // the type assigned to `Some(n)` in `unadjusted_pat` would be `Option` (this is + // determined in rustc_typeck::check::match). The adjustments would be + // + // `vec![&&Option, &Option]`. + // + // Applying the adjustments, we want to instead output `&&Some(n)` (as a HAIR pattern). So + // we wrap the unadjusted pattern in `PatternKind::Deref` repeatedly, consuming the + // adjustments in *reverse order* (last-in-first-out, so that the last `Deref` inserted + // gets the least-dereferenced type). 
+ let unadjusted_pat = self.lower_pattern_unadjusted(pat); + self.tables + .pat_adjustments() + .get(pat.hir_id) + .unwrap_or(&vec![]) + .iter() + .rev() + .fold(unadjusted_pat, |pat, ref_ty| { + debug!("{:?}: wrapping pattern with type {:?}", pat, ref_ty); + Pattern { + span: pat.span, + ty: ref_ty, + kind: Box::new(PatternKind::Deref { subpattern: pat }), + } + }, + ) + } + + fn lower_pattern_unadjusted(&mut self, pat: &'tcx hir::Pat) -> Pattern<'tcx> { let mut ty = self.tables.node_id_to_type(pat.hir_id); let kind = match pat.node { @@ -310,11 +346,11 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { PatKind::Lit(ref value) => self.lower_lit(value), - PatKind::Range(ref lo, ref hi, ref end) => { + PatKind::Range(ref lo, ref hi, end) => { match (self.lower_lit(lo), self.lower_lit(hi)) { (PatternKind::Constant { value: lo }, PatternKind::Constant { value: hi }) => { - PatternKind::Range { lo: lo, hi: hi, end: end.clone() } + PatternKind::Range { lo, hi, end } } _ => PatternKind::Wild } @@ -374,8 +410,7 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { } } - PatKind::Binding(_, def_id, ref ident, ref sub) => { - let id = self.tcx.hir.as_local_node_id(def_id).unwrap(); + PatKind::Binding(_, id, ref ident, ref sub) => { let var_ty = self.tables.node_id_to_type(pat.hir_id); let region = match var_ty.sty { ty::TyRef(r, _) => Some(r), @@ -475,11 +510,11 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { } } - fn lower_patterns(&mut self, pats: &[P]) -> Vec> { + fn lower_patterns(&mut self, pats: &'tcx [P]) -> Vec> { pats.iter().map(|p| self.lower_pattern(p)).collect() } - fn lower_opt_pattern(&mut self, pat: &Option>) -> Option> + fn lower_opt_pattern(&mut self, pat: &'tcx Option>) -> Option> { pat.as_ref().map(|p| self.lower_pattern(p)) } @@ -522,9 +557,9 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { &mut self, span: Span, ty: Ty<'tcx>, - prefix: &[P], - slice: &Option>, - suffix: &[P]) + prefix: &'tcx [P], + slice: &'tcx Option>, + suffix: &'tcx [P]) -> PatternKind<'tcx> { let prefix = self.lower_patterns(prefix); @@ -541,7 +576,8 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { ty::TyArray(_, len) => { // fixed-length array - assert!(len >= prefix.len() + suffix.len()); + let len = len.val.to_const_int().unwrap().to_u64().unwrap(); + assert!(len >= prefix.len() as u64 + suffix.len() as u64); PatternKind::Array { prefix: prefix, slice: slice, suffix: suffix } } @@ -609,7 +645,7 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { let body = if let Some(id) = self.tcx.hir.as_local_node_id(def_id) { self.tcx.hir.body(self.tcx.hir.body_owned_by(id)) } else { - self.tcx.sess.cstore.item_body(self.tcx, def_id) + self.tcx.extern_const_body(def_id).body }; let pat = self.lower_const_expr(&body.value, pat_id, span); self.tables = old_tables; @@ -632,17 +668,17 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { } } - fn lower_lit(&mut self, expr: &hir::Expr) -> PatternKind<'tcx> { + fn lower_lit(&mut self, expr: &'tcx hir::Expr) -> PatternKind<'tcx> { let const_cx = eval::ConstContext::new(self.tcx, self.param_env.and(self.substs), self.tables); match const_cx.eval(expr) { Ok(value) => { - if let ConstVal::Variant(def_id) = value { + if let ConstVal::Variant(def_id) = value.val { let ty = self.tables.expr_ty(expr); self.lower_variant_or_leaf(Def::Variant(def_id), ty, vec![]) } else { - PatternKind::Constant { value: value } + PatternKind::Constant { value } } } Err(e) => { @@ -653,7 +689,7 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { } fn lower_const_expr(&mut self, - expr: &hir::Expr, + expr: &'tcx hir::Expr, pat_id: ast::NodeId, 
span: Span) -> Pattern<'tcx> { @@ -820,8 +856,8 @@ macro_rules! CloneImpls { } CloneImpls!{ <'tcx> - Span, Field, Mutability, ast::Name, ast::NodeId, usize, ConstVal<'tcx>, Region<'tcx>, - Ty<'tcx>, BindingMode<'tcx>, &'tcx AdtDef, + Span, Field, Mutability, ast::Name, ast::NodeId, usize, &'tcx ty::Const<'tcx>, + Region<'tcx>, Ty<'tcx>, BindingMode<'tcx>, &'tcx AdtDef, &'tcx Substs<'tcx>, &'tcx Kind<'tcx> } @@ -893,18 +929,18 @@ impl<'tcx> PatternFoldable<'tcx> for PatternKind<'tcx> { subpattern: subpattern.fold_with(folder), }, PatternKind::Constant { - ref value + value } => PatternKind::Constant { value: value.fold_with(folder) }, PatternKind::Range { - ref lo, - ref hi, - ref end, + lo, + hi, + end, } => PatternKind::Range { lo: lo.fold_with(folder), hi: hi.fold_with(folder), - end: end.clone(), + end, }, PatternKind::Slice { ref prefix, diff --git a/src/librustc_const_math/int.rs b/src/librustc_const_math/int.rs index 65471416e8..08473d729e 100644 --- a/src/librustc_const_math/int.rs +++ b/src/librustc_const_math/int.rs @@ -311,17 +311,13 @@ impl ::std::fmt::Display for ConstInt { I32(i) => write!(fmt, "{}i32", i), I64(i) => write!(fmt, "{}i64", i), I128(i) => write!(fmt, "{}i128", i), - Isize(ConstIsize::Is64(i)) => write!(fmt, "{}isize", i), - Isize(ConstIsize::Is32(i)) => write!(fmt, "{}isize", i), - Isize(ConstIsize::Is16(i)) => write!(fmt, "{}isize", i), + Isize(i) => write!(fmt, "{}isize", i), U8(i) => write!(fmt, "{}u8", i), U16(i) => write!(fmt, "{}u16", i), U32(i) => write!(fmt, "{}u32", i), U64(i) => write!(fmt, "{}u64", i), U128(i) => write!(fmt, "{}u128", i), - Usize(ConstUsize::Us64(i)) => write!(fmt, "{}usize", i), - Usize(ConstUsize::Us32(i)) => write!(fmt, "{}usize", i), - Usize(ConstUsize::Us16(i)) => write!(fmt, "{}usize", i), + Usize(i) => write!(fmt, "{}usize", i), } } } diff --git a/src/librustc_const_math/is.rs b/src/librustc_const_math/is.rs index 8f28ba14c6..50dfb60112 100644 --- a/src/librustc_const_math/is.rs +++ b/src/librustc_const_math/is.rs @@ -21,18 +21,22 @@ pub enum ConstIsize { } pub use self::ConstIsize::*; +impl ::std::fmt::Display for ConstIsize { + fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { + write!(fmt, "{}", self.as_i64()) + } +} + impl ConstIsize { - pub fn as_i64(self, target_int_ty: ast::IntTy) -> i64 { - match (self, target_int_ty) { - (Is16(i), ast::IntTy::I16) => i as i64, - (Is32(i), ast::IntTy::I32) => i as i64, - (Is64(i), ast::IntTy::I64) => i, - _ => panic!("unable to convert self ({:?}) to target isize ({:?})", - self, target_int_ty), + pub fn as_i64(self) -> i64 { + match self { + Is16(i) => i as i64, + Is32(i) => i as i64, + Is64(i) => i, } } - pub fn new(i: i64, target_int_ty: ast::IntTy) -> Result { - match target_int_ty { + pub fn new(i: i64, isize_ty: ast::IntTy) -> Result { + match isize_ty { ast::IntTy::I16 if i as i16 as i64 == i => Ok(Is16(i as i16)), ast::IntTy::I16 => Err(LitOutOfRange(ast::IntTy::Is)), ast::IntTy::I32 if i as i32 as i64 == i => Ok(Is32(i as i32)), @@ -41,8 +45,8 @@ impl ConstIsize { _ => unreachable!(), } } - pub fn new_truncating(i: i128, target_int_ty: ast::IntTy) -> Self { - match target_int_ty { + pub fn new_truncating(i: i128, isize_ty: ast::IntTy) -> Self { + match isize_ty { ast::IntTy::I16 => Is16(i as i16), ast::IntTy::I32 => Is32(i as i32), ast::IntTy::I64 => Is64(i as i64), diff --git a/src/librustc_const_math/lib.rs b/src/librustc_const_math/lib.rs index 93b70ef8e4..0533f10104 100644 --- a/src/librustc_const_math/lib.rs +++ 
b/src/librustc_const_math/lib.rs @@ -19,10 +19,13 @@ html_root_url = "https://doc.rust-lang.org/nightly/")] #![deny(warnings)] -#![feature(const_fn)] #![feature(i128)] #![feature(i128_type)] +#![cfg_attr(stage0, feature(const_fn))] +#![cfg_attr(not(stage0), feature(const_min_value))] +#![cfg_attr(not(stage0), feature(const_max_value))] + extern crate rustc_apfloat; extern crate syntax; diff --git a/src/librustc_const_math/us.rs b/src/librustc_const_math/us.rs index 76443f584a..9876bc4d77 100644 --- a/src/librustc_const_math/us.rs +++ b/src/librustc_const_math/us.rs @@ -21,18 +21,22 @@ pub enum ConstUsize { } pub use self::ConstUsize::*; +impl ::std::fmt::Display for ConstUsize { + fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { + write!(fmt, "{}", self.as_u64()) + } +} + impl ConstUsize { - pub fn as_u64(self, target_uint_ty: ast::UintTy) -> u64 { - match (self, target_uint_ty) { - (Us16(i), ast::UintTy::U16) => i as u64, - (Us32(i), ast::UintTy::U32) => i as u64, - (Us64(i), ast::UintTy::U64) => i, - _ => panic!("unable to convert self ({:?}) to target usize ({:?})", - self, target_uint_ty), + pub fn as_u64(self) -> u64 { + match self { + Us16(i) => i as u64, + Us32(i) => i as u64, + Us64(i) => i, } } - pub fn new(i: u64, target_uint_ty: ast::UintTy) -> Result { - match target_uint_ty { + pub fn new(i: u64, usize_ty: ast::UintTy) -> Result { + match usize_ty { ast::UintTy::U16 if i as u16 as u64 == i => Ok(Us16(i as u16)), ast::UintTy::U16 => Err(ULitOutOfRange(ast::UintTy::Us)), ast::UintTy::U32 if i as u32 as u64 == i => Ok(Us32(i as u32)), @@ -41,8 +45,8 @@ impl ConstUsize { _ => unreachable!(), } } - pub fn new_truncating(i: u128, target_uint_ty: ast::UintTy) -> Self { - match target_uint_ty { + pub fn new_truncating(i: u128, usize_ty: ast::UintTy) -> Self { + match usize_ty { ast::UintTy::U16 => Us16(i as u16), ast::UintTy::U32 => Us32(i as u32), ast::UintTy::U64 => Us64(i as u64), diff --git a/src/librustc_cratesio_shim/Cargo.toml b/src/librustc_cratesio_shim/Cargo.toml new file mode 100644 index 0000000000..143f88e8f4 --- /dev/null +++ b/src/librustc_cratesio_shim/Cargo.toml @@ -0,0 +1,23 @@ +# This crate exists to allow rustc to link certain crates from crates.io into +# the distribution. This doesn't work normally because: +# +# - Cargo always builds dependencies as rlibs: +# https://github.com/rust-lang/cargo/issues/629 +# - rustc wants to avoid multiple definitions of the same symbol, so it refuses +# to link multiple dylibs containing the same rlib +# - multiple dylibs depend on the same crates.io crates +# +# This solution works by including all the conflicting rlibs in a single dylib, +# which is then linked into all dylibs that depend on these crates.io crates. +# The result is that each rlib only appears once, and things work! + +[package] +authors = ["The Rust Project Developers"] +name = "rustc_cratesio_shim" +version = "0.0.0" + +[lib] +crate-type = ["dylib"] + +[dependencies] +bitflags = "1.0" diff --git a/src/librustc_cratesio_shim/src/lib.rs b/src/librustc_cratesio_shim/src/lib.rs new file mode 100644 index 0000000000..769b4f5720 --- /dev/null +++ b/src/librustc_cratesio_shim/src/lib.rs @@ -0,0 +1,14 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +// See Cargo.toml for a comment explaining this crate. +#![allow(unused_extern_crates)] + +extern crate bitflags; diff --git a/src/librustc_data_structures/bitslice.rs b/src/librustc_data_structures/bitslice.rs index f74af6ee16..7665bfd5b1 100644 --- a/src/librustc_data_structures/bitslice.rs +++ b/src/librustc_data_structures/bitslice.rs @@ -132,6 +132,11 @@ pub trait BitwiseOperator { fn join(&self, pred1: usize, pred2: usize) -> usize; } +pub struct Intersect; +impl BitwiseOperator for Intersect { + #[inline] + fn join(&self, a: usize, b: usize) -> usize { a & b } +} pub struct Union; impl BitwiseOperator for Union { #[inline] diff --git a/src/librustc_data_structures/bitvec.rs b/src/librustc_data_structures/bitvec.rs index 7331016c2d..e8f9a67208 100644 --- a/src/librustc_data_structures/bitvec.rs +++ b/src/librustc_data_structures/bitvec.rs @@ -138,7 +138,7 @@ impl FromIterator for BitVector { /// A "bit matrix" is basically a matrix of booleans represented as /// one gigantic bitvector. In other words, it is as if you have /// `rows` bitvectors, each of length `columns`. -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct BitMatrix { columns: usize, vector: Vec, diff --git a/src/librustc_data_structures/graph/mod.rs b/src/librustc_data_structures/graph/mod.rs index a5f83ce05f..474622f366 100644 --- a/src/librustc_data_structures/graph/mod.rs +++ b/src/librustc_data_structures/graph/mod.rs @@ -114,6 +114,13 @@ impl Graph { } } + pub fn with_capacity(nodes: usize, edges: usize) -> Graph { + Graph { + nodes: SnapshotVec::with_capacity(nodes), + edges: SnapshotVec::with_capacity(edges), + } + } + // # Simple accessors #[inline] diff --git a/src/librustc_data_structures/indexed_set.rs b/src/librustc_data_structures/indexed_set.rs index 9cb6806e9a..c790463e47 100644 --- a/src/librustc_data_structures/indexed_set.rs +++ b/src/librustc_data_structures/indexed_set.rs @@ -9,11 +9,13 @@ // except according to those terms. use std::fmt; +use std::iter; use std::marker::PhantomData; use std::mem; use std::ops::{Deref, DerefMut, Range}; +use std::slice; use bitslice::{BitSlice, Word}; -use bitslice::{bitwise, Union, Subtract}; +use bitslice::{bitwise, Union, Subtract, Intersect}; use indexed_vec::Idx; /// Represents a set (or packed family of sets), of some element type @@ -21,6 +23,7 @@ use indexed_vec::Idx; /// /// In other words, `T` is the type used to index into the bitvector /// this type uses to represent the set of object it holds. +#[derive(Eq, PartialEq)] pub struct IdxSetBuf { _pd: PhantomData, bits: Vec, @@ -109,6 +112,13 @@ impl IdxSet { } } + /// Removes all elements + pub fn clear(&mut self) { + for b in &mut self.bits { + *b = 0; + } + } + /// Removes `elem` from the set `self`; returns true iff this changed `self`. pub fn remove(&mut self, elem: &T) -> bool { self.bits.clear_bit(elem.index()) @@ -154,6 +164,18 @@ impl IdxSet { bitwise(self.words_mut(), other.words(), &Subtract) } + pub fn intersect(&mut self, other: &IdxSet) -> bool { + bitwise(self.words_mut(), other.words(), &Intersect) + } + + pub fn iter(&self) -> Iter { + Iter { + cur: None, + iter: self.words().iter().enumerate(), + _pd: PhantomData, + } + } + /// Calls `f` on each index value held in this set, up to the /// bound `max_bits` on the size of universe of indexes. 
pub fn each_bit(&self, max_bits: usize, f: F) where F: FnMut(T) { @@ -218,3 +240,32 @@ fn each_bit(words: &IdxSet, max_bits: usize, mut f: F) where F: Fn } } } + +pub struct Iter<'a, T: Idx> { + cur: Option<(Word, usize)>, + iter: iter::Enumerate>, + _pd: PhantomData, +} + +impl<'a, T: Idx> Iterator for Iter<'a, T> { + type Item = T; + + fn next(&mut self) -> Option { + let word_bits = mem::size_of::() * 8; + loop { + if let Some((ref mut word, offset)) = self.cur { + let bit_pos = word.trailing_zeros() as usize; + if bit_pos != word_bits { + let bit = 1 << bit_pos; + *word ^= bit; + return Some(T::new(bit_pos + offset)) + } + } + + match self.iter.next() { + Some((i, word)) => self.cur = Some((*word, word_bits * i)), + None => return None, + } + } + } +} diff --git a/src/librustc_data_structures/indexed_vec.rs b/src/librustc_data_structures/indexed_vec.rs index bc9bfa4049..4b7f55eba0 100644 --- a/src/librustc_data_structures/indexed_vec.rs +++ b/src/librustc_data_structures/indexed_vec.rs @@ -38,7 +38,44 @@ impl Idx for u32 { fn index(self) -> usize { self as usize } } -#[derive(Clone)] +#[macro_export] +macro_rules! newtype_index { + ($name:ident) => ( + newtype_index!($name, unsafe { ::std::intrinsics::type_name::<$name>() }); + ); + + ($name:ident, $debug_name:expr) => ( + #[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, + RustcEncodable, RustcDecodable)] + pub struct $name(u32); + + impl $name { + // HACK use for constants + #[allow(unused)] + const fn const_new(x: u32) -> Self { + $name(x) + } + } + + impl Idx for $name { + fn new(value: usize) -> Self { + assert!(value < (::std::u32::MAX) as usize); + $name(value as u32) + } + fn index(self) -> usize { + self.0 as usize + } + } + + impl ::std::fmt::Debug for $name { + fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + write!(fmt, "{}{}", $debug_name, self.0) + } + } + ) +} + +#[derive(Clone, PartialEq, Eq)] pub struct IndexVec { pub raw: Vec, _marker: PhantomData diff --git a/src/librustc_data_structures/snapshot_vec.rs b/src/librustc_data_structures/snapshot_vec.rs index dac074ab91..2da9191828 100644 --- a/src/librustc_data_structures/snapshot_vec.rs +++ b/src/librustc_data_structures/snapshot_vec.rs @@ -66,6 +66,13 @@ impl SnapshotVec { } } + pub fn with_capacity(n: usize) -> SnapshotVec { + SnapshotVec { + values: Vec::with_capacity(n), + undo_log: Vec::new(), + } + } + fn in_snapshot(&self) -> bool { !self.undo_log.is_empty() } diff --git a/src/librustc_data_structures/stable_hasher.rs b/src/librustc_data_structures/stable_hasher.rs index 6801aa455e..9aba48c5be 100644 --- a/src/librustc_data_structures/stable_hasher.rs +++ b/src/librustc_data_structures/stable_hasher.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::hash::{Hash, Hasher}; +use std::hash::{Hash, Hasher, BuildHasher}; use std::marker::PhantomData; use std::mem; use blake2b::Blake2bHasher; @@ -192,6 +192,28 @@ impl Hasher for StableHasher { } +/// Something that can provide a stable hashing context. 
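The `Iter` added above walks the set word by word and peels off set bits with `trailing_zeros`. A self-contained sketch of the same technique, assuming plain `u64` words instead of rustc's `Word`/`Idx` machinery:

```rust
// Iterate the indices of set bits in a word-based bitset by repeatedly taking
// the lowest set bit of the current word (trailing_zeros), clearing it, and
// then moving on to the next word.
fn set_bits(words: &[u64]) -> Vec<usize> {
    let word_bits = 64;
    let mut out = Vec::new();
    for (i, &w) in words.iter().enumerate() {
        let mut word = w;
        while word != 0 {
            let bit_pos = word.trailing_zeros() as usize;
            word ^= 1u64 << bit_pos; // clear the bit we just reported
            out.push(i * word_bits + bit_pos);
        }
    }
    out
}

fn main() {
    // Bits 0 and 3 set in the first word, bit 1 set in the second word,
    // i.e. overall indices 0, 3 and 65.
    assert_eq!(set_bits(&[0b1001, 0b10]), vec![0, 3, 65]);
    println!("ok");
}
```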
+pub trait StableHashingContextProvider { + type ContextType; + fn create_stable_hashing_context(&self) -> Self::ContextType; +} + +impl<'a, T: StableHashingContextProvider> StableHashingContextProvider for &'a T { + type ContextType = T::ContextType; + + fn create_stable_hashing_context(&self) -> Self::ContextType { + (**self).create_stable_hashing_context() + } +} + +impl<'a, T: StableHashingContextProvider> StableHashingContextProvider for &'a mut T { + type ContextType = T::ContextType; + + fn create_stable_hashing_context(&self) -> Self::ContextType { + (**self).create_stable_hashing_context() + } +} + /// Something that implements `HashStable` can be hashed in a way that is /// stable across multiple compilation sessions. pub trait HashStable { @@ -200,6 +222,14 @@ pub trait HashStable { hasher: &mut StableHasher); } +/// Implement this for types that can be turned into stable keys like, for +/// example, for DefId that can be converted to a DefPathHash. This is used for +/// bringing maps into a predictable order before hashing them. +pub trait ToStableHashKey { + type KeyType: Ord + Clone + Sized + HashStable; + fn to_stable_hash_key(&self, hcx: &HCX) -> Self::KeyType; +} + // Implement HashStable by just calling `Hash::hash()`. This works fine for // self-contained values that don't depend on the hashing context `CTX`. macro_rules! impl_stable_hash_via_hash { @@ -259,7 +289,8 @@ impl, CTX> HashStable for (T1,) { fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) { - self.0.hash_stable(ctx, hasher); + let (ref _0,) = *self; + _0.hash_stable(ctx, hasher); } } @@ -267,8 +298,24 @@ impl, T2: HashStable, CTX> HashStable for (T1, T2) fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) { - self.0.hash_stable(ctx, hasher); - self.1.hash_stable(ctx, hasher); + let (ref _0, ref _1) = *self; + _0.hash_stable(ctx, hasher); + _1.hash_stable(ctx, hasher); + } +} + +impl HashStable for (T1, T2, T3) + where T1: HashStable, + T2: HashStable, + T3: HashStable, +{ + fn hash_stable(&self, + ctx: &mut CTX, + hasher: &mut StableHasher) { + let (ref _0, ref _1, ref _2) = *self; + _0.hash_stable(ctx, hasher); + _1.hash_stable(ctx, hasher); + _2.hash_stable(ctx, hasher); } } @@ -292,7 +339,7 @@ impl, CTX> HashStable for Vec { } } -impl, CTX> HashStable for ::std::rc::Rc { +impl, CTX> HashStable for Box { #[inline] fn hash_stable(&self, ctx: &mut CTX, @@ -301,7 +348,16 @@ impl, CTX> HashStable for ::std::rc::Rc { } } -impl, CTX> HashStable for ::std::sync::Arc { +impl, CTX> HashStable for ::std::rc::Rc { + #[inline] + fn hash_stable(&self, + ctx: &mut CTX, + hasher: &mut StableHasher) { + (**self).hash_stable(ctx, hasher); + } +} + +impl, CTX> HashStable for ::std::sync::Arc { #[inline] fn hash_stable(&self, ctx: &mut CTX, @@ -330,6 +386,14 @@ impl HashStable for String { } } +impl ToStableHashKey for String { + type KeyType = String; + #[inline] + fn to_stable_hash_key(&self, _: &HCX) -> Self::KeyType { + self.clone() + } +} + impl HashStable for bool { #[inline] fn hash_stable(&self, @@ -356,8 +420,24 @@ impl HashStable for Option } } +impl HashStable for Result + where T1: HashStable, + T2: HashStable, +{ + #[inline] + fn hash_stable(&self, + ctx: &mut CTX, + hasher: &mut StableHasher) { + mem::discriminant(self).hash_stable(ctx, hasher); + match *self { + Ok(ref x) => x.hash_stable(ctx, hasher), + Err(ref x) => x.hash_stable(ctx, hasher), + } + } +} + impl<'a, T, CTX> HashStable for &'a T - where T: HashStable + where T: HashStable + ?Sized { #[inline] fn hash_stable(&self, @@ 
-376,43 +456,139 @@ impl HashStable for ::std::mem::Discriminant { } } -impl HashStable for ::std::collections::BTreeMap - where K: Ord + HashStable, - V: HashStable, +impl HashStable for ::indexed_vec::IndexVec + where T: HashStable, { fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) { self.len().hash_stable(ctx, hasher); - for (k, v) in self { - k.hash_stable(ctx, hasher); + for v in &self.raw { v.hash_stable(ctx, hasher); } } } -impl HashStable for ::std::collections::BTreeSet - where T: Ord + HashStable, + +impl HashStable for ::indexed_set::IdxSetBuf { fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) { - self.len().hash_stable(ctx, hasher); - for v in self { - v.hash_stable(ctx, hasher); - } + self.words().hash_stable(ctx, hasher); } } -impl HashStable for ::indexed_vec::IndexVec - where T: HashStable, +impl_stable_hash_via_hash!(::std::path::Path); +impl_stable_hash_via_hash!(::std::path::PathBuf); + +impl HashStable for ::std::collections::HashMap + where K: ToStableHashKey + Eq + Hash, + V: HashStable, + R: BuildHasher, { + #[inline] fn hash_stable(&self, - ctx: &mut CTX, + hcx: &mut HCX, hasher: &mut StableHasher) { - self.len().hash_stable(ctx, hasher); - for v in &self.raw { - v.hash_stable(ctx, hasher); - } + hash_stable_hashmap(hcx, hasher, self, ToStableHashKey::to_stable_hash_key); + } +} + +impl HashStable for ::std::collections::HashSet + where K: ToStableHashKey + Eq + Hash, + R: BuildHasher, +{ + fn hash_stable(&self, + hcx: &mut HCX, + hasher: &mut StableHasher) { + let mut keys: Vec<_> = self.iter() + .map(|k| k.to_stable_hash_key(hcx)) + .collect(); + keys.sort_unstable(); + keys.hash_stable(hcx, hasher); + } +} + +impl HashStable for ::std::collections::BTreeMap + where K: ToStableHashKey, + V: HashStable, +{ + fn hash_stable(&self, + hcx: &mut HCX, + hasher: &mut StableHasher) { + let mut entries: Vec<_> = self.iter() + .map(|(k, v)| (k.to_stable_hash_key(hcx), v)) + .collect(); + entries.sort_unstable_by(|&(ref sk1, _), &(ref sk2, _)| sk1.cmp(sk2)); + entries.hash_stable(hcx, hasher); + } +} + +impl HashStable for ::std::collections::BTreeSet + where K: ToStableHashKey, +{ + fn hash_stable(&self, + hcx: &mut HCX, + hasher: &mut StableHasher) { + let mut keys: Vec<_> = self.iter() + .map(|k| k.to_stable_hash_key(hcx)) + .collect(); + keys.sort_unstable(); + keys.hash_stable(hcx, hasher); + } +} + +pub fn hash_stable_hashmap( + hcx: &mut HCX, + hasher: &mut StableHasher, + map: &::std::collections::HashMap, + to_stable_hash_key: F) + where K: Eq + Hash, + V: HashStable, + R: BuildHasher, + SK: HashStable + Ord + Clone, + F: Fn(&K, &HCX) -> SK, + W: StableHasherResult, +{ + let mut entries: Vec<_> = map.iter() + .map(|(k, v)| (to_stable_hash_key(k, hcx), v)) + .collect(); + entries.sort_unstable_by(|&(ref sk1, _), &(ref sk2, _)| sk1.cmp(sk2)); + entries.hash_stable(hcx, hasher); +} + + +/// A vector container that makes sure that its items are hashed in a stable +/// order. 
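The map and set impls above all follow one recipe: project each key through `to_stable_hash_key`, sort the projections, then hash the sorted sequence, so the result never depends on hash-table iteration order. A hedged sketch of that recipe, with the standard library's `DefaultHasher` standing in for `StableHasher` and a plain `String` key in place of a stable key type:

```rust
use std::collections::HashMap;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Hash a HashMap independently of its iteration order: collect the entries,
// sort them by key, then feed length and sorted entries to the hasher.
fn hash_map_stably<V: Hash>(map: &HashMap<String, V>, hasher: &mut DefaultHasher) {
    let mut entries: Vec<(&String, &V)> = map.iter().collect();
    entries.sort_unstable_by(|a, b| a.0.cmp(b.0));
    entries.len().hash(hasher);
    for (k, v) in entries {
        k.hash(hasher);
        v.hash(hasher);
    }
}

fn main() {
    let mut a = HashMap::new();
    a.insert("x".to_string(), 1u32);
    a.insert("y".to_string(), 2u32);

    let mut b = HashMap::new();
    b.insert("y".to_string(), 2u32);
    b.insert("x".to_string(), 1u32);

    let (mut ha, mut hb) = (DefaultHasher::new(), DefaultHasher::new());
    hash_map_stably(&a, &mut ha);
    hash_map_stably(&b, &mut hb);
    // Same contents hash the same way regardless of insertion or
    // iteration order.
    assert_eq!(ha.finish(), hb.finish());
    println!("order-independent hash: {}", ha.finish());
}
```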
+pub struct StableVec(Vec); + +impl StableVec { + pub fn new(v: Vec) -> Self { + StableVec(v) + } +} + +impl ::std::ops::Deref for StableVec { + type Target = Vec; + + fn deref(&self) -> &Vec { + &self.0 + } +} + +impl HashStable for StableVec + where T: HashStable + ToStableHashKey +{ + fn hash_stable(&self, + hcx: &mut HCX, + hasher: &mut StableHasher) { + let StableVec(ref v) = *self; + + let mut sorted: Vec<_> = v.iter() + .map(|x| x.to_stable_hash_key(hcx)) + .collect(); + sorted.sort_unstable(); + sorted.hash_stable(hcx, hasher); } } diff --git a/src/librustc_data_structures/transitive_relation.rs b/src/librustc_data_structures/transitive_relation.rs index 4646394404..7cb386b019 100644 --- a/src/librustc_data_structures/transitive_relation.rs +++ b/src/librustc_data_structures/transitive_relation.rs @@ -18,7 +18,7 @@ use std::hash::Hash; use std::mem; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct TransitiveRelation { // List of elements. This is used to map from a T to a usize. elements: Vec, @@ -42,10 +42,10 @@ pub struct TransitiveRelation { closure: RefCell>, } -#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable, Debug)] struct Index(usize); -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)] +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Debug)] struct Edge { source: Index, target: Index, diff --git a/src/librustc_data_structures/unify/mod.rs b/src/librustc_data_structures/unify/mod.rs index 7853bf9478..5411ae0257 100644 --- a/src/librustc_data_structures/unify/mod.rs +++ b/src/librustc_data_structures/unify/mod.rs @@ -119,10 +119,10 @@ impl VarValue { } } -// We can't use V:LatticeValue, much as I would like to, -// because frequently the pattern is that V=Option for some -// other type parameter U, and we have no way to say -// Option:LatticeValue. +/// We can't use V:LatticeValue, much as I would like to, +/// because frequently the pattern is that V=Option for some +/// other type parameter U, and we have no way to say +/// Option:LatticeValue. impl UnificationTable { pub fn new() -> UnificationTable { @@ -249,7 +249,7 @@ impl sv::SnapshotVecDelegate for Delegate { fn reverse(_: &mut Vec>, _: ()) {} } -// # Base union-find algorithm, where we are just making sets +/// # Base union-find algorithm, where we are just making sets impl<'tcx, K: UnifyKey> UnificationTable where K::Value: Combine @@ -281,11 +281,11 @@ impl<'tcx, K: UnifyKey> UnificationTable } } -// # Non-subtyping unification -// -// Code to handle keys which carry a value, like ints, -// floats---anything that doesn't have a subtyping relationship we -// need to worry about. +/// # Non-subtyping unification +/// +/// Code to handle keys which carry a value, like ints, +/// floats---anything that doesn't have a subtyping relationship we +/// need to worry about. impl<'tcx, K, V> UnificationTable where K: UnifyKey>, diff --git a/src/librustc_driver/README.md b/src/librustc_driver/README.md new file mode 100644 index 0000000000..5331a05b5c --- /dev/null +++ b/src/librustc_driver/README.md @@ -0,0 +1,12 @@ +NB: This crate is part of the Rust compiler. For an overview of the +compiler as a whole, see +[the README.md file found in `librustc`](../librustc/README.md). + +The `driver` crate is effectively the "main" function for the rust +compiler. It orchstrates the compilation process and "knits together" +the code from the other crates within rustc. 
This crate itself does +not contain any of the "main logic" of the compiler (though it does +have some code related to pretty printing or other minor compiler +options). + + diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs index 96688c6ac9..ad6f7fbf11 100644 --- a/src/librustc_driver/driver.rs +++ b/src/librustc_driver/driver.rs @@ -8,8 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(not(feature="llvm"), allow(dead_code))] - +use rustc::dep_graph::DepGraph; use rustc::hir::{self, map as hir_map}; use rustc::hir::lowering::lower_crate; use rustc::ich::Fingerprint; @@ -21,20 +20,20 @@ use rustc::session::config::{self, Input, OutputFilenames, OutputType}; use rustc::session::search_paths::PathKind; use rustc::lint; use rustc::middle::{self, stability, reachable}; +use rustc::middle::cstore::CrateStore; use rustc::middle::privacy::AccessLevels; use rustc::mir::transform::{MIR_CONST, MIR_VALIDATED, MIR_OPTIMIZED, Passes}; use rustc::ty::{self, TyCtxt, Resolutions, GlobalArenas}; use rustc::traits; use rustc::util::common::{ErrorReported, time}; -use rustc::util::nodemap::NodeSet; use rustc_allocator as allocator; use rustc_borrowck as borrowck; -use rustc_incremental::{self, IncrementalHashesMap}; +use rustc_incremental; use rustc_resolve::{MakeGlobMap, Resolver}; use rustc_metadata::creader::CrateLoader; use rustc_metadata::cstore::{self, CStore}; -use rustc_trans::back::write; use rustc_trans as trans; +use rustc_trans_utils::trans_crate::TransCrate; use rustc_typeck as typeck; use rustc_privacy; use rustc_plugin::registry::Registry; @@ -42,9 +41,11 @@ use rustc_plugin as plugin; use rustc_passes::{ast_validation, no_asm, loops, consts, static_recursion, hir_stats}; use rustc_const_eval::{self, check_match}; use super::Compilation; +use ::DefaultTransCrate; use serialize::json; +use std::any::Any; use std::env; use std::ffi::{OsString, OsStr}; use std::fs; @@ -52,6 +53,7 @@ use std::io::{self, Write}; use std::iter; use std::path::{Path, PathBuf}; use std::rc::Rc; +use std::sync::mpsc; use syntax::{ast, diagnostics, visit}; use syntax::attr; use syntax::ext::base::ExtCtxt; @@ -73,7 +75,8 @@ pub fn compile_input(sess: &Session, output: &Option, addl_plugins: Option>, control: &CompileController) -> CompileResult { - use rustc_trans::back::write::OngoingCrateTranslation; + use rustc::session::config::CrateType; + macro_rules! controller_entry_point { ($point: ident, $tsess: expr, $make_state: expr, $phase_result: expr) => {{ let state = &mut $make_state; @@ -91,17 +94,16 @@ pub fn compile_input(sess: &Session, } if cfg!(not(feature="llvm")) { - use rustc::session::config::CrateType; - if !sess.opts.debugging_opts.no_trans && sess.opts.output_types.should_trans() { - sess.err("LLVM is not supported by this rustc. 
Please use -Z no-trans to compile") - } - - if sess.opts.crate_types.iter().all(|&t|{ - t != CrateType::CrateTypeRlib && t != CrateType::CrateTypeExecutable - }) && !sess.opts.crate_types.is_empty() { - sess.err( - "LLVM is not supported by this rustc, so non rlib libraries are not supported" - ); + for cty in sess.opts.crate_types.iter() { + match *cty { + CrateType::CrateTypeRlib | CrateType::CrateTypeDylib | + CrateType::CrateTypeExecutable => {}, + _ => { + sess.parse_sess.span_diagnostic.warn( + &format!("LLVM unsupported, so output type {} is not supported", cty) + ); + }, + } } sess.abort_if_errors(); @@ -114,7 +116,7 @@ pub fn compile_input(sess: &Session, // We need nested scopes here, because the intermediate results can keep // large chunks of memory alive and we want to free them as soon as // possible to keep the peak memory usage low - let (outputs, trans): (OutputFilenames, OngoingCrateTranslation) = { + let (outputs, trans, dep_graph) = { let krate = match phase_1_parse_input(control, sess, input) { Ok(krate) => krate, Err(mut parse_error) => { @@ -143,7 +145,13 @@ pub fn compile_input(sess: &Session, ::rustc_trans_utils::link::find_crate_name(Some(sess), &krate.attrs, input); let ExpansionResult { expanded_crate, defs, analysis, resolutions, mut hir_forest } = { phase_2_configure_and_expand( - sess, &cstore, krate, registry, &crate_name, addl_plugins, control.make_glob_map, + sess, + &cstore, + krate, + registry, + &crate_name, + addl_plugins, + control.make_glob_map, |expanded_crate| { let mut state = CompileState::state_after_expand( input, sess, outdir, output, &cstore, expanded_crate, &crate_name, @@ -166,7 +174,7 @@ pub fn compile_input(sess: &Session, // Construct the HIR map let hir_map = time(sess.time_passes(), "indexing hir", - || hir_map::map_crate(&mut hir_forest, defs)); + || hir_map::map_crate(sess, cstore, &mut hir_forest, &defs)); { let _ignore = hir_map.dep_graph.in_ignore(); @@ -184,6 +192,7 @@ pub fn compile_input(sess: &Session, &resolutions, &expanded_crate, &hir_map.krate(), + &outputs, &crate_name), Ok(())); } @@ -200,13 +209,15 @@ pub fn compile_input(sess: &Session, }; phase_3_run_analysis_passes(sess, + cstore, hir_map, analysis, resolutions, &arena, &arenas, &crate_name, - |tcx, analysis, incremental_hashes_map, result| { + &outputs, + |tcx, analysis, rx, result| { { // Eventually, we will want to track plugins. let _ignore = tcx.dep_graph.in_ignore(); @@ -234,8 +245,7 @@ pub fn compile_input(sess: &Session, tcx.print_debug_stats(); } - let trans = phase_4_translate_to_llvm(tcx, analysis, incremental_hashes_map, - &outputs); + let trans = phase_4_translate_to_llvm::(tcx, rx); if log_enabled!(::log::LogLevel::Info) { println!("Post-trans"); @@ -249,48 +259,46 @@ pub fn compile_input(sess: &Session, } } - Ok((outputs, trans)) + Ok((outputs.clone(), trans, tcx.dep_graph.clone())) })?? 
}; - if cfg!(not(feature="llvm")) { - let (_, _) = (outputs, trans); - sess.fatal("LLVM is not supported by this rustc"); + if sess.opts.debugging_opts.print_type_sizes { + sess.code_stats.borrow().print_type_sizes(); } - #[cfg(feature="llvm")] - { - if sess.opts.debugging_opts.print_type_sizes { - sess.code_stats.borrow().print_type_sizes(); - } - - let (phase5_result, trans) = phase_5_run_llvm_passes(sess, trans); + let (phase5_result, trans) = + phase_5_run_llvm_passes::(sess, &dep_graph, trans); - controller_entry_point!(after_llvm, - sess, - CompileState::state_after_llvm(input, sess, outdir, output, &trans), - phase5_result); - phase5_result?; + controller_entry_point!(after_llvm, + sess, + CompileState::state_after_llvm(input, sess, outdir, output, &trans), + phase5_result); + phase5_result?; - phase_6_link_output(sess, &trans, &outputs); - - // Now that we won't touch anything in the incremental compilation directory - // any more, we can finalize it (which involves renaming it) - rustc_incremental::finalize_session_directory(sess, trans.link.crate_hash); + // Run the linker on any artifacts that resulted from the LLVM run. + // This should produce either a finished executable or library. + time(sess.time_passes(), "linking", || { + DefaultTransCrate::link_binary(sess, &trans, &outputs) + }); - if sess.opts.debugging_opts.perf_stats { - sess.print_perf_stats(); - } + // Now that we won't touch anything in the incremental compilation directory + // any more, we can finalize it (which involves renaming it) + #[cfg(feature="llvm")] + rustc_incremental::finalize_session_directory(sess, trans.link.crate_hash); - controller_entry_point!( - compilation_done, - sess, - CompileState::state_when_compilation_done(input, sess, outdir, output), - Ok(()) - ); + if sess.opts.debugging_opts.perf_stats { + sess.print_perf_stats(); + } + controller_entry_point!( + compilation_done, + sess, + CompileState::state_when_compilation_done(input, sess, outdir, output), Ok(()) - } + ); + + Ok(()) } fn keep_hygiene_data(sess: &Session) -> bool { @@ -385,7 +393,7 @@ pub struct CompileState<'a, 'tcx: 'a> { pub session: &'tcx Session, pub krate: Option, pub registry: Option>, - pub cstore: Option<&'a CStore>, + pub cstore: Option<&'tcx CStore>, pub crate_name: Option<&'a str>, pub output_filenames: Option<&'a OutputFilenames>, pub out_dir: Option<&'a Path>, @@ -433,7 +441,7 @@ impl<'a, 'tcx> CompileState<'a, 'tcx> { out_dir: &'a Option, out_file: &'a Option, krate: ast::Crate, - cstore: &'a CStore) + cstore: &'tcx CStore) -> Self { CompileState { // Initialize the registry before moving `krate` @@ -449,7 +457,7 @@ impl<'a, 'tcx> CompileState<'a, 'tcx> { session: &'tcx Session, out_dir: &'a Option, out_file: &'a Option, - cstore: &'a CStore, + cstore: &'tcx CStore, expanded_crate: &'a ast::Crate, crate_name: &'a str) -> Self { @@ -468,12 +476,13 @@ impl<'a, 'tcx> CompileState<'a, 'tcx> { out_file: &'a Option, arena: &'tcx DroplessArena, arenas: &'tcx GlobalArenas<'tcx>, - cstore: &'a CStore, + cstore: &'tcx CStore, hir_map: &'a hir_map::Map<'tcx>, analysis: &'a ty::CrateAnalysis, resolutions: &'a Resolutions, krate: &'a ast::Crate, hir_crate: &'a hir::Crate, + output_filenames: &'a OutputFilenames, crate_name: &'a str) -> Self { CompileState { @@ -486,6 +495,7 @@ impl<'a, 'tcx> CompileState<'a, 'tcx> { resolutions: Some(resolutions), expanded_crate: Some(krate), hir_crate: Some(hir_crate), + output_filenames: Some(output_filenames), out_file: out_file.as_ref().map(|s| &**s), ..CompileState::empty(input, session, 
out_dir) } @@ -622,7 +632,24 @@ pub fn phase_2_configure_and_expand(sess: &Session, *sess.features.borrow_mut() = features; *sess.crate_types.borrow_mut() = collect_crate_types(sess, &krate.attrs); - *sess.crate_disambiguator.borrow_mut() = Symbol::intern(&compute_crate_disambiguator(sess)); + + let disambiguator = Symbol::intern(&compute_crate_disambiguator(sess)); + *sess.crate_disambiguator.borrow_mut() = Some(disambiguator); + rustc_incremental::prepare_session_directory( + sess, + &crate_name, + &disambiguator.as_str(), + ); + + let dep_graph = if sess.opts.build_dep_graph() { + let prev_dep_graph = time(time_passes, "load prev dep-graph", || { + rustc_incremental::load_dep_graph(sess) + }); + + DepGraph::new(prev_dep_graph) + } else { + DepGraph::new_disabled() + }; time(time_passes, "recursion limit", || { middle::recursion_limit::update_limits(sess, &krate); @@ -692,11 +719,10 @@ pub fn phase_2_configure_and_expand(sess: &Session, // item, much like we do for macro expansion. In other words, the hash reflects not just // its contents but the results of name resolution on those contents. Hopefully we'll push // this back at some point. - let _ignore = sess.dep_graph.in_ignore(); let mut crate_loader = CrateLoader::new(sess, &cstore, crate_name); - crate_loader.preprocess(&krate); let resolver_arenas = Resolver::arenas(); let mut resolver = Resolver::new(sess, + cstore, &krate, crate_name, make_glob_map, @@ -845,13 +871,13 @@ pub fn phase_2_configure_and_expand(sess: &Session, // Lower ast -> hir. let hir_forest = time(time_passes, "lowering ast -> hir", || { - let hir_crate = lower_crate(sess, &krate, &mut resolver); + let hir_crate = lower_crate(sess, cstore, &dep_graph, &krate, &mut resolver); if sess.opts.debugging_opts.hir_stats { hir_stats::print_hir_stats(&hir_crate); } - hir_map::Forest::new(hir_crate, &sess.dep_graph) + hir_map::Forest::new(hir_crate, &dep_graph) }); time(time_passes, @@ -868,7 +894,6 @@ pub fn phase_2_configure_and_expand(sess: &Session, defs: resolver.definitions, analysis: ty::CrateAnalysis { access_levels: Rc::new(AccessLevels::default()), - reachable: Rc::new(NodeSet()), name: crate_name.to_string(), glob_map: if resolver.make_glob_map { Some(resolver.glob_map) } else { None }, }, @@ -887,25 +912,27 @@ pub fn phase_2_configure_and_expand(sess: &Session, /// miscellaneous analysis passes on the crate. Return various /// structures carrying the results of the analysis. pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, + cstore: &'tcx CrateStore, hir_map: hir_map::Map<'tcx>, mut analysis: ty::CrateAnalysis, resolutions: Resolutions, arena: &'tcx DroplessArena, arenas: &'tcx GlobalArenas<'tcx>, name: &str, + output_filenames: &OutputFilenames, f: F) -> Result where F: for<'a> FnOnce(TyCtxt<'a, 'tcx, 'tcx>, ty::CrateAnalysis, - IncrementalHashesMap, + mpsc::Receiver>, CompileResult) -> R { macro_rules! 
try_with_f { - ($e: expr, ($t: expr, $a: expr, $h: expr)) => { + ($e: expr, ($($t:tt)*)) => { match $e { Ok(x) => x, Err(x) => { - f($t, $a, $h, Err(x)); + f($($t)*, Err(x)); return Err(x); } } @@ -914,15 +941,9 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, let time_passes = sess.time_passes(); - let lang_items = time(time_passes, "language item collection", || { - sess.track_errors(|| { - middle::lang_items::collect_language_items(&sess, &hir_map) - }) - })?; - let named_region_map = time(time_passes, "lifetime resolution", - || middle::resolve_lifetime::krate(sess, &hir_map))?; + || middle::resolve_lifetime::krate(sess, cstore, &hir_map))?; time(time_passes, "looking for entry point", @@ -941,14 +962,12 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, "static item recursion checking", || static_recursion::check_crate(sess, &hir_map))?; - let index = stability::Index::new(&sess); - let mut local_providers = ty::maps::Providers::default(); borrowck::provide(&mut local_providers); mir::provide(&mut local_providers); reachable::provide(&mut local_providers); rustc_privacy::provide(&mut local_providers); - trans::provide(&mut local_providers); + DefaultTransCrate::provide_local(&mut local_providers); typeck::provide(&mut local_providers); ty::provide(&mut local_providers); traits::provide(&mut local_providers); @@ -960,7 +979,7 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, let mut extern_providers = ty::maps::Providers::default(); cstore::provide(&mut extern_providers); - trans::provide(&mut extern_providers); + DefaultTransCrate::provide_extern(&mut extern_providers); ty::provide_extern(&mut extern_providers); traits::provide_extern(&mut extern_providers); // FIXME(eddyb) get rid of this once we replace const_eval with miri. @@ -983,19 +1002,16 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, // What we need to run borrowck etc. passes.push_pass(MIR_VALIDATED, mir::transform::qualify_consts::QualifyAndPromoteConstants); - - // FIXME: ariel points SimplifyBranches should run after - // mir-borrowck; otherwise code within `if false { ... }` would - // not be checked. - passes.push_pass(MIR_VALIDATED, - mir::transform::simplify_branches::SimplifyBranches::new("initial")); passes.push_pass(MIR_VALIDATED, mir::transform::simplify::SimplifyCfg::new("qualify-consts")); passes.push_pass(MIR_VALIDATED, mir::transform::nll::NLL); // borrowck runs between MIR_VALIDATED and MIR_OPTIMIZED. 
- // These next passes must be executed together passes.push_pass(MIR_OPTIMIZED, mir::transform::no_landing_pads::NoLandingPads); + passes.push_pass(MIR_OPTIMIZED, + mir::transform::simplify_branches::SimplifyBranches::new("initial")); + + // These next passes must be executed together passes.push_pass(MIR_OPTIMIZED, mir::transform::add_call_guards::CriticalCallEdges); passes.push_pass(MIR_OPTIMIZED, mir::transform::elaborate_drops::ElaborateDrops); passes.push_pass(MIR_OPTIMIZED, mir::transform::no_landing_pads::NoLandingPads); @@ -1015,10 +1031,15 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, passes.push_pass(MIR_OPTIMIZED, mir::transform::deaggregator::Deaggregator); passes.push_pass(MIR_OPTIMIZED, mir::transform::copy_prop::CopyPropagation); passes.push_pass(MIR_OPTIMIZED, mir::transform::simplify::SimplifyLocals); + + passes.push_pass(MIR_OPTIMIZED, mir::transform::generator::StateTransform); passes.push_pass(MIR_OPTIMIZED, mir::transform::add_call_guards::CriticalCallEdges); passes.push_pass(MIR_OPTIMIZED, mir::transform::dump_mir::Marker("PreTrans")); + let (tx, rx) = mpsc::channel(); + TyCtxt::create_and_enter(sess, + cstore, local_providers, extern_providers, Rc::new(passes), @@ -1027,29 +1048,20 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, resolutions, named_region_map, hir_map, - lang_items, - index, name, + tx, + output_filenames, |tcx| { - let incremental_hashes_map = - time(time_passes, - "compute_incremental_hashes_map", - || rustc_incremental::compute_incremental_hashes_map(tcx)); - - time(time_passes, - "load_dep_graph", - || rustc_incremental::load_dep_graph(tcx, &incremental_hashes_map)); - - time(time_passes, "stability index", || { - tcx.stability.borrow_mut().build(tcx) - }); + // Do some initialization of the DepGraph that can only be done with the + // tcx available. + rustc_incremental::dep_graph_tcx_init(tcx); time(time_passes, "stability checking", || stability::check_unstable_api_usage(tcx)); // passes are timed inside typeck - try_with_f!(typeck::check_crate(tcx), (tcx, analysis, incremental_hashes_map)); + try_with_f!(typeck::check_crate(tcx), (tcx, analysis, rx)); time(time_passes, "const checking", @@ -1062,10 +1074,6 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, "intrinsic checking", || middle::intrinsicck::check_crate(tcx)); - time(time_passes, - "effect checking", - || middle::effect::check_crate(tcx)); - time(time_passes, "match checking", || check_match::check_crate(tcx)); @@ -1086,6 +1094,11 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, "MIR borrow checking", || for def_id in tcx.body_owners() { tcx.mir_borrowck(def_id) }); + time(time_passes, + "MIR effect checking", + || for def_id in tcx.body_owners() { + mir::transform::check_unsafety::check_unsafety(tcx, def_id) + }); // Avoid overwhelming user with errors if type checking failed. // I'm not sure how helpful this is, to be honest, but it avoids // a @@ -1093,14 +1106,9 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, // lint warnings and so on -- kindck used to do this abort, but // kindck is gone now). 
-nmatsakis if sess.err_count() > 0 { - return Ok(f(tcx, analysis, incremental_hashes_map, sess.compile_status())); + return Ok(f(tcx, analysis, rx, sess.compile_status())); } - analysis.reachable = - time(time_passes, - "reachability checking", - || reachable::find_reachable(tcx)); - time(time_passes, "death checking", || middle::dead::check_crate(tcx)); time(time_passes, "unused lib feature checking", || { @@ -1109,17 +1117,15 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, time(time_passes, "lint checking", || lint::check_crate(tcx)); - return Ok(f(tcx, analysis, incremental_hashes_map, tcx.sess.compile_status())); + return Ok(f(tcx, analysis, rx, tcx.sess.compile_status())); }) } /// Run the translation phase to LLVM, after which the AST and analysis can /// be discarded. -pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - analysis: ty::CrateAnalysis, - incremental_hashes_map: IncrementalHashesMap, - output_filenames: &OutputFilenames) - -> write::OngoingCrateTranslation { +pub fn phase_4_translate_to_llvm<'a, 'tcx, Trans: TransCrate>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + rx: mpsc::Receiver>) + -> ::OngoingCrateTranslation { let time_passes = tcx.sess.time_passes(); time(time_passes, @@ -1127,10 +1133,9 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, || ::rustc::middle::dependency_format::calculate(tcx)); let translation = - time(time_passes, - "translation", - move || trans::trans_crate(tcx, analysis, incremental_hashes_map, output_filenames)); - + time(time_passes, "translation", move || { + Trans::trans_crate(tcx, rx) + }); if tcx.sess.profile_queries() { profile::dump("profile_queries".to_string()) } @@ -1140,34 +1145,23 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, /// Run LLVM itself, producing a bitcode file, assembly file or object file /// as a side effect. -#[cfg(feature="llvm")] -pub fn phase_5_run_llvm_passes(sess: &Session, - trans: write::OngoingCrateTranslation) - -> (CompileResult, trans::CrateTranslation) { - let trans = trans.join(sess); +pub fn phase_5_run_llvm_passes(sess: &Session, + dep_graph: &DepGraph, + trans: ::OngoingCrateTranslation) + -> (CompileResult, ::TranslatedCrate) { + let trans = Trans::join_trans(trans, sess, dep_graph); if sess.opts.debugging_opts.incremental_info { - write::dump_incremental_data(&trans); + Trans::dump_incremental_data(&trans); } time(sess.time_passes(), "serialize work products", - move || rustc_incremental::save_work_products(sess)); + move || rustc_incremental::save_work_products(sess, dep_graph)); (sess.compile_status(), trans) } -/// Run the linker on any artifacts that resulted from the LLVM run. -/// This should produce either a finished executable or library. 
-#[cfg(feature="llvm")] -pub fn phase_6_link_output(sess: &Session, - trans: &trans::CrateTranslation, - outputs: &OutputFilenames) { - time(sess.time_passes(), "linking", || { - ::rustc_trans::back::link::link_binary(sess, trans, outputs, &trans.crate_name.as_str()) - }); -} - fn escape_dep_filename(filename: &str) -> String { // Apparently clang and gcc *only* escape spaces: // http://llvm.org/klaus/clang/commit/9d50634cfc268ecc9a7250226dd5ca0e945240d4 diff --git a/src/librustc_driver/lib.rs b/src/librustc_driver/lib.rs index 1915a1c864..6bdad0b212 100644 --- a/src/librustc_driver/lib.rs +++ b/src/librustc_driver/lib.rs @@ -64,7 +64,6 @@ use pretty::{PpMode, UserIdentifiedItem}; use rustc_resolve as resolve; use rustc_save_analysis as save; use rustc_save_analysis::DumpHandler; -use rustc::dep_graph::DepGraph; use rustc::session::{self, config, Session, build_session, CompileResult}; use rustc::session::CompileIncomplete; use rustc::session::config::{Input, PrintRequest, OutputType, ErrorOutputType}; @@ -72,9 +71,11 @@ use rustc::session::config::nightly_options; use rustc::session::{early_error, early_warn}; use rustc::lint::Lint; use rustc::lint; +use rustc::middle::cstore::CrateStore; use rustc_metadata::locator; use rustc_metadata::cstore::CStore; use rustc::util::common::{time, ErrorReported}; +use rustc_trans_utils::trans_crate::TransCrate; use serialize::json::ToJson; @@ -151,101 +152,31 @@ pub fn run(run_compiler: F) -> isize } #[cfg(not(feature="llvm"))] -pub use no_llvm_metadata_loader::NoLLvmMetadataLoader as MetadataLoader; +pub use rustc_trans_utils::trans_crate::MetadataOnlyTransCrate as DefaultTransCrate; #[cfg(feature="llvm")] -pub use rustc_trans::LlvmMetadataLoader as MetadataLoader; - -#[cfg(not(feature="llvm"))] -mod no_llvm_metadata_loader { - extern crate ar; - extern crate owning_ref; - - use rustc::middle::cstore::MetadataLoader as MetadataLoaderTrait; - use rustc_back::target::Target; - use std::io; - use std::fs::File; - use std::path::Path; - - use self::ar::Archive; - use self::owning_ref::{OwningRef, ErasedBoxRef}; - - pub struct NoLLvmMetadataLoader; - - impl MetadataLoaderTrait for NoLLvmMetadataLoader { - fn get_rlib_metadata( - &self, - _: &Target, - filename: &Path - ) -> Result, String> { - let file = File::open(filename).map_err(|e| { - format!("metadata file open err: {:?}", e) - })?; - let mut archive = Archive::new(file); - - while let Some(entry_result) = archive.next_entry() { - let mut entry = entry_result.map_err(|e| { - format!("metadata section read err: {:?}", e) - })?; - if entry.header().identifier() == "rust.metadata.bin" { - let mut buf = Vec::new(); - io::copy(&mut entry, &mut buf).unwrap(); - let buf: OwningRef, [u8]> = OwningRef::new(buf).into(); - return Ok(buf.map_owner_box().erase_owner()); - } - } - - Err("Couldnt find metadata section".to_string()) - } - - fn get_dylib_metadata(&self, - _target: &Target, - _filename: &Path) - -> Result, String> { - panic!("Dylib metadata loading not supported without LLVM") - } - } -} +pub use rustc_trans::LlvmTransCrate as DefaultTransCrate; #[cfg(not(feature="llvm"))] mod rustc_trans { use syntax_pos::symbol::Symbol; use rustc::session::Session; - use rustc::session::config::{PrintRequest, OutputFilenames}; - use rustc::ty::{TyCtxt, CrateAnalysis}; - use rustc::ty::maps::Providers; - use rustc_incremental::IncrementalHashesMap; - - use self::back::write::OngoingCrateTranslation; + use rustc::session::config::PrintRequest; + pub use rustc_trans_utils::trans_crate::MetadataOnlyTransCrate as 
LlvmTransCrate; + pub use rustc_trans_utils::trans_crate::TranslatedCrate as CrateTranslation; pub fn init(_sess: &Session) {} pub fn enable_llvm_debug() {} - pub fn provide(_providers: &mut Providers) {} pub fn print_version() {} pub fn print_passes() {} pub fn print(_req: PrintRequest, _sess: &Session) {} pub fn target_features(_sess: &Session) -> Vec { vec![] } - pub fn trans_crate<'a, 'tcx>( - _tcx: TyCtxt<'a, 'tcx, 'tcx>, - _analysis: CrateAnalysis, - _incr_hashes_map: IncrementalHashesMap, - _output_filenames: &OutputFilenames - ) -> OngoingCrateTranslation { - OngoingCrateTranslation(()) - } - - pub struct CrateTranslation(()); - pub mod back { pub mod write { - pub struct OngoingCrateTranslation(pub (in ::rustc_trans) ()); - pub const RELOC_MODEL_ARGS: [(&'static str, ()); 0] = []; pub const CODE_GEN_MODEL_ARGS: [(&'static str, ()); 0] = []; } } - - __build_diagnostic_array! { librustc_trans, DIAGNOSTICS } } // Parse args and run the compiler. This is the primary entry point for rustc. @@ -293,13 +224,12 @@ pub fn run_compiler<'a>(args: &[String], }, }; - let dep_graph = DepGraph::new(sopts.build_dep_graph()); - let cstore = Rc::new(CStore::new(&dep_graph, box ::MetadataLoader)); + let cstore = Rc::new(CStore::new(DefaultTransCrate::metadata_loader())); let loader = file_loader.unwrap_or(box RealFileLoader); let codemap = Rc::new(CodeMap::with_file_loader(loader, sopts.file_path_mapping())); let mut sess = session::build_session_with_codemap( - sopts, &dep_graph, input_file_path, descriptions, cstore.clone(), codemap, emitter_dest, + sopts, input_file_path, descriptions, codemap, emitter_dest, ); rustc_trans::init(&sess); rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess)); @@ -308,11 +238,22 @@ pub fn run_compiler<'a>(args: &[String], target_features::add_configuration(&mut cfg, &sess); sess.parse_sess.config = cfg; - do_or_return!(callbacks.late_callback(&matches, &sess, &input, &odir, &ofile), Some(sess)); + do_or_return!(callbacks.late_callback(&matches, + &sess, + &*cstore, + &input, + &odir, + &ofile), Some(sess)); let plugins = sess.opts.debugging_opts.extra_plugins.clone(); let control = callbacks.build_controller(&sess, &matches); - (driver::compile_input(&sess, &cstore, &input, &odir, &ofile, Some(plugins), &control), + (driver::compile_input(&sess, + &cstore, + &input, + &odir, + &ofile, + Some(plugins), + &control), Some(sess)) } @@ -400,6 +341,7 @@ pub trait CompilerCalls<'a> { fn late_callback(&mut self, _: &getopts::Matches, _: &Session, + _: &CrateStore, _: &Input, _: &Option, _: &Option) @@ -573,13 +515,9 @@ impl<'a> CompilerCalls<'a> for RustcDefaultCalls { describe_lints(&ls, false); return None; } - let dep_graph = DepGraph::new(sopts.build_dep_graph()); - let cstore = Rc::new(CStore::new(&dep_graph, box ::MetadataLoader)); let mut sess = build_session(sopts.clone(), - &dep_graph, None, - descriptions.clone(), - cstore.clone()); + descriptions.clone()); rustc_trans::init(&sess); rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess)); let mut cfg = config::build_configuration(&sess, cfg.clone()); @@ -601,12 +539,13 @@ impl<'a> CompilerCalls<'a> for RustcDefaultCalls { fn late_callback(&mut self, matches: &getopts::Matches, sess: &Session, + cstore: &CrateStore, input: &Input, odir: &Option, ofile: &Option) -> Compilation { RustcDefaultCalls::print_crate_info(sess, Some(input), odir, ofile) - .and_then(|| RustcDefaultCalls::list_metadata(sess, matches, input)) + .and_then(|| RustcDefaultCalls::list_metadata(sess, 
cstore, matches, input)) } fn build_controller(&mut self, @@ -627,6 +566,7 @@ impl<'a> CompilerCalls<'a> for RustcDefaultCalls { }; control.after_hir_lowering.callback = box move |state| { pretty::print_after_hir_lowering(state.session, + state.cstore.unwrap(), state.hir_map.unwrap(), state.analysis.unwrap(), state.resolutions.unwrap(), @@ -636,6 +576,7 @@ impl<'a> CompilerCalls<'a> for RustcDefaultCalls { ppm, state.arena.unwrap(), state.arenas.unwrap(), + state.output_filenames.unwrap(), opt_uii.clone(), state.out_file); }; @@ -711,7 +652,11 @@ fn save_analysis(sess: &Session) -> bool { } impl RustcDefaultCalls { - pub fn list_metadata(sess: &Session, matches: &getopts::Matches, input: &Input) -> Compilation { + pub fn list_metadata(sess: &Session, + cstore: &CrateStore, + matches: &getopts::Matches, + input: &Input) + -> Compilation { let r = matches.opt_strs("Z"); if r.contains(&("ls".to_string())) { match input { @@ -720,7 +665,7 @@ impl RustcDefaultCalls { let mut v = Vec::new(); locator::list_file_metadata(&sess.target.target, path, - sess.cstore.metadata_loader(), + cstore.metadata_loader(), &mut v) .unwrap(); println!("{}", String::from_utf8(v).unwrap()); @@ -741,7 +686,9 @@ impl RustcDefaultCalls { odir: &Option, ofile: &Option) -> Compilation { - if sess.opts.prints.is_empty() { + // PrintRequest::NativeStaticLibs is special - printed during linking + // (empty iterator returns true) + if sess.opts.prints.iter().all(|&p| p==PrintRequest::NativeStaticLibs) { return Compilation::Continue; } @@ -851,6 +798,9 @@ impl RustcDefaultCalls { PrintRequest::TargetCPUs | PrintRequest::TargetFeatures => { rustc_trans::print(*req, sess); } + PrintRequest::NativeStaticLibs => { + println!("Native static libs can be printed only during linking"); + } } } return Compilation::Stop; @@ -1312,6 +1262,7 @@ pub fn diagnostics_registry() -> errors::registry::Registry { all_errors.extend_from_slice(&rustc_borrowck::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_resolve::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_privacy::DIAGNOSTICS); + #[cfg(feature="llvm")] all_errors.extend_from_slice(&rustc_trans::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_const_eval::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_metadata::DIAGNOSTICS); diff --git a/src/librustc_driver/pretty.rs b/src/librustc_driver/pretty.rs index 20f2a146b0..e6d89f77f2 100644 --- a/src/librustc_driver/pretty.rs +++ b/src/librustc_driver/pretty.rs @@ -20,9 +20,9 @@ use {abort_on_err, driver}; use rustc::ty::{self, TyCtxt, GlobalArenas, Resolutions}; use rustc::cfg; use rustc::cfg::graphviz::LabelledCFG; -use rustc::dep_graph::DepGraph; +use rustc::middle::cstore::CrateStore; use rustc::session::Session; -use rustc::session::config::Input; +use rustc::session::config::{Input, OutputFilenames}; use rustc_borrowck as borrowck; use rustc_borrowck::graphviz as borrowck_dot; @@ -198,11 +198,13 @@ impl PpSourceMode { } fn call_with_pp_support_hir<'tcx, A, F>(&self, sess: &'tcx Session, + cstore: &'tcx CrateStore, hir_map: &hir_map::Map<'tcx>, analysis: &ty::CrateAnalysis, resolutions: &Resolutions, arena: &'tcx DroplessArena, arenas: &'tcx GlobalArenas<'tcx>, + output_filenames: &OutputFilenames, id: &str, f: F) -> A @@ -226,12 +228,14 @@ impl PpSourceMode { } PpmTyped => { abort_on_err(driver::phase_3_run_analysis_passes(sess, + cstore, hir_map.clone(), analysis.clone(), resolutions.clone(), arena, arenas, id, + output_filenames, |tcx, _, _, _| { let empty_tables = ty::TypeckTables::empty(None); let annotation = TypedAnnotation { 
@@ -655,10 +659,26 @@ impl ReplaceBodyWithLoop { ast::TyKind::Ptr(ast::MutTy { ty: ref subty, .. }) | ast::TyKind::Rptr(_, ast::MutTy { ty: ref subty, .. }) | ast::TyKind::Paren(ref subty) => involves_impl_trait(subty), - ast::TyKind::Tup(ref tys) => tys.iter().any(|subty| involves_impl_trait(subty)), + ast::TyKind::Tup(ref tys) => any_involves_impl_trait(tys.iter()), + ast::TyKind::Path(_, ref path) => path.segments.iter().any(|seg| { + match seg.parameters.as_ref().map(|p| &**p) { + None => false, + Some(&ast::PathParameters::AngleBracketed(ref data)) => + any_involves_impl_trait(data.types.iter()) || + any_involves_impl_trait(data.bindings.iter().map(|b| &b.ty)), + Some(&ast::PathParameters::Parenthesized(ref data)) => + any_involves_impl_trait(data.inputs.iter()) || + any_involves_impl_trait(data.output.iter()), + } + }), _ => false, } } + + fn any_involves_impl_trait<'a, I: Iterator>>(mut it: I) -> bool { + it.any(|subty| involves_impl_trait(subty)) + } + involves_impl_trait(ty) } else { false @@ -765,7 +785,7 @@ fn print_flowgraph<'a, 'tcx, W: Write>(variants: Vec, let cfg = cfg::CFG::new(tcx, &body); let labelled_edges = mode != PpFlowGraphMode::UnlabelledEdges; let lcfg = LabelledCFG { - hir_map: &tcx.hir, + tcx, cfg: &cfg, name: format!("node_{}", code.id()), labelled_edges, @@ -843,9 +863,6 @@ pub fn print_after_parsing(sess: &Session, krate: &ast::Crate, ppm: PpMode, ofile: Option<&Path>) { - let dep_graph = DepGraph::new(false); - let _ignore = dep_graph.in_ignore(); - let (src, src_name) = get_source(input, sess); let mut rdr = &*src; @@ -875,6 +892,7 @@ pub fn print_after_parsing(sess: &Session, } pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, + cstore: &'tcx CrateStore, hir_map: &hir_map::Map<'tcx>, analysis: &ty::CrateAnalysis, resolutions: &Resolutions, @@ -884,19 +902,19 @@ pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, ppm: PpMode, arena: &'tcx DroplessArena, arenas: &'tcx GlobalArenas<'tcx>, + output_filenames: &OutputFilenames, opt_uii: Option, ofile: Option<&Path>) { - let dep_graph = DepGraph::new(false); - let _ignore = dep_graph.in_ignore(); - if ppm.needs_analysis() { print_with_analysis(sess, + cstore, hir_map, analysis, resolutions, crate_name, arena, arenas, + output_filenames, ppm, opt_uii, ofile); @@ -929,11 +947,13 @@ pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, (PpmHir(s), None) => { let out: &mut Write = &mut out; s.call_with_pp_support_hir(sess, + cstore, hir_map, analysis, resolutions, arena, arenas, + output_filenames, crate_name, move |annotation, krate| { debug!("pretty printing source code {:?}", s); @@ -952,11 +972,13 @@ pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, (PpmHir(s), Some(uii)) => { let out: &mut Write = &mut out; s.call_with_pp_support_hir(sess, + cstore, hir_map, analysis, resolutions, arena, arenas, + output_filenames, crate_name, move |annotation, _| { debug!("pretty printing source code {:?}", s); @@ -993,12 +1015,14 @@ pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, // with a different callback than the standard driver, so that isn't easy. // Instead, we call that function ourselves. 
fn print_with_analysis<'tcx, 'a: 'tcx>(sess: &'a Session, + cstore: &'a CrateStore, hir_map: &hir_map::Map<'tcx>, analysis: &ty::CrateAnalysis, resolutions: &Resolutions, crate_name: &str, arena: &'tcx DroplessArena, arenas: &'tcx GlobalArenas<'tcx>, + output_filenames: &OutputFilenames, ppm: PpMode, uii: Option, ofile: Option<&Path>) { @@ -1013,12 +1037,14 @@ fn print_with_analysis<'tcx, 'a: 'tcx>(sess: &'a Session, let mut out = Vec::new(); abort_on_err(driver::phase_3_run_analysis_passes(sess, + cstore, hir_map.clone(), analysis.clone(), resolutions.clone(), arena, arenas, crate_name, + output_filenames, |tcx, _, _, _| { match ppm { PpmMir | PpmMirCFG => { diff --git a/src/librustc_driver/profile/trace.rs b/src/librustc_driver/profile/trace.rs index f5079836c3..280f3c8c79 100644 --- a/src/librustc_driver/profile/trace.rs +++ b/src/librustc_driver/profile/trace.rs @@ -9,7 +9,7 @@ // except according to those terms. use super::*; -use syntax_pos::Span; +use syntax_pos::SpanData; use rustc::ty::maps::QueryMsg; use std::fs::File; use std::time::{Duration, Instant}; @@ -18,7 +18,7 @@ use rustc::dep_graph::{DepNode}; #[derive(Debug, Clone, Eq, PartialEq)] pub struct Query { - pub span: Span, + pub span: SpanData, pub msg: QueryMsg, } pub enum Effect { diff --git a/src/librustc_driver/test.rs b/src/librustc_driver/test.rs index b187cdaa48..6de36820f0 100644 --- a/src/librustc_driver/test.rs +++ b/src/librustc_driver/test.rs @@ -10,16 +10,16 @@ //! # Standalone Tests for the Inference Module +use std::path::PathBuf; +use std::sync::mpsc; + use driver; -use rustc::dep_graph::DepGraph; use rustc_lint; use rustc_resolve::MakeGlobMap; use rustc_trans; -use rustc::middle::lang_items; use rustc::middle::free_region::FreeRegionMap; -use rustc::middle::region::{CodeExtent, RegionMaps}; +use rustc::middle::region; use rustc::middle::resolve_lifetime; -use rustc::middle::stability; use rustc::ty::subst::{Kind, Subst}; use rustc::traits::{ObligationCause, Reveal}; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; @@ -29,6 +29,8 @@ use rustc_metadata::cstore::CStore; use rustc::hir::map as hir_map; use rustc::mir::transform::Passes; use rustc::session::{self, config}; +use rustc::session::config::{OutputFilenames, OutputTypes}; +use rustc_trans_utils::trans_crate::TransCrate; use std::rc::Rc; use syntax::ast; use syntax::abi::Abi; @@ -45,12 +47,12 @@ use rustc::hir; struct Env<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { infcx: &'a infer::InferCtxt<'a, 'gcx, 'tcx>, - region_maps: &'a mut RegionMaps, + region_scope_tree: &'a mut region::ScopeTree, param_env: ty::ParamEnv<'tcx>, } struct RH<'a> { - id: ast::NodeId, + id: hir::ItemLocalId, sub: &'a [RH<'a>], } @@ -104,15 +106,11 @@ fn test_env(source_string: &str, options.unstable_features = UnstableFeatures::Allow; let diagnostic_handler = errors::Handler::with_emitter(true, false, emitter); - let dep_graph = DepGraph::new(false); - let _ignore = dep_graph.in_ignore(); - let cstore = Rc::new(CStore::new(&dep_graph, box ::MetadataLoader)); + let cstore = Rc::new(CStore::new(::DefaultTransCrate::metadata_loader())); let sess = session::build_session_(options, - &dep_graph, None, diagnostic_handler, - Rc::new(CodeMap::new(FilePathMapping::empty())), - cstore.clone()); + Rc::new(CodeMap::new(FilePathMapping::empty()))); rustc_trans::init(&sess); rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess)); let input = config::Input::Str { @@ -133,17 +131,23 @@ fn test_env(source_string: &str, |_| Ok(())) .expect("phase 2 aborted") }; - let _ignore = 
dep_graph.in_ignore(); let arena = DroplessArena::new(); let arenas = ty::GlobalArenas::new(); - let hir_map = hir_map::map_crate(&mut hir_forest, defs); + let hir_map = hir_map::map_crate(&sess, &*cstore, &mut hir_forest, &defs); // run just enough stuff to build a tcx: - let lang_items = lang_items::collect_language_items(&sess, &hir_map); - let named_region_map = resolve_lifetime::krate(&sess, &hir_map); - let index = stability::Index::new(&sess); + let named_region_map = resolve_lifetime::krate(&sess, &*cstore, &hir_map); + let (tx, _rx) = mpsc::channel(); + let outputs = OutputFilenames { + out_directory: PathBuf::new(), + out_filestem: String::new(), + single_output_file: None, + extra: String::new(), + outputs: OutputTypes::new(&[]), + }; TyCtxt::create_and_enter(&sess, + &*cstore, ty::maps::Providers::default(), ty::maps::Providers::default(), Rc::new(Passes::new()), @@ -152,20 +156,20 @@ fn test_env(source_string: &str, resolutions, named_region_map.unwrap(), hir_map, - lang_items, - index, "test_crate", + tx, + &outputs, |tcx| { tcx.infer_ctxt().enter(|infcx| { - let mut region_maps = RegionMaps::new(); + let mut region_scope_tree = region::ScopeTree::default(); body(Env { infcx: &infcx, - region_maps: &mut region_maps, + region_scope_tree: &mut region_scope_tree, param_env: ty::ParamEnv::empty(Reveal::UserFacing), }); let free_regions = FreeRegionMap::new(); let def_id = tcx.hir.local_def_id(ast::CRATE_NODE_ID); - infcx.resolve_regions_and_report_errors(def_id, ®ion_maps, &free_regions); + infcx.resolve_regions_and_report_errors(def_id, ®ion_scope_tree, &free_regions); assert_eq!(tcx.sess.err_count(), expected_err_count); }); }); @@ -176,9 +180,9 @@ impl<'a, 'gcx, 'tcx> Env<'a, 'gcx, 'tcx> { self.infcx.tcx } - pub fn create_region_hierarchy(&mut self, rh: &RH, parent: CodeExtent) { - let me = CodeExtent::Misc(rh.id); - self.region_maps.record_code_extent(me, Some(parent)); + pub fn create_region_hierarchy(&mut self, rh: &RH, parent: region::Scope) { + let me = region::Scope::Node(rh.id); + self.region_scope_tree.record_scope_parent(me, Some(parent)); for child_rh in rh.sub { self.create_region_hierarchy(child_rh, me); } @@ -188,21 +192,19 @@ impl<'a, 'gcx, 'tcx> Env<'a, 'gcx, 'tcx> { // creates a region hierarchy where 1 is root, 10 and 11 are // children of 1, etc - let node = ast::NodeId::from_u32; - let dscope = CodeExtent::DestructionScope(node(1)); - self.region_maps.record_code_extent(dscope, None); + let dscope = region::Scope::Destruction(hir::ItemLocalId(1)); + self.region_scope_tree.record_scope_parent(dscope, None); self.create_region_hierarchy(&RH { - id: node(1), - sub: &[RH { - id: node(10), - sub: &[], - }, - RH { - id: node(11), - sub: &[], - }], - }, - dscope); + id: hir::ItemLocalId(1), + sub: &[RH { + id: hir::ItemLocalId(10), + sub: &[], + }, + RH { + id: hir::ItemLocalId(11), + sub: &[], + }], + }, dscope); } #[allow(dead_code)] // this seems like it could be useful, even if we don't use it now @@ -335,7 +337,7 @@ impl<'a, 'gcx, 'tcx> Env<'a, 'gcx, 'tcx> { } pub fn t_rptr_scope(&self, id: u32) -> Ty<'tcx> { - let r = ty::ReScope(CodeExtent::Misc(ast::NodeId::from_u32(id))); + let r = ty::ReScope(region::Scope::Node(hir::ItemLocalId(id))); self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r), self.tcx().types.isize) } @@ -809,13 +811,13 @@ fn walk_ty() { test_env(EMPTY_SOURCE_STR, errors(&[]), |env| { let tcx = env.infcx.tcx; let int_ty = tcx.types.isize; - let uint_ty = tcx.types.usize; - let tup1_ty = tcx.intern_tup(&[int_ty, uint_ty, int_ty, uint_ty], 
false); - let tup2_ty = tcx.intern_tup(&[tup1_ty, tup1_ty, uint_ty], false); + let usize_ty = tcx.types.usize; + let tup1_ty = tcx.intern_tup(&[int_ty, usize_ty, int_ty, usize_ty], false); + let tup2_ty = tcx.intern_tup(&[tup1_ty, tup1_ty, usize_ty], false); let walked: Vec<_> = tup2_ty.walk().collect(); assert_eq!(walked, - [tup2_ty, tup1_ty, int_ty, uint_ty, int_ty, uint_ty, tup1_ty, int_ty, - uint_ty, int_ty, uint_ty, uint_ty]); + [tup2_ty, tup1_ty, int_ty, usize_ty, int_ty, usize_ty, tup1_ty, int_ty, + usize_ty, int_ty, usize_ty, usize_ty]); }) } @@ -824,20 +826,20 @@ fn walk_ty_skip_subtree() { test_env(EMPTY_SOURCE_STR, errors(&[]), |env| { let tcx = env.infcx.tcx; let int_ty = tcx.types.isize; - let uint_ty = tcx.types.usize; - let tup1_ty = tcx.intern_tup(&[int_ty, uint_ty, int_ty, uint_ty], false); - let tup2_ty = tcx.intern_tup(&[tup1_ty, tup1_ty, uint_ty], false); + let usize_ty = tcx.types.usize; + let tup1_ty = tcx.intern_tup(&[int_ty, usize_ty, int_ty, usize_ty], false); + let tup2_ty = tcx.intern_tup(&[tup1_ty, tup1_ty, usize_ty], false); // types we expect to see (in order), plus a boolean saying // whether to skip the subtree. let mut expected = vec![(tup2_ty, false), (tup1_ty, false), (int_ty, false), - (uint_ty, false), + (usize_ty, false), (int_ty, false), - (uint_ty, false), + (usize_ty, false), (tup1_ty, true), // skip the isize/usize/isize/usize - (uint_ty, false)]; + (usize_ty, false)]; expected.reverse(); let mut walker = tup2_ty.walk(); diff --git a/src/librustc_errors/diagnostic.rs b/src/librustc_errors/diagnostic.rs index 0f06354238..02c0307e98 100644 --- a/src/librustc_errors/diagnostic.rs +++ b/src/librustc_errors/diagnostic.rs @@ -208,7 +208,7 @@ impl Diagnostic { /// Prints out a message with a suggested edit of the code. If the suggestion is presented /// inline it will only show the text message and not the text. /// - /// See `diagnostic::CodeSuggestion` for more information. + /// See `CodeSuggestion` for more information. pub fn span_suggestion_short(&mut self, sp: Span, msg: &str, suggestion: String) -> &mut Self { self.suggestions.push(CodeSuggestion { substitution_parts: vec![Substitution { @@ -235,7 +235,7 @@ impl Diagnostic { /// * may look like "to do xyz, use" or "to do xyz, use abc" /// * may contain a name of a function, variable or type, but not whole expressions /// - /// See `diagnostic::CodeSuggestion` for more information. + /// See `CodeSuggestion` for more information. pub fn span_suggestion(&mut self, sp: Span, msg: &str, suggestion: String) -> &mut Self { self.suggestions.push(CodeSuggestion { substitution_parts: vec![Substitution { @@ -288,7 +288,7 @@ impl Diagnostic { /// Convenience function for internal use, clients should use one of the /// public methods above. - fn sub(&mut self, + pub(crate) fn sub(&mut self, level: Level, message: &str, span: MultiSpan, diff --git a/src/librustc_errors/diagnostic_builder.rs b/src/librustc_errors/diagnostic_builder.rs index 2c8d8b4691..2cd433bfe3 100644 --- a/src/librustc_errors/diagnostic_builder.rs +++ b/src/librustc_errors/diagnostic_builder.rs @@ -110,6 +110,19 @@ impl<'a> DiagnosticBuilder<'a> { // } } + /// Convenience function for internal use, clients should use one of the + /// span_* methods instead. + pub fn sub>( + &mut self, + level: Level, + message: &str, + span: Option, + ) -> &mut Self { + let span = span.map(|s| s.into()).unwrap_or(MultiSpan::new()); + self.diagnostic.sub(level, message, span, None); + self + } + /// Delay emission of this diagnostic as a bug. 
/// /// This can be useful in contexts where an error indicates a bug but diff --git a/src/librustc_errors/emitter.rs b/src/librustc_errors/emitter.rs index 53d90531cc..2f994de396 100644 --- a/src/librustc_errors/emitter.rs +++ b/src/librustc_errors/emitter.rs @@ -48,7 +48,7 @@ impl Emitter for EmitterWriter { sugg.substitution_parts[0].substitutions[0].find('\n').is_none() { let substitution = &sugg.substitution_parts[0].substitutions[0]; let msg = if substitution.len() == 0 || !sugg.show_code_when_inline { - // This substitution is only removal or we explicitely don't want to show the + // This substitution is only removal or we explicitly don't want to show the // code inline, don't show it format!("help: {}", sugg.msg) } else { @@ -183,8 +183,8 @@ impl EmitterWriter { continue; } - let lo = cm.lookup_char_pos(span_label.span.lo); - let mut hi = cm.lookup_char_pos(span_label.span.hi); + let lo = cm.lookup_char_pos(span_label.span.lo()); + let mut hi = cm.lookup_char_pos(span_label.span.hi()); // Watch out for "empty spans". If we get a span like 6..6, we // want to just display a `^` at 6, so convert that to @@ -683,7 +683,7 @@ impl EmitterWriter { if let Some(ref cm) = self.cm { for primary_span in msp.primary_spans() { if primary_span != &DUMMY_SP { - let hi = cm.lookup_char_pos(primary_span.hi); + let hi = cm.lookup_char_pos(primary_span.hi()); if hi.line > max { max = hi.line; } @@ -691,7 +691,7 @@ impl EmitterWriter { } for span_label in msp.span_labels() { if span_label.span != DUMMY_SP { - let hi = cm.lookup_char_pos(span_label.span.hi); + let hi = cm.lookup_char_pos(span_label.span.hi()); if hi.line > max { max = hi.line; } @@ -914,7 +914,7 @@ impl EmitterWriter { let (primary_lo, cm) = if let (Some(cm), Some(ref primary_span)) = (self.cm.as_ref(), msp.primary_span().as_ref()) { if primary_span != &&DUMMY_SP { - (cm.lookup_char_pos(primary_span.lo), cm) + (cm.lookup_char_pos(primary_span.lo()), cm) } else { emit_to_destination(&buffer.render(), level, &mut self.dst)?; return Ok(()); @@ -1091,7 +1091,7 @@ impl EmitterWriter { Some(Style::HeaderMsg)); let suggestions = suggestion.splice_lines(cm.borrow()); - let span_start_pos = cm.lookup_char_pos(primary_sub.span.lo); + let span_start_pos = cm.lookup_char_pos(primary_sub.span.lo()); let line_start = span_start_pos.line; draw_col_separator_no_space(&mut buffer, 1, max_line_num_len + 1); let mut row_num = 2; diff --git a/src/librustc_errors/lib.rs b/src/librustc_errors/lib.rs index 870bb01bb9..d9b0f4ac8a 100644 --- a/src/librustc_errors/lib.rs +++ b/src/librustc_errors/lib.rs @@ -148,16 +148,12 @@ impl CodeSuggestion { // Assumption: all spans are in the same file, and all spans // are disjoint. Sort in ascending order. - primary_spans.sort_by_key(|sp| sp.0.lo); + primary_spans.sort_by_key(|sp| sp.0.lo()); // Find the bounding span. 
- let lo = primary_spans.iter().map(|sp| sp.0.lo).min().unwrap(); - let hi = primary_spans.iter().map(|sp| sp.0.hi).min().unwrap(); - let bounding_span = Span { - lo, - hi, - ctxt: NO_EXPANSION, - }; + let lo = primary_spans.iter().map(|sp| sp.0.lo()).min().unwrap(); + let hi = primary_spans.iter().map(|sp| sp.0.hi()).min().unwrap(); + let bounding_span = Span::new(lo, hi, NO_EXPANSION); let lines = cm.span_to_lines(bounding_span).unwrap(); assert!(!lines.lines.is_empty()); @@ -171,14 +167,14 @@ impl CodeSuggestion { // // Finally push the trailing line segment of the last span let fm = &lines.file; - let mut prev_hi = cm.lookup_char_pos(bounding_span.lo); + let mut prev_hi = cm.lookup_char_pos(bounding_span.lo()); prev_hi.col = CharPos::from_usize(0); let mut prev_line = fm.get_line(lines.lines[0].line_index); let mut bufs = vec![(String::new(), false); self.substitutions()]; for (sp, substitutes) in primary_spans { - let cur_lo = cm.lookup_char_pos(sp.lo); + let cur_lo = cm.lookup_char_pos(sp.lo()); for (&mut (ref mut buf, ref mut underline), substitute) in bufs.iter_mut() .zip(substitutes) { if prev_hi.line == cur_lo.line { @@ -208,7 +204,7 @@ impl CodeSuggestion { } buf.push_str(substitute); } - prev_hi = cm.lookup_char_pos(sp.hi); + prev_hi = cm.lookup_char_pos(sp.hi()); prev_line = fm.get_line(prev_hi.line - 1); } for &mut (ref mut buf, _) in &mut bufs { @@ -306,6 +302,12 @@ impl Handler { self.continue_after_error.set(continue_after_error); } + // NOTE: DO NOT call this function from rustc, as it relies on `err_count` being non-zero + // if an error happened to avoid ICEs. This function should only be called from tools. + pub fn reset_err_count(&self) { + self.err_count.set(0); + } + pub fn struct_dummy<'a>(&'a self) -> DiagnosticBuilder<'a> { DiagnosticBuilder::new(self, Level::Cancelled, "") } diff --git a/src/librustc_errors/styled_buffer.rs b/src/librustc_errors/styled_buffer.rs index f1f2e6c55e..ceb94f27dc 100644 --- a/src/librustc_errors/styled_buffer.rs +++ b/src/librustc_errors/styled_buffer.rs @@ -26,14 +26,11 @@ impl StyledBuffer { } } - pub fn copy_tabs(&mut self, row: usize) { - if row < self.text.len() { - for i in row + 1..self.text.len() { - for j in 0..self.text[i].len() { - if self.text[row].len() > j && self.text[row][j] == '\t' && - self.text[i][j] == ' ' { - self.text[i][j] = '\t'; - } + fn replace_tabs(&mut self) { + for line in self.text.iter_mut() { + for c in line.iter_mut() { + if *c == '\t' { + *c = ' '; } } } @@ -43,8 +40,8 @@ impl StyledBuffer { let mut output: Vec> = vec![]; let mut styled_vec: Vec = vec![]; - // before we render, do a little patch-up work to support tabs - self.copy_tabs(3); + // before we render, replace tabs with spaces + self.replace_tabs(); for (row, row_style) in self.text.iter().zip(&self.styles) { let mut current_style = Style::NoStyle; diff --git a/src/librustc_incremental/assert_dep_graph.rs b/src/librustc_incremental/assert_dep_graph.rs index 1d58d17996..acbd3e0d63 100644 --- a/src/librustc_incremental/assert_dep_graph.rs +++ b/src/librustc_incremental/assert_dep_graph.rs @@ -209,7 +209,7 @@ fn check_paths<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } let query = tcx.dep_graph.query(); for &(_, source_def_id, ref source_dep_node) in if_this_changed { - let dependents = query.transitive_successors(source_dep_node); + let dependents = query.transitive_predecessors(source_dep_node); for &(target_span, ref target_pass, _, ref target_dep_node) in then_this_would_need { if !dependents.contains(&target_dep_node) { tcx.sess.span_err( diff 
--git a/src/librustc_incremental/calculate_svh/mod.rs b/src/librustc_incremental/calculate_svh/mod.rs deleted file mode 100644 index 94400890c5..0000000000 --- a/src/librustc_incremental/calculate_svh/mod.rs +++ /dev/null @@ -1,321 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Calculation of the (misnamed) "strict version hash" for crates and -//! items. This hash is used to tell when the HIR changed in such a -//! way that results from previous compilations may no longer be -//! applicable and hence must be recomputed. It should probably be -//! renamed to the ICH (incremental compilation hash). -//! -//! The hashes for all items are computed once at the beginning of -//! compilation and stored into a map. In addition, a hash is computed -//! of the **entire crate**. -//! -//! Storing the hashes in a map avoids the need to compute them twice -//! (once when loading prior incremental results and once when -//! saving), but it is also important for correctness: at least as of -//! the time of this writing, the typeck passes rewrites entries in -//! the dep-map in-place to accommodate UFCS resolutions. Since name -//! resolution is part of the hash, the result is that hashes computed -//! at the end of compilation would be different from those computed -//! at the beginning. - -use std::cell::RefCell; -use std::hash::Hash; -use rustc::dep_graph::{DepNode, DepKind}; -use rustc::hir; -use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId}; -use rustc::hir::map::DefPathHash; -use rustc::hir::itemlikevisit::ItemLikeVisitor; -use rustc::ich::{Fingerprint, StableHashingContext}; -use rustc::ty::TyCtxt; -use rustc::util::common::record_time; -use rustc_data_structures::stable_hasher::{StableHasher, HashStable}; -use rustc_data_structures::fx::FxHashMap; -use rustc_data_structures::accumulate_vec::AccumulateVec; - -pub type IchHasher = StableHasher; - -pub struct IncrementalHashesMap { - hashes: FxHashMap, - - // These are the metadata hashes for the current crate as they were stored - // during the last compilation session. They are only loaded if - // -Z query-dep-graph was specified and are needed for auto-tests using - // the #[rustc_metadata_dirty] and #[rustc_metadata_clean] attributes to - // check whether some metadata hash has changed in between two revisions. 
- pub prev_metadata_hashes: RefCell>, -} - -impl IncrementalHashesMap { - pub fn new() -> IncrementalHashesMap { - IncrementalHashesMap { - hashes: FxHashMap(), - prev_metadata_hashes: RefCell::new(FxHashMap()), - } - } - - pub fn insert(&mut self, k: DepNode, v: Fingerprint) { - assert!(self.hashes.insert(k, v).is_none()); - } - - pub fn iter<'a>(&'a self) - -> ::std::collections::hash_map::Iter<'a, DepNode, Fingerprint> { - self.hashes.iter() - } - - pub fn len(&self) -> usize { - self.hashes.len() - } -} - -impl<'a> ::std::ops::Index<&'a DepNode> for IncrementalHashesMap { - type Output = Fingerprint; - - fn index(&self, index: &'a DepNode) -> &Fingerprint { - match self.hashes.get(index) { - Some(fingerprint) => fingerprint, - None => { - bug!("Could not find ICH for {:?}", index); - } - } - } -} - -struct ComputeItemHashesVisitor<'a, 'tcx: 'a> { - hcx: StableHashingContext<'a, 'tcx, 'tcx>, - hashes: IncrementalHashesMap, -} - -impl<'a, 'tcx: 'a> ComputeItemHashesVisitor<'a, 'tcx> { - fn compute_and_store_ich_for_item_like(&mut self, - dep_node: DepNode, - hash_bodies: bool, - item_like: T) - where T: HashStable> - { - if !hash_bodies && !self.hcx.tcx().sess.opts.build_dep_graph() { - // If we just need the hashes in order to compute the SVH, we don't - // need have two hashes per item. Just the one containing also the - // item's body is sufficient. - return - } - - let mut hasher = IchHasher::new(); - self.hcx.while_hashing_hir_bodies(hash_bodies, |hcx| { - item_like.hash_stable(hcx, &mut hasher); - }); - - let bytes_hashed = hasher.bytes_hashed(); - let item_hash = hasher.finish(); - debug!("calculate_def_hash: dep_node={:?} hash={:?}", dep_node, item_hash); - self.hashes.insert(dep_node, item_hash); - - let tcx = self.hcx.tcx(); - let bytes_hashed = - tcx.sess.perf_stats.incr_comp_bytes_hashed.get() + - bytes_hashed; - tcx.sess.perf_stats.incr_comp_bytes_hashed.set(bytes_hashed); - } - - fn compute_crate_hash(&mut self) { - let tcx = self.hcx.tcx(); - let krate = tcx.hir.krate(); - - let mut crate_state = IchHasher::new(); - - let crate_disambiguator = tcx.sess.local_crate_disambiguator(); - "crate_disambiguator".hash(&mut crate_state); - crate_disambiguator.as_str().len().hash(&mut crate_state); - crate_disambiguator.as_str().hash(&mut crate_state); - - // add each item (in some deterministic order) to the overall - // crate hash. - { - let mut item_hashes: Vec<_> = - self.hashes.iter() - .filter_map(|(&item_dep_node, &item_hash)| { - // This `match` determines what kinds of nodes - // go into the SVH: - match item_dep_node.kind { - DepKind::Hir | - DepKind::HirBody => { - // We want to incoporate these into the - // SVH. - } - DepKind::AllLocalTraitImpls => { - // These are already covered by hashing - // the HIR. - return None - } - ref other => { - bug!("Found unexpected DepKind during \ - SVH computation: {:?}", - other) - } - } - - Some((item_dep_node, item_hash)) - }) - .collect(); - item_hashes.sort_unstable(); // avoid artificial dependencies on item ordering - item_hashes.hash(&mut crate_state); - } - - krate.attrs.hash_stable(&mut self.hcx, &mut crate_state); - - let crate_hash = crate_state.finish(); - self.hashes.insert(DepNode::new_no_params(DepKind::Krate), crate_hash); - debug!("calculate_crate_hash: crate_hash={:?}", crate_hash); - } - - fn hash_crate_root_module(&mut self, krate: &'tcx hir::Crate) { - let hir::Crate { - ref module, - // Crate attributes are not copied over to the root `Mod`, so hash - // them explicitly here. 
- ref attrs, - span, - - // These fields are handled separately: - exported_macros: _, - items: _, - trait_items: _, - impl_items: _, - bodies: _, - trait_impls: _, - trait_default_impl: _, - body_ids: _, - } = *krate; - - let def_path_hash = self.hcx.tcx().hir.definitions().def_path_hash(CRATE_DEF_INDEX); - self.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::Hir), - false, - (module, (span, attrs))); - self.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::HirBody), - true, - (module, (span, attrs))); - } - - fn compute_and_store_ich_for_trait_impls(&mut self, krate: &'tcx hir::Crate) - { - let tcx = self.hcx.tcx(); - - let mut impls: Vec<(DefPathHash, Fingerprint)> = krate - .trait_impls - .iter() - .map(|(&trait_id, impls)| { - let trait_id = tcx.def_path_hash(trait_id); - let mut impls: AccumulateVec<[_; 32]> = impls - .iter() - .map(|&node_id| { - let def_id = tcx.hir.local_def_id(node_id); - tcx.def_path_hash(def_id) - }) - .collect(); - - impls.sort_unstable(); - let mut hasher = StableHasher::new(); - impls.hash_stable(&mut self.hcx, &mut hasher); - (trait_id, hasher.finish()) - }) - .collect(); - - impls.sort_unstable(); - - let mut default_impls: AccumulateVec<[_; 32]> = krate - .trait_default_impl - .iter() - .map(|(&trait_def_id, &impl_node_id)| { - let impl_def_id = tcx.hir.local_def_id(impl_node_id); - (tcx.def_path_hash(trait_def_id), tcx.def_path_hash(impl_def_id)) - }) - .collect(); - - default_impls.sort_unstable(); - - let mut hasher = StableHasher::new(); - impls.hash_stable(&mut self.hcx, &mut hasher); - - self.hashes.insert(DepNode::new_no_params(DepKind::AllLocalTraitImpls), - hasher.finish()); - } -} - -impl<'a, 'tcx: 'a> ItemLikeVisitor<'tcx> for ComputeItemHashesVisitor<'a, 'tcx> { - fn visit_item(&mut self, item: &'tcx hir::Item) { - let def_id = self.hcx.tcx().hir.local_def_id(item.id); - let def_path_hash = self.hcx.tcx().def_path_hash(def_id); - self.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::Hir), - false, - item); - self.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::HirBody), - true, - item); - } - - fn visit_trait_item(&mut self, item: &'tcx hir::TraitItem) { - let def_id = self.hcx.tcx().hir.local_def_id(item.id); - let def_path_hash = self.hcx.tcx().def_path_hash(def_id); - self.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::Hir), - false, - item); - self.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::HirBody), - true, - item); - } - - fn visit_impl_item(&mut self, item: &'tcx hir::ImplItem) { - let def_id = self.hcx.tcx().hir.local_def_id(item.id); - let def_path_hash = self.hcx.tcx().def_path_hash(def_id); - self.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::Hir), - false, - item); - self.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::HirBody), - true, - item); - } -} - - - -pub fn compute_incremental_hashes_map<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>) - -> IncrementalHashesMap { - let _ignore = tcx.dep_graph.in_ignore(); - let krate = tcx.hir.krate(); - - let mut visitor = ComputeItemHashesVisitor { - hcx: StableHashingContext::new(tcx), - hashes: IncrementalHashesMap::new(), - }; - - record_time(&tcx.sess.perf_stats.incr_comp_hashes_time, || { - visitor.hash_crate_root_module(krate); - krate.visit_all_item_likes(&mut visitor); - - for macro_def in krate.exported_macros.iter() { - let def_id = tcx.hir.local_def_id(macro_def.id); - let def_path_hash = 
tcx.def_path_hash(def_id); - visitor.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::Hir), - false, - macro_def); - visitor.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::HirBody), - true, - macro_def); - } - - visitor.compute_and_store_ich_for_trait_impls(krate); - }); - - tcx.sess.perf_stats.incr_comp_hashes_count.set(visitor.hashes.len() as u64); - - record_time(&tcx.sess.perf_stats.svh_time, || visitor.compute_crate_hash()); - visitor.hashes -} diff --git a/src/librustc_incremental/lib.rs b/src/librustc_incremental/lib.rs index 8870033095..0294adb3f5 100644 --- a/src/librustc_incremental/lib.rs +++ b/src/librustc_incremental/lib.rs @@ -28,16 +28,15 @@ extern crate syntax; extern crate syntax_pos; mod assert_dep_graph; -mod calculate_svh; mod persist; pub use assert_dep_graph::assert_dep_graph; -pub use calculate_svh::compute_incremental_hashes_map; -pub use calculate_svh::IncrementalHashesMap; -pub use calculate_svh::IchHasher; pub use persist::load_dep_graph; +pub use persist::dep_graph_tcx_init; pub use persist::save_dep_graph; pub use persist::save_trans_partition; pub use persist::save_work_products; pub use persist::in_incr_comp_dir; +pub use persist::prepare_session_directory; pub use persist::finalize_session_directory; +pub use persist::delete_workproduct_files; diff --git a/src/librustc_incremental/persist/data.rs b/src/librustc_incremental/persist/data.rs index 06acfb5d77..fc417851b8 100644 --- a/src/librustc_incremental/persist/data.rs +++ b/src/librustc_incremental/persist/data.rs @@ -10,86 +10,11 @@ //! The data that we will serialize and deserialize. -use rustc::dep_graph::{DepNode, WorkProduct, WorkProductId}; +use rustc::dep_graph::{WorkProduct, WorkProductId}; use rustc::hir::def_id::DefIndex; use rustc::hir::map::DefPathHash; -use rustc::ich::Fingerprint; use rustc::middle::cstore::EncodedMetadataHash; use rustc_data_structures::fx::FxHashMap; -use rustc_data_structures::indexed_vec::{IndexVec, Idx}; - -/// Data for use when recompiling the **current crate**. -#[derive(Debug, RustcEncodable, RustcDecodable)] -pub struct SerializedDepGraph { - /// The set of all DepNodes in the graph - pub nodes: IndexVec, - /// For each DepNode, stores the list of edges originating from that - /// DepNode. Encoded as a [start, end) pair indexing into edge_list_data, - /// which holds the actual DepNodeIndices of the target nodes. - pub edge_list_indices: IndexVec, - /// A flattened list of all edge targets in the graph. Edge sources are - /// implicit in edge_list_indices. - pub edge_list_data: Vec, - - /// These are output nodes that have no incoming edges. We track - /// these separately so that when we reload all edges, we don't - /// lose track of these nodes. - pub bootstrap_outputs: Vec, - - /// These are hashes of two things: - /// - the HIR nodes in this crate - /// - the metadata nodes from dependent crates we use - /// - /// In each case, we store a hash summarizing the contents of - /// those items as they were at the time we did this compilation. - /// In the case of HIR nodes, this hash is derived by walking the - /// HIR itself. In the case of metadata nodes, the hash is loaded - /// from saved state. - /// - /// When we do the next compile, we will load these back up and - /// compare them against the hashes we see at that time, which - /// will tell us what has changed, either in this crate or in some - /// crate that we depend on. 
- /// - /// Because they will be reloaded, we don't store the DefId (which - /// will be different when we next compile) related to each node, - /// but rather the `DefPathIndex`. This can then be retraced - /// to find the current def-id. - pub hashes: Vec<(DepNodeIndex, Fingerprint)>, -} - -impl SerializedDepGraph { - pub fn edge_targets_from(&self, source: DepNodeIndex) -> &[DepNodeIndex] { - let targets = self.edge_list_indices[source]; - &self.edge_list_data[targets.0 as usize .. targets.1 as usize] - } -} - -/// The index of a DepNode in the SerializedDepGraph::nodes array. -#[derive(Copy, Clone, Hash, Eq, PartialEq, Ord, PartialOrd, Debug, - RustcEncodable, RustcDecodable)] -pub struct DepNodeIndex(pub u32); - -impl DepNodeIndex { - #[inline] - pub fn new(idx: usize) -> DepNodeIndex { - assert!(idx <= ::std::u32::MAX as usize); - DepNodeIndex(idx as u32) - } -} - -impl Idx for DepNodeIndex { - #[inline] - fn new(idx: usize) -> Self { - assert!(idx <= ::std::u32::MAX as usize); - DepNodeIndex(idx as u32) - } - - #[inline] - fn index(self) -> usize { - self.0 as usize - } -} #[derive(Debug, RustcEncodable, RustcDecodable)] pub struct SerializedWorkProduct { diff --git a/src/librustc_incremental/persist/dirty_clean.rs b/src/librustc_incremental/persist/dirty_clean.rs index 126057fd04..0270e3618e 100644 --- a/src/librustc_incremental/persist/dirty_clean.rs +++ b/src/librustc_incremental/persist/dirty_clean.rs @@ -8,18 +8,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Debugging code to test the state of the dependency graph just -//! after it is loaded from disk and just after it has been saved. +//! Debugging code to test fingerprints computed for query results. //! For each node marked with `#[rustc_clean]` or `#[rustc_dirty]`, -//! we will check that a suitable node for that item either appears -//! or does not appear in the dep-graph, as appropriate: +//! we will compare the fingerprint from the current and from the previous +//! compilation session as appropriate: //! //! - `#[rustc_dirty(label="TypeckTables", cfg="rev2")]` if we are -//! in `#[cfg(rev2)]`, then there MUST NOT be a node -//! `DepNode::TypeckTables(X)` where `X` is the def-id of the -//! current node. +//! in `#[cfg(rev2)]`, then the fingerprints associated with +//! `DepNode::TypeckTables(X)` must be DIFFERENT (`X` is the def-id of the +//! current node). //! - `#[rustc_clean(label="TypeckTables", cfg="rev2")]` same as above, -//! except that the node MUST exist. +//! except that the fingerprints must be the SAME. //! //! Errors are reported if we are in the suitable configuration but //! the required condition is not met. @@ -40,9 +39,9 @@ //! previous revision to compare things to. //! 
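The hunk below replaces the old single-label `dep_node` lookup with a comma-separated `label="A,B,C"` list that must contain only known, non-repeated names. A minimal standalone sketch of that parsing step (illustrative only; `is_known_label` is a made-up stand-in for `DepNode::has_label_string`, and the label names are placeholders):

```rust
use std::collections::HashSet;

// Made-up stand-in for `DepNode::has_label_string`.
fn is_known_label(label: &str) -> bool {
    const KNOWN: &[&str] = &["Hir", "HirBody", "TypeckTables"];
    KNOWN.contains(&label)
}

// Split a `label="A, B, C"` value into a set of labels, rejecting
// unrecognized or repeated names, in the spirit of `resolve_labels` below.
fn resolve_labels(value: &str) -> Result<HashSet<String>, String> {
    let mut out = HashSet::new();
    for label in value.split(',') {
        let label = label.trim();
        if !is_known_label(label) {
            return Err(format!("dep-node label `{}` not recognized", label));
        }
        if !out.insert(label.to_string()) {
            return Err(format!("dep-node label `{}` is repeated", label));
        }
    }
    Ok(out)
}

fn main() {
    assert_eq!(resolve_labels("Hir, HirBody").unwrap().len(), 2);
    assert!(resolve_labels("Hir, Hir").is_err());
    assert!(resolve_labels("NotALabel").is_err());
}
```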
-use super::data::DepNodeIndex; -use super::load::DirtyNodes; -use rustc::dep_graph::{DepGraphQuery, DepNode, DepKind}; +use std::collections::HashSet; +use std::vec::Vec; +use rustc::dep_graph::DepNode; use rustc::hir; use rustc::hir::def_id::DefId; use rustc::hir::itemlikevisit::ItemLikeVisitor; @@ -51,41 +50,24 @@ use rustc::ich::{Fingerprint, ATTR_DIRTY, ATTR_CLEAN, ATTR_DIRTY_METADATA, ATTR_CLEAN_METADATA}; use syntax::ast::{self, Attribute, NestedMetaItem}; use rustc_data_structures::fx::{FxHashSet, FxHashMap}; -use rustc_data_structures::indexed_vec::IndexVec; use syntax_pos::Span; use rustc::ty::TyCtxt; const LABEL: &'static str = "label"; const CFG: &'static str = "cfg"; -pub fn check_dirty_clean_annotations<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - nodes: &IndexVec, - dirty_inputs: &DirtyNodes) { +type Labels = HashSet; + +pub fn check_dirty_clean_annotations<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { // can't add `#[rustc_dirty]` etc without opting in to this feature if !tcx.sess.features.borrow().rustc_attrs { return; } let _ignore = tcx.dep_graph.in_ignore(); - let dirty_inputs: FxHashSet = - dirty_inputs.keys() - .filter_map(|dep_node_index| { - let dep_node = nodes[*dep_node_index]; - if dep_node.extract_def_id(tcx).is_some() { - Some(dep_node) - } else { - None - } - }) - .collect(); - - let query = tcx.dep_graph.query(); - debug!("query-nodes: {:?}", query.nodes()); let krate = tcx.hir.krate(); let mut dirty_clean_visitor = DirtyCleanVisitor { tcx, - query: &query, - dirty_inputs, checked_attrs: FxHashSet(), }; krate.visit_all_item_likes(&mut dirty_clean_visitor); @@ -105,29 +87,50 @@ pub fn check_dirty_clean_annotations<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, pub struct DirtyCleanVisitor<'a, 'tcx:'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, - query: &'a DepGraphQuery, - dirty_inputs: FxHashSet, checked_attrs: FxHashSet, } impl<'a, 'tcx> DirtyCleanVisitor<'a, 'tcx> { - fn dep_node(&self, attr: &Attribute, def_id: DefId) -> DepNode { - let def_path_hash = self.tcx.def_path_hash(def_id); + fn labels(&self, attr: &Attribute) -> Labels { for item in attr.meta_item_list().unwrap_or_else(Vec::new) { if item.check_name(LABEL) { let value = expect_associated_value(self.tcx, &item); - match DepNode::from_label_string(&value.as_str(), def_path_hash) { - Ok(dep_node) => return dep_node, - Err(()) => { - self.tcx.sess.span_fatal( - item.span, - &format!("dep-node label `{}` not recognized", value)); - } + return self.resolve_labels(&item, value.as_str().as_ref()); + } + } + self.tcx.sess.span_fatal(attr.span, "no `label` found"); + } + + fn resolve_labels(&self, item: &NestedMetaItem, value: &str) -> Labels { + let mut out: Labels = HashSet::new(); + for label in value.split(',') { + let label = label.trim(); + if DepNode::has_label_string(label) { + if out.contains(label) { + self.tcx.sess.span_fatal( + item.span, + &format!("dep-node label `{}` is repeated", label)); } + out.insert(label.to_string()); + } else { + self.tcx.sess.span_fatal( + item.span, + &format!("dep-node label `{}` not recognized", label)); } } + out + } - self.tcx.sess.span_fatal(attr.span, "no `label` found"); + fn dep_nodes(&self, labels: &Labels, def_id: DefId) -> Vec { + let mut out = Vec::with_capacity(labels.len()); + let def_path_hash = self.tcx.def_path_hash(def_id); + for label in labels.iter() { + match DepNode::from_label_string(label, def_path_hash) { + Ok(dep_node) => out.push(dep_node), + Err(()) => unreachable!(), + } + } + out } fn dep_node_str(&self, dep_node: &DepNode) -> String { @@ -143,59 +146,28 @@ 
impl<'a, 'tcx> DirtyCleanVisitor<'a, 'tcx> { fn assert_dirty(&self, item_span: Span, dep_node: DepNode) { debug!("assert_dirty({:?})", dep_node); - match dep_node.kind { - DepKind::Krate | - DepKind::Hir | - DepKind::HirBody => { - // HIR nodes are inputs, so if we are asserting that the HIR node is - // dirty, we check the dirty input set. - if !self.dirty_inputs.contains(&dep_node) { - let dep_node_str = self.dep_node_str(&dep_node); - self.tcx.sess.span_err( - item_span, - &format!("`{}` not found in dirty set, but should be dirty", - dep_node_str)); - } - } - _ => { - // Other kinds of nodes would be targets, so check if - // the dep-graph contains the node. - if self.query.contains_node(&dep_node) { - let dep_node_str = self.dep_node_str(&dep_node); - self.tcx.sess.span_err( - item_span, - &format!("`{}` found in dep graph, but should be dirty", dep_node_str)); - } - } + let current_fingerprint = self.tcx.dep_graph.fingerprint_of(&dep_node); + let prev_fingerprint = self.tcx.dep_graph.prev_fingerprint_of(&dep_node); + + if Some(current_fingerprint) == prev_fingerprint { + let dep_node_str = self.dep_node_str(&dep_node); + self.tcx.sess.span_err( + item_span, + &format!("`{}` should be dirty but is not", dep_node_str)); } } fn assert_clean(&self, item_span: Span, dep_node: DepNode) { debug!("assert_clean({:?})", dep_node); - match dep_node.kind { - DepKind::Krate | - DepKind::Hir | - DepKind::HirBody => { - // For HIR nodes, check the inputs. - if self.dirty_inputs.contains(&dep_node) { - let dep_node_str = self.dep_node_str(&dep_node); - self.tcx.sess.span_err( - item_span, - &format!("`{}` found in dirty-node set, but should be clean", - dep_node_str)); - } - } - _ => { - // Otherwise, check if the dep-node exists. - if !self.query.contains_node(&dep_node) { - let dep_node_str = self.dep_node_str(&dep_node); - self.tcx.sess.span_err( - item_span, - &format!("`{}` not found in dep graph, but should be clean", - dep_node_str)); - } - } + let current_fingerprint = self.tcx.dep_graph.fingerprint_of(&dep_node); + let prev_fingerprint = self.tcx.dep_graph.prev_fingerprint_of(&dep_node); + + if Some(current_fingerprint) != prev_fingerprint { + let dep_node_str = self.dep_node_str(&dep_node); + self.tcx.sess.span_err( + item_span, + &format!("`{}` should be clean but is not", dep_node_str)); } } @@ -205,12 +177,18 @@ impl<'a, 'tcx> DirtyCleanVisitor<'a, 'tcx> { if attr.check_name(ATTR_DIRTY) { if check_config(self.tcx, attr) { self.checked_attrs.insert(attr.id); - self.assert_dirty(item_span, self.dep_node(attr, def_id)); + let labels = self.labels(attr); + for dep_node in self.dep_nodes(&labels, def_id) { + self.assert_dirty(item_span, dep_node); + } } } else if attr.check_name(ATTR_CLEAN) { if check_config(self.tcx, attr) { self.checked_attrs.insert(attr.id); - self.assert_clean(item_span, self.dep_node(attr, def_id)); + let labels = self.labels(attr); + for dep_node in self.dep_nodes(&labels, def_id) { + self.assert_clean(item_span, dep_node); + } } } } diff --git a/src/librustc_incremental/persist/fs.rs b/src/librustc_incremental/persist/fs.rs index 28d33d9528..9b12b75558 100644 --- a/src/librustc_incremental/persist/fs.rs +++ b/src/librustc_incremental/persist/fs.rs @@ -114,15 +114,12 @@ //! unsupported file system and emit a warning in that case. This is not yet //! implemented. 
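The rewritten `assert_dirty`/`assert_clean` above boil down to comparing a dep-node's current fingerprint with the one recorded by the previous session. A minimal sketch of that comparison, with a plain `u64` standing in for rustc's `Fingerprint` type:

```rust
// A plain u64 standing in for rustc's wider `Fingerprint`.
type Fingerprint = u64;

// "Dirty" means the result changed: the current fingerprint must differ
// from the previous one (or the node must not have existed before).
fn check_dirty(current: Fingerprint, previous: Option<Fingerprint>) -> Result<(), String> {
    if Some(current) == previous {
        Err("should be dirty but is not".to_string())
    } else {
        Ok(())
    }
}

// "Clean" means the result was unchanged: the fingerprints must match.
fn check_clean(current: Fingerprint, previous: Option<Fingerprint>) -> Result<(), String> {
    if Some(current) != previous {
        Err("should be clean but is not".to_string())
    } else {
        Ok(())
    }
}

fn main() {
    assert!(check_clean(42, Some(42)).is_ok());
    assert!(check_dirty(42, Some(41)).is_ok());
    assert!(check_dirty(42, Some(42)).is_err());
    assert!(check_clean(42, None).is_err());
}
```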
-use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; use rustc::hir::svh::Svh; use rustc::session::Session; -use rustc::ty::TyCtxt; use rustc::util::fs as fs_util; use rustc_data_structures::{flock, base_n}; use rustc_data_structures::fx::{FxHashSet, FxHashMap}; -use std::ffi::OsString; use std::fs as std_fs; use std::io; use std::mem; @@ -153,10 +150,6 @@ pub fn metadata_hash_export_path(sess: &Session) -> PathBuf { in_incr_comp_dir_sess(sess, METADATA_HASHES_FILENAME) } -pub fn metadata_hash_import_path(import_session_dir: &Path) -> PathBuf { - import_session_dir.join(METADATA_HASHES_FILENAME) -} - pub fn lock_file_path(session_dir: &Path) -> PathBuf { let crate_dir = session_dir.parent().unwrap(); @@ -193,13 +186,21 @@ pub fn in_incr_comp_dir(incr_comp_session_dir: &Path, file_name: &str) -> PathBu /// a dep-graph and work products from a previous session. /// If the call fails, the fn may leave behind an invalid session directory. /// The garbage collection will take care of it. -pub fn prepare_session_directory(tcx: TyCtxt) -> Result { +pub fn prepare_session_directory(sess: &Session, + crate_name: &str, + crate_disambiguator: &str) { + if sess.opts.incremental.is_none() { + return + } + debug!("prepare_session_directory"); // {incr-comp-dir}/{crate-name-and-disambiguator} - let crate_dir = crate_path_tcx(tcx, LOCAL_CRATE); + let crate_dir = crate_path(sess, crate_name, crate_disambiguator); debug!("crate-dir: {}", crate_dir.display()); - try!(create_dir(tcx.sess, &crate_dir, "crate")); + if create_dir(sess, &crate_dir, "crate").is_err() { + return + } // Hack: canonicalize the path *after creating the directory* // because, on windows, long paths can cause problems; @@ -208,9 +209,9 @@ pub fn prepare_session_directory(tcx: TyCtxt) -> Result { let crate_dir = match crate_dir.canonicalize() { Ok(v) => v, Err(err) => { - tcx.sess.err(&format!("incremental compilation: error canonicalizing path `{}`: {}", - crate_dir.display(), err)); - return Err(()); + sess.err(&format!("incremental compilation: error canonicalizing path `{}`: {}", + crate_dir.display(), err)); + return } }; @@ -225,11 +226,16 @@ pub fn prepare_session_directory(tcx: TyCtxt) -> Result { // Lock the new session directory. If this fails, return an // error without retrying - let (directory_lock, lock_file_path) = try!(lock_directory(tcx.sess, &session_dir)); + let (directory_lock, lock_file_path) = match lock_directory(sess, &session_dir) { + Ok(e) => e, + Err(_) => return, + }; // Now that we have the lock, we can actually create the session // directory - try!(create_dir(tcx.sess, &session_dir, "session")); + if create_dir(sess, &session_dir, "session").is_err() { + return + } // Find a suitable source directory to copy from. Ignore those that we // have already tried before. @@ -243,14 +249,14 @@ pub fn prepare_session_directory(tcx: TyCtxt) -> Result { debug!("no source directory found. 
Continuing with empty session \ directory."); - tcx.sess.init_incr_comp_session(session_dir, directory_lock); - return Ok(false) + sess.init_incr_comp_session(session_dir, directory_lock, false); + return }; debug!("attempting to copy data from source: {}", source_directory.display()); - let print_file_copy_stats = tcx.sess.opts.debugging_opts.incremental_info; + let print_file_copy_stats = sess.opts.debugging_opts.incremental_info; // Try copying over all files from the source directory if let Ok(allows_links) = copy_files(&session_dir, &source_directory, @@ -259,7 +265,7 @@ pub fn prepare_session_directory(tcx: TyCtxt) -> Result { source_directory.display()); if !allows_links { - tcx.sess.warn(&format!("Hard linking files in the incremental \ + sess.warn(&format!("Hard linking files in the incremental \ compilation cache failed. Copying files \ instead. Consider moving the cache \ directory to a file system which supports \ @@ -268,8 +274,8 @@ pub fn prepare_session_directory(tcx: TyCtxt) -> Result { ); } - tcx.sess.init_incr_comp_session(session_dir, directory_lock); - return Ok(true) + sess.init_incr_comp_session(session_dir, directory_lock, true); + return } else { debug!("copying failed - trying next directory"); @@ -280,13 +286,13 @@ pub fn prepare_session_directory(tcx: TyCtxt) -> Result { // Try to remove the session directory we just allocated. We don't // know if there's any garbage in it from the failed copy action. if let Err(err) = safe_remove_dir_all(&session_dir) { - tcx.sess.warn(&format!("Failed to delete partly initialized \ - session dir `{}`: {}", - session_dir.display(), - err)); + sess.warn(&format!("Failed to delete partly initialized \ + session dir `{}`: {}", + session_dir.display(), + err)); } - delete_session_dir_lock_file(tcx.sess, &lock_file_path); + delete_session_dir_lock_file(sess, &lock_file_path); mem::drop(directory_lock); } } @@ -603,70 +609,6 @@ fn string_to_timestamp(s: &str) -> Result { Ok(UNIX_EPOCH + duration) } -fn crate_path_tcx(tcx: TyCtxt, cnum: CrateNum) -> PathBuf { - crate_path(tcx.sess, &tcx.crate_name(cnum).as_str(), &tcx.crate_disambiguator(cnum).as_str()) -} - -/// Finds the session directory containing the correct metadata hashes file for -/// the given crate. In order to do that it has to compute the crate directory -/// of the given crate, and in there, look for the session directory with the -/// correct SVH in it. -/// Note that we have to match on the exact SVH here, not just the -/// crate's (name, disambiguator) pair. The metadata hashes are only valid for -/// the exact version of the binary we are reading from now (i.e. the hashes -/// are part of the dependency graph of a specific compilation session). 
-pub fn find_metadata_hashes_for(tcx: TyCtxt, cnum: CrateNum) -> Option { - let crate_directory = crate_path_tcx(tcx, cnum); - - if !crate_directory.exists() { - return None - } - - let dir_entries = match crate_directory.read_dir() { - Ok(dir_entries) => dir_entries, - Err(e) => { - tcx.sess - .err(&format!("incremental compilation: Could not read crate directory `{}`: {}", - crate_directory.display(), e)); - return None - } - }; - - let target_svh = tcx.sess.cstore.crate_hash(cnum); - let target_svh = base_n::encode(target_svh.as_u64(), INT_ENCODE_BASE); - - let sub_dir = find_metadata_hashes_iter(&target_svh, dir_entries.filter_map(|e| { - e.ok().map(|e| e.file_name().to_string_lossy().into_owned()) - })); - - sub_dir.map(|sub_dir_name| crate_directory.join(&sub_dir_name)) -} - -fn find_metadata_hashes_iter<'a, I>(target_svh: &str, iter: I) -> Option - where I: Iterator -{ - for sub_dir_name in iter { - if !is_session_directory(&sub_dir_name) || !is_finalized(&sub_dir_name) { - // This is not a usable session directory - continue - } - - let is_match = if let Some(last_dash_pos) = sub_dir_name.rfind("-") { - let candidate_svh = &sub_dir_name[last_dash_pos + 1 .. ]; - target_svh == candidate_svh - } else { - // some kind of invalid directory name - continue - }; - - if is_match { - return Some(OsString::from(sub_dir_name)) - } - } - - None -} - fn crate_path(sess: &Session, crate_name: &str, crate_disambiguator: &str) @@ -1006,52 +948,3 @@ fn test_find_source_directory_in_iter() { PathBuf::from("crate-dir/s-1234-0000-working")].into_iter(), &already_visited), None); } - -#[test] -fn test_find_metadata_hashes_iter() -{ - assert_eq!(find_metadata_hashes_iter("testsvh2", - vec![ - String::from("s-timestamp1-testsvh1"), - String::from("s-timestamp2-testsvh2"), - String::from("s-timestamp3-testsvh3"), - ].into_iter()), - Some(OsString::from("s-timestamp2-testsvh2")) - ); - - assert_eq!(find_metadata_hashes_iter("testsvh2", - vec![ - String::from("s-timestamp1-testsvh1"), - String::from("s-timestamp2-testsvh2"), - String::from("invalid-name"), - ].into_iter()), - Some(OsString::from("s-timestamp2-testsvh2")) - ); - - assert_eq!(find_metadata_hashes_iter("testsvh2", - vec![ - String::from("s-timestamp1-testsvh1"), - String::from("s-timestamp2-testsvh2-working"), - String::from("s-timestamp3-testsvh3"), - ].into_iter()), - None - ); - - assert_eq!(find_metadata_hashes_iter("testsvh1", - vec![ - String::from("s-timestamp1-random1-working"), - String::from("s-timestamp2-random2-working"), - String::from("s-timestamp3-random3-working"), - ].into_iter()), - None - ); - - assert_eq!(find_metadata_hashes_iter("testsvh2", - vec![ - String::from("timestamp1-testsvh2"), - String::from("timestamp2-testsvh2"), - String::from("timestamp3-testsvh2"), - ].into_iter()), - None - ); -} diff --git a/src/librustc_incremental/persist/hash.rs b/src/librustc_incremental/persist/hash.rs deleted file mode 100644 index 9d762a74bf..0000000000 --- a/src/librustc_incremental/persist/hash.rs +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
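The removed `find_metadata_hashes_for`/`find_metadata_hashes_iter` above pick a session directory by matching the SVH suffix after the last `-` in the directory name, skipping directories that are not usable. A simplified standalone sketch (the `is_session_directory`/`is_finalized` helpers here are rough stand-ins for the real checks elsewhere in `fs.rs`):

```rust
// Rough stand-ins: a usable session directory starts with "s-", and a
// finalized one no longer carries the "-working" suffix.
fn is_session_directory(name: &str) -> bool {
    name.starts_with("s-")
}

fn is_finalized(name: &str) -> bool {
    !name.ends_with("-working")
}

// Find the directory whose name ends in exactly the SVH we are looking
// for (the component after the last '-').
fn find_metadata_hashes_iter<I>(target_svh: &str, iter: I) -> Option<String>
    where I: Iterator<Item = String>
{
    for sub_dir_name in iter {
        if !is_session_directory(&sub_dir_name) || !is_finalized(&sub_dir_name) {
            continue; // not a usable session directory
        }
        if let Some(last_dash_pos) = sub_dir_name.rfind('-') {
            if &sub_dir_name[last_dash_pos + 1..] == target_svh {
                return Some(sub_dir_name);
            }
        }
    }
    None
}

fn main() {
    let dirs = vec![
        "s-timestamp1-testsvh1".to_string(),
        "s-timestamp2-testsvh2".to_string(),
        "s-timestamp3-testsvh3-working".to_string(),
    ];
    assert_eq!(find_metadata_hashes_iter("testsvh2", dirs.clone().into_iter()),
               Some("s-timestamp2-testsvh2".to_string()));
    assert_eq!(find_metadata_hashes_iter("testsvh3", dirs.into_iter()), None);
}
```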
- -use rustc::dep_graph::{DepNode, DepKind}; -use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; -use rustc::hir::svh::Svh; -use rustc::ich::Fingerprint; -use rustc::ty::TyCtxt; -use rustc_data_structures::fx::FxHashMap; -use rustc_data_structures::flock; -use rustc_serialize::Decodable; -use rustc_serialize::opaque::Decoder; - -use IncrementalHashesMap; -use super::data::*; -use super::fs::*; -use super::file_format; - -use std::hash::Hash; -use std::fmt::Debug; - -pub struct HashContext<'a, 'tcx: 'a> { - pub tcx: TyCtxt<'a, 'tcx, 'tcx>, - incremental_hashes_map: &'a IncrementalHashesMap, - metadata_hashes: FxHashMap, - crate_hashes: FxHashMap, -} - -impl<'a, 'tcx> HashContext<'a, 'tcx> { - pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, - incremental_hashes_map: &'a IncrementalHashesMap) - -> Self { - HashContext { - tcx, - incremental_hashes_map, - metadata_hashes: FxHashMap(), - crate_hashes: FxHashMap(), - } - } - - pub fn is_hashable(tcx: TyCtxt, dep_node: &DepNode) -> bool { - match dep_node.kind { - DepKind::Krate | - DepKind::Hir | - DepKind::HirBody => - true, - DepKind::MetaData => { - let def_id = dep_node.extract_def_id(tcx).unwrap(); - !def_id.is_local() - } - _ => false, - } - } - - pub fn hash(&mut self, dep_node: &DepNode) -> Option { - match dep_node.kind { - DepKind::Krate => { - Some(self.incremental_hashes_map[dep_node]) - } - - // HIR nodes (which always come from our crate) are an input: - DepKind::Hir | - DepKind::HirBody => { - Some(self.incremental_hashes_map[dep_node]) - } - - // MetaData from other crates is an *input* to us. - // MetaData nodes from *our* crates are an *output*; we - // don't hash them, but we do compute a hash for them and - // save it for others to use. - DepKind::MetaData => { - let def_id = dep_node.extract_def_id(self.tcx).unwrap(); - if !def_id.is_local() { - Some(self.metadata_hash(def_id, - def_id.krate, - |this| &mut this.metadata_hashes)) - } else { - None - } - } - - _ => { - // Other kinds of nodes represent computed by-products - // that we don't hash directly; instead, they should - // have some transitive dependency on a Hir or - // MetaData node, so we'll just hash that - None - } - } - } - - fn metadata_hash(&mut self, - key: K, - cnum: CrateNum, - cache: C) - -> Fingerprint - where K: Hash + Eq + Debug, - C: Fn(&mut Self) -> &mut FxHashMap, - { - debug!("metadata_hash(key={:?})", key); - - debug_assert!(cnum != LOCAL_CRATE); - loop { - // check whether we have a result cached for this def-id - if let Some(&hash) = cache(self).get(&key) { - return hash; - } - - // check whether we did not find detailed metadata for this - // krate; in that case, we just use the krate's overall hash - if let Some(&svh) = self.crate_hashes.get(&cnum) { - // micro-"optimization": avoid a cache miss if we ask - // for metadata from this particular def-id again. - let fingerprint = svh_to_fingerprint(svh); - cache(self).insert(key, fingerprint); - - return fingerprint; - } - - // otherwise, load the data and repeat. 
- self.load_data(cnum); - assert!(self.crate_hashes.contains_key(&cnum)); - } - } - - fn load_data(&mut self, cnum: CrateNum) { - debug!("load_data(cnum={})", cnum); - - let svh = self.tcx.sess.cstore.crate_hash(cnum); - let old = self.crate_hashes.insert(cnum, svh); - debug!("load_data: svh={}", svh); - assert!(old.is_none(), "loaded data for crate {:?} twice", cnum); - - if let Some(session_dir) = find_metadata_hashes_for(self.tcx, cnum) { - debug!("load_data: session_dir={:?}", session_dir); - - // Lock the directory we'll be reading the hashes from. - let lock_file_path = lock_file_path(&session_dir); - let _lock = match flock::Lock::new(&lock_file_path, - false, // don't wait - false, // don't create the lock-file - false) { // shared lock - Ok(lock) => lock, - Err(err) => { - debug!("Could not acquire lock on `{}` while trying to \ - load metadata hashes: {}", - lock_file_path.display(), - err); - - // Could not acquire the lock. The directory is probably in - // in the process of being deleted. It's OK to just exit - // here. It's the same scenario as if the file had not - // existed in the first place. - return - } - }; - - let hashes_file_path = metadata_hash_import_path(&session_dir); - - match file_format::read_file(self.tcx.sess, &hashes_file_path) - { - Ok(Some(data)) => { - match self.load_from_data(cnum, &data, svh) { - Ok(()) => { } - Err(err) => { - bug!("decoding error in dep-graph from `{}`: {}", - &hashes_file_path.display(), err); - } - } - } - Ok(None) => { - // If the file is not found, that's ok. - } - Err(err) => { - self.tcx.sess.err( - &format!("could not load dep information from `{}`: {}", - hashes_file_path.display(), err)); - } - } - } - } - - fn load_from_data(&mut self, - cnum: CrateNum, - data: &[u8], - expected_svh: Svh) -> Result<(), String> { - debug!("load_from_data(cnum={})", cnum); - - // Load up the hashes for the def-ids from this crate. - let mut decoder = Decoder::new(data, 0); - let svh_in_hashes_file = Svh::decode(&mut decoder)?; - - if svh_in_hashes_file != expected_svh { - // We should not be able to get here. If we do, then - // `fs::find_metadata_hashes_for()` has messed up. - bug!("mismatch between SVH in crate and SVH in incr. comp. hashes") - } - - let serialized_hashes = SerializedMetadataHashes::decode(&mut decoder)?; - for serialized_hash in serialized_hashes.entry_hashes { - // the hashes are stored with just a def-index, which is - // always relative to the old crate; convert that to use - // our internal crate number - let def_id = DefId { krate: cnum, index: serialized_hash.def_index }; - - // record the hash for this dep-node - let old = self.metadata_hashes.insert(def_id, serialized_hash.hash); - debug!("load_from_data: def_id={:?} hash={}", def_id, serialized_hash.hash); - assert!(old.is_none(), "already have hash for {:?}", def_id); - } - - Ok(()) - } -} - -fn svh_to_fingerprint(svh: Svh) -> Fingerprint { - Fingerprint::from_smaller_hash(svh.as_u64()) -} diff --git a/src/librustc_incremental/persist/load.rs b/src/librustc_incremental/persist/load.rs index 2c43896ec7..cdfc9f2edc 100644 --- a/src/librustc_incremental/persist/load.rs +++ b/src/librustc_incremental/persist/load.rs @@ -10,86 +10,68 @@ //! Code to save/load the dep-graph from files. 
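Both the removed `decode_dep_graph` and the new `load_dep_graph` further below begin by decoding a hash of the tracked command-line arguments and discard the whole cache if it differs from the current session's hash. A minimal sketch of that guard, using a hand-rolled little-endian header instead of `rustc_serialize` (the encoding here is invented for illustration, not the on-disk format):

```rust
// Encode the cache as: 8-byte little-endian options hash, then the payload.
fn save_cache(options_hash: u64, payload: &[u8]) -> Vec<u8> {
    let mut bytes = options_hash.to_le_bytes().to_vec();
    bytes.extend_from_slice(payload);
    bytes
}

// Return the payload only if the recorded options hash matches the current
// one; otherwise the cache is unusable and compilation starts fresh.
fn load_cache(bytes: &[u8], current_options_hash: u64) -> Option<&[u8]> {
    if bytes.len() < 8 {
        return None;
    }
    let mut hash_bytes = [0u8; 8];
    hash_bytes.copy_from_slice(&bytes[..8]);
    if u64::from_le_bytes(hash_bytes) != current_options_hash {
        // Differing command-line arguments: ignore the cache entirely.
        return None;
    }
    Some(&bytes[8..])
}

fn main() {
    let cache = save_cache(0xC0FFEE, b"serialized dep-graph");
    assert_eq!(load_cache(&cache, 0xC0FFEE), Some(&b"serialized dep-graph"[..]));
    assert_eq!(load_cache(&cache, 0xDEAD_BEEF), None);
}
```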
-use rustc::dep_graph::{DepNode, WorkProductId, DepKind}; -use rustc::hir::def_id::DefId; +use rustc::dep_graph::{PreviousDepGraph, SerializedDepGraph}; use rustc::hir::svh::Svh; use rustc::ich::Fingerprint; use rustc::session::Session; use rustc::ty::TyCtxt; -use rustc_data_structures::fx::{FxHashSet, FxHashMap}; -use rustc_data_structures::indexed_vec::IndexVec; +use rustc::util::nodemap::DefIdMap; use rustc_serialize::Decodable as RustcDecodable; use rustc_serialize::opaque::Decoder; -use std::path::{Path}; +use std::path::Path; -use IncrementalHashesMap; use super::data::*; -use super::dirty_clean; -use super::hash::*; use super::fs::*; use super::file_format; use super::work_product; -// The key is a dirty node. The value is **some** base-input that we -// can blame it on. -pub type DirtyNodes = FxHashMap; - -/// If we are in incremental mode, and a previous dep-graph exists, -/// then load up those nodes/edges that are still valid into the -/// dep-graph for this session. (This is assumed to be running very -/// early in compilation, before we've really done any work, but -/// actually it doesn't matter all that much.) See `README.md` for -/// more general overview. -pub fn load_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - incremental_hashes_map: &IncrementalHashesMap) { - if tcx.sess.opts.incremental.is_none() { - return; +pub fn dep_graph_tcx_init<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { + if !tcx.dep_graph.is_fully_enabled() { + return } - match prepare_session_directory(tcx) { - Ok(true) => { - // We successfully allocated a session directory and there is - // something in it to load, so continue - } - Ok(false) => { - // We successfully allocated a session directory, but there is no - // dep-graph data in it to load (because this is the first - // compilation session with this incr. comp. dir.) - return - } - Err(()) => { - // Something went wrong while trying to allocate the session - // directory. Don't try to use it any further. - return - } + tcx.allocate_metadata_dep_nodes(); + tcx.precompute_in_scope_traits_hashes(); + + if tcx.sess.incr_comp_session_dir_opt().is_none() { + // If we are only building with -Zquery-dep-graph but without an actual + // incr. comp. session directory, we exit here. Otherwise we'd fail + // when trying to load work products. 
+ return } - let _ignore = tcx.dep_graph.in_ignore(); - load_dep_graph_if_exists(tcx, incremental_hashes_map); -} + let work_products_path = work_products_path(tcx.sess); + if let Some(work_products_data) = load_data(tcx.sess, &work_products_path) { + // Decode the list of work_products + let mut work_product_decoder = Decoder::new(&work_products_data[..], 0); + let work_products: Vec = + RustcDecodable::decode(&mut work_product_decoder).unwrap_or_else(|e| { + let msg = format!("Error decoding `work-products` from incremental \ + compilation session directory: {}", e); + tcx.sess.fatal(&msg[..]) + }); -fn load_dep_graph_if_exists<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - incremental_hashes_map: &IncrementalHashesMap) { - let dep_graph_path = dep_graph_path(tcx.sess); - let dep_graph_data = match load_data(tcx.sess, &dep_graph_path) { - Some(p) => p, - None => return // no file - }; + for swp in work_products { + let mut all_files_exist = true; + for &(_, ref file_name) in swp.work_product.saved_files.iter() { + let path = in_incr_comp_dir_sess(tcx.sess, file_name); + if !path.exists() { + all_files_exist = false; - let work_products_path = work_products_path(tcx.sess); - let work_products_data = match load_data(tcx.sess, &work_products_path) { - Some(p) => p, - None => return // no file - }; + if tcx.sess.opts.debugging_opts.incremental_info { + eprintln!("incremental: could not find file for work \ + product: {}", path.display()); + } + } + } - match decode_dep_graph(tcx, incremental_hashes_map, &dep_graph_data, &work_products_data) { - Ok(dirty_nodes) => dirty_nodes, - Err(err) => { - tcx.sess.warn( - &format!("decoding error in dep-graph from `{}` and `{}`: {}", - dep_graph_path.display(), - work_products_path.display(), - err)); + if all_files_exist { + debug!("reconcile_work_products: all files for {:?} exist", swp); + tcx.dep_graph.insert_previous_work_product(&swp.id, swp.work_product); + } else { + debug!("reconcile_work_products: some file for {:?} does not exist", swp); + delete_dirty_work_product(tcx, swp); + } } } } @@ -117,223 +99,18 @@ fn load_data(sess: &Session, path: &Path) -> Option> { None } -/// Check if a DepNode from the previous dep-graph refers to something that -/// still exists in the current compilation session. Only works for DepNode -/// variants that represent inputs (HIR and imported Metadata). -fn does_still_exist(tcx: TyCtxt, dep_node: &DepNode) -> bool { - match dep_node.kind { - DepKind::Hir | - DepKind::HirBody | - DepKind::MetaData => { - dep_node.extract_def_id(tcx).is_some() - } - _ => { - bug!("unexpected Input DepNode: {:?}", dep_node) - } - } -} - -/// Decode the dep graph and load the edges/nodes that are still clean -/// into `tcx.dep_graph`. -pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - incremental_hashes_map: &IncrementalHashesMap, - dep_graph_data: &[u8], - work_products_data: &[u8]) - -> Result<(), String> -{ - // Decode the list of work_products - let mut work_product_decoder = Decoder::new(work_products_data, 0); - let work_products = >::decode(&mut work_product_decoder)?; - - // Deserialize the directory and dep-graph. - let mut dep_graph_decoder = Decoder::new(dep_graph_data, 0); - let prev_commandline_args_hash = u64::decode(&mut dep_graph_decoder)?; - - if prev_commandline_args_hash != tcx.sess.opts.dep_tracking_hash() { - if tcx.sess.opts.debugging_opts.incremental_info { - eprintln!("incremental: completely ignoring cache because of \ - differing commandline arguments"); - } - // We can't reuse the cache, purge it. 
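The reconciliation loop above keeps a work product from the previous session only if every file it saved is still present in the incremental directory, and deletes it otherwise. A standalone sketch of that existence check (the `WorkProduct` struct here is a cut-down stand-in, not rustc's type):

```rust
use std::path::{Path, PathBuf};

// Cut-down stand-in for rustc's `WorkProduct`: just the files it saved.
struct WorkProduct {
    saved_files: Vec<PathBuf>,
}

// A previous work product is only reusable if every one of its saved
// files still exists under the incremental compilation directory.
fn all_files_exist(incr_dir: &Path, wp: &WorkProduct) -> bool {
    wp.saved_files.iter().all(|f| incr_dir.join(f).exists())
}

fn main() {
    let dir = std::env::temp_dir();
    let reusable = WorkProduct { saved_files: vec![] }; // trivially reusable
    let stale = WorkProduct {
        saved_files: vec![PathBuf::from("hopefully-no-such-object-file.o")],
    };
    assert!(all_files_exist(&dir, &reusable));
    assert!(!all_files_exist(&dir, &stale));
}
```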
- debug!("decode_dep_graph: differing commandline arg hashes"); - for swp in work_products { - delete_dirty_work_product(tcx, swp); - } - - // No need to do any further work - return Ok(()); - } - - let serialized_dep_graph = SerializedDepGraph::decode(&mut dep_graph_decoder)?; - - // Compute the set of nodes from the old graph where some input - // has changed or been removed. - let dirty_raw_nodes = initial_dirty_nodes(tcx, - incremental_hashes_map, - &serialized_dep_graph.nodes, - &serialized_dep_graph.hashes); - let dirty_raw_nodes = transitive_dirty_nodes(&serialized_dep_graph, - dirty_raw_nodes); - - // Recreate the edges in the graph that are still clean. - let mut clean_work_products = FxHashSet(); - let mut dirty_work_products = FxHashSet(); // incomplete; just used to suppress debug output - for (source, targets) in serialized_dep_graph.edge_list_indices.iter_enumerated() { - let target_begin = targets.0 as usize; - let target_end = targets.1 as usize; - - for &target in &serialized_dep_graph.edge_list_data[target_begin .. target_end] { - process_edge(tcx, - source, - target, - &serialized_dep_graph.nodes, - &dirty_raw_nodes, - &mut clean_work_products, - &mut dirty_work_products, - &work_products); - } - } - - // Recreate bootstrap outputs, which are outputs that have no incoming edges - // (and hence cannot be dirty). - for bootstrap_output in &serialized_dep_graph.bootstrap_outputs { - if let DepKind::WorkProduct = bootstrap_output.kind { - let wp_id = WorkProductId::from_fingerprint(bootstrap_output.hash); - clean_work_products.insert(wp_id); - } - - tcx.dep_graph.add_node_directly(*bootstrap_output); - } - - // Add in work-products that are still clean, and delete those that are - // dirty. - reconcile_work_products(tcx, work_products, &clean_work_products); - - dirty_clean::check_dirty_clean_annotations(tcx, - &serialized_dep_graph.nodes, - &dirty_raw_nodes); - - load_prev_metadata_hashes(tcx, - &mut *incremental_hashes_map.prev_metadata_hashes.borrow_mut()); - Ok(()) -} - -/// Computes which of the original set of def-ids are dirty. Stored in -/// a bit vector where the index is the DefPathIndex. 
-fn initial_dirty_nodes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - incremental_hashes_map: &IncrementalHashesMap, - nodes: &IndexVec, - serialized_hashes: &[(DepNodeIndex, Fingerprint)]) - -> DirtyNodes { - let mut hcx = HashContext::new(tcx, incremental_hashes_map); - let mut dirty_nodes = FxHashMap(); - - for &(dep_node_index, prev_hash) in serialized_hashes { - let dep_node = nodes[dep_node_index]; - if does_still_exist(tcx, &dep_node) { - let current_hash = hcx.hash(&dep_node).unwrap_or_else(|| { - bug!("Cannot find current ICH for input that still exists?") - }); - - if current_hash == prev_hash { - debug!("initial_dirty_nodes: {:?} is clean (hash={:?})", - dep_node, - current_hash); - continue; - } - - if tcx.sess.opts.debugging_opts.incremental_dump_hash { - println!("node {:?} is dirty as hash is {:?}, was {:?}", - dep_node, - current_hash, - prev_hash); - } - - debug!("initial_dirty_nodes: {:?} is dirty as hash is {:?}, was {:?}", - dep_node, - current_hash, - prev_hash); - } else { - if tcx.sess.opts.debugging_opts.incremental_dump_hash { - println!("node {:?} is dirty as it was removed", dep_node); - } - - debug!("initial_dirty_nodes: {:?} is dirty as it was removed", dep_node); - } - dirty_nodes.insert(dep_node_index, dep_node_index); - } - - dirty_nodes -} - -fn transitive_dirty_nodes(serialized_dep_graph: &SerializedDepGraph, - mut dirty_nodes: DirtyNodes) - -> DirtyNodes -{ - let mut stack: Vec<(DepNodeIndex, DepNodeIndex)> = vec![]; - stack.extend(dirty_nodes.iter().map(|(&s, &b)| (s, b))); - while let Some((source, blame)) = stack.pop() { - // we know the source is dirty (because of the node `blame`)... - debug_assert!(dirty_nodes.contains_key(&source)); - - // ...so we dirty all the targets (with the same blame) - for &target in serialized_dep_graph.edge_targets_from(source) { - if !dirty_nodes.contains_key(&target) { - dirty_nodes.insert(target, blame); - stack.push((target, blame)); - } - } - } - dirty_nodes -} - -/// Go through the list of work-products produced in the previous run. -/// Delete any whose nodes have been found to be dirty or which are -/// otherwise no longer applicable. 
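The removed `transitive_dirty_nodes` above is a plain worklist walk over the serialized graph's flattened edge list: `edge_list_indices` holds a `(start, end)` range per node into `edge_list_data`, as in the `SerializedDepGraph` struct deleted earlier. A self-contained sketch of both pieces, with `usize` indices standing in for `DepNodeIndex`:

```rust
use std::collections::HashMap;

// Flattened adjacency: `edge_list_indices[n]` is a (start, end) range into
// `edge_list_data`, which holds the targets of all edges leaving node `n`.
struct Graph {
    edge_list_indices: Vec<(u32, u32)>,
    edge_list_data: Vec<usize>,
}

impl Graph {
    fn edge_targets_from(&self, source: usize) -> &[usize] {
        let (start, end) = self.edge_list_indices[source];
        &self.edge_list_data[start as usize..end as usize]
    }
}

// Propagate dirtiness: every node reachable from an initially dirty node
// becomes dirty too, remembering which initial node it is "blamed" on.
fn transitive_dirty_nodes(graph: &Graph,
                          mut dirty: HashMap<usize, usize>)
                          -> HashMap<usize, usize> {
    let mut stack: Vec<(usize, usize)> = dirty.iter().map(|(&n, &b)| (n, b)).collect();
    while let Some((source, blame)) = stack.pop() {
        for &target in graph.edge_targets_from(source) {
            if !dirty.contains_key(&target) {
                dirty.insert(target, blame);
                stack.push((target, blame));
            }
        }
    }
    dirty
}

fn main() {
    // Edges 0 -> 1 -> 2, and node 3 is isolated.
    let graph = Graph {
        edge_list_indices: vec![(0, 1), (1, 2), (2, 2), (2, 2)],
        edge_list_data: vec![1, 2],
    };
    let initially_dirty: HashMap<usize, usize> = [(0, 0)].iter().cloned().collect();
    let dirty = transitive_dirty_nodes(&graph, initially_dirty);
    assert!(dirty.contains_key(&1) && dirty.contains_key(&2));
    assert!(!dirty.contains_key(&3));
}
```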
-fn reconcile_work_products<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - work_products: Vec, - clean_work_products: &FxHashSet) { - debug!("reconcile_work_products({:?})", work_products); - for swp in work_products { - if !clean_work_products.contains(&swp.id) { - debug!("reconcile_work_products: dep-node for {:?} is dirty", swp); - delete_dirty_work_product(tcx, swp); - } else { - let mut all_files_exist = true; - for &(_, ref file_name) in swp.work_product.saved_files.iter() { - let path = in_incr_comp_dir_sess(tcx.sess, file_name); - if !path.exists() { - all_files_exist = false; - - if tcx.sess.opts.debugging_opts.incremental_info { - eprintln!("incremental: could not find file for \ - up-to-date work product: {}", path.display()); - } - } - } - - if all_files_exist { - debug!("reconcile_work_products: all files for {:?} exist", swp); - tcx.dep_graph.insert_previous_work_product(&swp.id, swp.work_product); - } else { - debug!("reconcile_work_products: some file for {:?} does not exist", swp); - delete_dirty_work_product(tcx, swp); - } - } - } -} - fn delete_dirty_work_product(tcx: TyCtxt, swp: SerializedWorkProduct) { debug!("delete_dirty_work_product({:?})", swp); work_product::delete_workproduct_files(tcx.sess, &swp.work_product); } -fn load_prev_metadata_hashes(tcx: TyCtxt, - output: &mut FxHashMap) { +pub fn load_prev_metadata_hashes(tcx: TyCtxt) -> DefIdMap { + let mut output = DefIdMap(); + if !tcx.sess.opts.debugging_opts.query_dep_graph { // Previous metadata hashes are only needed for testing. - return + return output } debug!("load_prev_metadata_hashes() - Loading previous metadata hashes"); @@ -343,7 +120,7 @@ fn load_prev_metadata_hashes(tcx: TyCtxt, if !file_path.exists() { debug!("load_prev_metadata_hashes() - Couldn't find file containing \ hashes at `{}`", file_path.display()); - return + return output } debug!("load_prev_metadata_hashes() - File: {}", file_path.display()); @@ -353,12 +130,12 @@ fn load_prev_metadata_hashes(tcx: TyCtxt, Ok(None) => { debug!("load_prev_metadata_hashes() - File produced by incompatible \ compiler version: {}", file_path.display()); - return + return output } Err(err) => { debug!("load_prev_metadata_hashes() - Error reading file `{}`: {}", file_path.display(), err); - return + return output } }; @@ -382,78 +159,39 @@ fn load_prev_metadata_hashes(tcx: TyCtxt, debug!("load_prev_metadata_hashes() - successfully loaded {} hashes", serialized_hashes.index_map.len()); + + output } -fn process_edge<'a, 'tcx, 'edges>( - tcx: TyCtxt<'a, 'tcx, 'tcx>, - source: DepNodeIndex, - target: DepNodeIndex, - nodes: &IndexVec, - dirty_raw_nodes: &DirtyNodes, - clean_work_products: &mut FxHashSet, - dirty_work_products: &mut FxHashSet, - work_products: &[SerializedWorkProduct]) -{ - // If the target is dirty, skip the edge. If this is an edge - // that targets a work-product, we can print the blame - // information now. - if let Some(&blame) = dirty_raw_nodes.get(&target) { - let target = nodes[target]; - if let DepKind::WorkProduct = target.kind { - if tcx.sess.opts.debugging_opts.incremental_info { - let wp_id = WorkProductId::from_fingerprint(target.hash); - - if dirty_work_products.insert(wp_id) { - // Try to reconstruct the human-readable version of the - // DepNode. This cannot be done for things that where - // removed. 
- let blame = nodes[blame]; - let blame_str = if let Some(def_id) = blame.extract_def_id(tcx) { - format!("{:?}({})", - blame.kind, - tcx.def_path(def_id).to_string(tcx)) - } else { - format!("{:?}", blame) - }; - - let wp = work_products.iter().find(|swp| swp.id == wp_id).unwrap(); - - eprintln!("incremental: module {:?} is dirty because \ - {:?} changed or was removed", - wp.work_product.cgu_name, - blame_str); - } - } - } - return; +pub fn load_dep_graph(sess: &Session) -> PreviousDepGraph { + let empty = PreviousDepGraph::new(SerializedDepGraph::new()); + + if sess.opts.incremental.is_none() { + return empty } - // At this point we have asserted that the target is clean -- otherwise, we - // would have hit the return above. We can do some further consistency - // checks based on this fact: + if let Some(bytes) = load_data(sess, &dep_graph_path(sess)) { + let mut decoder = Decoder::new(&bytes, 0); + let prev_commandline_args_hash = u64::decode(&mut decoder) + .expect("Error reading commandline arg hash from cached dep-graph"); - // We should never have an edge where the target is clean but the source - // was dirty. Otherwise something was wrong with the dirtying pass above: - debug_assert!(!dirty_raw_nodes.contains_key(&source)); + if prev_commandline_args_hash != sess.opts.dep_tracking_hash() { + if sess.opts.debugging_opts.incremental_info { + eprintln!("incremental: completely ignoring cache because of \ + differing commandline arguments"); + } + // We can't reuse the cache, purge it. + debug!("load_dep_graph_new: differing commandline arg hashes"); - // We also never should encounter an edge going from a removed input to a - // clean target because removing the input would have dirtied the input - // node and transitively dirtied the target. 
- debug_assert!(match nodes[source].kind { - DepKind::Hir | DepKind::HirBody | DepKind::MetaData => { - does_still_exist(tcx, &nodes[source]) + // No need to do any further work + return empty } - _ => true, - }); - if !dirty_raw_nodes.contains_key(&target) { - let target = nodes[target]; - let source = nodes[source]; - tcx.dep_graph.add_edge_directly(source, target); + let dep_graph = SerializedDepGraph::decode(&mut decoder) + .expect("Error reading cached dep-graph"); - if let DepKind::WorkProduct = target.kind { - let wp_id = WorkProductId::from_fingerprint(target.hash); - clean_work_products.insert(wp_id); - } + PreviousDepGraph::new(dep_graph) + } else { + empty } } diff --git a/src/librustc_incremental/persist/mod.rs b/src/librustc_incremental/persist/mod.rs index 5c1582daa7..88d49e7aed 100644 --- a/src/librustc_incremental/persist/mod.rs +++ b/src/librustc_incremental/persist/mod.rs @@ -15,16 +15,17 @@ mod data; mod dirty_clean; mod fs; -mod hash; mod load; -mod preds; mod save; mod work_product; mod file_format; +pub use self::fs::prepare_session_directory; pub use self::fs::finalize_session_directory; pub use self::fs::in_incr_comp_dir; pub use self::load::load_dep_graph; +pub use self::load::dep_graph_tcx_init; pub use self::save::save_dep_graph; pub use self::save::save_work_products; pub use self::work_product::save_trans_partition; +pub use self::work_product::delete_workproduct_files; diff --git a/src/librustc_incremental/persist/preds/compress/README.md b/src/librustc_incremental/persist/preds/compress/README.md deleted file mode 100644 index d2aa245c7c..0000000000 --- a/src/librustc_incremental/persist/preds/compress/README.md +++ /dev/null @@ -1,48 +0,0 @@ -Graph compression - -The graph compression algorithm is intended to remove and minimize the -size of the dependency graph so it can be saved, while preserving -everything we care about. In particular, given a set of input/output -nodes in the graph (which must be disjoint), we ensure that the set of -input nodes that can reach a given output node does not change, -although the intermediate nodes may change in various ways. In short, -the output nodes are intended to be the ones whose existence we care -about when we start up, because they have some associated data that we -will try to re-use (and hence if they are dirty, we have to throw that -data away). The other intermediate nodes don't really matter so much. - -### Overview - -The algorithm works as follows: - -1. Do a single walk of the graph to construct a DAG - - in this walk, we identify and unify all cycles, electing a representative "head" node - - this is done using the union-find implementation - - this code is found in the `classify` module -2. The result from this walk is a `Dag`: - - the set of SCCs, represented by the union-find table - - a set of edges in the new DAG, represented by: - - a vector of parent nodes for each child node - - a vector of cross-edges - - once these are canonicalized, some of these edges may turn out to be cyclic edges - (i.e., an edge A -> A where A is the head of some SCC) -3. We pass this `Dag` into the construct code, which then creates a - new graph. This graph has a smaller set of indices which includes - *at least* the inputs/outputs from the original graph, but may have - other nodes as well, if keeping them reduces the overall size of - the graph. - - This code is found in the `construct` module. - -### Some notes - -The input graph is assumed to have *read-by* edges. 
i.e., `A -> B` -means that the task B reads data from A. But the DAG defined by -classify is expressed in terms of *reads-from* edges, which are the -inverse. So `A -> B` is the same as `B -rf-> A`. *reads-from* edges -are more natural since we want to walk from the outputs to the inputs, -effectively. When we construct the final graph, we reverse these edges -back into the *read-by* edges common elsewhere. - - - - diff --git a/src/librustc_incremental/persist/preds/compress/classify/mod.rs b/src/librustc_incremental/persist/preds/compress/classify/mod.rs deleted file mode 100644 index aa29afd543..0000000000 --- a/src/librustc_incremental/persist/preds/compress/classify/mod.rs +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! First phase. Detect cycles and cross-edges. - -use super::*; - -#[cfg(test)] -mod test; - -pub struct Classify<'a, 'g: 'a, N: 'g, I: 'a, O: 'a> - where N: Debug + Clone + 'g, - I: Fn(&N) -> bool, - O: Fn(&N) -> bool, -{ - r: &'a mut GraphReduce<'g, N, I, O>, - stack: Vec, - colors: Vec, - dag: Dag, -} - -#[derive(Copy, Clone, Debug, PartialEq)] -enum Color { - // not yet visited - White, - - // visiting; usize is index on stack - Grey(usize), - - // finished visiting - Black, -} - -impl<'a, 'g, N, I, O> Classify<'a, 'g, N, I, O> - where N: Debug + Clone + 'g, - I: Fn(&N) -> bool, - O: Fn(&N) -> bool, -{ - pub(super) fn new(r: &'a mut GraphReduce<'g, N, I, O>) -> Self { - Classify { - r, - colors: vec![Color::White; r.in_graph.len_nodes()], - stack: vec![], - dag: Dag { - parents: (0..r.in_graph.len_nodes()).map(|i| NodeIndex(i)).collect(), - cross_edges: vec![], - input_nodes: vec![], - output_nodes: vec![], - }, - } - } - - pub(super) fn walk(mut self) -> Dag { - for (index, node) in self.r.in_graph.all_nodes().iter().enumerate() { - if (self.r.is_output)(&node.data) { - let index = NodeIndex(index); - self.dag.output_nodes.push(index); - match self.colors[index.0] { - Color::White => self.open(index), - Color::Grey(_) => panic!("grey node but have not yet started a walk"), - Color::Black => (), // already visited, skip - } - } - } - - // At this point we've identifed all the cycles, and we've - // constructed a spanning tree over the original graph - // (encoded in `self.parents`) as well as a list of - // cross-edges that reflect additional edges from the DAG. - // - // If we converted each node to its `cycle-head` (a - // representative choice from each SCC, basically) and then - // take the union of `self.parents` and `self.cross_edges` - // (after canonicalization), that is basically our DAG. - // - // Note that both of those may well contain trivial `X -rf-> X` - // cycle edges after canonicalization, though. e.g., if you - // have a graph `{A -rf-> B, B -rf-> A}`, we will have unioned A and - // B, but A will also be B's parent (or vice versa), and hence - // when we canonicalize the parent edge it would become `A -rf-> - // A` (or `B -rf-> B`). 
- self.dag - } - - fn open(&mut self, node: NodeIndex) { - let index = self.stack.len(); - self.stack.push(node); - self.colors[node.0] = Color::Grey(index); - for child in self.r.inputs(node) { - self.walk_edge(node, child); - } - self.stack.pop().unwrap(); - self.colors[node.0] = Color::Black; - - if (self.r.is_input)(&self.r.in_graph.node_data(node)) { - // base inputs should have no inputs - assert!(self.r.inputs(node).next().is_none()); - debug!("input: `{:?}`", self.r.in_graph.node_data(node)); - self.dag.input_nodes.push(node); - } - } - - fn walk_edge(&mut self, parent: NodeIndex, child: NodeIndex) { - debug!("walk_edge: {:?} -rf-> {:?}, {:?}", - self.r.in_graph.node_data(parent), - self.r.in_graph.node_data(child), - self.colors[child.0]); - - // Ignore self-edges, just in case they exist. - if child == parent { - return; - } - - match self.colors[child.0] { - Color::White => { - // Not yet visited this node; start walking it. - assert_eq!(self.dag.parents[child.0], child); - self.dag.parents[child.0] = parent; - self.open(child); - } - - Color::Grey(stack_index) => { - // Back-edge; unify everything on stack between here and `stack_index` - // since we are all participating in a cycle - assert!(self.stack[stack_index] == child); - - for &n in &self.stack[stack_index..] { - debug!("cycle `{:?}` and `{:?}`", - self.r.in_graph.node_data(n), - self.r.in_graph.node_data(parent)); - self.r.mark_cycle(n, parent); - } - } - - Color::Black => { - // Cross-edge, record and ignore - self.dag.cross_edges.push((parent, child)); - debug!("cross-edge `{:?} -rf-> {:?}`", - self.r.in_graph.node_data(parent), - self.r.in_graph.node_data(child)); - } - } - } -} diff --git a/src/librustc_incremental/persist/preds/compress/classify/test.rs b/src/librustc_incremental/persist/preds/compress/classify/test.rs deleted file mode 100644 index ca26f714a2..0000000000 --- a/src/librustc_incremental/persist/preds/compress/classify/test.rs +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use super::*; - -#[test] -fn detect_cycles() { - let (graph, nodes) = graph! { - A -> C0, - A -> C1, - B -> C1, - C0 -> C1, - C1 -> C0, - C0 -> D, - C1 -> E, - }; - let inputs = ["A", "B"]; - let outputs = ["D", "E"]; - let mut reduce = GraphReduce::new(&graph, |n| inputs.contains(n), |n| outputs.contains(n)); - Classify::new(&mut reduce).walk(); - - assert!(!reduce.in_cycle(nodes("A"), nodes("C0"))); - assert!(!reduce.in_cycle(nodes("B"), nodes("C0"))); - assert!(reduce.in_cycle(nodes("C0"), nodes("C1"))); - assert!(!reduce.in_cycle(nodes("D"), nodes("C0"))); - assert!(!reduce.in_cycle(nodes("E"), nodes("C0"))); - assert!(!reduce.in_cycle(nodes("E"), nodes("A"))); -} - -/// Regr test for a bug where we forgot to pop nodes off of the stack -/// as we were walking. In this case, because edges are pushed to the front -/// of the list, we would visit OUT, then A, then IN, and then close IN (but forget -/// to POP. Then visit B, C, and then A, which would mark everything from A to C as -/// cycle. But since we failed to pop IN, the stack was `OUT, A, IN, B, C` so that -/// marked C and IN as being in a cycle. -#[test] -fn edge_order1() { - let (graph, nodes) = graph! 
{ - A -> C, - C -> B, - B -> A, - IN -> B, - IN -> A, - A -> OUT, - }; - let inputs = ["IN"]; - let outputs = ["OUT"]; - let mut reduce = GraphReduce::new(&graph, |n| inputs.contains(n), |n| outputs.contains(n)); - Classify::new(&mut reduce).walk(); - - // A, B, and C are mutually in a cycle, but IN/OUT are not participating. - let names = ["A", "B", "C", "IN", "OUT"]; - let cycle_names = ["A", "B", "C"]; - for &i in &names { - for &j in names.iter().filter(|&&j| j != i) { - let in_cycle = cycle_names.contains(&i) && cycle_names.contains(&j); - assert_eq!(reduce.in_cycle(nodes(i), nodes(j)), in_cycle, - "cycle status for nodes {} and {} is incorrect", - i, j); - } - } -} - -/// Same as `edge_order1` but in reverse order so as to detect a failure -/// if we were to enqueue edges onto end of list instead. -#[test] -fn edge_order2() { - let (graph, nodes) = graph! { - A -> OUT, - IN -> A, - IN -> B, - B -> A, - C -> B, - A -> C, - }; - let inputs = ["IN"]; - let outputs = ["OUT"]; - let mut reduce = GraphReduce::new(&graph, |n| inputs.contains(n), |n| outputs.contains(n)); - Classify::new(&mut reduce).walk(); - - assert!(reduce.in_cycle(nodes("B"), nodes("C"))); - - assert!(!reduce.in_cycle(nodes("IN"), nodes("A"))); - assert!(!reduce.in_cycle(nodes("IN"), nodes("B"))); - assert!(!reduce.in_cycle(nodes("IN"), nodes("C"))); - assert!(!reduce.in_cycle(nodes("IN"), nodes("OUT"))); -} diff --git a/src/librustc_incremental/persist/preds/compress/construct.rs b/src/librustc_incremental/persist/preds/compress/construct.rs deleted file mode 100644 index 0ad8d17891..0000000000 --- a/src/librustc_incremental/persist/preds/compress/construct.rs +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Second phase. Construct new graph. The previous phase has -//! converted the input graph into a DAG by detecting and unifying -//! cycles. It provides us with the following (which is a -//! representation of the DAG): -//! -//! - SCCs, in the form of a union-find repr that can convert each node to -//! its *cycle head* (an arbitrarily chosen representative from the cycle) -//! - a vector of *leaf nodes*, just a convenience -//! - a vector of *parents* for each node (in some cases, nodes have no parents, -//! or their parent is another member of same cycle; in that case, the vector -//! will be stored `v[i] == i`, after canonicalization) -//! - a vector of *cross edges*, meaning add'l edges between graphs nodes beyond -//! the parents. - -use rustc_data_structures::fx::FxHashMap; - -use super::*; - -pub(super) fn construct_graph<'g, N, I, O>(r: &mut GraphReduce<'g, N, I, O>, dag: Dag) - -> Reduction<'g, N> - where N: Debug + Clone, I: Fn(&N) -> bool, O: Fn(&N) -> bool, -{ - let Dag { parents: old_parents, input_nodes, output_nodes, cross_edges } = dag; - let in_graph = r.in_graph; - - debug!("construct_graph"); - - // Create a canonical list of edges; this includes both parent and - // cross-edges. We store this in `(target -> Vec)` form. - // We call the first edge to any given target its "parent". 
- let mut edges = FxHashMap(); - let old_parent_edges = old_parents.iter().cloned().zip((0..).map(NodeIndex)); - for (source, target) in old_parent_edges.chain(cross_edges) { - debug!("original edge `{:?} -rf-> {:?}`", - in_graph.node_data(source), - in_graph.node_data(target)); - let source = r.cycle_head(source); - let target = r.cycle_head(target); - if source != target { - let v = edges.entry(target).or_insert(vec![]); - if !v.contains(&source) { - debug!("edge `{:?} -rf-> {:?}` is edge #{} with that target", - in_graph.node_data(source), - in_graph.node_data(target), - v.len()); - v.push(source); - } - } - } - let parent = |ni: NodeIndex| -> NodeIndex { - edges[&ni][0] - }; - - // `retain_map`: a map of those nodes that we will want to - // *retain* in the ultimate graph; the key is the node index in - // the old graph, the value is the node index in the new - // graph. These are nodes in the following categories: - // - // - inputs - // - work-products - // - targets of a cross-edge - // - // The first two categories hopefully make sense. We want the - // inputs so we can compare hashes later. We want the - // work-products so we can tell precisely when a given - // work-product is invalidated. But the last one isn't strictly - // needed; we keep cross-target edges so as to minimize the total - // graph size. - // - // Consider a graph like: - // - // WP0 -rf-> Y - // WP1 -rf-> Y - // Y -rf-> INPUT0 - // Y -rf-> INPUT1 - // Y -rf-> INPUT2 - // Y -rf-> INPUT3 - // - // Now if we were to remove Y, we would have a total of 8 edges: both WP0 and WP1 - // depend on INPUT0...INPUT3. As it is, we have 6 edges. - // - // NB: The current rules are not optimal. For example, given this - // input graph: - // - // OUT0 -rf-> X - // OUT1 -rf-> X - // X -rf -> INPUT0 - // - // we will preserve X because it has two "consumers" (OUT0 and - // OUT1). We could as easily skip it, but we'd have to tally up - // the number of input nodes that it (transitively) reaches, and I - // was too lazy to do so. This is the unit test `suboptimal`. - - let mut retain_map = FxHashMap(); - let mut new_graph = Graph::new(); - - { - // Start by adding start-nodes and inputs. - let retained_nodes = output_nodes.iter().chain(&input_nodes).map(|&n| r.cycle_head(n)); - - // Next add in targets of cross-edges. Due to the canonicalization, - // some of these may be self-edges or may may duplicate the parent - // edges, so ignore those. - let retained_nodes = retained_nodes.chain( - edges.iter() - .filter(|&(_, ref sources)| sources.len() > 1) - .map(|(&target, _)| target)); - - // Now create the new graph, adding in the entries from the map. - for n in retained_nodes { - retain_map.entry(n) - .or_insert_with(|| { - let data = in_graph.node_data(n); - debug!("retaining node `{:?}`", data); - new_graph.add_node(data) - }); - } - } - - // Given a cycle-head `ni`, converts it to the closest parent that has - // been retained in the output graph. - let retained_parent = |mut ni: NodeIndex| -> NodeIndex { - loop { - debug!("retained_parent({:?})", in_graph.node_data(ni)); - match retain_map.get(&ni) { - Some(&v) => return v, - None => ni = parent(ni), - } - } - }; - - // Now add in the edges into the graph. 
- for (&target, sources) in &edges { - if let Some(&r_target) = retain_map.get(&target) { - debug!("adding edges that target `{:?}`", in_graph.node_data(target)); - for &source in sources { - debug!("new edge `{:?} -rf-> {:?}`", - in_graph.node_data(source), - in_graph.node_data(target)); - let r_source = retained_parent(source); - - // NB. In the input graph, we have `a -> b` if b - // **reads from** a. But in the terminology of this - // code, we would describe that edge as `b -> a`, - // because we have edges *from* outputs *to* inputs. - // Therefore, when we create our new graph, we have to - // reverse the edge. - new_graph.add_edge(r_target, r_source, ()); - } - } else { - assert_eq!(sources.len(), 1); - } - } - - // One complication. In some cases, output nodes *may* participate in - // cycles. An example: - // - // [HIR0] [HIR1] - // | | - // v v - // TypeckClosureBody(X) -> ItemSignature(X::SomeClosureInX) - // | ^ | | - // | +-------------------------+ | - // | | - // v v - // Foo Bar - // - // In these cases, the output node may not wind up as the head - // of the cycle, in which case it would be absent from the - // final graph. We don't wish this to happen, therefore we go - // over the list of output nodes again and check for any that - // are not their own cycle-head. If we find such a node, we - // add it to the graph now with an edge from the cycle head. - // So the graph above could get transformed into this: - // - // [HIR0, HIR1] - // | - // v - // TypeckClosureBody(X) ItemSignature(X::SomeClosureInX) - // ^ | | - // +-------------------------+ | - // v - // [Foo, Bar] - // - // (Note that all the edges here are "read-by" edges, not - // "reads-from" edges.) - for &output_node in &output_nodes { - let head = r.cycle_head(output_node); - if output_node == head { - assert!(retain_map.contains_key(&output_node)); - } else { - assert!(!retain_map.contains_key(&output_node)); - let output_data = in_graph.node_data(output_node); - let new_node = new_graph.add_node(output_data); - let new_head_node = retain_map[&head]; - new_graph.add_edge(new_head_node, new_node, ()); - } - } - - // Finally, prepare a list of the input node indices as found in - // the new graph. Note that since all input nodes are leaves in - // the graph, they should never participate in a cycle. - let input_nodes = - input_nodes.iter() - .map(|&n| { - assert_eq!(r.cycle_head(n), n, "input node participating in a cycle"); - retain_map[&n] - }) - .collect(); - - Reduction { graph: new_graph, input_nodes: input_nodes } -} - diff --git a/src/librustc_incremental/persist/preds/compress/dag_id.rs b/src/librustc_incremental/persist/preds/compress/dag_id.rs deleted file mode 100644 index a286862e95..0000000000 --- a/src/librustc_incremental/persist/preds/compress/dag_id.rs +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use rustc_data_structures::graph::NodeIndex; -use rustc_data_structures::unify::UnifyKey; - -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub struct DagId { - index: u32, -} - -impl DagId { - pub fn from_input_index(n: NodeIndex) -> Self { - DagId { index: n.0 as u32 } - } - - pub fn as_input_index(&self) -> NodeIndex { - NodeIndex(self.index as usize) - } -} - -impl UnifyKey for DagId { - type Value = (); - - fn index(&self) -> u32 { - self.index - } - - fn from_index(u: u32) -> Self { - DagId { index: u } - } - - fn tag(_: Option) -> &'static str { - "DagId" - } -} diff --git a/src/librustc_incremental/persist/preds/compress/mod.rs b/src/librustc_incremental/persist/preds/compress/mod.rs deleted file mode 100644 index 974a2221a4..0000000000 --- a/src/librustc_incremental/persist/preds/compress/mod.rs +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Graph compression. See `README.md`. - -use rustc_data_structures::graph::{Graph, NodeIndex}; -use rustc_data_structures::unify::UnificationTable; -use std::fmt::Debug; - -#[cfg(test)] -#[macro_use] -mod test_macro; - -mod construct; - -mod classify; -use self::classify::Classify; - -mod dag_id; -use self::dag_id::DagId; - -#[cfg(test)] -mod test; - -pub fn reduce_graph(graph: &Graph, - is_input: I, - is_output: O) -> Reduction - where N: Debug + Clone, - I: Fn(&N) -> bool, - O: Fn(&N) -> bool, -{ - GraphReduce::new(graph, is_input, is_output).compute() -} - -pub struct Reduction<'q, N> where N: 'q + Debug + Clone { - pub graph: Graph<&'q N, ()>, - pub input_nodes: Vec, -} - -struct GraphReduce<'q, N, I, O> - where N: 'q + Debug + Clone, - I: Fn(&N) -> bool, - O: Fn(&N) -> bool, -{ - in_graph: &'q Graph, - unify: UnificationTable, - is_input: I, - is_output: O, -} - -struct Dag { - // The "parent" of a node is the node which reached it during the - // initial DFS. To encode the case of "no parent" (i.e., for the - // roots of the walk), we make `parents[i] == i` to start, which - // turns out be convenient. - parents: Vec, - - // Additional edges beyond the parents. 
- cross_edges: Vec<(NodeIndex, NodeIndex)>, - - // Nodes which we found that are considered "outputs" - output_nodes: Vec, - - // Nodes which we found that are considered "inputs" - input_nodes: Vec, -} - -#[derive(Copy, Clone, PartialEq, Eq, Hash)] -struct DagNode { - in_index: NodeIndex -} - -impl<'q, N, I, O> GraphReduce<'q, N, I, O> - where N: Debug + Clone, - I: Fn(&N) -> bool, - O: Fn(&N) -> bool, -{ - fn new(in_graph: &'q Graph, is_input: I, is_output: O) -> Self { - let mut unify = UnificationTable::new(); - - // create a set of unification keys whose indices - // correspond to the indices from the input graph - for i in 0..in_graph.len_nodes() { - let k = unify.new_key(()); - assert!(k == DagId::from_input_index(NodeIndex(i))); - } - - GraphReduce { in_graph, unify, is_input, is_output } - } - - fn compute(mut self) -> Reduction<'q, N> { - let dag = Classify::new(&mut self).walk(); - construct::construct_graph(&mut self, dag) - } - - fn inputs(&self, in_node: NodeIndex) -> impl Iterator + 'q { - self.in_graph.predecessor_nodes(in_node) - } - - fn mark_cycle(&mut self, in_node1: NodeIndex, in_node2: NodeIndex) { - let dag_id1 = DagId::from_input_index(in_node1); - let dag_id2 = DagId::from_input_index(in_node2); - self.unify.union(dag_id1, dag_id2); - } - - /// Convert a dag-id into its cycle head representative. This will - /// be a no-op unless `in_node` participates in a cycle, in which - /// case a distinct node *may* be returned. - fn cycle_head(&mut self, in_node: NodeIndex) -> NodeIndex { - let i = DagId::from_input_index(in_node); - self.unify.find(i).as_input_index() - } - - #[cfg(test)] - fn in_cycle(&mut self, ni1: NodeIndex, ni2: NodeIndex) -> bool { - self.cycle_head(ni1) == self.cycle_head(ni2) - } -} diff --git a/src/librustc_incremental/persist/preds/compress/test.rs b/src/librustc_incremental/persist/preds/compress/test.rs deleted file mode 100644 index 1c5130845a..0000000000 --- a/src/librustc_incremental/persist/preds/compress/test.rs +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use super::*; - -fn reduce(graph: &Graph<&'static str, ()>, - inputs: &[&'static str], - outputs: &[&'static str], - expected: &[&'static str]) -{ - let reduce = GraphReduce::new(&graph, - |n| inputs.contains(n), - |n| outputs.contains(n)); - let result = reduce.compute(); - let mut edges: Vec = - result.graph - .all_edges() - .iter() - .map(|edge| format!("{} -> {}", - result.graph.node_data(edge.source()), - result.graph.node_data(edge.target()))) - .collect(); - edges.sort(); - println!("{:#?}", edges); - assert_eq!(edges.len(), expected.len()); - for (expected, actual) in expected.iter().zip(&edges) { - assert_eq!(expected, actual); - } -} - -#[test] -fn test1() { - // +---------------+ - // | | - // | +--------|------+ - // | | v v - // [A] -> [C0] -> [C1] [D] - // [ ] <- [ ] -> [E] - // ^ - // [B] -------------+ - let (graph, _nodes) = graph! 
{ - A -> C0, - A -> C1, - B -> C1, - C0 -> C1, - C1 -> C0, - C0 -> D, - C1 -> E, - }; - - // [A] -> [C1] -> [D] - // [B] -> [ ] -> [E] - reduce(&graph, &["A", "B"], &["D", "E"], &[ - "A -> C1", - "B -> C1", - "C1 -> D", - "C1 -> E", - ]); -} - -#[test] -fn test2() { - // +---------------+ - // | | - // | +--------|------+ - // | | v v - // [A] -> [C0] -> [C1] [D] -> [E] - // [ ] <- [ ] - // ^ - // [B] -------------+ - let (graph, _nodes) = graph! { - A -> C0, - A -> C1, - B -> C1, - C0 -> C1, - C1 -> C0, - C0 -> D, - D -> E, - }; - - // [A] -> [D] -> [E] - // [B] -> [ ] - reduce(&graph, &["A", "B"], &["D", "E"], &[ - "A -> D", - "B -> D", - "D -> E", - ]); -} - -#[test] -fn test2b() { - // Variant on test2 in which [B] is not - // considered an input. - let (graph, _nodes) = graph! { - A -> C0, - A -> C1, - B -> C1, - C0 -> C1, - C1 -> C0, - C0 -> D, - D -> E, - }; - - // [A] -> [D] -> [E] - reduce(&graph, &["A"], &["D", "E"], &[ - "A -> D", - "D -> E", - ]); -} - -#[test] -fn test3() { - - // Edges going *downwards*, so 0, 1 and 2 are inputs, - // while 7, 8, and 9 are outputs. - // - // 0 1 2 - // | \ / - // 3---+ | - // | | | - // | | | - // 4 5 6 - // \ / \ / \ - // | | | - // 7 8 9 - // - // Here the end result removes node 4, instead encoding an edge - // from n3 -> n7, but keeps nodes 5 and 6, as they are common - // inputs to nodes 8/9. - - let (graph, _nodes) = graph! { - n0 -> n3, - n3 -> n4, - n3 -> n5, - n4 -> n7, - n5 -> n7, - n5 -> n8, - n1 -> n6, - n2 -> n6, - n6 -> n8, - n6 -> n9, - }; - - reduce(&graph, &["n0", "n1", "n2"], &["n7", "n8", "n9"], &[ - "n0 -> n3", - "n1 -> n6", - "n2 -> n6", - "n3 -> n5", - "n3 -> n7", - "n5 -> n7", - "n5 -> n8", - "n6 -> n8", - "n6 -> n9" - ]); -} - -#[test] -fn test_cached_dfs_cyclic() { - - // 0 1 <---- 2 3 - // ^ | ^ ^ - // | v | | - // 4 ----> 5 ----> 6 ----> 7 - // ^ ^ ^ ^ - // | | | | - // 8 9 10 11 - - let (graph, _nodes) = graph! { - // edges from above diagram, in columns, top-to-bottom: - n4 -> n0, - n8 -> n4, - n4 -> n5, - n1 -> n5, - n9 -> n5, - n2 -> n1, - n5 -> n6, - n6 -> n2, - n10 -> n6, - n6 -> n7, - n7 -> n3, - n11 -> n7, - }; - - // 0 1 2 3 - // ^ ^ / ^ - // | |/ | - // 4 ----> 5 --------------+ - // ^ ^ \ | - // | | \ | - // 8 9 10 11 - - reduce(&graph, &["n8", "n9", "n10", "n11"], &["n0", "n1", "n2", "n3"], &[ - "n10 -> n5", - "n11 -> n3", - "n4 -> n0", - "n4 -> n5", - "n5 -> n1", - "n5 -> n2", - "n5 -> n3", - "n8 -> n4", - "n9 -> n5" - ]); -} - -/// Demonstrates the case where we don't reduce as much as we could. -#[test] -fn suboptimal() { - let (graph, _nodes) = graph! { - INPUT0 -> X, - X -> OUTPUT0, - X -> OUTPUT1, - }; - - reduce(&graph, &["INPUT0"], &["OUTPUT0", "OUTPUT1"], &[ - "INPUT0 -> X", - "X -> OUTPUT0", - "X -> OUTPUT1" - ]); -} - -#[test] -fn test_cycle_output() { - // +---------------+ - // | | - // | +--------|------+ - // | | v v - // [A] -> [C0] <-> [C1] <- [D] - // +----> [E] - // ^ - // [B] ----------------- ---+ - let (graph, _nodes) = graph! 
{ - A -> C0, - A -> C1, - B -> E, - C0 -> C1, - C1 -> C0, - C0 -> D, - C1 -> E, - D -> C1, - }; - - // [A] -> [C0] --> [D] - // +----> [E] - // ^ - // [B] -------------+ - reduce(&graph, &["A", "B"], &["D", "E"], &[ - "A -> C0", - "B -> E", - "C0 -> D", - "C0 -> E", - ]); -} diff --git a/src/librustc_incremental/persist/preds/compress/test_macro.rs b/src/librustc_incremental/persist/preds/compress/test_macro.rs deleted file mode 100644 index 044b143e30..0000000000 --- a/src/librustc_incremental/persist/preds/compress/test_macro.rs +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -macro_rules! graph { - ($( $source:ident -> $target:ident, )*) => { - { - use $crate::rustc_data_structures::graph::{Graph, NodeIndex}; - use $crate::rustc_data_structures::fx::FxHashMap; - - let mut graph = Graph::new(); - let mut nodes: FxHashMap<&'static str, NodeIndex> = FxHashMap(); - - for &name in &[ $(stringify!($source), stringify!($target)),* ] { - let name: &'static str = name; - nodes.entry(name) - .or_insert_with(|| graph.add_node(name)); - } - - $( - { - let source = nodes[&stringify!($source)]; - let target = nodes[&stringify!($target)]; - graph.add_edge(source, target, ()); - } - )* - - let f = move |name: &'static str| -> NodeIndex { nodes[&name] }; - - (graph, f) - } - } -} diff --git a/src/librustc_incremental/persist/preds/mod.rs b/src/librustc_incremental/persist/preds/mod.rs deleted file mode 100644 index 5483134523..0000000000 --- a/src/librustc_incremental/persist/preds/mod.rs +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use rustc::dep_graph::{DepGraphQuery, DepNode, DepKind}; -use rustc::ich::Fingerprint; -use rustc_data_structures::fx::FxHashMap; -use rustc_data_structures::graph::{Graph, NodeIndex}; - -use super::hash::*; - -mod compress; - -/// A data-structure that makes it easy to enumerate the hashable -/// predecessors of any given dep-node. -pub struct Predecessors<'query> { - // A reduced version of the input graph that contains fewer nodes. - // This is intended to keep all of the base inputs (i.e., HIR - // nodes) and all of the "work-products" we may care about - // later. Other nodes may be retained if it keeps the overall size - // of the graph down. - pub reduced_graph: Graph<&'query DepNode, ()>, - - // These are output nodes that have no incoming edges. We have to - // track these specially because, when we load the data back up - // again, we want to make sure and recreate these nodes (we want - // to recreate the nodes where all incoming edges are clean; but - // since we ordinarily just serialize edges, we wind up just - // forgetting that bootstrap outputs even exist in that case.) - pub bootstrap_outputs: Vec<&'query DepNode>, - - // For the inputs (hir/foreign-metadata), we include hashes. 
- pub hashes: FxHashMap<&'query DepNode, Fingerprint>, -} - -impl<'q> Predecessors<'q> { - pub fn new(query: &'q DepGraphQuery, hcx: &mut HashContext) -> Self { - let tcx = hcx.tcx; - - // Find the set of "start nodes". These are nodes that we will - // possibly query later. - let is_output = |node: &DepNode| -> bool { - match node.kind { - DepKind::WorkProduct => true, - DepKind::MetaData => { - // We do *not* create dep-nodes for the current crate's - // metadata anymore, just for metadata that we import/read - // from other crates. - debug_assert!(!node.extract_def_id(tcx).unwrap().is_local()); - false - } - // if -Z query-dep-graph is passed, save more extended data - // to enable better unit testing - DepKind::TypeckTables => tcx.sess.opts.debugging_opts.query_dep_graph, - - _ => false, - } - }; - - // Reduce the graph to the most important nodes. - let compress::Reduction { graph, input_nodes } = - compress::reduce_graph(&query.graph, - |n| HashContext::is_hashable(tcx, n), - |n| is_output(n)); - - let mut hashes = FxHashMap(); - for input_index in input_nodes { - let input = *graph.node_data(input_index); - debug!("computing hash for input node `{:?}`", input); - hashes.entry(input) - .or_insert_with(|| hcx.hash(input).unwrap()); - } - - if tcx.sess.opts.debugging_opts.query_dep_graph { - // Not all inputs might have been reachable from an output node, - // but we still want their hash for our unit tests. - let hir_nodes = query.graph.all_nodes().iter().filter_map(|node| { - match node.data.kind { - DepKind::Hir => Some(&node.data), - _ => None, - } - }); - - for node in hir_nodes { - hashes.entry(node) - .or_insert_with(|| hcx.hash(node).unwrap()); - } - } - - let bootstrap_outputs: Vec<&'q DepNode> = - (0 .. graph.len_nodes()) - .map(NodeIndex) - .filter(|&n| graph.incoming_edges(n).next().is_none()) - .map(|n| *graph.node_data(n)) - .filter(|n| is_output(n)) - .collect(); - - Predecessors { - reduced_graph: graph, - bootstrap_outputs, - hashes, - } - } -} diff --git a/src/librustc_incremental/persist/save.rs b/src/librustc_incremental/persist/save.rs index 339e2bdc15..4919870fcd 100644 --- a/src/librustc_incremental/persist/save.rs +++ b/src/librustc_incremental/persist/save.rs @@ -8,33 +8,31 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
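[Editor's note: the deleted Predecessors::new above keeps, as "bootstrap outputs", those output nodes of the reduced graph that have no incoming edges. The following is a hedged sketch of that selection over a plain adjacency list; the graph representation and the `is_output` flags are simplifications, not the compiler's Graph/DepNode types.]

    // Keep the outputs that nothing else points at, as in the deleted
    // bootstrap_outputs computation; adjacency lists stand in for the
    // compiler's Graph and its incoming_edges() query.
    fn bootstrap_outputs(successors: &[Vec<usize>], is_output: &[bool]) -> Vec<usize> {
        let mut has_incoming = vec![false; successors.len()];
        for targets in successors {
            for &t in targets {
                has_incoming[t] = true;
            }
        }
        (0..successors.len())
            .filter(|&i| is_output[i] && !has_incoming[i])
            .collect()
    }

    fn main() {
        // 0 -> 1; nodes 0 and 2 are outputs, and neither has an incoming edge.
        let graph = vec![vec![1], vec![], vec![]];
        assert_eq!(bootstrap_outputs(&graph, &[true, false, true]), vec![0, 2]);
    }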
-use rustc::dep_graph::DepNode; +use rustc::dep_graph::DepGraph; use rustc::hir::def_id::DefId; use rustc::hir::svh::Svh; use rustc::ich::Fingerprint; use rustc::middle::cstore::EncodedMetadataHashes; use rustc::session::Session; use rustc::ty::TyCtxt; +use rustc::util::common::time; +use rustc::util::nodemap::DefIdMap; use rustc_data_structures::fx::FxHashMap; -use rustc_data_structures::graph; -use rustc_data_structures::indexed_vec::IndexVec; use rustc_serialize::Encodable as RustcEncodable; use rustc_serialize::opaque::Encoder; use std::io::{self, Cursor, Write}; use std::fs::{self, File}; use std::path::PathBuf; -use IncrementalHashesMap; use super::data::*; -use super::hash::*; -use super::preds::*; use super::fs::*; use super::dirty_clean; use super::file_format; use super::work_product; +use super::load::load_prev_metadata_hashes; + pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - incremental_hashes_map: IncrementalHashesMap, metadata_hashes: &EncodedMetadataHashes, svh: Svh) { debug!("save_dep_graph()"); @@ -44,20 +42,16 @@ pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, return; } - let query = tcx.dep_graph.query(); - - if tcx.sess.opts.debugging_opts.incremental_info { - eprintln!("incremental: {} nodes in dep-graph", query.graph.len_nodes()); - eprintln!("incremental: {} edges in dep-graph", query.graph.len_edges()); - } + // We load the previous metadata hashes now before overwriting the file + // (if we need them for testing). + let prev_metadata_hashes = if tcx.sess.opts.debugging_opts.query_dep_graph { + load_prev_metadata_hashes(tcx) + } else { + DefIdMap() + }; - let mut hcx = HashContext::new(tcx, &incremental_hashes_map); - let preds = Predecessors::new(&query, &mut hcx); let mut current_metadata_hashes = FxHashMap(); - // IMPORTANT: We are saving the metadata hashes *before* the dep-graph, - // since metadata-encoding might add new entries to the - // DefIdDirectory (which is saved in the dep-graph file). if sess.opts.debugging_opts.incremental_cc || sess.opts.debugging_opts.query_dep_graph { save_in(sess, @@ -69,31 +63,33 @@ pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, e)); } - save_in(sess, - dep_graph_path(sess), - |e| encode_dep_graph(tcx, &preds, e)); + time(sess.time_passes(), "persist dep-graph", || { + save_in(sess, + dep_graph_path(sess), + |e| encode_dep_graph(tcx, e)); + }); - let prev_metadata_hashes = incremental_hashes_map.prev_metadata_hashes.borrow(); + dirty_clean::check_dirty_clean_annotations(tcx); dirty_clean::check_dirty_clean_metadata(tcx, - &*prev_metadata_hashes, + &prev_metadata_hashes, ¤t_metadata_hashes); } -pub fn save_work_products(sess: &Session) { +pub fn save_work_products(sess: &Session, dep_graph: &DepGraph) { if sess.opts.incremental.is_none() { return; } debug!("save_work_products()"); - let _ignore = sess.dep_graph.in_ignore(); + let _ignore = dep_graph.in_ignore(); let path = work_products_path(sess); - save_in(sess, path, |e| encode_work_products(sess, e)); + save_in(sess, path, |e| encode_work_products(dep_graph, e)); // We also need to clean out old work-products, as not all of them are // deleted during invalidation. Some object files don't change their // content, they are just not needed anymore. 
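[Editor's note: the comment above describes cleaning out work products that the previous session saved but the current session no longer produces. A minimal sketch of that comparison follows, with a stand-in WorkProduct type and string ids instead of the compiler's WorkProductId; the actual file deletion is left as a comment.]

    use std::collections::HashMap;

    // Stand-in for rustc's WorkProduct; only the saved file list matters here.
    struct WorkProduct {
        saved_files: Vec<String>,
    }

    // Anything recorded previously but absent from the current set is stale.
    fn stale_files(previous: &HashMap<String, WorkProduct>,
                   current: &HashMap<String, WorkProduct>) -> Vec<String> {
        let mut stale = Vec::new();
        for (id, wp) in previous {
            if !current.contains_key(id) {
                // The compiler would call delete_workproduct_files here.
                stale.extend(wp.saved_files.iter().cloned());
            }
        }
        stale
    }

    fn main() {
        let mut previous = HashMap::new();
        previous.insert("cgu-old".to_string(),
                        WorkProduct { saved_files: vec!["old.o".to_string()] });
        let current = HashMap::new();
        assert_eq!(stale_files(&previous, &current), vec!["old.o".to_string()]);
    }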
- let new_work_products = sess.dep_graph.work_products(); - let previous_work_products = sess.dep_graph.previous_work_products(); + let new_work_products = dep_graph.work_products(); + let previous_work_products = dep_graph.previous_work_products(); for (id, wp) in previous_work_products.iter() { if !new_work_products.contains_key(id) { @@ -166,118 +162,25 @@ fn save_in(sess: &Session, path_buf: PathBuf, encode: F) } } -pub fn encode_dep_graph(tcx: TyCtxt, - preds: &Predecessors, - encoder: &mut Encoder) - -> io::Result<()> { +fn encode_dep_graph(tcx: TyCtxt, + encoder: &mut Encoder) + -> io::Result<()> { // First encode the commandline arguments hash tcx.sess.opts.dep_tracking_hash().encode(encoder)?; - // NB: We rely on this Vec being indexable by reduced_graph's NodeIndex. - let mut nodes: IndexVec = preds - .reduced_graph - .all_nodes() - .iter() - .map(|node| node.data.clone()) - .collect(); - - let mut edge_list_indices = IndexVec::with_capacity(nodes.len()); - let mut edge_list_data = Vec::with_capacity(preds.reduced_graph.len_edges()); - - for node_index in 0 .. nodes.len() { - let start = edge_list_data.len() as u32; - - for target in preds.reduced_graph.successor_nodes(graph::NodeIndex(node_index)) { - edge_list_data.push(DepNodeIndex::new(target.node_id())); - } - - let end = edge_list_data.len() as u32; - debug_assert_eq!(node_index, edge_list_indices.len()); - edge_list_indices.push((start, end)); - } - - // Let's make sure we had no overflow there. - assert!(edge_list_data.len() <= ::std::u32::MAX as usize); - // Check that we have a consistent number of edges. - assert_eq!(edge_list_data.len(), preds.reduced_graph.len_edges()); - - let bootstrap_outputs = preds.bootstrap_outputs - .iter() - .map(|dep_node| (**dep_node).clone()) - .collect(); - - // Next, build the map of content hashes. To this end, we need to transform - // the (DepNode -> Fingerprint) map that we have into a - // (DepNodeIndex -> Fingerprint) map. This may necessitate adding nodes back - // to the dep-graph that have been filtered out during reduction. - let content_hashes = { - // We have to build a (DepNode -> DepNodeIndex) map. We over-allocate a - // little because we expect some more nodes to be added. - let capacity = (nodes.len() * 120) / 100; - let mut node_to_index = FxHashMap::with_capacity_and_hasher(capacity, - Default::default()); - // Add the nodes we already have in the graph. - node_to_index.extend(nodes.iter_enumerated() - .map(|(index, &node)| (node, index))); - - let mut content_hashes = Vec::with_capacity(preds.hashes.len()); - - for (&&dep_node, &hash) in preds.hashes.iter() { - let dep_node_index = *node_to_index - .entry(dep_node) - .or_insert_with(|| { - // There is no DepNodeIndex for this DepNode yet. This - // happens when the DepNode got filtered out during graph - // reduction. Since we have a content hash for the DepNode, - // we add it back to the graph. - let next_index = nodes.len(); - nodes.push(dep_node); - - debug_assert_eq!(next_index, edge_list_indices.len()); - // Push an empty list of edges - edge_list_indices.push((0,0)); - - DepNodeIndex::new(next_index) - }); - - content_hashes.push((dep_node_index, hash)); - } - - content_hashes - }; - - let graph = SerializedDepGraph { - nodes, - edge_list_indices, - edge_list_data, - bootstrap_outputs, - hashes: content_hashes, - }; - // Encode the graph data. 
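[Editor's note: the removed encode_dep_graph above serialized adjacency as per-node `(start, end)` ranges into one flat `edge_list_data` vector. Below is a small sketch of that flattening step in isolation, with plain `usize` node indices instead of DepNodeIndex and no encoder involved.]

    // Flatten per-node successor lists into (start, end) ranges over one
    // shared edge vector, as the deleted encoding code did.
    fn flatten(successors: &[Vec<usize>]) -> (Vec<(u32, u32)>, Vec<usize>) {
        let mut edge_list_indices = Vec::with_capacity(successors.len());
        let mut edge_list_data = Vec::new();
        for targets in successors {
            let start = edge_list_data.len() as u32;
            edge_list_data.extend_from_slice(targets);
            let end = edge_list_data.len() as u32;
            edge_list_indices.push((start, end));
        }
        (edge_list_indices, edge_list_data)
    }

    fn main() {
        // Node 0 -> {1, 2}, node 1 -> {2}, node 2 -> {}.
        let (indices, data) = flatten(&[vec![1, 2], vec![2], vec![]]);
        assert_eq!(indices, vec![(0, 2), (2, 3), (3, 3)]);
        assert_eq!(data, vec![1, 2, 2]);
    }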
- graph.encode(encoder)?; - - if tcx.sess.opts.debugging_opts.incremental_info { - eprintln!("incremental: {} nodes in reduced dep-graph", graph.nodes.len()); - eprintln!("incremental: {} edges in serialized dep-graph", graph.edge_list_data.len()); - eprintln!("incremental: {} hashes in serialized dep-graph", graph.hashes.len()); - } - - if tcx.sess.opts.debugging_opts.incremental_dump_hash { - for (dep_node, hash) in &preds.hashes { - println!("ICH for {:?} is {}", dep_node, hash); - } - } + let serialized_graph = tcx.dep_graph.serialize(); + serialized_graph.encode(encoder)?; Ok(()) } -pub fn encode_metadata_hashes(tcx: TyCtxt, - svh: Svh, - metadata_hashes: &EncodedMetadataHashes, - current_metadata_hashes: &mut FxHashMap, - encoder: &mut Encoder) - -> io::Result<()> { +fn encode_metadata_hashes(tcx: TyCtxt, + svh: Svh, + metadata_hashes: &EncodedMetadataHashes, + current_metadata_hashes: &mut FxHashMap, + encoder: &mut Encoder) + -> io::Result<()> { assert_eq!(metadata_hashes.hashes.len(), metadata_hashes.hashes.iter().map(|x| (x.def_index, ())).collect::>().len()); @@ -309,8 +212,9 @@ pub fn encode_metadata_hashes(tcx: TyCtxt, Ok(()) } -pub fn encode_work_products(sess: &Session, encoder: &mut Encoder) -> io::Result<()> { - let work_products: Vec<_> = sess.dep_graph +fn encode_work_products(dep_graph: &DepGraph, + encoder: &mut Encoder) -> io::Result<()> { + let work_products: Vec<_> = dep_graph .work_products() .iter() .map(|(id, work_product)| { diff --git a/src/librustc_incremental/persist/work_product.rs b/src/librustc_incremental/persist/work_product.rs index e20d7a006b..9865e8fb17 100644 --- a/src/librustc_incremental/persist/work_product.rs +++ b/src/librustc_incremental/persist/work_product.rs @@ -11,7 +11,7 @@ //! This module contains files for saving intermediate work-products. use persist::fs::*; -use rustc::dep_graph::{WorkProduct, WorkProductId}; +use rustc::dep_graph::{WorkProduct, WorkProductId, DepGraph}; use rustc::session::Session; use rustc::session::config::OutputType; use rustc::util::fs::link_or_copy; @@ -19,12 +19,11 @@ use std::path::PathBuf; use std::fs as std_fs; pub fn save_trans_partition(sess: &Session, + dep_graph: &DepGraph, cgu_name: &str, - partition_hash: u64, files: &[(OutputType, PathBuf)]) { - debug!("save_trans_partition({:?},{},{:?})", + debug!("save_trans_partition({:?},{:?})", cgu_name, - partition_hash, files); if sess.opts.incremental.is_none() { return; @@ -56,11 +55,10 @@ pub fn save_trans_partition(sess: &Session, let work_product = WorkProduct { cgu_name: cgu_name.to_string(), - input_hash: partition_hash, saved_files, }; - sess.dep_graph.insert_work_product(&work_product_id, work_product); + dep_graph.insert_work_product(&work_product_id, work_product); } pub fn delete_workproduct_files(sess: &Session, work_product: &WorkProduct) { diff --git a/src/librustc_lint/bad_style.rs b/src/librustc_lint/bad_style.rs index d4b8f0a492..027ed84faf 100644 --- a/src/librustc_lint/bad_style.rs +++ b/src/librustc_lint/bad_style.rs @@ -13,6 +13,7 @@ use rustc::ty; use lint::{LateContext, LintContext, LintArray}; use lint::{LintPass, LateLintPass}; +use syntax::abi::Abi; use syntax::ast; use syntax::attr; use syntax_pos::Span; @@ -250,7 +251,11 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for NonSnakeCase { _ => (), } } - FnKind::ItemFn(name, ..) 
=> { + FnKind::ItemFn(name, _, _, _, abi, _, attrs) => { + // Skip foreign-ABI #[no_mangle] functions (Issue #31924) + if abi != Abi::Rust && attr::find_by_name(attrs, "no_mangle").is_some() { + return; + } self.check_snake_case(cx, "function", &name.as_str(), Some(span)) } FnKind::Closure(_) => (), @@ -278,7 +283,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for NonSnakeCase { fn check_lifetime_def(&mut self, cx: &LateContext, t: &hir::LifetimeDef) { self.check_snake_case(cx, "lifetime", - &t.lifetime.name.as_str(), + &t.lifetime.name.name().as_str(), Some(t.lifetime.span)); } diff --git a/src/librustc_lint/builtin.rs b/src/librustc_lint/builtin.rs index 3bfe2897de..7f331418d4 100644 --- a/src/librustc_lint/builtin.rs +++ b/src/librustc_lint/builtin.rs @@ -44,7 +44,7 @@ use std::collections::HashSet; use syntax::ast; use syntax::attr; use syntax::feature_gate::{AttributeGate, AttributeType, Stability, deprecated_attributes}; -use syntax_pos::Span; +use syntax_pos::{Span, SyntaxContext}; use syntax::symbol::keywords; use rustc::hir::{self, PatKind}; @@ -75,9 +75,15 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for WhileTrue { if let hir::ExprWhile(ref cond, ..) = e.node { if let hir::ExprLit(ref lit) = cond.node { if let ast::LitKind::Bool(true) = lit.node { - cx.span_lint(WHILE_TRUE, - e.span, - "denote infinite loops with loop { ... }"); + if lit.span.ctxt() == SyntaxContext::empty() { + let msg = "denote infinite loops with `loop { ... }`"; + let mut err = cx.struct_span_lint(WHILE_TRUE, e.span, msg); + let condition_span = cx.tcx.sess.codemap().def_span(e.span); + err.span_suggestion_short(condition_span, + "use `loop`", + "loop".to_owned()); + err.emit(); + } } } } @@ -545,7 +551,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MissingDebugImplementations { _ => return, } - let debug = match cx.tcx.lang_items.debug_trait() { + let debug = match cx.tcx.lang_items().debug_trait() { Some(debug) => debug, None => return, }; @@ -648,10 +654,11 @@ impl EarlyLintPass for DeprecatedAttr { ref name, ref reason, _) = g { - cx.span_lint(DEPRECATED, - attr.span, - &format!("use of deprecated attribute `{}`: {}. See {}", - name, reason, link)); + let msg = format!("use of deprecated attribute `{}`: {}. See {}", + name, reason, link); + let mut err = cx.struct_span_lint(DEPRECATED, attr.span, &msg); + err.span_suggestion_short(attr.span, "remove this attribute", "".to_owned()); + err.emit(); } return; } @@ -850,23 +857,25 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnconditionalRecursion { } visited.insert(cfg_id); - let node_id = cfg.graph.node_data(idx).id(); - // is this a recursive call? - let self_recursive = if node_id != ast::DUMMY_NODE_ID { - match method { + let local_id = cfg.graph.node_data(idx).id(); + if local_id != hir::DUMMY_ITEM_LOCAL_ID { + let node_id = cx.tcx.hir.hir_to_node_id(hir::HirId { + owner: body.value.hir_id.owner, + local_id + }); + let self_recursive = match method { Some(ref method) => expr_refers_to_this_method(cx, method, node_id), None => expr_refers_to_this_fn(cx, id, node_id), + }; + if self_recursive { + self_call_spans.push(cx.tcx.hir.span(node_id)); + // this is a self call, so we shouldn't explore past + // this node in the CFG. + continue; } - } else { - false - }; - if self_recursive { - self_call_spans.push(cx.tcx.hir.span(node_id)); - // this is a self call, so we shouldn't explore past - // this node in the CFG. - continue; } + // add the successors of this node to explore the graph further. 
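[Editor's note: the recursion lint above walks the CFG with a visited set and a work list, and stops exploring past any node that is a self call. The following is a rough standalone sketch of that traversal shape only; adjacency lists and a closure stand in for the compiler's CFG and its expr_refers_to_* checks.]

    use std::collections::HashSet;

    // Visit each node once; record self calls and do not explore past them,
    // mirroring the loop shape in the unconditional-recursion check.
    fn find_self_calls<F>(successors: &[Vec<usize>], entry: usize, is_self_call: F) -> Vec<usize>
        where F: Fn(usize) -> bool
    {
        let mut visited = HashSet::new();
        let mut work_list = vec![entry];
        let mut self_calls = Vec::new();
        while let Some(node) = work_list.pop() {
            if !visited.insert(node) {
                continue;
            }
            if is_self_call(node) {
                self_calls.push(node);
                continue;
            }
            work_list.extend(&successors[node]);
        }
        self_calls
    }

    fn main() {
        // CFG 0 -> 1 -> 2, where node 1 is a recursive call.
        let cfg = vec![vec![1], vec![2], vec![]];
        assert_eq!(find_self_calls(&cfg, 0, |n| n == 1), vec![1]);
    }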
for (_, edge) in cfg.graph.outgoing_edges(idx) { let target_idx = edge.target(); @@ -909,7 +918,10 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnconditionalRecursion { } else { return false; }; - def.def_id() == cx.tcx.hir.local_def_id(fn_id) + match def { + Def::Local(..) | Def::Upvar(..) => false, + _ => def.def_id() == cx.tcx.hir.local_def_id(fn_id) + } } _ => false, } @@ -1059,8 +1071,9 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for PluginAsLibrary { _ => return, }; - let prfn = match cx.sess().cstore.extern_mod_stmt_cnum(it.id) { - Some(cnum) => cx.sess().cstore.plugin_registrar_fn(cnum), + let def_id = cx.tcx.hir.local_def_id(it.id); + let prfn = match cx.tcx.extern_mod_stmt_cnum(def_id) { + Some(cnum) => cx.tcx.plugin_registrar_fn(cnum), None => { // Probably means we aren't linking the crate for some reason. // diff --git a/src/librustc_lint/lib.rs b/src/librustc_lint/lib.rs index 5ef277f02a..fbf993f455 100644 --- a/src/librustc_lint/lib.rs +++ b/src/librustc_lint/lib.rs @@ -128,7 +128,6 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { NonSnakeCase, NonUpperCaseGlobals, NonShorthandFieldPatterns, - UnusedUnsafe, UnsafeCode, UnusedMut, UnusedAllocation, diff --git a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs index cdf0996796..d3a5d52b29 100644 --- a/src/librustc_lint/types.rs +++ b/src/librustc_lint/types.rs @@ -76,7 +76,7 @@ impl LintPass for TypeLimits { } impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { - fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) { + fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, e: &'tcx hir::Expr) { match e.node { hir::ExprUnary(hir::UnNeg, ref expr) => { // propagate negation, if the negation itself isn't negated @@ -93,8 +93,8 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { if binop.node.is_shift() { let opt_ty_bits = match cx.tables.node_id_to_type(l.hir_id).sty { - ty::TyInt(t) => Some(int_ty_bits(t, cx.sess().target.int_type)), - ty::TyUint(t) => Some(uint_ty_bits(t, cx.sess().target.uint_type)), + ty::TyInt(t) => Some(int_ty_bits(t, cx.sess().target.isize_ty)), + ty::TyUint(t) => Some(uint_ty_bits(t, cx.sess().target.usize_ty)), _ => None, }; @@ -117,7 +117,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { cx.param_env.and(substs), cx.tables); match const_cx.eval(&r) { - Ok(ConstVal::Integral(i)) => { + Ok(&ty::Const { val: ConstVal::Integral(i), .. 
}) => { i.is_negative() || i.to_u64() .map(|i| i >= bits) @@ -141,7 +141,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { ast::LitKind::Int(v, ast::LitIntType::Signed(_)) | ast::LitKind::Int(v, ast::LitIntType::Unsuffixed) => { let int_type = if let ast::IntTy::Is = t { - cx.sess().target.int_type + cx.sess().target.isize_ty } else { t }; @@ -164,7 +164,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { } ty::TyUint(t) => { let uint_type = if let ast::UintTy::Us = t { - cx.sess().target.uint_type + cx.sess().target.usize_ty } else { t }; @@ -250,9 +250,9 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { } } - fn int_ty_bits(int_ty: ast::IntTy, target_int_ty: ast::IntTy) -> u64 { + fn int_ty_bits(int_ty: ast::IntTy, isize_ty: ast::IntTy) -> u64 { match int_ty { - ast::IntTy::Is => int_ty_bits(target_int_ty, target_int_ty), + ast::IntTy::Is => int_ty_bits(isize_ty, isize_ty), ast::IntTy::I8 => 8, ast::IntTy::I16 => 16 as u64, ast::IntTy::I32 => 32, @@ -261,9 +261,9 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { } } - fn uint_ty_bits(uint_ty: ast::UintTy, target_uint_ty: ast::UintTy) -> u64 { + fn uint_ty_bits(uint_ty: ast::UintTy, usize_ty: ast::UintTy) -> u64 { match uint_ty { - ast::UintTy::Us => uint_ty_bits(target_uint_ty, target_uint_ty), + ast::UintTy::Us => uint_ty_bits(usize_ty, usize_ty), ast::UintTy::U8 => 8, ast::UintTy::U16 => 16, ast::UintTy::U32 => 32, @@ -543,6 +543,18 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { `u32` or `libc::wchar_t` should be used") } + ty::TyInt(ast::IntTy::I128) => { + FfiUnsafe("found Rust type `i128` in foreign module, but \ + 128-bit integers don't currently have a known \ + stable ABI") + } + + ty::TyUint(ast::UintTy::U128) => { + FfiUnsafe("found Rust type `u128` in foreign module, but \ + 128-bit integers don't currently have a known \ + stable ABI") + } + // Primitive types with a stable representation. ty::TyBool | ty::TyInt(..) | ty::TyUint(..) | ty::TyFloat(..) | ty::TyNever => FfiSafe, @@ -607,6 +619,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { ty::TyInfer(..) | ty::TyError | ty::TyClosure(..) | + ty::TyGenerator(..) | ty::TyProjection(..) | ty::TyAnon(..) | ty::TyFnDef(..) 
=> bug!("Unexpected type in foreign function"), diff --git a/src/librustc_lint/unused.rs b/src/librustc_lint/unused.rs index cbc4ebe90f..e2ade19b6e 100644 --- a/src/librustc_lint/unused.rs +++ b/src/librustc_lint/unused.rs @@ -22,6 +22,8 @@ use syntax::attr; use syntax::feature_gate::{BUILTIN_ATTRIBUTES, AttributeType}; use syntax::symbol::keywords; use syntax::ptr::P; +use syntax::print::pprust; +use syntax::util::parser; use syntax_pos::Span; use rustc_back::slice; @@ -69,9 +71,13 @@ impl UnusedMut { let used_mutables = cx.tcx.used_mut_nodes.borrow(); for (_, v) in &mutables { if !v.iter().any(|e| used_mutables.contains(e)) { - cx.span_lint(UNUSED_MUT, - cx.tcx.hir.span(v[0]), - "variable does not need to be mutable"); + let binding_span = cx.tcx.hir.span(v[0]); + let mut_span = cx.tcx.sess.codemap().span_until_char(binding_span, ' '); + let mut err = cx.struct_span_lint(UNUSED_MUT, + binding_span, + "variable does not need to be mutable"); + err.span_suggestion_short(mut_span, "remove this `mut`", "".to_owned()); + err.emit(); } } } @@ -84,20 +90,12 @@ impl LintPass for UnusedMut { } impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedMut { - fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) { - if let hir::ExprMatch(_, ref arms, _) = e.node { - for a in arms { - self.check_unused_mut_pat(cx, &a.pats) - } - } + fn check_arm(&mut self, cx: &LateContext, a: &hir::Arm) { + self.check_unused_mut_pat(cx, &a.pats) } - fn check_stmt(&mut self, cx: &LateContext, s: &hir::Stmt) { - if let hir::StmtDecl(ref d, _) = s.node { - if let hir::DeclLocal(ref l) = d.node { - self.check_unused_mut_pat(cx, slice::ref_slice(&l.pat)); - } - } + fn check_local(&mut self, cx: &LateContext, l: &hir::Local) { + self.check_unused_mut_pat(cx, slice::ref_slice(&l.pat)); } fn check_fn(&mut self, @@ -160,6 +158,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedResults { }; let mut fn_warned = false; + let mut op_warned = false; if cx.tcx.sess.features.borrow().fn_must_use { let maybe_def = match expr.node { hir::ExprCall(ref callee, _) => { @@ -179,9 +178,24 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedResults { let def_id = def.def_id(); fn_warned = check_must_use(cx, def_id, s.span, "return value of "); } + + if let hir::ExprBinary(bin_op, ..) = expr.node { + match bin_op.node { + // Hardcoding the comparison operators here seemed more + // expedient than the refactoring that would be needed to + // look up the `#[must_use]` attribute which does exist on + // the comparison trait methods + hir::BiEq | hir::BiLt | hir::BiLe | hir::BiNe | hir::BiGe | hir::BiGt => { + let msg = "unused comparison which must be used"; + cx.span_lint(UNUSED_MUST_USE, expr.span, msg); + op_warned = true; + }, + _ => {}, + } + } } - if !(ty_warned || fn_warned) { + if !(ty_warned || fn_warned || op_warned) { cx.span_lint(UNUSED_RESULTS, s.span, "unused result"); } @@ -204,60 +218,6 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedResults { } } -declare_lint! 
{ - pub UNUSED_UNSAFE, - Warn, - "unnecessary use of an `unsafe` block" -} - -#[derive(Copy, Clone)] -pub struct UnusedUnsafe; - -impl LintPass for UnusedUnsafe { - fn get_lints(&self) -> LintArray { - lint_array!(UNUSED_UNSAFE) - } -} - -impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedUnsafe { - fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) { - /// Return the NodeId for an enclosing scope that is also `unsafe` - fn is_enclosed(cx: &LateContext, id: ast::NodeId) -> Option<(String, ast::NodeId)> { - let parent_id = cx.tcx.hir.get_parent_node(id); - if parent_id != id { - if cx.tcx.used_unsafe.borrow().contains(&parent_id) { - Some(("block".to_string(), parent_id)) - } else if let Some(hir::map::NodeItem(&hir::Item { - node: hir::ItemFn(_, hir::Unsafety::Unsafe, _, _, _, _), - .. - })) = cx.tcx.hir.find(parent_id) { - Some(("fn".to_string(), parent_id)) - } else { - is_enclosed(cx, parent_id) - } - } else { - None - } - } - if let hir::ExprBlock(ref blk) = e.node { - // Don't warn about generated blocks, that'll just pollute the output. - if blk.rules == hir::UnsafeBlock(hir::UserProvided) && - !cx.tcx.used_unsafe.borrow().contains(&blk.id) { - - let mut db = cx.struct_span_lint(UNUSED_UNSAFE, blk.span, - "unnecessary `unsafe` block"); - - db.span_label(blk.span, "unnecessary `unsafe` block"); - if let Some((kind, id)) = is_enclosed(cx, blk.id) { - db.span_note(cx.tcx.hir.span(id), - &format!("because it's nested under this `unsafe` {}", kind)); - } - db.emit(); - } - } - } -} - declare_lint! { pub PATH_STATEMENTS, Warn, @@ -367,45 +327,43 @@ impl UnusedParens { msg: &str, struct_lit_needs_parens: bool) { if let ast::ExprKind::Paren(ref inner) = value.node { - let necessary = struct_lit_needs_parens && contains_exterior_struct_lit(&inner); + let necessary = struct_lit_needs_parens && + parser::contains_exterior_struct_lit(&inner); if !necessary { - cx.span_lint(UNUSED_PARENS, - value.span, - &format!("unnecessary parentheses around {}", msg)) - } - } - - /// Expressions that syntactically contain an "exterior" struct - /// literal i.e. not surrounded by any parens or other - /// delimiters, e.g. `X { y: 1 }`, `X { y: 1 }.method()`, `foo - /// == X { y: 1 }` and `X { y: 1 } == foo` all do, but `(X { - /// y: 1 }) == foo` does not. - fn contains_exterior_struct_lit(value: &ast::Expr) -> bool { - match value.node { - ast::ExprKind::Struct(..) => true, - - ast::ExprKind::Assign(ref lhs, ref rhs) | - ast::ExprKind::AssignOp(_, ref lhs, ref rhs) | - ast::ExprKind::Binary(_, ref lhs, ref rhs) => { - // X { y: 1 } + X { y: 2 } - contains_exterior_struct_lit(&lhs) || contains_exterior_struct_lit(&rhs) - } - ast::ExprKind::Unary(_, ref x) | - ast::ExprKind::Cast(ref x, _) | - ast::ExprKind::Type(ref x, _) | - ast::ExprKind::Field(ref x, _) | - ast::ExprKind::TupField(ref x, _) | - ast::ExprKind::Index(ref x, _) => { - // &X { y: 1 }, X { y: 1 }.y - contains_exterior_struct_lit(&x) - } - - ast::ExprKind::MethodCall(.., ref exprs) => { - // X { y: 1 }.bar(...) 
- contains_exterior_struct_lit(&exprs[0]) - } - - _ => false, + let span_msg = format!("unnecessary parentheses around {}", msg); + let mut err = cx.struct_span_lint(UNUSED_PARENS, + value.span, + &span_msg); + // Remove exactly one pair of parentheses (rather than naïvely + // stripping all paren characters) + let mut ate_left_paren = false; + let mut ate_right_paren = false; + let parens_removed = pprust::expr_to_string(value) + .trim_matches(|c| { + match c { + '(' => { + if ate_left_paren { + false + } else { + ate_left_paren = true; + true + } + }, + ')' => { + if ate_right_paren { + false + } else { + ate_right_paren = true; + true + } + }, + _ => false, + } + }).to_owned(); + err.span_suggestion_short(value.span, + "remove these parentheses", + parens_removed); + err.emit(); } } } diff --git a/src/librustc_llvm/Cargo.toml b/src/librustc_llvm/Cargo.toml index f2eea014dd..de5add56b7 100644 --- a/src/librustc_llvm/Cargo.toml +++ b/src/librustc_llvm/Cargo.toml @@ -13,8 +13,9 @@ crate-type = ["dylib"] static-libstdcpp = [] [dependencies] -rustc_bitflags = { path = "../librustc_bitflags" } +bitflags = "1.0" +rustc_cratesio_shim = { path = "../librustc_cratesio_shim" } [build-dependencies] build_helper = { path = "../build_helper" } -gcc = "0.3.50" +cc = "1.0" diff --git a/src/librustc_llvm/build.rs b/src/librustc_llvm/build.rs index 3f0f536960..75efe135f6 100644 --- a/src/librustc_llvm/build.rs +++ b/src/librustc_llvm/build.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -extern crate gcc; +extern crate cc; extern crate build_helper; use std::process::Command; @@ -88,7 +88,7 @@ fn main() { let is_crossed = target != host; let mut optional_components = - vec!["x86", "arm", "aarch64", "mips", "powerpc", "pnacl", + vec!["x86", "arm", "aarch64", "mips", "powerpc", "systemz", "jsbackend", "webassembly", "msp430", "sparc", "nvptx"]; let mut version_cmd = Command::new(&llvm_config); @@ -115,6 +115,7 @@ fn main() { "linker", "asmparser", "mcjit", + "lto", "interpreter", "instrumentation"]; @@ -136,7 +137,8 @@ fn main() { let mut cmd = Command::new(&llvm_config); cmd.arg("--cxxflags"); let cxxflags = output(&mut cmd); - let mut cfg = gcc::Config::new(); + let mut cfg = cc::Build::new(); + cfg.warnings(false); for flag in cxxflags.split_whitespace() { // Ignore flags like `-m64` when we're doing a cross build if is_crossed && flag.starts_with("-m") { diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs index 20735af69e..3399bf2acd 100644 --- a/src/librustc_llvm/ffi.rs +++ b/src/librustc_llvm/ffi.rs @@ -345,6 +345,20 @@ pub enum PassKind { Module, } +/// LLVMRustThinLTOData +pub enum ThinLTOData {} + +/// LLVMRustThinLTOBuffer +pub enum ThinLTOBuffer {} + +/// LLVMRustThinLTOModule +#[repr(C)] +pub struct ThinLTOModule { + pub identifier: *const c_char, + pub data: *const u8, + pub len: usize, +} + // Opaque pointer types #[allow(missing_copy_implementations)] pub enum Module_opaque {} @@ -454,30 +468,31 @@ pub mod debuginfo { // These values **must** match with LLVMRustDIFlags!! bitflags! 
{ #[repr(C)] - #[derive(Debug, Default)] - flags DIFlags: ::libc::uint32_t { - const FlagZero = 0, - const FlagPrivate = 1, - const FlagProtected = 2, - const FlagPublic = 3, - const FlagFwdDecl = (1 << 2), - const FlagAppleBlock = (1 << 3), - const FlagBlockByrefStruct = (1 << 4), - const FlagVirtual = (1 << 5), - const FlagArtificial = (1 << 6), - const FlagExplicit = (1 << 7), - const FlagPrototyped = (1 << 8), - const FlagObjcClassComplete = (1 << 9), - const FlagObjectPointer = (1 << 10), - const FlagVector = (1 << 11), - const FlagStaticMember = (1 << 12), - const FlagLValueReference = (1 << 13), - const FlagRValueReference = (1 << 14), - const FlagMainSubprogram = (1 << 21), + #[derive(Default)] + pub struct DIFlags: ::libc::uint32_t { + const FlagZero = 0; + const FlagPrivate = 1; + const FlagProtected = 2; + const FlagPublic = 3; + const FlagFwdDecl = (1 << 2); + const FlagAppleBlock = (1 << 3); + const FlagBlockByrefStruct = (1 << 4); + const FlagVirtual = (1 << 5); + const FlagArtificial = (1 << 6); + const FlagExplicit = (1 << 7); + const FlagPrototyped = (1 << 8); + const FlagObjcClassComplete = (1 << 9); + const FlagObjectPointer = (1 << 10); + const FlagVector = (1 << 11); + const FlagStaticMember = (1 << 12); + const FlagLValueReference = (1 << 13); + const FlagRValueReference = (1 << 14); + const FlagMainSubprogram = (1 << 21); } } } +pub enum ModuleBuffer {} // Link to our native llvm bindings (things that we need to use the C++ api // for) and because llvm is written in C++ we need to link against libstdc++ @@ -1270,6 +1285,9 @@ extern "C" { PM: PassManagerRef, Internalize: Bool, RunInliner: Bool); + pub fn LLVMRustPassManagerBuilderPopulateThinLTOPassManager( + PMB: PassManagerBuilderRef, + PM: PassManagerRef) -> bool; // Stuff that's in rustllvm/ because it's not upstream yet. 
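[Editor's note: the DIFlags hunk above switches to the bitflags 1.0 macro syntax, where the type is declared as a `pub struct` and each constant ends with a semicolon. A minimal sketch of that syntax follows, with an invented flag type and assuming the bitflags 1.0 crate is available as a dependency.]

    #[macro_use]
    extern crate bitflags;

    // Invented example type; only the 1.0-style declaration syntax matters.
    bitflags! {
        pub struct DemoFlags: u32 {
            const A = 1;
            const B = 1 << 1;
        }
    }

    fn main() {
        let flags = DemoFlags::A | DemoFlags::B;
        assert!(flags.contains(DemoFlags::A));
    }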
@@ -1609,6 +1627,7 @@ extern "C" { pub fn LLVMRustSetNormalizedTarget(M: ModuleRef, triple: *const c_char); pub fn LLVMRustAddAlwaysInlinePass(P: PassManagerBuilderRef, AddLifetimes: bool); pub fn LLVMRustLinkInExternalBitcode(M: ModuleRef, bc: *const c_char, len: size_t) -> bool; + pub fn LLVMRustLinkInParsedExternalBitcode(M: ModuleRef, M: ModuleRef) -> bool; pub fn LLVMRustRunRestrictionPass(M: ModuleRef, syms: *const *const c_char, len: size_t); pub fn LLVMRustMarkAllFunctionsNounwind(M: ModuleRef); @@ -1678,4 +1697,48 @@ extern "C" { pub fn LLVMRustSetComdat(M: ModuleRef, V: ValueRef, Name: *const c_char); pub fn LLVMRustUnsetComdat(V: ValueRef); pub fn LLVMRustSetModulePIELevel(M: ModuleRef); + pub fn LLVMRustModuleBufferCreate(M: ModuleRef) -> *mut ModuleBuffer; + pub fn LLVMRustModuleBufferPtr(p: *const ModuleBuffer) -> *const u8; + pub fn LLVMRustModuleBufferLen(p: *const ModuleBuffer) -> usize; + pub fn LLVMRustModuleBufferFree(p: *mut ModuleBuffer); + pub fn LLVMRustModuleCost(M: ModuleRef) -> u64; + + pub fn LLVMRustThinLTOAvailable() -> bool; + pub fn LLVMRustWriteThinBitcodeToFile(PMR: PassManagerRef, + M: ModuleRef, + BC: *const c_char) -> bool; + pub fn LLVMRustThinLTOBufferCreate(M: ModuleRef) -> *mut ThinLTOBuffer; + pub fn LLVMRustThinLTOBufferFree(M: *mut ThinLTOBuffer); + pub fn LLVMRustThinLTOBufferPtr(M: *const ThinLTOBuffer) -> *const c_char; + pub fn LLVMRustThinLTOBufferLen(M: *const ThinLTOBuffer) -> size_t; + pub fn LLVMRustCreateThinLTOData( + Modules: *const ThinLTOModule, + NumModules: c_uint, + PreservedSymbols: *const *const c_char, + PreservedSymbolsLen: c_uint, + ) -> *mut ThinLTOData; + pub fn LLVMRustPrepareThinLTORename( + Data: *const ThinLTOData, + Module: ModuleRef, + ) -> bool; + pub fn LLVMRustPrepareThinLTOResolveWeak( + Data: *const ThinLTOData, + Module: ModuleRef, + ) -> bool; + pub fn LLVMRustPrepareThinLTOInternalize( + Data: *const ThinLTOData, + Module: ModuleRef, + ) -> bool; + pub fn LLVMRustPrepareThinLTOImport( + Data: *const ThinLTOData, + Module: ModuleRef, + ) -> bool; + pub fn LLVMRustFreeThinLTOData(Data: *mut ThinLTOData); + pub fn LLVMRustParseBitcodeForThinLTO( + Context: ContextRef, + Data: *const u8, + len: usize, + Identifier: *const c_char, + ) -> ModuleRef; + pub fn LLVMGetModuleIdentifier(M: ModuleRef, size: *mut usize) -> *const c_char; } diff --git a/src/librustc_llvm/lib.rs b/src/librustc_llvm/lib.rs index 3c3e627ee4..98172bca17 100644 --- a/src/librustc_llvm/lib.rs +++ b/src/librustc_llvm/lib.rs @@ -24,10 +24,13 @@ #![feature(link_args)] #![feature(static_nobundle)] -extern crate libc; +// See librustc_cratesio_shim/Cargo.toml for a comment explaining this. 
+#[allow(unused_extern_crates)] +extern crate rustc_cratesio_shim; + #[macro_use] -#[no_link] -extern crate rustc_bitflags; +extern crate bitflags; +extern crate libc; pub use self::IntPredicate::*; pub use self::RealPredicate::*; @@ -343,10 +346,6 @@ pub fn initialize_available_targets() { LLVMInitializePowerPCTargetMC, LLVMInitializePowerPCAsmPrinter, LLVMInitializePowerPCAsmParser); - init_target!(llvm_component = "pnacl", - LLVMInitializePNaClTargetInfo, - LLVMInitializePNaClTarget, - LLVMInitializePNaClTargetMC); init_target!(llvm_component = "systemz", LLVMInitializeSystemZTargetInfo, LLVMInitializeSystemZTarget, diff --git a/src/librustc_metadata/astencode.rs b/src/librustc_metadata/astencode.rs index b1b3e92347..d9ab2562ef 100644 --- a/src/librustc_metadata/astencode.rs +++ b/src/librustc_metadata/astencode.rs @@ -14,7 +14,10 @@ use isolated_encoder::IsolatedEncoder; use schema::*; use rustc::hir; -use rustc::ty; +use rustc::ty::{self, TyCtxt}; + +use rustc::ich::Fingerprint; +use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; #[derive(RustcEncodable, RustcDecodable)] pub struct Ast<'tcx> { @@ -22,20 +25,37 @@ pub struct Ast<'tcx> { pub tables: Lazy>, pub nested_bodies: LazySeq, pub rvalue_promotable_to_static: bool, + pub stable_bodies_hash: Fingerprint, } impl_stable_hash_for!(struct Ast<'tcx> { body, tables, nested_bodies, - rvalue_promotable_to_static + rvalue_promotable_to_static, + stable_bodies_hash }); impl<'a, 'b, 'tcx> IsolatedEncoder<'a, 'b, 'tcx> { pub fn encode_body(&mut self, body_id: hir::BodyId) -> Lazy> { let body = self.tcx.hir.body(body_id); - let lazy_body = self.lazy(body); + // In order to avoid having to hash hir::Bodies from extern crates, we + // hash them here, during export, and store the hash with metadata. 
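[Editor's note: the comment above explains that bodies are hashed once at export time and the hash is stored in metadata, so importing crates can compare fingerprints instead of re-hashing foreign HIR. Below is a loose sketch of that idea, with std's DefaultHasher standing in for StableHasher/Fingerprint and a tuple as a placeholder "body".]

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Compute a fingerprint once, at export time; an unchanged body hashes to
    // the same value later. DefaultHasher is a stand-in, not StableHasher.
    fn fingerprint<T: Hash>(body: &T) -> u64 {
        let mut hasher = DefaultHasher::new();
        body.hash(&mut hasher);
        hasher.finish()
    }

    fn main() {
        let body = ("fn", "example", vec![1u32, 2, 3]);
        let stored = fingerprint(&body);
        assert_eq!(stored, fingerprint(&body));
    }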
+ let stable_bodies_hash = { + let mut hcx = self.tcx.create_stable_hashing_context(); + let mut hasher = StableHasher::new(); + + hcx.while_hashing_hir_bodies(true, |hcx| { + hcx.while_hashing_spans(false, |hcx| { + body.hash_stable(hcx, &mut hasher); + }); + }); + + hasher.finish() + }; + + let lazy_body = self.lazy(body); let tables = self.tcx.body_tables(body_id); let lazy_tables = self.lazy(tables); @@ -54,12 +74,13 @@ impl<'a, 'b, 'tcx> IsolatedEncoder<'a, 'b, 'tcx> { tables: lazy_tables, nested_bodies: lazy_nested_bodies, rvalue_promotable_to_static, + stable_bodies_hash, }) } } struct NestedBodyCollector<'a, 'tcx: 'a> { - tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, bodies_found: Vec<&'tcx hir::Body>, } diff --git a/src/librustc_metadata/creader.rs b/src/librustc_metadata/creader.rs index 4c25d03b44..39bdf88925 100644 --- a/src/librustc_metadata/creader.rs +++ b/src/librustc_metadata/creader.rs @@ -12,9 +12,10 @@ use cstore::{self, CStore, CrateSource, MetadataBlob}; use locator::{self, CratePaths}; -use schema::{CrateRoot, Tracked}; +use native_libs::relevant_lib; +use schema::CrateRoot; -use rustc::hir::def_id::{CrateNum, DefIndex}; +use rustc::hir::def_id::{CrateNum, DefIndex, CRATE_DEF_INDEX}; use rustc::hir::svh::Svh; use rustc::middle::allocator::AllocatorKind; use rustc::middle::cstore::DepKind; @@ -23,10 +24,9 @@ use rustc::session::config::{Sanitizer, self}; use rustc_back::PanicStrategy; use rustc::session::search_paths::PathKind; use rustc::middle; -use rustc::middle::cstore::{CrateStore, validate_crate_name, ExternCrate}; +use rustc::middle::cstore::{validate_crate_name, ExternCrate}; use rustc::util::common::record_time; use rustc::util::nodemap::FxHashSet; -use rustc::middle::cstore::NativeLibrary; use rustc::hir::map::Definitions; use std::cell::{RefCell, Cell}; @@ -36,10 +36,8 @@ use std::rc::Rc; use std::{cmp, fs}; use syntax::ast; -use syntax::abi::Abi; use syntax::attr; use syntax::ext::base::SyntaxExtension; -use syntax::feature_gate::{self, GateIssue}; use syntax::symbol::Symbol; use syntax::visit; use syntax_pos::{Span, DUMMY_SP}; @@ -81,56 +79,6 @@ struct ExternCrateInfo { dep_kind: DepKind, } -fn register_native_lib(sess: &Session, - cstore: &CStore, - span: Option, - lib: NativeLibrary) { - if lib.name.as_str().is_empty() { - match span { - Some(span) => { - struct_span_err!(sess, span, E0454, - "#[link(name = \"\")] given with empty name") - .span_label(span, "empty name given") - .emit(); - } - None => { - sess.err("empty library name given via `-l`"); - } - } - return - } - let is_osx = sess.target.target.options.is_like_osx; - if lib.kind == cstore::NativeFramework && !is_osx { - let msg = "native frameworks are only available on macOS targets"; - match span { - Some(span) => span_err!(sess, span, E0455, "{}", msg), - None => sess.err(msg), - } - } - if lib.cfg.is_some() && !sess.features.borrow().link_cfg { - feature_gate::emit_feature_err(&sess.parse_sess, - "link_cfg", - span.unwrap(), - GateIssue::Language, - "is feature gated"); - } - if lib.kind == cstore::NativeStaticNobundle && !sess.features.borrow().static_nobundle { - feature_gate::emit_feature_err(&sess.parse_sess, - "static_nobundle", - span.unwrap(), - GateIssue::Language, - "kind=\"static-nobundle\" is feature gated"); - } - cstore.add_used_library(lib); -} - -fn relevant_lib(sess: &Session, lib: &NativeLibrary) -> bool { - match lib.cfg { - Some(ref cfg) => attr::cfg_matches(cfg, &sess.parse_sess, None), - None => true, - } -} - // Extra info about a crate loaded for 
plugins or exported macros. struct ExtensionCrate { metadata: PMDSource, @@ -218,7 +166,7 @@ impl<'a> CrateLoader<'a> { // We're also sure to compare *paths*, not actual byte slices. The // `source` stores paths which are normalized which may be different // from the strings on the command line. - let source = self.cstore.used_crate_source(cnum); + let source = &self.cstore.get_crate_data(cnum).source; if let Some(locs) = self.sess.opts.externs.get(&*name.as_str()) { let found = locs.iter().any(|l| { let l = fs::canonicalize(l).ok(); @@ -313,16 +261,13 @@ impl<'a> CrateLoader<'a> { crate_root.def_path_table.decode(&metadata) }); - let exported_symbols = crate_root.exported_symbols - .map(|x| x.decode(&metadata).collect()); + let exported_symbols = crate_root.exported_symbols.decode(&metadata).collect(); let trait_impls = crate_root .impls - .map(|impls| { - impls.decode(&metadata) - .map(|trait_impls| (trait_impls.trait_id, trait_impls.impls)) - .collect() - }); + .decode(&metadata) + .map(|trait_impls| (trait_impls.trait_id, trait_impls.impls)) + .collect(); let mut cmeta = cstore::CrateMetadata { name, @@ -347,23 +292,20 @@ impl<'a> CrateLoader<'a> { }, // Initialize this with an empty set. The field is populated below // after we were able to deserialize its contents. - dllimport_foreign_items: Tracked::new(FxHashSet()), + dllimport_foreign_items: FxHashSet(), }; - let dllimports: Tracked> = cmeta + let dllimports: FxHashSet<_> = cmeta .root .native_libraries - .map(|native_libraries| { - let native_libraries: Vec<_> = native_libraries.decode(&cmeta) - .collect(); - native_libraries - .iter() - .filter(|lib| relevant_lib(self.sess, lib) && - lib.kind == cstore::NativeLibraryKind::NativeUnknown) - .flat_map(|lib| lib.foreign_items.iter()) - .map(|id| *id) - .collect() - }); + .decode(&cmeta) + .filter(|lib| relevant_lib(self.sess, lib) && + lib.kind == cstore::NativeLibraryKind::NativeUnknown) + .flat_map(|lib| { + assert!(lib.foreign_items.iter().all(|def_id| def_id.krate == cnum)); + lib.foreign_items.into_iter().map(|def_id| def_id.index) + }) + .collect(); cmeta.dllimport_foreign_items = dllimports; @@ -521,7 +463,6 @@ impl<'a> CrateLoader<'a> { // We map 0 and all other holes in the map to our parent crate. The "additional" // self-dependencies should be harmless. 
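[Editor's note: the reworked dllimport collection above filters the decoded native libraries down to the relevant `NativeUnknown` ones and gathers their foreign items. A simplified sketch of that filter/flat_map shape follows; the NativeLib struct and numeric item ids are stand-ins for the cstore metadata types.]

    use std::collections::HashSet;

    // Stand-in for the decoded native-library metadata.
    struct NativeLib {
        relevant: bool,
        is_unknown_kind: bool,
        foreign_items: Vec<u32>,
    }

    fn dllimports(libs: &[NativeLib]) -> HashSet<u32> {
        libs.iter()
            .filter(|lib| lib.relevant && lib.is_unknown_kind)
            .flat_map(|lib| lib.foreign_items.iter().cloned())
            .collect()
    }

    fn main() {
        let libs = vec![
            NativeLib { relevant: true, is_unknown_kind: true, foreign_items: vec![1, 2] },
            NativeLib { relevant: false, is_unknown_kind: true, foreign_items: vec![3] },
        ];
        let expected: HashSet<u32> = [1, 2].iter().cloned().collect();
        assert_eq!(dllimports(&libs), expected);
    }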
::std::iter::once(krate).chain(crate_root.crate_deps - .get_untracked() .decode(metadata) .map(|dep| { debug!("resolving dep crate {} hash: `{}`", dep.name, dep.hash); @@ -721,33 +662,6 @@ impl<'a> CrateLoader<'a> { } } - fn get_foreign_items_of_kind(&self, kind: cstore::NativeLibraryKind) -> Vec { - let mut items = vec![]; - let libs = self.cstore.get_used_libraries(); - for lib in libs.borrow().iter() { - if relevant_lib(self.sess, lib) && lib.kind == kind { - items.extend(&lib.foreign_items); - } - } - items - } - - fn register_statically_included_foreign_items(&mut self) { - for id in self.get_foreign_items_of_kind(cstore::NativeStatic) { - self.cstore.add_statically_included_foreign_item(id); - } - for id in self.get_foreign_items_of_kind(cstore::NativeStaticNobundle) { - self.cstore.add_statically_included_foreign_item(id); - } - } - - fn register_dllimport_foreign_items(&mut self) { - let mut dllimports = self.cstore.dllimport_foreign_items.borrow_mut(); - for id in self.get_foreign_items_of_kind(cstore::NativeUnknown) { - dllimports.insert(id); - } - } - fn inject_panic_runtime(&mut self, krate: &ast::Crate) { // If we're only compiling an rlib, then there's no need to select a // panic runtime, so we just skip this section entirely. @@ -771,16 +685,14 @@ impl<'a> CrateLoader<'a> { let mut needs_panic_runtime = attr::contains_name(&krate.attrs, "needs_panic_runtime"); - let dep_graph = &self.sess.dep_graph; - self.cstore.iter_crate_data(|cnum, data| { needs_panic_runtime = needs_panic_runtime || - data.needs_panic_runtime(dep_graph); - if data.is_panic_runtime(dep_graph) { + data.needs_panic_runtime(); + if data.is_panic_runtime() { // Inject a dependency from all #![needs_panic_runtime] to this // #![panic_runtime] crate. self.inject_dependency_if(cnum, "a panic runtime", - &|data| data.needs_panic_runtime(dep_graph)); + &|data| data.needs_panic_runtime()); runtime_found = runtime_found || data.dep_kind.get() == DepKind::Explicit; } }); @@ -816,11 +728,11 @@ impl<'a> CrateLoader<'a> { // Sanity check the loaded crate to ensure it is indeed a panic runtime // and the panic strategy is indeed what we thought it was. - if !data.is_panic_runtime(dep_graph) { + if !data.is_panic_runtime() { self.sess.err(&format!("the crate `{}` is not a panic runtime", name)); } - if data.panic_strategy(dep_graph) != desired_strategy { + if data.panic_strategy() != desired_strategy { self.sess.err(&format!("the crate `{}` does not have the panic \ strategy `{}`", name, desired_strategy.desc())); @@ -828,7 +740,7 @@ impl<'a> CrateLoader<'a> { self.sess.injected_panic_runtime.set(Some(cnum)); self.inject_dependency_if(cnum, "a panic runtime", - &|data| data.needs_panic_runtime(dep_graph)); + &|data| data.needs_panic_runtime()); } fn inject_sanitizer_runtime(&mut self) { @@ -923,7 +835,7 @@ impl<'a> CrateLoader<'a> { PathKind::Crate, dep_kind); // Sanity check the loaded crate to ensure it is indeed a sanitizer runtime - if !data.is_sanitizer_runtime(&self.sess.dep_graph) { + if !data.is_sanitizer_runtime() { self.sess.err(&format!("the crate `{}` is not a sanitizer runtime", name)); } @@ -944,7 +856,7 @@ impl<'a> CrateLoader<'a> { PathKind::Crate, dep_kind); // Sanity check the loaded crate to ensure it is indeed a profiler runtime - if !data.is_profiler_runtime(&self.sess.dep_graph) { + if !data.is_profiler_runtime() { self.sess.err(&format!("the crate `profiler_builtins` is not \ a profiler runtime")); } @@ -962,9 +874,8 @@ impl<'a> CrateLoader<'a> { // written down in liballoc. 
let mut needs_allocator = attr::contains_name(&krate.attrs, "needs_allocator"); - let dep_graph = &self.sess.dep_graph; self.cstore.iter_crate_data(|_, data| { - needs_allocator = needs_allocator || data.needs_allocator(dep_graph); + needs_allocator = needs_allocator || data.needs_allocator(); }); if !needs_allocator { return @@ -996,14 +907,13 @@ impl<'a> CrateLoader<'a> { // First up we check for global allocators. Look at the crate graph here // and see what's a global allocator, including if we ourselves are a // global allocator. - let dep_graph = &self.sess.dep_graph; let mut global_allocator = if has_global_allocator { Some(None) } else { None }; self.cstore.iter_crate_data(|_, data| { - if !data.has_global_allocator(dep_graph) { + if !data.has_global_allocator() { return } match global_allocator { @@ -1037,59 +947,80 @@ impl<'a> CrateLoader<'a> { // (need_lib_alloc and prefer_dynamic) then we select `None`, and if the // exe allocation crate doesn't exist for this target then we also // select `None`. - let exe_allocation_crate = + let exe_allocation_crate_data = if need_lib_alloc && !self.sess.opts.cg.prefer_dynamic { None } else { - self.sess.target.target.options.exe_allocation_crate.as_ref() + self.sess + .target + .target + .options + .exe_allocation_crate + .as_ref() + .map(|name| { + // We've determined that we're injecting an "exe allocator" which means + // that we're going to load up a whole new crate. An example of this is + // that we're producing a normal binary on Linux which means we need to + // load the `alloc_jemalloc` crate to link as an allocator. + let name = Symbol::intern(name); + let (cnum, data) = self.resolve_crate(&None, + name, + name, + None, + DUMMY_SP, + PathKind::Crate, + DepKind::Implicit); + self.sess.injected_allocator.set(Some(cnum)); + data + }) }; - match exe_allocation_crate { - // We've determined that we're injecting an "exe allocator" which - // means that we're going to load up a whole new crate. An example - // of this is that we're producing a normal binary on Linux which - // means we need to load the `alloc_jemalloc` crate to link as an - // allocator. - Some(krate) => { - self.sess.allocator_kind.set(Some(AllocatorKind::DefaultExe)); - let name = Symbol::intern(krate); - let dep_kind = DepKind::Implicit; - let (cnum, _data) = - self.resolve_crate(&None, - name, - name, - None, - DUMMY_SP, - PathKind::Crate, dep_kind); - self.sess.injected_allocator.set(Some(cnum)); - // self.cstore.iter_crate_data(|_, data| { - // if !data.needs_allocator(dep_graph) { - // return - // } - // data.cnum_map.borrow_mut().push(cnum); - // }); + let allocation_crate_data = exe_allocation_crate_data.or_else(|| { + if attr::contains_name(&krate.attrs, "default_lib_allocator") { + // Prefer self as the allocator if there's a collision + return None; } - // We're not actually going to inject an allocator, we're going to // require that something in our crate graph is the default lib // allocator. This is typically libstd, so this'll rarely be an // error. 
- None => { - self.sess.allocator_kind.set(Some(AllocatorKind::DefaultLib)); - let mut found_lib_allocator = - attr::contains_name(&krate.attrs, "default_lib_allocator"); - self.cstore.iter_crate_data(|_, data| { - if !found_lib_allocator { - if data.has_default_lib_allocator(dep_graph) { - found_lib_allocator = true; - } + let mut allocator = None; + self.cstore.iter_crate_data(|_, data| { + if allocator.is_none() && data.has_default_lib_allocator() { + allocator = Some(data.clone()); + } + }); + allocator + }); + + match allocation_crate_data { + Some(data) => { + // We have an allocator. We detect separately what kind it is, to allow for some + // flexibility in misconfiguration. + let attrs = data.get_item_attrs(CRATE_DEF_INDEX); + let kind_interned = attr::first_attr_value_str_by_name(&attrs, "rustc_alloc_kind") + .map(Symbol::as_str); + let kind_str = kind_interned + .as_ref() + .map(|s| s as &str); + let alloc_kind = match kind_str { + None | + Some("lib") => AllocatorKind::DefaultLib, + Some("exe") => AllocatorKind::DefaultExe, + Some(other) => { + self.sess.err(&format!("Allocator kind {} not known", other)); + return; } - }); - if found_lib_allocator { - return + }; + self.sess.allocator_kind.set(Some(alloc_kind)); + }, + None => { + if !attr::contains_name(&krate.attrs, "default_lib_allocator") { + self.sess.err("no #[default_lib_allocator] found but one is \ + required; is libstd not linked?"); + return; } - self.sess.err("no #[default_lib_allocator] found but one is \ - required; is libstd not linked?"); + self.sess.allocator_kind.set(Some(AllocatorKind::DefaultLib)); } } @@ -1152,84 +1083,6 @@ impl<'a> CrateLoader<'a> { } } -impl<'a> CrateLoader<'a> { - pub fn preprocess(&mut self, krate: &ast::Crate) { - for attr in &krate.attrs { - if attr.path == "link_args" { - if let Some(linkarg) = attr.value_str() { - self.cstore.add_used_link_args(&linkarg.as_str()); - } - } - } - } - - fn process_foreign_mod(&mut self, i: &ast::Item, fm: &ast::ForeignMod, - definitions: &Definitions) { - if fm.abi == Abi::Rust || fm.abi == Abi::RustIntrinsic || fm.abi == Abi::PlatformIntrinsic { - return; - } - - // First, add all of the custom #[link_args] attributes - for m in i.attrs.iter().filter(|a| a.check_name("link_args")) { - if let Some(linkarg) = m.value_str() { - self.cstore.add_used_link_args(&linkarg.as_str()); - } - } - - // Next, process all of the #[link(..)]-style arguments - for m in i.attrs.iter().filter(|a| a.check_name("link")) { - let items = match m.meta_item_list() { - Some(item) => item, - None => continue, - }; - let kind = items.iter().find(|k| { - k.check_name("kind") - }).and_then(|a| a.value_str()).map(Symbol::as_str); - let kind = match kind.as_ref().map(|s| &s[..]) { - Some("static") => cstore::NativeStatic, - Some("static-nobundle") => cstore::NativeStaticNobundle, - Some("dylib") => cstore::NativeUnknown, - Some("framework") => cstore::NativeFramework, - Some(k) => { - struct_span_err!(self.sess, m.span, E0458, - "unknown kind: `{}`", k) - .span_label(m.span, "unknown kind").emit(); - cstore::NativeUnknown - } - None => cstore::NativeUnknown - }; - let n = items.iter().find(|n| { - n.check_name("name") - }).and_then(|a| a.value_str()); - let n = match n { - Some(n) => n, - None => { - struct_span_err!(self.sess, m.span, E0459, - "#[link(...)] specified without `name = \"foo\"`") - .span_label(m.span, "missing `name` argument").emit(); - Symbol::intern("foo") - } - }; - let cfg = items.iter().find(|k| { - k.check_name("cfg") - }).and_then(|a| a.meta_item_list()); - 
let cfg = cfg.map(|list| { - list[0].meta_item().unwrap().clone() - }); - let foreign_items = fm.items.iter() - .map(|it| definitions.opt_def_index(it.id).unwrap()) - .collect(); - let lib = NativeLibrary { - name: n, - kind, - cfg, - foreign_items, - }; - register_native_lib(self.sess, self.cstore, Some(m.span), lib); - } - } -} - impl<'a> middle::cstore::CrateLoader for CrateLoader<'a> { fn postprocess(&mut self, krate: &ast::Crate) { // inject the sanitizer runtime before the allocator runtime because all @@ -1242,72 +1095,10 @@ impl<'a> middle::cstore::CrateLoader for CrateLoader<'a> { if log_enabled!(log::LogLevel::Info) { dump_crates(&self.cstore); } - - // Process libs passed on the command line - // First, check for errors - let mut renames = FxHashSet(); - for &(ref name, ref new_name, _) in &self.sess.opts.libs { - if let &Some(ref new_name) = new_name { - if new_name.is_empty() { - self.sess.err( - &format!("an empty renaming target was specified for library `{}`",name)); - } else if !self.cstore.get_used_libraries().borrow().iter() - .any(|lib| lib.name == name as &str) { - self.sess.err(&format!("renaming of the library `{}` was specified, \ - however this crate contains no #[link(...)] \ - attributes referencing this library.", name)); - } else if renames.contains(name) { - self.sess.err(&format!("multiple renamings were specified for library `{}` .", - name)); - } else { - renames.insert(name); - } - } - } - // Update kind and, optionally, the name of all native libaries - // (there may be more than one) with the specified name. - for &(ref name, ref new_name, kind) in &self.sess.opts.libs { - let mut found = false; - for lib in self.cstore.get_used_libraries().borrow_mut().iter_mut() { - if lib.name == name as &str { - let mut changed = false; - if let Some(k) = kind { - lib.kind = k; - changed = true; - } - if let &Some(ref new_name) = new_name { - lib.name = Symbol::intern(new_name); - changed = true; - } - if !changed { - self.sess.warn(&format!("redundant linker flag specified for library `{}`", - name)); - } - - found = true; - } - } - if !found { - // Add if not found - let new_name = new_name.as_ref().map(|s| &**s); // &Option -> Option<&str> - let lib = NativeLibrary { - name: Symbol::intern(new_name.unwrap_or(name)), - kind: if let Some(k) = kind { k } else { cstore::NativeUnknown }, - cfg: None, - foreign_items: Vec::new(), - }; - register_native_lib(self.sess, self.cstore, None, lib); - } - } - self.register_statically_included_foreign_items(); - self.register_dllimport_foreign_items(); } fn process_item(&mut self, item: &ast::Item, definitions: &Definitions) { match item.node { - ast::ItemKind::ForeignMod(ref fm) => { - self.process_foreign_mod(item, fm, definitions) - }, ast::ItemKind::ExternCrate(_) => { let info = self.extract_crate_info(item).unwrap(); let (cnum, ..) 
= self.resolve_crate( diff --git a/src/librustc_metadata/cstore.rs b/src/librustc_metadata/cstore.rs index 0ed8ab2bc5..9e47e96aee 100644 --- a/src/librustc_metadata/cstore.rs +++ b/src/librustc_metadata/cstore.rs @@ -11,16 +11,15 @@ // The crate store - a central repo for information collected about external // crates and libraries -use schema::{self, Tracked}; +use schema; -use rustc::dep_graph::DepGraph; -use rustc::hir::def_id::{CRATE_DEF_INDEX, LOCAL_CRATE, CrateNum, DefIndex, DefId}; -use rustc::hir::map::definitions::{DefPathTable, GlobalMetaDataKind}; +use rustc::hir::def_id::{CRATE_DEF_INDEX, CrateNum, DefIndex}; +use rustc::hir::map::definitions::DefPathTable; use rustc::hir::svh::Svh; use rustc::middle::cstore::{DepKind, ExternCrate, MetadataLoader}; use rustc_back::PanicStrategy; use rustc_data_structures::indexed_vec::IndexVec; -use rustc::util::nodemap::{FxHashMap, FxHashSet, NodeMap, DefIdMap}; +use rustc::util::nodemap::{FxHashMap, FxHashSet, NodeMap}; use std::cell::{RefCell, Cell}; use std::rc::Rc; @@ -78,42 +77,30 @@ pub struct CrateMetadata { /// compilation support. pub def_path_table: Rc, - pub exported_symbols: Tracked>, + pub exported_symbols: FxHashSet, - pub trait_impls: Tracked>>, + pub trait_impls: FxHashMap<(u32, DefIndex), schema::LazySeq>, pub dep_kind: Cell, pub source: CrateSource, pub proc_macros: Option)>>, // Foreign items imported from a dylib (Windows only) - pub dllimport_foreign_items: Tracked>, + pub dllimport_foreign_items: FxHashSet, } pub struct CStore { - pub dep_graph: DepGraph, metas: RefCell>>, /// Map from NodeId's of local extern crate statements to crate numbers extern_mod_crate_map: RefCell>, - used_libraries: RefCell>, - used_link_args: RefCell>, - statically_included_foreign_items: RefCell>, - pub dllimport_foreign_items: RefCell>, - pub visible_parent_map: RefCell>, pub metadata_loader: Box, } impl CStore { - pub fn new(dep_graph: &DepGraph, metadata_loader: Box) -> CStore { + pub fn new(metadata_loader: Box) -> CStore { CStore { - dep_graph: dep_graph.clone(), metas: RefCell::new(FxHashMap()), extern_mod_crate_map: RefCell::new(FxHashMap()), - used_libraries: RefCell::new(Vec::new()), - used_link_args: RefCell::new(Vec::new()), - statically_included_foreign_items: RefCell::new(FxHashSet()), - dllimport_foreign_items: RefCell::new(FxHashSet()), - visible_parent_map: RefCell::new(FxHashMap()), metadata_loader, } } @@ -126,10 +113,6 @@ impl CStore { self.metas.borrow().get(&cnum).unwrap().clone() } - pub fn get_crate_hash(&self, cnum: CrateNum) -> Svh { - self.get_crate_data(cnum).hash() - } - pub fn set_crate_data(&self, cnum: CrateNum, data: Rc) { self.metas.borrow_mut().insert(cnum, data); } @@ -164,96 +147,21 @@ impl CStore { ordering.push(krate); } - // This method is used when generating the command line to pass through to - // system linker. The linker expects undefined symbols on the left of the - // command line to be defined in libraries on the right, not the other way - // around. For more info, see some comments in the add_used_library function - // below. - // - // In order to get this left-to-right dependency ordering, we perform a - // topological sort of all crates putting the leaves at the right-most - // positions. 
- pub fn do_get_used_crates(&self, - prefer: LinkagePreference) - -> Vec<(CrateNum, LibSource)> { + pub fn do_postorder_cnums_untracked(&self) -> Vec { let mut ordering = Vec::new(); for (&num, _) in self.metas.borrow().iter() { self.push_dependencies_in_postorder(&mut ordering, num); } - info!("topological ordering: {:?}", ordering); - ordering.reverse(); - let mut libs = self.metas - .borrow() - .iter() - .filter_map(|(&cnum, data)| { - if data.dep_kind.get().macros_only() { return None; } - let path = match prefer { - LinkagePreference::RequireDynamic => data.source.dylib.clone().map(|p| p.0), - LinkagePreference::RequireStatic => data.source.rlib.clone().map(|p| p.0), - }; - let path = match path { - Some(p) => LibSource::Some(p), - None => { - if data.source.rmeta.is_some() { - LibSource::MetadataOnly - } else { - LibSource::None - } - } - }; - Some((cnum, path)) - }) - .collect::>(); - libs.sort_by(|&(a, _), &(b, _)| { - let a = ordering.iter().position(|x| *x == a); - let b = ordering.iter().position(|x| *x == b); - a.cmp(&b) - }); - libs - } - - pub fn add_used_library(&self, lib: NativeLibrary) { - assert!(!lib.name.as_str().is_empty()); - self.used_libraries.borrow_mut().push(lib); - } - - pub fn get_used_libraries(&self) -> &RefCell> { - &self.used_libraries - } - - pub fn add_used_link_args(&self, args: &str) { - for s in args.split(' ').filter(|s| !s.is_empty()) { - self.used_link_args.borrow_mut().push(s.to_string()); - } - } - - pub fn get_used_link_args<'a>(&'a self) -> &'a RefCell> { - &self.used_link_args + return ordering } pub fn add_extern_mod_stmt_cnum(&self, emod_id: ast::NodeId, cnum: CrateNum) { self.extern_mod_crate_map.borrow_mut().insert(emod_id, cnum); } - pub fn add_statically_included_foreign_item(&self, id: DefIndex) { - self.statically_included_foreign_items.borrow_mut().insert(id); - } - - pub fn do_is_statically_included_foreign_item(&self, def_id: DefId) -> bool { - assert!(def_id.krate == LOCAL_CRATE); - self.statically_included_foreign_items.borrow().contains(&def_id.index) - } - pub fn do_extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option { self.extern_mod_crate_map.borrow().get(&emod_id).cloned() } - - pub fn read_dep_node(&self, def_id: DefId) { - use rustc::middle::cstore::CrateStore; - let def_path_hash = self.def_path_hash(def_id); - let dep_node = def_path_hash.to_dep_node(::rustc::dep_graph::DepKind::MetaData); - self.dep_graph.read(dep_node); - } } impl CrateMetadata { @@ -267,62 +175,60 @@ impl CrateMetadata { self.root.disambiguator } - pub fn needs_allocator(&self, dep_graph: &DepGraph) -> bool { - let attrs = self.get_item_attrs(CRATE_DEF_INDEX, dep_graph); + pub fn needs_allocator(&self) -> bool { + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); attr::contains_name(&attrs, "needs_allocator") } - pub fn has_global_allocator(&self, dep_graph: &DepGraph) -> bool { - let dep_node = self.metadata_dep_node(GlobalMetaDataKind::Krate); - self.root - .has_global_allocator - .get(dep_graph, dep_node) - .clone() + pub fn has_global_allocator(&self) -> bool { + self.root.has_global_allocator.clone() } - pub fn has_default_lib_allocator(&self, dep_graph: &DepGraph) -> bool { - let dep_node = self.metadata_dep_node(GlobalMetaDataKind::Krate); - self.root - .has_default_lib_allocator - .get(dep_graph, dep_node) - .clone() + pub fn has_default_lib_allocator(&self) -> bool { + self.root.has_default_lib_allocator.clone() } - pub fn is_panic_runtime(&self, dep_graph: &DepGraph) -> bool { - let attrs = self.get_item_attrs(CRATE_DEF_INDEX, 
dep_graph); + pub fn is_panic_runtime(&self) -> bool { + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); attr::contains_name(&attrs, "panic_runtime") } - pub fn needs_panic_runtime(&self, dep_graph: &DepGraph) -> bool { - let attrs = self.get_item_attrs(CRATE_DEF_INDEX, dep_graph); + pub fn needs_panic_runtime(&self) -> bool { + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); attr::contains_name(&attrs, "needs_panic_runtime") } - pub fn is_compiler_builtins(&self, dep_graph: &DepGraph) -> bool { - let attrs = self.get_item_attrs(CRATE_DEF_INDEX, dep_graph); + pub fn is_compiler_builtins(&self) -> bool { + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); attr::contains_name(&attrs, "compiler_builtins") } - pub fn is_sanitizer_runtime(&self, dep_graph: &DepGraph) -> bool { - let attrs = self.get_item_attrs(CRATE_DEF_INDEX, dep_graph); + pub fn is_sanitizer_runtime(&self) -> bool { + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); attr::contains_name(&attrs, "sanitizer_runtime") } - pub fn is_profiler_runtime(&self, dep_graph: &DepGraph) -> bool { - let attrs = self.get_item_attrs(CRATE_DEF_INDEX, dep_graph); + pub fn is_profiler_runtime(&self) -> bool { + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); attr::contains_name(&attrs, "profiler_runtime") } - pub fn is_no_builtins(&self, dep_graph: &DepGraph) -> bool { - let attrs = self.get_item_attrs(CRATE_DEF_INDEX, dep_graph); + pub fn is_no_builtins(&self) -> bool { + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); attr::contains_name(&attrs, "no_builtins") } - pub fn panic_strategy(&self, dep_graph: &DepGraph) -> PanicStrategy { - let dep_node = self.metadata_dep_node(GlobalMetaDataKind::Krate); - self.root - .panic_strategy - .get(dep_graph, dep_node) - .clone() + pub fn has_copy_closures(&self) -> bool { + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); + attr::contains_feature_attr(&attrs, "copy_closures") + } + + pub fn has_clone_closures(&self) -> bool { + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); + attr::contains_feature_attr(&attrs, "clone_closures") + } + + pub fn panic_strategy(&self) -> PanicStrategy { + self.root.panic_strategy.clone() } } diff --git a/src/librustc_metadata/cstore_impl.rs b/src/librustc_metadata/cstore_impl.rs index ad320a7ff3..8eacc21ab0 100644 --- a/src/librustc_metadata/cstore_impl.rs +++ b/src/librustc_metadata/cstore_impl.rs @@ -10,24 +10,25 @@ use cstore; use encoder; +use link_args; +use native_libs; use schema; use rustc::ty::maps::QueryConfig; -use rustc::middle::cstore::{CrateStore, CrateSource, LibSource, DepKind, - NativeLibrary, MetadataLoader, LinkMeta, - LinkagePreference, LoadedMacro, EncodedMetadata, - EncodedMetadataHashes}; +use rustc::middle::cstore::{CrateStore, DepKind, + MetadataLoader, LinkMeta, + LoadedMacro, EncodedMetadata, + EncodedMetadataHashes, NativeLibraryKind}; +use rustc::middle::stability::DeprecationEntry; use rustc::hir::def; -use rustc::middle::lang_items; use rustc::session::Session; use rustc::ty::{self, TyCtxt}; use rustc::ty::maps::Providers; -use rustc::hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX, LOCAL_CRATE}; +use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE, CRATE_DEF_INDEX}; use rustc::hir::map::{DefKey, DefPath, DefPathHash}; use rustc::hir::map::blocks::FnLikeNode; -use rustc::hir::map::definitions::{DefPathTable, GlobalMetaDataKind}; +use rustc::hir::map::definitions::DefPathTable; use rustc::util::nodemap::{NodeSet, DefIdMap}; -use rustc_back::PanicStrategy; use std::any::Any; use std::rc::Rc; @@ -38,23 +39,33 @@ use 
syntax::ext::base::SyntaxExtension; use syntax::parse::filemap_to_stream; use syntax::symbol::Symbol; use syntax_pos::{Span, NO_EXPANSION}; -use rustc::hir::svh::Svh; +use rustc_data_structures::indexed_set::IdxSetBuf; use rustc::hir; macro_rules! provide { - (<$lt:tt> $tcx:ident, $def_id:ident, $cdata:ident, $($name:ident => $compute:block)*) => { + (<$lt:tt> $tcx:ident, $def_id:ident, $other:ident, $cdata:ident, + $($name:ident => $compute:block)*) => { pub fn provide<$lt>(providers: &mut Providers<$lt>) { - $(fn $name<'a, $lt:$lt>($tcx: TyCtxt<'a, $lt, $lt>, $def_id: DefId) + $(fn $name<'a, $lt:$lt, T>($tcx: TyCtxt<'a, $lt, $lt>, def_id_arg: T) -> as - QueryConfig>::Value { + QueryConfig>::Value + where T: IntoArgs, + { + #[allow(unused_variables)] + let ($def_id, $other) = def_id_arg.into_args(); assert!(!$def_id.is_local()); - let def_path_hash = $tcx.def_path_hash($def_id); - let dep_node = def_path_hash.to_dep_node(::rustc::dep_graph::DepKind::MetaData); - + let def_path_hash = $tcx.def_path_hash(DefId { + krate: $def_id.krate, + index: CRATE_DEF_INDEX + }); + let dep_node = def_path_hash + .to_dep_node(::rustc::dep_graph::DepKind::CrateMetadata); + // The DepNodeIndex of the DepNode::CrateMetadata should be + // cached somewhere, so that we can use read_index(). $tcx.dep_graph.read(dep_node); - let $cdata = $tcx.sess.cstore.crate_data_as_rc_any($def_id.krate); + let $cdata = $tcx.crate_data_as_rc_any($def_id.krate); let $cdata = $cdata.downcast_ref::() .expect("CrateStore crated ata is not a CrateMetadata"); $compute @@ -68,7 +79,25 @@ macro_rules! provide { } } -provide! { <'tcx> tcx, def_id, cdata, +// small trait to work around different signature queries all being defined via +// the macro above. +trait IntoArgs { + fn into_args(self) -> (DefId, DefId); +} + +impl IntoArgs for DefId { + fn into_args(self) -> (DefId, DefId) { (self, self) } +} + +impl IntoArgs for CrateNum { + fn into_args(self) -> (DefId, DefId) { (self.as_def_id(), self.as_def_id()) } +} + +impl IntoArgs for (CrateNum, DefId) { + fn into_args(self) -> (DefId, DefId) { (self.0.as_def_id(), self.1) } +} + +provide! { <'tcx> tcx, def_id, other, cdata, type_of => { cdata.get_type(def_id.index, tcx) } generics_of => { tcx.alloc_generics(cdata.get_generics(def_id.index)) } predicates_of => { cdata.get_predicates(def_id.index, tcx) } @@ -105,7 +134,10 @@ provide! { <'tcx> tcx, def_id, cdata, mir } - mir_const_qualif => { cdata.mir_const_qualif(def_id.index) } + generator_sig => { cdata.generator_sig(def_id.index, tcx) } + mir_const_qualif => { + (cdata.mir_const_qualif(def_id.index), Rc::new(IdxSetBuf::new_empty(0))) + } typeck_tables_of => { cdata.item_body_tables(def_id.index, tcx) } closure_kind => { cdata.closure_kind(def_id.index) } fn_sig => { cdata.fn_sig(def_id.index, tcx) } @@ -115,9 +147,13 @@ provide! 
{ <'tcx> tcx, def_id, cdata, is_default_impl => { cdata.is_default_impl(def_id.index) } describe_def => { cdata.get_def(def_id.index) } def_span => { cdata.get_span(def_id.index, &tcx.sess) } - stability => { cdata.get_stability(def_id.index) } - deprecation => { cdata.get_deprecation(def_id.index) } - item_attrs => { cdata.get_item_attrs(def_id.index, &tcx.dep_graph) } + lookup_stability => { + cdata.get_stability(def_id.index).map(|s| tcx.intern_stability(s)) + } + lookup_deprecation_entry => { + cdata.get_deprecation(def_id.index).map(DeprecationEntry::external) + } + item_attrs => { cdata.get_item_attrs(def_id.index) } // FIXME(#38501) We've skipped a `read` on the `HirBody` of // a `fn` when encoding, so the dep-tracking wouldn't work. // This is only used by rustdoc anyway, which shouldn't have @@ -126,20 +162,83 @@ provide! { <'tcx> tcx, def_id, cdata, impl_parent => { cdata.get_parent_impl(def_id.index) } trait_of_item => { cdata.get_trait_of_item(def_id.index) } is_exported_symbol => { - let dep_node = cdata.metadata_dep_node(GlobalMetaDataKind::ExportedSymbols); - cdata.exported_symbols.get(&tcx.dep_graph, dep_node).contains(&def_id.index) + cdata.exported_symbols.contains(&def_id.index) } - item_body_nested_bodies => { Rc::new(cdata.item_body_nested_bodies(def_id.index)) } + item_body_nested_bodies => { cdata.item_body_nested_bodies(def_id.index) } const_is_rvalue_promotable_to_static => { cdata.const_is_rvalue_promotable_to_static(def_id.index) } is_mir_available => { cdata.is_item_mir_available(def_id.index) } - dylib_dependency_formats => { Rc::new(cdata.get_dylib_dependency_formats(&tcx.dep_graph)) } - is_panic_runtime => { cdata.is_panic_runtime(&tcx.dep_graph) } - is_compiler_builtins => { cdata.is_compiler_builtins(&tcx.dep_graph) } - has_global_allocator => { cdata.has_global_allocator(&tcx.dep_graph) } + dylib_dependency_formats => { Rc::new(cdata.get_dylib_dependency_formats()) } + is_panic_runtime => { cdata.is_panic_runtime() } + is_compiler_builtins => { cdata.is_compiler_builtins() } + has_global_allocator => { cdata.has_global_allocator() } + is_sanitizer_runtime => { cdata.is_sanitizer_runtime() } + is_profiler_runtime => { cdata.is_profiler_runtime() } + panic_strategy => { cdata.panic_strategy() } extern_crate => { Rc::new(cdata.extern_crate.get()) } + is_no_builtins => { cdata.is_no_builtins() } + impl_defaultness => { cdata.get_impl_defaultness(def_id.index) } + exported_symbol_ids => { Rc::new(cdata.get_exported_symbols()) } + native_libraries => { Rc::new(cdata.get_native_libraries()) } + plugin_registrar_fn => { + cdata.root.plugin_registrar_fn.map(|index| { + DefId { krate: def_id.krate, index } + }) + } + derive_registrar_fn => { + cdata.root.macro_derive_registrar.map(|index| { + DefId { krate: def_id.krate, index } + }) + } + crate_disambiguator => { cdata.disambiguator() } + crate_hash => { cdata.hash() } + original_crate_name => { cdata.name() } + + implementations_of_trait => { + let mut result = vec![]; + let filter = Some(other); + cdata.get_implementations_for_trait(filter, &mut result); + Rc::new(result) + } + + all_trait_implementations => { + let mut result = vec![]; + cdata.get_implementations_for_trait(None, &mut result); + Rc::new(result) + } + + is_dllimport_foreign_item => { + cdata.is_dllimport_foreign_item(def_id.index) + } + visibility => { cdata.get_visibility(def_id.index) } + dep_kind => { cdata.dep_kind.get() } + crate_name => { cdata.name } + item_children => { + let mut result = vec![]; + cdata.each_child_of_item(def_id.index, 
|child| result.push(child), tcx.sess); + Rc::new(result) + } + defined_lang_items => { Rc::new(cdata.get_lang_items()) } + missing_lang_items => { Rc::new(cdata.get_missing_lang_items()) } + + extern_const_body => { + debug!("item_body({:?}): inlining item", def_id); + cdata.extern_const_body(tcx, def_id.index) + } + + missing_extern_crate_item => { + match cdata.extern_crate.get() { + Some(extern_crate) if !extern_crate.direct => true, + _ => false, + } + } + + used_crate_source => { Rc::new(cdata.source.clone()) } + + has_copy_closures => { cdata.has_copy_closures() } + has_clone_closures => { cdata.has_clone_closures() } } pub fn provide_local<'tcx>(providers: &mut Providers<'tcx>) { @@ -154,8 +253,94 @@ pub fn provide_local<'tcx>(providers: &mut Providers<'tcx>) { } } + // FIXME(#44234) - almost all of these queries have no sub-queries and + // therefore no actual inputs, they're just reading tables calculated in + // resolve! Does this work? Unsure! That's what the issue is about *providers = Providers { is_const_fn, + is_dllimport_foreign_item: |tcx, id| { + tcx.native_library_kind(id) == Some(NativeLibraryKind::NativeUnknown) + }, + is_statically_included_foreign_item: |tcx, id| { + match tcx.native_library_kind(id) { + Some(NativeLibraryKind::NativeStatic) | + Some(NativeLibraryKind::NativeStaticNobundle) => true, + _ => false, + } + }, + native_library_kind: |tcx, id| { + tcx.native_libraries(id.krate) + .iter() + .filter(|lib| native_libs::relevant_lib(&tcx.sess, lib)) + .find(|l| l.foreign_items.contains(&id)) + .map(|l| l.kind) + }, + native_libraries: |tcx, cnum| { + assert_eq!(cnum, LOCAL_CRATE); + Rc::new(native_libs::collect(tcx)) + }, + link_args: |tcx, cnum| { + assert_eq!(cnum, LOCAL_CRATE); + Rc::new(link_args::collect(tcx)) + }, + + // Returns a map from a sufficiently visible external item (i.e. an + // external item that is visible from at least one local module) to a + // sufficiently visible parent (considering modules that re-export the + // external item to be parents). + visible_parent_map: |tcx, cnum| { + use std::collections::vec_deque::VecDeque; + use std::collections::hash_map::Entry; + + assert_eq!(cnum, LOCAL_CRATE); + let mut visible_parent_map: DefIdMap = DefIdMap(); + + for &cnum in tcx.crates().iter() { + // Ignore crates without a corresponding local `extern crate` item. + if tcx.missing_extern_crate_item(cnum) { + continue + } + + let bfs_queue = &mut VecDeque::new(); + let visible_parent_map = &mut visible_parent_map; + let mut add_child = |bfs_queue: &mut VecDeque<_>, + child: &def::Export, + parent: DefId| { + let child = child.def.def_id(); + + if tcx.visibility(child) != ty::Visibility::Public { + return; + } + + match visible_parent_map.entry(child) { + Entry::Occupied(mut entry) => { + // If `child` is defined in crate `cnum`, ensure + // that it is mapped to a parent in `cnum`. 
+ if child.krate == cnum && entry.get().krate != cnum { + entry.insert(parent); + } + } + Entry::Vacant(entry) => { + entry.insert(parent); + bfs_queue.push_back(child); + } + } + }; + + bfs_queue.push_back(DefId { + krate: cnum, + index: CRATE_DEF_INDEX + }); + while let Some(def) = bfs_queue.pop_front() { + for child in tcx.item_children(def).iter() { + add_child(bfs_queue, child, def); + } + } + } + + Rc::new(visible_parent_map) + }, + ..*providers }; } @@ -169,145 +354,44 @@ impl CrateStore for cstore::CStore { &*self.metadata_loader } - fn visibility(&self, def: DefId) -> ty::Visibility { - self.read_dep_node(def); + fn visibility_untracked(&self, def: DefId) -> ty::Visibility { self.get_crate_data(def.krate).get_visibility(def.index) } - fn item_generics_cloned(&self, def: DefId) -> ty::Generics { - self.read_dep_node(def); + fn item_generics_cloned_untracked(&self, def: DefId) -> ty::Generics { self.get_crate_data(def.krate).get_generics(def.index) } - fn implementations_of_trait(&self, filter: Option) -> Vec + fn associated_item_cloned_untracked(&self, def: DefId) -> ty::AssociatedItem { - let mut result = vec![]; - - self.iter_crate_data(|_, cdata| { - cdata.get_implementations_for_trait(filter, &self.dep_graph, &mut result) - }); - result - } - - fn impl_defaultness(&self, def: DefId) -> hir::Defaultness - { - self.read_dep_node(def); - self.get_crate_data(def.krate).get_impl_defaultness(def.index) - } - - fn associated_item_cloned(&self, def: DefId) -> ty::AssociatedItem - { - self.read_dep_node(def); self.get_crate_data(def.krate).get_associated_item(def.index) } - fn is_statically_included_foreign_item(&self, def_id: DefId) -> bool - { - self.do_is_statically_included_foreign_item(def_id) - } - - fn is_dllimport_foreign_item(&self, def_id: DefId) -> bool { - if def_id.krate == LOCAL_CRATE { - self.dllimport_foreign_items.borrow().contains(&def_id.index) - } else { - self.get_crate_data(def_id.krate) - .is_dllimport_foreign_item(def_id.index, &self.dep_graph) - } - } - - fn dep_kind(&self, cnum: CrateNum) -> DepKind + fn dep_kind_untracked(&self, cnum: CrateNum) -> DepKind { - let data = self.get_crate_data(cnum); - let dep_node = data.metadata_dep_node(GlobalMetaDataKind::CrateDeps); - self.dep_graph.read(dep_node); - data.dep_kind.get() + self.get_crate_data(cnum).dep_kind.get() } - fn export_macros(&self, cnum: CrateNum) { + fn export_macros_untracked(&self, cnum: CrateNum) { let data = self.get_crate_data(cnum); - let dep_node = data.metadata_dep_node(GlobalMetaDataKind::CrateDeps); - - self.dep_graph.read(dep_node); if data.dep_kind.get() == DepKind::UnexportedMacrosOnly { data.dep_kind.set(DepKind::MacrosOnly) } } - fn lang_items(&self, cnum: CrateNum) -> Vec<(DefIndex, usize)> - { - self.get_crate_data(cnum).get_lang_items(&self.dep_graph) - } - - fn missing_lang_items(&self, cnum: CrateNum) - -> Vec - { - self.get_crate_data(cnum).get_missing_lang_items(&self.dep_graph) - } - - fn is_compiler_builtins(&self, cnum: CrateNum) -> bool { - self.get_crate_data(cnum).is_compiler_builtins(&self.dep_graph) - } - - fn is_sanitizer_runtime(&self, cnum: CrateNum) -> bool { - self.get_crate_data(cnum).is_sanitizer_runtime(&self.dep_graph) - } - - fn is_profiler_runtime(&self, cnum: CrateNum) -> bool { - self.get_crate_data(cnum).is_profiler_runtime(&self.dep_graph) - } - - fn panic_strategy(&self, cnum: CrateNum) -> PanicStrategy { - self.get_crate_data(cnum).panic_strategy(&self.dep_graph) - } - - fn crate_name(&self, cnum: CrateNum) -> Symbol + fn crate_name_untracked(&self, 
cnum: CrateNum) -> Symbol { self.get_crate_data(cnum).name } - fn original_crate_name(&self, cnum: CrateNum) -> Symbol - { - self.get_crate_data(cnum).name() - } - - fn crate_hash(&self, cnum: CrateNum) -> Svh - { - self.get_crate_hash(cnum) - } - - fn crate_disambiguator(&self, cnum: CrateNum) -> Symbol + fn crate_disambiguator_untracked(&self, cnum: CrateNum) -> Symbol { self.get_crate_data(cnum).disambiguator() } - fn plugin_registrar_fn(&self, cnum: CrateNum) -> Option + fn crate_hash_untracked(&self, cnum: CrateNum) -> hir::svh::Svh { - self.get_crate_data(cnum).root.plugin_registrar_fn.map(|index| DefId { - krate: cnum, - index, - }) - } - - fn derive_registrar_fn(&self, cnum: CrateNum) -> Option - { - self.get_crate_data(cnum).root.macro_derive_registrar.map(|index| DefId { - krate: cnum, - index, - }) - } - - fn native_libraries(&self, cnum: CrateNum) -> Vec - { - self.get_crate_data(cnum).get_native_libraries(&self.dep_graph) - } - - fn exported_symbols(&self, cnum: CrateNum) -> Vec - { - self.get_crate_data(cnum).get_exported_symbols(&self.dep_graph) - } - - fn is_no_builtins(&self, cnum: CrateNum) -> bool { - self.get_crate_data(cnum).is_no_builtins(&self.dep_graph) + self.get_crate_data(cnum).hash() } /// Returns the `DefKey` for a given `DefId`. This indicates the @@ -339,22 +423,20 @@ impl CrateStore for cstore::CStore { self.get_crate_data(cnum).def_path_table.clone() } - fn struct_field_names(&self, def: DefId) -> Vec + fn struct_field_names_untracked(&self, def: DefId) -> Vec { - self.read_dep_node(def); self.get_crate_data(def.krate).get_struct_field_names(def.index) } - fn item_children(&self, def_id: DefId, sess: &Session) -> Vec + fn item_children_untracked(&self, def_id: DefId, sess: &Session) -> Vec { - self.read_dep_node(def_id); let mut result = vec![]; self.get_crate_data(def_id.krate) .each_child_of_item(def_id.index, |child| result.push(child), sess); result } - fn load_macro(&self, id: DefId, sess: &Session) -> LoadedMacro { + fn load_macro_untracked(&self, id: DefId, sess: &Session) -> LoadedMacro { let data = self.get_crate_data(id.krate); if let Some(ref proc_macros) = data.proc_macros { return LoadedMacro::ProcMacro(proc_macros[id.index.as_usize() - 1].1.clone()); @@ -368,11 +450,11 @@ impl CrateStore for cstore::CStore { let source_name = format!("<{} macros>", name); let filemap = sess.parse_sess.codemap().new_filemap(source_name, def.body); - let local_span = Span { lo: filemap.start_pos, hi: filemap.end_pos, ctxt: NO_EXPANSION }; + let local_span = Span::new(filemap.start_pos, filemap.end_pos, NO_EXPANSION); let body = filemap_to_stream(&sess.parse_sess, filemap, None); // Mark the attrs as used - let attrs = data.get_item_attrs(id.index, &self.dep_graph); + let attrs = data.get_item_attrs(id.index); for attr in attrs.iter() { attr::mark_used(attr); } @@ -383,7 +465,7 @@ impl CrateStore for cstore::CStore { .insert(local_span, (name.to_string(), data.get_span(id.index, sess))); LoadedMacro::MacroDef(ast::Item { - ident: ast::Ident::with_empty_ctxt(name), + ident: ast::Ident::from_str(&name), id: ast::DUMMY_NODE_ID, span: local_span, attrs: attrs.iter().cloned().collect(), @@ -396,50 +478,20 @@ impl CrateStore for cstore::CStore { }) } - fn item_body<'a, 'tcx>(&self, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - def_id: DefId) - -> &'tcx hir::Body { - self.read_dep_node(def_id); - - if let Some(cached) = tcx.hir.get_inlined_body_untracked(def_id) { - return cached; - } - - debug!("item_body({:?}): inlining item", def_id); - - 
self.get_crate_data(def_id.krate).item_body(tcx, def_id.index) - } - - fn crates(&self) -> Vec + fn crates_untracked(&self) -> Vec { let mut result = vec![]; self.iter_crate_data(|cnum, _| result.push(cnum)); result } - fn used_libraries(&self) -> Vec - { - self.get_used_libraries().borrow().clone() - } - - fn used_link_args(&self) -> Vec - { - self.get_used_link_args().borrow().clone() - } - fn used_crates(&self, prefer: LinkagePreference) -> Vec<(CrateNum, LibSource)> + fn extern_mod_stmt_cnum_untracked(&self, emod_id: ast::NodeId) -> Option { - self.do_get_used_crates(prefer) + self.do_extern_mod_stmt_cnum(emod_id) } - fn used_crate_source(&self, cnum: CrateNum) -> CrateSource - { - self.get_crate_data(cnum).source.clone() - } - - fn extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option - { - self.do_extern_mod_stmt_cnum(emod_id) + fn postorder_cnums_untracked(&self) -> Vec { + self.do_postorder_cnums_untracked() } fn encode_metadata<'a, 'tcx>(&self, @@ -455,67 +507,4 @@ impl CrateStore for cstore::CStore { { schema::METADATA_HEADER } - - /// Returns a map from a sufficiently visible external item (i.e. an external item that is - /// visible from at least one local module) to a sufficiently visible parent (considering - /// modules that re-export the external item to be parents). - fn visible_parent_map<'a>(&'a self, sess: &Session) -> ::std::cell::Ref<'a, DefIdMap> { - { - let visible_parent_map = self.visible_parent_map.borrow(); - if !visible_parent_map.is_empty() { - return visible_parent_map; - } - } - - use std::collections::vec_deque::VecDeque; - use std::collections::hash_map::Entry; - - let mut visible_parent_map = self.visible_parent_map.borrow_mut(); - - for cnum in (1 .. self.next_crate_num().as_usize()).map(CrateNum::new) { - let cdata = self.get_crate_data(cnum); - - match cdata.extern_crate.get() { - // Ignore crates without a corresponding local `extern crate` item. - Some(extern_crate) if !extern_crate.direct => continue, - _ => {}, - } - - let bfs_queue = &mut VecDeque::new(); - let mut add_child = |bfs_queue: &mut VecDeque<_>, child: def::Export, parent: DefId| { - let child = child.def.def_id(); - - if self.visibility(child) != ty::Visibility::Public { - return; - } - - match visible_parent_map.entry(child) { - Entry::Occupied(mut entry) => { - // If `child` is defined in crate `cnum`, ensure - // that it is mapped to a parent in `cnum`. 
- if child.krate == cnum && entry.get().krate != cnum { - entry.insert(parent); - } - } - Entry::Vacant(entry) => { - entry.insert(parent); - bfs_queue.push_back(child); - } - } - }; - - bfs_queue.push_back(DefId { - krate: cnum, - index: CRATE_DEF_INDEX - }); - while let Some(def) = bfs_queue.pop_front() { - for child in self.item_children(def, sess) { - add_child(bfs_queue, child, def); - } - } - } - - drop(visible_parent_map); - self.visible_parent_map.borrow() - } } diff --git a/src/librustc_metadata/decoder.rs b/src/librustc_metadata/decoder.rs index 73b2e972b9..65cf15e5a0 100644 --- a/src/librustc_metadata/decoder.rs +++ b/src/librustc_metadata/decoder.rs @@ -13,18 +13,20 @@ use cstore::{self, CrateMetadata, MetadataBlob, NativeLibrary}; use schema::*; -use rustc::dep_graph::{DepGraph, DepNode, DepKind}; use rustc::hir::map::{DefKey, DefPath, DefPathData, DefPathHash}; -use rustc::hir::map::definitions::GlobalMetaDataKind; use rustc::hir; -use rustc::middle::cstore::LinkagePreference; +use rustc::middle::const_val::ByteArray; +use rustc::middle::cstore::{LinkagePreference, ExternConstBody, + ExternBodyNestedBodies}; use rustc::hir::def::{self, Def, CtorKind}; use rustc::hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX, LOCAL_CRATE}; +use rustc::ich::Fingerprint; use rustc::middle::lang_items; use rustc::session::Session; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::subst::Substs; +use rustc::util::nodemap::DefIdSet; use rustc::mir::Mir; @@ -41,6 +43,7 @@ use rustc_serialize::{Decodable, Decoder, SpecializedDecoder, opaque}; use syntax::attr; use syntax::ast::{self, Ident}; use syntax::codemap; +use syntax::symbol::{InternedString, Symbol}; use syntax::ext::base::MacroKind; use syntax_pos::{self, Span, BytePos, Pos, DUMMY_SP, NO_EXPANSION}; @@ -242,7 +245,7 @@ impl<'a, 'tcx> SpecializedDecoder for DecodeContext<'a, 'tcx> { let sess = if let Some(sess) = self.sess { sess } else { - return Ok(Span { lo: lo, hi: hi, ctxt: NO_EXPANSION }); + return Ok(Span::new(lo, hi, NO_EXPANSION)); }; let (lo, hi) = if lo > hi { @@ -289,7 +292,7 @@ impl<'a, 'tcx> SpecializedDecoder for DecodeContext<'a, 'tcx> { let lo = (lo - filemap.original_start_pos) + filemap.translated_filemap.start_pos; let hi = (hi - filemap.original_start_pos) + filemap.translated_filemap.start_pos; - Ok(Span { lo: lo, hi: hi, ctxt: NO_EXPANSION }) + Ok(Span::new(lo, hi, NO_EXPANSION)) } } @@ -378,6 +381,20 @@ impl<'a, 'tcx> SpecializedDecoder<&'tcx ty::Slice } } +impl<'a, 'tcx> SpecializedDecoder> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result, Self::Error> { + Ok(ByteArray { + data: self.tcx().alloc_byte_array(&Vec::decode(self)?) 
+ }) + } +} + +impl<'a, 'tcx> SpecializedDecoder<&'tcx ty::Const<'tcx>> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result<&'tcx ty::Const<'tcx>, Self::Error> { + Ok(self.tcx().mk_const(Decodable::decode(self)?)) + } +} + impl<'a, 'tcx> MetadataBlob { pub fn is_compatible(&self) -> bool { self.raw_bytes().starts_with(METADATA_HEADER) @@ -401,7 +418,6 @@ impl<'a, 'tcx> MetadataBlob { write!(out, "=External Dependencies=\n")?; let root = self.get_root(); for (i, dep) in root.crate_deps - .get_untracked() .decode(self) .enumerate() { write!(out, "{} {}-{}\n", i + 1, dep.name, dep.hash)?; @@ -438,6 +454,7 @@ impl<'tcx> EntryKind<'tcx> { EntryKind::Impl(_) | EntryKind::DefaultImpl(_) | EntryKind::Field | + EntryKind::Generator(_) | EntryKind::Closure(_) => return None, }) } @@ -472,7 +489,7 @@ impl<'a, 'tcx> CrateMetadata { } } - pub fn item_name(&self, item_index: DefIndex) -> ast::Name { + pub fn item_name(&self, item_index: DefIndex) -> InternedString { self.def_key(item_index) .disambiguated_data .data @@ -519,12 +536,12 @@ impl<'a, 'tcx> CrateMetadata { ty::VariantDef { did: self.local_def_id(data.struct_ctor.unwrap_or(index)), - name: self.item_name(index), + name: Symbol::intern(&self.item_name(index)), fields: item.children.decode(self).map(|index| { let f = self.entry(index); ty::FieldDef { did: self.local_def_id(index), - name: self.item_name(index), + name: Symbol::intern(&self.item_name(index)), vis: f.visibility.decode(self) } }).collect(), @@ -644,12 +661,11 @@ impl<'a, 'tcx> CrateMetadata { } /// Iterates over the language items in the given crate. - pub fn get_lang_items(&self, dep_graph: &DepGraph) -> Vec<(DefIndex, usize)> { - let dep_node = self.metadata_dep_node(GlobalMetaDataKind::LangItems); + pub fn get_lang_items(&self) -> Vec<(DefId, usize)> { self.root .lang_items - .get(dep_graph, dep_node) .decode(self) + .map(|(def_index, index)| (self.local_def_id(def_index), index)) .collect() } @@ -704,7 +720,7 @@ impl<'a, 'tcx> CrateMetadata { if let Some(def) = self.get_def(child_index) { callback(def::Export { def, - ident: Ident::with_empty_ctxt(self.item_name(child_index)), + ident: Ident::from_str(&self.item_name(child_index)), span: self.entry(child_index).span.decode((self, sess)), }); } @@ -721,7 +737,7 @@ impl<'a, 'tcx> CrateMetadata { let span = child.span.decode((self, sess)); if let (Some(def), Some(name)) = (self.get_def(child_index), def_key.disambiguated_data.data.get_opt_name()) { - let ident = Ident::with_empty_ctxt(name); + let ident = Ident::from_str(&name); callback(def::Export { def: def, ident: ident, span: span }); // For non-reexport structs and variants add their constructors to children. // Reexport lists automatically contain constructors when necessary. 
@@ -758,15 +774,19 @@ impl<'a, 'tcx> CrateMetadata { } } - pub fn item_body(&self, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - id: DefIndex) - -> &'tcx hir::Body { + pub fn extern_const_body(&self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + id: DefIndex) + -> ExternConstBody<'tcx> { assert!(!self.is_proc_macro(id)); let ast = self.entry(id).ast.unwrap(); let def_id = self.local_def_id(id); - let body = ast.decode((self, tcx)).body.decode((self, tcx)); - tcx.hir.intern_inlined_body(def_id, body) + let ast = ast.decode((self, tcx)); + let body = ast.body.decode((self, tcx)); + ExternConstBody { + body: tcx.hir.intern_inlined_body(def_id, body), + fingerprint: ast.stable_bodies_hash, + } } pub fn item_body_tables(&self, @@ -777,10 +797,23 @@ impl<'a, 'tcx> CrateMetadata { tcx.alloc_tables(ast.tables.decode((self, tcx))) } - pub fn item_body_nested_bodies(&self, id: DefIndex) -> BTreeMap { - self.entry(id).ast.into_iter().flat_map(|ast| { - ast.decode(self).nested_bodies.decode(self).map(|body| (body.id(), body)) - }).collect() + pub fn item_body_nested_bodies(&self, id: DefIndex) -> ExternBodyNestedBodies { + if let Some(ref ast) = self.entry(id).ast { + let ast = ast.decode(self); + let nested_bodies: BTreeMap<_, _> = ast.nested_bodies + .decode(self) + .map(|body| (body.id(), body)) + .collect(); + ExternBodyNestedBodies { + nested_bodies: Rc::new(nested_bodies), + fingerprint: ast.stable_bodies_hash, + } + } else { + ExternBodyNestedBodies { + nested_bodies: Rc::new(BTreeMap::new()), + fingerprint: Fingerprint::zero(), + } + } } pub fn const_is_rvalue_promotable_to_static(&self, id: DefIndex) -> bool { @@ -835,7 +868,7 @@ impl<'a, 'tcx> CrateMetadata { }; ty::AssociatedItem { - name, + name: Symbol::intern(&name), kind, vis: item.visibility.decode(self), defaultness: container.defaultness(), @@ -867,18 +900,13 @@ impl<'a, 'tcx> CrateMetadata { } } - pub fn get_item_attrs(&self, - node_id: DefIndex, - dep_graph: &DepGraph) -> Rc<[ast::Attribute]> { + pub fn get_item_attrs(&self, node_id: DefIndex) -> Rc<[ast::Attribute]> { let (node_as, node_index) = (node_id.address_space().index(), node_id.as_array_index()); if self.is_proc_macro(node_id) { return Rc::new([]); } - let dep_node = self.def_path_hash(node_id).to_dep_node(DepKind::MetaData); - dep_graph.read(dep_node); - if let Some(&Some(ref val)) = self.attribute_cache.borrow()[node_as].get(node_index) { return val.clone(); @@ -905,7 +933,7 @@ impl<'a, 'tcx> CrateMetadata { self.entry(id) .children .decode(self) - .map(|index| self.item_name(index)) + .map(|index| Symbol::intern(&self.item_name(index))) .collect() } @@ -945,7 +973,6 @@ impl<'a, 'tcx> CrateMetadata { pub fn get_implementations_for_trait(&self, filter: Option, - dep_graph: &DepGraph, result: &mut Vec) { // Do a reverse lookup beforehand to avoid touching the crate_num // hash map in the loop below. 
@@ -956,16 +983,13 @@ impl<'a, 'tcx> CrateMetadata { None => None, }; - let dep_node = self.metadata_dep_node(GlobalMetaDataKind::Impls); - if let Some(filter) = filter { if let Some(impls) = self.trait_impls - .get(dep_graph, dep_node) .get(&filter) { result.extend(impls.decode(self).map(|idx| self.local_def_id(idx))); } } else { - for impls in self.trait_impls.get(dep_graph, dep_node).values() { + for impls in self.trait_impls.values() { result.extend(impls.decode(self).map(|idx| self.local_def_id(idx))); } } @@ -981,25 +1005,13 @@ impl<'a, 'tcx> CrateMetadata { } - pub fn get_native_libraries(&self, - dep_graph: &DepGraph) - -> Vec { - let dep_node = self.metadata_dep_node(GlobalMetaDataKind::NativeLibraries); - self.root - .native_libraries - .get(dep_graph, dep_node) - .decode(self) - .collect() + pub fn get_native_libraries(&self) -> Vec { + self.root.native_libraries.decode(self).collect() } - pub fn get_dylib_dependency_formats(&self, - dep_graph: &DepGraph) - -> Vec<(CrateNum, LinkagePreference)> { - let dep_node = - self.metadata_dep_node(GlobalMetaDataKind::DylibDependencyFormats); + pub fn get_dylib_dependency_formats(&self) -> Vec<(CrateNum, LinkagePreference)> { self.root .dylib_dependency_formats - .get(dep_graph, dep_node) .decode(self) .enumerate() .flat_map(|(i, link)| { @@ -1009,11 +1021,9 @@ impl<'a, 'tcx> CrateMetadata { .collect() } - pub fn get_missing_lang_items(&self, dep_graph: &DepGraph) -> Vec { - let dep_node = self.metadata_dep_node(GlobalMetaDataKind::LangItemsMissing); + pub fn get_missing_lang_items(&self) -> Vec { self.root .lang_items_missing - .get(dep_graph, dep_node) .decode(self) .collect() } @@ -1028,16 +1038,14 @@ impl<'a, 'tcx> CrateMetadata { arg_names.decode(self).collect() } - pub fn get_exported_symbols(&self, dep_graph: &DepGraph) -> Vec { - let dep_node = self.metadata_dep_node(GlobalMetaDataKind::ExportedSymbols); + pub fn get_exported_symbols(&self) -> DefIdSet { self.exported_symbols - .get(dep_graph, dep_node) .iter() .map(|&index| self.local_def_id(index)) .collect() } - pub fn get_macro(&self, id: DefIndex) -> (ast::Name, MacroDef) { + pub fn get_macro(&self, id: DefIndex) -> (InternedString, MacroDef) { let entry = self.entry(id); match entry.kind { EntryKind::MacroDef(macro_def) => (self.item_name(id), macro_def.decode(self)), @@ -1063,11 +1071,8 @@ impl<'a, 'tcx> CrateMetadata { } } - pub fn is_dllimport_foreign_item(&self, id: DefIndex, dep_graph: &DepGraph) -> bool { - let dep_node = self.metadata_dep_node(GlobalMetaDataKind::NativeLibraries); - self.dllimport_foreign_items - .get(dep_graph, dep_node) - .contains(&id) + pub fn is_dllimport_foreign_item(&self, id: DefIndex) -> bool { + self.dllimport_foreign_items.contains(&id) } pub fn is_default_impl(&self, impl_id: DefIndex) -> bool { @@ -1100,6 +1105,23 @@ impl<'a, 'tcx> CrateMetadata { sig.decode((self, tcx)) } + fn get_generator_data(&self, + id: DefIndex, + tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> Option> { + match self.entry(id).kind { + EntryKind::Generator(data) => Some(data.decode((self, tcx))), + _ => None, + } + } + + pub fn generator_sig(&self, + id: DefIndex, + tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> Option> { + self.get_generator_data(id, tcx).map(|d| d.sig) + } + #[inline] pub fn def_key(&self, index: DefIndex) -> DefKey { self.def_path_table.def_key(index) @@ -1202,10 +1224,4 @@ impl<'a, 'tcx> CrateMetadata { *self.codemap_import_info.borrow_mut() = imported_filemaps; self.codemap_import_info.borrow() } - - pub fn metadata_dep_node(&self, kind: GlobalMetaDataKind) -> DepNode { 
- let def_index = kind.def_index(&self.def_path_table); - let def_path_hash = self.def_path_table.def_path_hash(def_index); - def_path_hash.to_dep_node(DepKind::MetaData) - } } diff --git a/src/librustc_metadata/diagnostics.rs b/src/librustc_metadata/diagnostics.rs index 1fa1a896dd..0a1662dd42 100644 --- a/src/librustc_metadata/diagnostics.rs +++ b/src/librustc_metadata/diagnostics.rs @@ -14,7 +14,7 @@ register_long_diagnostics! { E0454: r##" A link name was given with an empty name. Erroneous code example: -```compile_fail,E0454 +```ignore (cannot-test-this-because-rustdoc-stops-compile-fail-before-trans) #[link(name = "")] extern {} // error: #[link(name = "")] given with empty name ``` @@ -51,7 +51,7 @@ https://doc.rust-lang.org/book/first-edition/conditional-compilation.html E0458: r##" An unknown "kind" was specified for a link attribute. Erroneous code example: -```compile_fail,E0458 +```ignore (cannot-test-this-because-rustdoc-stops-compile-fail-before-trans) #[link(kind = "wonderful_unicorn")] extern {} // error: unknown kind: `wonderful_unicorn` ``` @@ -67,7 +67,7 @@ Please specify a valid "kind" value, from one of the following: E0459: r##" A link was used without a name parameter. Erroneous code example: -```compile_fail,E0459 +```ignore (cannot-test-this-because-rustdoc-stops-compile-fail-before-trans) #[link(kind = "dylib")] extern {} // error: #[link(...)] specified without `name = "foo"` ``` diff --git a/src/librustc_metadata/encoder.rs b/src/librustc_metadata/encoder.rs index 1dc5d69534..6b49be3e12 100644 --- a/src/librustc_metadata/encoder.rs +++ b/src/librustc_metadata/encoder.rs @@ -248,7 +248,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { def_index: DefIndex, op: fn(&mut IsolatedEncoder<'x, 'a, 'tcx>, DATA) -> R, data: DATA) - -> Tracked { + -> R { let mut entry_builder = IsolatedEncoder::new(self); let ret = op(&mut entry_builder, data); let (fingerprint, this) = entry_builder.finish(); @@ -260,7 +260,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { }) } - Tracked::new(ret) + ret } fn encode_info_for_items(&mut self) -> Index { @@ -408,9 +408,9 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { triple: tcx.sess.opts.target_triple.clone(), hash: link_meta.crate_hash, disambiguator: tcx.sess.local_crate_disambiguator(), - panic_strategy: Tracked::new(tcx.sess.panic_strategy()), - has_global_allocator: Tracked::new(has_global_allocator), - has_default_lib_allocator: Tracked::new(has_default_lib_allocator), + panic_strategy: tcx.sess.panic_strategy(), + has_global_allocator: has_global_allocator, + has_default_lib_allocator: has_default_lib_allocator, plugin_registrar_fn: tcx.sess .plugin_registrar_fn .get() @@ -552,8 +552,8 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { debug!("IsolatedEncoder::encode_info_for_mod({:?})", def_id); let data = ModData { - reexports: match tcx.export_map.get(&id) { - Some(exports) if *vis == hir::Public => { + reexports: match tcx.module_exports(def_id) { + Some(ref exports) if *vis == hir::Public => { self.lazy_seq_from_slice(exports.as_slice()) } _ => LazySeq::empty(), @@ -792,7 +792,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { let kind = match impl_item.kind { ty::AssociatedKind::Const => { EntryKind::AssociatedConst(container, - self.tcx.at(ast_item.span).mir_const_qualif(def_id)) + self.tcx.at(ast_item.span).mir_const_qualif(def_id).0) } ty::AssociatedKind::Method => { let fn_data = if let hir::ImplItemKind::Method(ref sig, body) = ast_item.node { @@ -911,7 +911,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 
'b, 'tcx> { hir::ItemStatic(_, hir::MutMutable, _) => EntryKind::MutStatic, hir::ItemStatic(_, hir::MutImmutable, _) => EntryKind::ImmStatic, hir::ItemConst(..) => { - EntryKind::Const(tcx.at(item.span).mir_const_qualif(def_id)) + EntryKind::Const(tcx.at(item.span).mir_const_qualif(def_id).0) } hir::ItemFn(_, _, constness, .., body) => { let data = FnData { @@ -990,7 +990,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { // "unsized info", else just store None let coerce_unsized_info = trait_ref.and_then(|t| { - if Some(t.def_id) == tcx.lang_items.coerce_unsized_trait() { + if Some(t.def_id) == tcx.lang_items().coerce_unsized_trait() { Some(tcx.at(item.span).coerce_unsized_info(def_id)) } else { None @@ -1213,13 +1213,23 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { debug!("IsolatedEncoder::encode_info_for_closure({:?})", def_id); let tcx = self.tcx; - let data = ClosureData { - kind: tcx.closure_kind(def_id), - sig: self.lazy(&tcx.fn_sig(def_id)), + let kind = if let Some(sig) = self.tcx.generator_sig(def_id) { + let layout = self.tcx.generator_layout(def_id); + let data = GeneratorData { + sig, + layout: layout.clone(), + }; + EntryKind::Generator(self.lazy(&data)) + } else { + let data = ClosureData { + kind: tcx.closure_kind(def_id), + sig: self.lazy(&tcx.fn_sig(def_id)), + }; + EntryKind::Closure(self.lazy(&data)) }; Entry { - kind: EntryKind::Closure(self.lazy(&data)), + kind, visibility: self.lazy(&ty::Visibility::Public), span: self.lazy(&tcx.def_span(def_id)), attributes: self.encode_attributes(&tcx.get_attrs(def_id)), @@ -1245,7 +1255,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { let body = tcx.hir.body_owned_by(id); Entry { - kind: EntryKind::Const(tcx.mir_const_qualif(def_id)), + kind: EntryKind::Const(tcx.mir_const_qualif(def_id).0), visibility: self.lazy(&ty::Visibility::Public), span: self.lazy(&tcx.def_span(def_id)), attributes: LazySeq::empty(), @@ -1272,21 +1282,20 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { } fn encode_native_libraries(&mut self, _: ()) -> LazySeq { - let used_libraries = self.tcx.sess.cstore.used_libraries(); - self.lazy_seq(used_libraries) + let used_libraries = self.tcx.native_libraries(LOCAL_CRATE); + self.lazy_seq(used_libraries.iter().cloned()) } fn encode_crate_deps(&mut self, _: ()) -> LazySeq { - let cstore = &*self.tcx.sess.cstore; - let crates = cstore.crates(); + let crates = self.tcx.crates(); let mut deps = crates .iter() .map(|&cnum| { let dep = CrateDep { - name: cstore.original_crate_name(cnum), - hash: cstore.crate_hash(cnum), - kind: cstore.dep_kind(cnum), + name: self.tcx.original_crate_name(cnum), + hash: self.tcx.crate_hash(cnum), + kind: self.tcx.dep_kind(cnum), }; (cnum, dep) }) @@ -1312,7 +1321,8 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { fn encode_lang_items(&mut self, _: ()) -> LazySeq<(DefIndex, usize)> { let tcx = self.tcx; - let lang_items = tcx.lang_items.items().iter(); + let lang_items = tcx.lang_items(); + let lang_items = lang_items.items().iter(); self.lazy_seq(lang_items.enumerate().filter_map(|(i, &opt_def_id)| { if let Some(def_id) = opt_def_id { if def_id.is_local() { @@ -1325,7 +1335,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { fn encode_lang_items_missing(&mut self, _: ()) -> LazySeq { let tcx = self.tcx; - self.lazy_seq_ref(&tcx.lang_items.missing) + self.lazy_seq_ref(&tcx.lang_items().missing) } /// Encodes an index, mapping each trait to its (local) implementations. 
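The hunk above teaches the metadata encoder to distinguish generators from ordinary closures by checking `tcx.generator_sig(def_id)`. Below is a minimal, self-contained sketch of that branch with invented stand-in types (not rustc's real `EntryKind` or query API):

```rust
// If a generator signature exists for the definition, encode a Generator
// entry; otherwise encode a plain Closure entry. Types here are stand-ins.

#[derive(Debug)]
struct GenSig {
    yield_ty: &'static str,
    return_ty: &'static str,
}

#[derive(Debug)]
enum EntryKind {
    Closure { arg_count: usize },
    Generator { sig: GenSig },
}

fn entry_kind_for(generator_sig: Option<GenSig>, arg_count: usize) -> EntryKind {
    match generator_sig {
        Some(sig) => EntryKind::Generator { sig },
        None => EntryKind::Closure { arg_count },
    }
}

fn main() {
    let generator_entry =
        entry_kind_for(Some(GenSig { yield_ty: "u32", return_ty: "()" }), 0);
    let closure_entry = entry_kind_for(None, 2);
    println!("{:?}\n{:?}", generator_entry, closure_entry);
}
```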
@@ -1510,9 +1520,16 @@ impl<'a, 'b, 'tcx> IndexBuilder<'a, 'b, 'tcx> { } fn encode_info_for_ty(&mut self, ty: &hir::Ty) { - if let hir::TyImplTrait(_) = ty.node { - let def_id = self.tcx.hir.local_def_id(ty.id); - self.record(def_id, IsolatedEncoder::encode_info_for_anon_ty, def_id); + match ty.node { + hir::TyImplTrait(_) => { + let def_id = self.tcx.hir.local_def_id(ty.id); + self.record(def_id, IsolatedEncoder::encode_info_for_anon_ty, def_id); + } + hir::TyArray(_, len) => { + let def_id = self.tcx.hir.body_owner_def_id(len); + self.record(def_id, IsolatedEncoder::encode_info_for_embedded_const, def_id); + } + _ => {} } } diff --git a/src/librustc_metadata/isolated_encoder.rs b/src/librustc_metadata/isolated_encoder.rs index b9ad9086c0..7dc50fe29d 100644 --- a/src/librustc_metadata/isolated_encoder.rs +++ b/src/librustc_metadata/isolated_encoder.rs @@ -23,7 +23,7 @@ use rustc_serialize::Encodable; pub struct IsolatedEncoder<'a, 'b: 'a, 'tcx: 'b> { pub tcx: TyCtxt<'b, 'tcx, 'tcx>, ecx: &'a mut EncodeContext<'b, 'tcx>, - hcx: Option<(StableHashingContext<'b, 'tcx, 'tcx>, StableHasher)>, + hcx: Option<(StableHashingContext<'tcx>, StableHasher)>, } impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { @@ -40,9 +40,9 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { // Except when -Zquery-dep-graph is specified because we don't // want to mess up our tests. let hcx = if tcx.sess.opts.debugging_opts.query_dep_graph { - StableHashingContext::new(tcx) + tcx.create_stable_hashing_context() } else { - StableHashingContext::new(tcx).force_span_hashing() + tcx.create_stable_hashing_context().force_span_hashing() }; Some((hcx, StableHasher::new())) @@ -61,7 +61,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { } pub fn lazy(&mut self, value: &T) -> Lazy - where T: Encodable + HashStable> + where T: Encodable + HashStable> { if let Some((ref mut hcx, ref mut hasher)) = self.hcx { value.hash_stable(hcx, hasher); @@ -72,7 +72,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { pub fn lazy_seq(&mut self, iter: I) -> LazySeq where I: IntoIterator, - T: Encodable + HashStable> + T: Encodable + HashStable> { if let Some((ref mut hcx, ref mut hasher)) = self.hcx { let iter = iter.into_iter(); @@ -111,7 +111,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { pub fn lazy_seq_ref<'x, I, T>(&mut self, iter: I) -> LazySeq where I: IntoIterator, - T: 'x + Encodable + HashStable> + T: 'x + Encodable + HashStable> { if let Some((ref mut hcx, ref mut hasher)) = self.hcx { let iter = iter.into_iter(); @@ -149,7 +149,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { } pub fn lazy_seq_from_slice(&mut self, slice: &[T]) -> LazySeq - where T: Encodable + HashStable> + where T: Encodable + HashStable> { if let Some((ref mut hcx, ref mut hasher)) = self.hcx { slice.hash_stable(hcx, hasher); @@ -159,7 +159,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { } pub fn lazy_seq_ref_from_slice(&mut self, slice: &[&T]) -> LazySeq - where T: Encodable + HashStable> + where T: Encodable + HashStable> { if let Some((ref mut hcx, ref mut hasher)) = self.hcx { slice.hash_stable(hcx, hasher); diff --git a/src/librustc_metadata/lib.rs b/src/librustc_metadata/lib.rs index f4e6f57c43..54dbb68667 100644 --- a/src/librustc_metadata/lib.rs +++ b/src/librustc_metadata/lib.rs @@ -50,6 +50,8 @@ mod decoder; mod cstore_impl; mod isolated_encoder; mod schema; +mod native_libs; +mod link_args; pub mod creader; pub mod cstore; diff --git 
a/src/librustc_metadata/link_args.rs b/src/librustc_metadata/link_args.rs new file mode 100644 index 0000000000..6fafde0d09 --- /dev/null +++ b/src/librustc_metadata/link_args.rs @@ -0,0 +1,65 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::hir::itemlikevisit::ItemLikeVisitor; +use rustc::hir; +use rustc::ty::TyCtxt; +use syntax::abi::Abi; + +pub fn collect<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Vec { + let mut collector = Collector { + args: Vec::new(), + }; + tcx.hir.krate().visit_all_item_likes(&mut collector); + + for attr in tcx.hir.krate().attrs.iter() { + if attr.path == "link_args" { + if let Some(linkarg) = attr.value_str() { + collector.add_link_args(&linkarg.as_str()); + } + } + } + + return collector.args +} + +struct Collector { + args: Vec, +} + +impl<'tcx> ItemLikeVisitor<'tcx> for Collector { + fn visit_item(&mut self, it: &'tcx hir::Item) { + let fm = match it.node { + hir::ItemForeignMod(ref fm) => fm, + _ => return, + }; + if fm.abi == Abi::Rust || + fm.abi == Abi::RustIntrinsic || + fm.abi == Abi::PlatformIntrinsic { + return + } + + // First, add all of the custom #[link_args] attributes + for m in it.attrs.iter().filter(|a| a.check_name("link_args")) { + if let Some(linkarg) = m.value_str() { + self.add_link_args(&linkarg.as_str()); + } + } + } + + fn visit_trait_item(&mut self, _it: &'tcx hir::TraitItem) {} + fn visit_impl_item(&mut self, _it: &'tcx hir::ImplItem) {} +} + +impl Collector { + fn add_link_args(&mut self, args: &str) { + self.args.extend(args.split(' ').filter(|s| !s.is_empty()).map(|s| s.to_string())) + } +} diff --git a/src/librustc_metadata/native_libs.rs b/src/librustc_metadata/native_libs.rs new file mode 100644 index 0000000000..cc332acb5b --- /dev/null +++ b/src/librustc_metadata/native_libs.rs @@ -0,0 +1,217 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
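The new `link_args.rs` above collects `#[link_args]` strings and splits them on spaces, dropping empty fragments. A small stand-alone sketch of that splitting behaviour, with an illustrative `Collector` rather than the real HIR visitor:

```rust
// Each #[link_args = "..."] string is split on spaces; repeated spaces
// produce empty segments, which are filtered out.

struct Collector {
    args: Vec<String>,
}

impl Collector {
    fn add_link_args(&mut self, args: &str) {
        self.args
            .extend(args.split(' ').filter(|s| !s.is_empty()).map(|s| s.to_string()));
    }
}

fn main() {
    let mut collector = Collector { args: Vec::new() };
    collector.add_link_args("-Wl,--as-needed   -lfoo");
    collector.add_link_args("");
    assert_eq!(collector.args, vec!["-Wl,--as-needed", "-lfoo"]);
    println!("collected link args: {:?}", collector.args);
}
```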
+ +use rustc::hir::itemlikevisit::ItemLikeVisitor; +use rustc::hir; +use rustc::middle::cstore::{self, NativeLibrary}; +use rustc::session::Session; +use rustc::ty::TyCtxt; +use rustc::util::nodemap::FxHashSet; +use syntax::abi::Abi; +use syntax::attr; +use syntax::codemap::Span; +use syntax::feature_gate::{self, GateIssue}; +use syntax::symbol::Symbol; + +pub fn collect<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Vec { + let mut collector = Collector { + tcx, + libs: Vec::new(), + }; + tcx.hir.krate().visit_all_item_likes(&mut collector); + collector.process_command_line(); + return collector.libs +} + +pub fn relevant_lib(sess: &Session, lib: &NativeLibrary) -> bool { + match lib.cfg { + Some(ref cfg) => attr::cfg_matches(cfg, &sess.parse_sess, None), + None => true, + } +} + +struct Collector<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + libs: Vec, +} + +impl<'a, 'tcx> ItemLikeVisitor<'tcx> for Collector<'a, 'tcx> { + fn visit_item(&mut self, it: &'tcx hir::Item) { + let fm = match it.node { + hir::ItemForeignMod(ref fm) => fm, + _ => return, + }; + + if fm.abi == Abi::Rust || + fm.abi == Abi::RustIntrinsic || + fm.abi == Abi::PlatformIntrinsic { + return + } + + // Process all of the #[link(..)]-style arguments + for m in it.attrs.iter().filter(|a| a.check_name("link")) { + let items = match m.meta_item_list() { + Some(item) => item, + None => continue, + }; + let kind = items.iter().find(|k| { + k.check_name("kind") + }).and_then(|a| a.value_str()).map(Symbol::as_str); + let kind = match kind.as_ref().map(|s| &s[..]) { + Some("static") => cstore::NativeStatic, + Some("static-nobundle") => cstore::NativeStaticNobundle, + Some("dylib") => cstore::NativeUnknown, + Some("framework") => cstore::NativeFramework, + Some(k) => { + struct_span_err!(self.tcx.sess, m.span, E0458, + "unknown kind: `{}`", k) + .span_label(m.span, "unknown kind").emit(); + cstore::NativeUnknown + } + None => cstore::NativeUnknown + }; + let n = items.iter().find(|n| { + n.check_name("name") + }).and_then(|a| a.value_str()); + let n = match n { + Some(n) => n, + None => { + struct_span_err!(self.tcx.sess, m.span, E0459, + "#[link(...)] specified without `name = \"foo\"`") + .span_label(m.span, "missing `name` argument").emit(); + Symbol::intern("foo") + } + }; + let cfg = items.iter().find(|k| { + k.check_name("cfg") + }).and_then(|a| a.meta_item_list()); + let cfg = cfg.map(|list| { + list[0].meta_item().unwrap().clone() + }); + let foreign_items = fm.items.iter() + .map(|it| self.tcx.hir.local_def_id(it.id)) + .collect(); + let lib = NativeLibrary { + name: n, + kind, + cfg, + foreign_items, + }; + self.register_native_lib(Some(m.span), lib); + } + } + + fn visit_trait_item(&mut self, _it: &'tcx hir::TraitItem) {} + fn visit_impl_item(&mut self, _it: &'tcx hir::ImplItem) {} +} + +impl<'a, 'tcx> Collector<'a, 'tcx> { + fn register_native_lib(&mut self, span: Option, lib: NativeLibrary) { + if lib.name.as_str().is_empty() { + match span { + Some(span) => { + struct_span_err!(self.tcx.sess, span, E0454, + "#[link(name = \"\")] given with empty name") + .span_label(span, "empty name given") + .emit(); + } + None => { + self.tcx.sess.err("empty library name given via `-l`"); + } + } + return + } + let is_osx = self.tcx.sess.target.target.options.is_like_osx; + if lib.kind == cstore::NativeFramework && !is_osx { + let msg = "native frameworks are only available on macOS targets"; + match span { + Some(span) => span_err!(self.tcx.sess, span, E0455, "{}", msg), + None => self.tcx.sess.err(msg), + } + } + if 
lib.cfg.is_some() && !self.tcx.sess.features.borrow().link_cfg { + feature_gate::emit_feature_err(&self.tcx.sess.parse_sess, + "link_cfg", + span.unwrap(), + GateIssue::Language, + "is feature gated"); + } + if lib.kind == cstore::NativeStaticNobundle && + !self.tcx.sess.features.borrow().static_nobundle { + feature_gate::emit_feature_err(&self.tcx.sess.parse_sess, + "static_nobundle", + span.unwrap(), + GateIssue::Language, + "kind=\"static-nobundle\" is feature gated"); + } + self.libs.push(lib); + } + + // Process libs passed on the command line + fn process_command_line(&mut self) { + // First, check for errors + let mut renames = FxHashSet(); + for &(ref name, ref new_name, _) in &self.tcx.sess.opts.libs { + if let &Some(ref new_name) = new_name { + if new_name.is_empty() { + self.tcx.sess.err( + &format!("an empty renaming target was specified for library `{}`",name)); + } else if !self.libs.iter().any(|lib| lib.name == name as &str) { + self.tcx.sess.err(&format!("renaming of the library `{}` was specified, \ + however this crate contains no #[link(...)] \ + attributes referencing this library.", name)); + } else if renames.contains(name) { + self.tcx.sess.err(&format!("multiple renamings were \ + specified for library `{}` .", + name)); + } else { + renames.insert(name); + } + } + } + + // Update kind and, optionally, the name of all native libaries + // (there may be more than one) with the specified name. + for &(ref name, ref new_name, kind) in &self.tcx.sess.opts.libs { + let mut found = false; + for lib in self.libs.iter_mut() { + if lib.name == name as &str { + let mut changed = false; + if let Some(k) = kind { + lib.kind = k; + changed = true; + } + if let &Some(ref new_name) = new_name { + lib.name = Symbol::intern(new_name); + changed = true; + } + if !changed { + let msg = format!("redundant linker flag specified for \ + library `{}`", name); + self.tcx.sess.warn(&msg); + } + + found = true; + } + } + if !found { + // Add if not found + let new_name = new_name.as_ref().map(|s| &**s); // &Option -> Option<&str> + let lib = NativeLibrary { + name: Symbol::intern(new_name.unwrap_or(name)), + kind: if let Some(k) = kind { k } else { cstore::NativeUnknown }, + cfg: None, + foreign_items: Vec::new(), + }; + self.register_native_lib(None, lib); + } + } + } +} diff --git a/src/librustc_metadata/schema.rs b/src/librustc_metadata/schema.rs index b71f4d0d98..dad0d26d27 100644 --- a/src/librustc_metadata/schema.rs +++ b/src/librustc_metadata/schema.rs @@ -32,8 +32,6 @@ use std::mem; use rustc_data_structures::stable_hasher::{StableHasher, HashStable, StableHasherResult}; -use rustc::dep_graph::{DepGraph, DepNode}; - pub fn rustc_version() -> String { format!("rustc {}", option_env!("CFG_VERSION").unwrap_or("unknown version")) @@ -188,75 +186,27 @@ pub enum LazyState { Previous(usize), } -/// A `Tracked` wraps a value so that one can only access it when specifying -/// the `DepNode` for that value. This makes it harder to forget registering -/// reads. 
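`process_command_line` above reconciles `-l` options with the `#[link(...)]` attributes already collected: an option can override a library's kind, rename it, or register a library that was never declared. A simplified model of that merge step (error and warning paths omitted, types invented):

```rust
// Apply command-line library options to the list gathered from #[link].
#[derive(Debug, Clone, Copy)]
enum NativeKind {
    Dylib,
    Static,
}

#[derive(Debug)]
struct NativeLib {
    name: String,
    kind: NativeKind,
}

type LibOption = (String, Option<String>, Option<NativeKind>);

fn apply_command_line(libs: &mut Vec<NativeLib>, opts: &[LibOption]) {
    for (name, new_name, kind) in opts {
        let mut found = false;
        for lib in libs.iter_mut() {
            if lib.name == *name {
                if let Some(k) = *kind {
                    lib.kind = k; // override the declared kind
                }
                if let Some(ref new_name) = *new_name {
                    lib.name = new_name.clone(); // rename the library
                }
                found = true;
            }
        }
        if !found {
            // No #[link] declaration matched: add the library directly.
            libs.push(NativeLib {
                name: new_name.clone().unwrap_or_else(|| name.clone()),
                kind: (*kind).unwrap_or(NativeKind::Dylib),
            });
        }
    }
}

fn main() {
    let mut libs = vec![NativeLib { name: "foo".into(), kind: NativeKind::Dylib }];
    let opts: Vec<LibOption> = vec![
        ("foo".into(), Some("foo_renamed".into()), Some(NativeKind::Static)),
        ("bar".into(), None, None),
    ];
    apply_command_line(&mut libs, &opts);
    println!("{:?}", libs);
}
```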
-#[derive(RustcEncodable, RustcDecodable)] -pub struct Tracked { - state: T, -} - -impl Tracked { - pub fn new(state: T) -> Tracked { - Tracked { - state, - } - } - - pub fn get(&self, dep_graph: &DepGraph, dep_node: DepNode) -> &T { - dep_graph.read(dep_node); - &self.state - } - - pub fn get_untracked(&self) -> &T { - &self.state - } - - pub fn map(&self, f: F) -> Tracked - where F: FnOnce(&T) -> R - { - Tracked { - state: f(&self.state), - } - } -} - -impl<'a, 'gcx, 'tcx, T> HashStable> for Tracked - where T: HashStable> -{ - fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, - hasher: &mut StableHasher) { - let Tracked { - ref state - } = *self; - - state.hash_stable(hcx, hasher); - } -} - - #[derive(RustcEncodable, RustcDecodable)] pub struct CrateRoot { pub name: Symbol, pub triple: String, pub hash: hir::svh::Svh, pub disambiguator: Symbol, - pub panic_strategy: Tracked, - pub has_global_allocator: Tracked, - pub has_default_lib_allocator: Tracked, + pub panic_strategy: PanicStrategy, + pub has_global_allocator: bool, + pub has_default_lib_allocator: bool, pub plugin_registrar_fn: Option, pub macro_derive_registrar: Option, - pub crate_deps: Tracked>, - pub dylib_dependency_formats: Tracked>>, - pub lang_items: Tracked>, - pub lang_items_missing: Tracked>, - pub native_libraries: Tracked>, + pub crate_deps: LazySeq, + pub dylib_dependency_formats: LazySeq>, + pub lang_items: LazySeq<(DefIndex, usize)>, + pub lang_items_missing: LazySeq, + pub native_libraries: LazySeq, pub codemap: LazySeq, pub def_path_table: Lazy, - pub impls: Tracked>, - pub exported_symbols: Tracked>, + pub impls: LazySeq, + pub exported_symbols: LazySeq, pub index: LazySeq, } @@ -279,9 +229,9 @@ pub struct TraitImpls { pub impls: LazySeq, } -impl<'a, 'gcx, 'tcx> HashStable> for TraitImpls { +impl<'gcx> HashStable> for TraitImpls { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { let TraitImpls { trait_id: (krate, def_index), @@ -353,6 +303,7 @@ pub enum EntryKind<'tcx> { Mod(Lazy), MacroDef(Lazy), Closure(Lazy>), + Generator(Lazy>), Trait(Lazy>), Impl(Lazy>), DefaultImpl(Lazy>), @@ -361,9 +312,9 @@ pub enum EntryKind<'tcx> { AssociatedConst(AssociatedContainer, u8), } -impl<'a, 'gcx, 'tcx> HashStable> for EntryKind<'tcx> { +impl<'gcx> HashStable> for EntryKind<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -401,6 +352,9 @@ impl<'a, 'gcx, 'tcx> HashStable> for EntryK EntryKind::MacroDef(ref macro_def) => { macro_def.hash_stable(hcx, hasher); } + EntryKind::Generator(data) => { + data.hash_stable(hcx, hasher); + } EntryKind::Closure(closure_data) => { closure_data.hash_stable(hcx, hasher); } @@ -564,3 +518,10 @@ pub struct ClosureData<'tcx> { pub sig: Lazy>, } impl_stable_hash_for!(struct ClosureData<'tcx> { kind, sig }); + +#[derive(RustcEncodable, RustcDecodable)] +pub struct GeneratorData<'tcx> { + pub sig: ty::PolyGenSig<'tcx>, + pub layout: mir::GeneratorLayout<'tcx>, +} +impl_stable_hash_for!(struct GeneratorData<'tcx> { sig, layout }); diff --git a/src/librustc_mir/Cargo.toml b/src/librustc_mir/Cargo.toml index 49e626c540..b7a576babe 100644 --- a/src/librustc_mir/Cargo.toml +++ b/src/librustc_mir/Cargo.toml @@ -9,6 +9,7 @@ path = "lib.rs" crate-type = ["dylib"] [dependencies] +bitflags = "1.0" graphviz = { path = "../libgraphviz" 
} log = "0.3" rustc = { path = "../librustc" } @@ -16,6 +17,6 @@ rustc_const_eval = { path = "../librustc_const_eval" } rustc_const_math = { path = "../librustc_const_math" } rustc_data_structures = { path = "../librustc_data_structures" } rustc_errors = { path = "../librustc_errors" } -rustc_bitflags = { path = "../librustc_bitflags" } +serialize = { path = "../libserialize" } syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } diff --git a/src/librustc_mir/borrow_check.rs b/src/librustc_mir/borrow_check.rs index eda2bacb11..db6a0ee4ba 100644 --- a/src/librustc_mir/borrow_check.rs +++ b/src/librustc_mir/borrow_check.rs @@ -30,6 +30,7 @@ use dataflow::{MoveDataParamEnv}; use dataflow::{BitDenotation, BlockSets, DataflowResults, DataflowResultsConsumer}; use dataflow::{MaybeInitializedLvals, MaybeUninitializedLvals}; use dataflow::{Borrows, BorrowData, BorrowIndex}; +use dataflow::move_paths::{MoveError, IllegalMoveOriginKind}; use dataflow::move_paths::{HasMoveData, MoveData, MovePathIndex, LookupResult}; use util::borrowck_errors::{BorrowckErrors, Origin}; @@ -59,7 +60,33 @@ fn mir_borrowck<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) { let param_env = tcx.param_env(def_id); tcx.infer_ctxt().enter(|_infcx| { - let move_data = MoveData::gather_moves(mir, tcx, param_env); + let move_data = match MoveData::gather_moves(mir, tcx, param_env) { + Ok(move_data) => move_data, + Err((move_data, move_errors)) => { + for move_error in move_errors { + let (span, kind): (Span, IllegalMoveOriginKind) = match move_error { + MoveError::UnionMove { .. } => + unimplemented!("dont know how to report union move errors yet."), + MoveError::IllegalMove { cannot_move_out_of: o } => (o.span, o.kind), + }; + let origin = Origin::Mir; + let mut err = match kind { + IllegalMoveOriginKind::Static => + tcx.cannot_move_out_of(span, "static item", origin), + IllegalMoveOriginKind::BorrowedContent => + tcx.cannot_move_out_of(span, "borrowed_content", origin), + IllegalMoveOriginKind::InteriorOfTypeWithDestructor { container_ty: ty } => + tcx.cannot_move_out_of_interior_of_drop(span, ty, origin), + IllegalMoveOriginKind::InteriorOfSlice { elem_ty: ty, is_index } => + tcx.cannot_move_out_of_interior_noncopy(span, ty, is_index, origin), + IllegalMoveOriginKind::InteriorOfArray { elem_ty: ty, is_index } => + tcx.cannot_move_out_of_interior_noncopy(span, ty, is_index, origin), + }; + err.emit(); + } + move_data + } + }; let mdpe = MoveDataParamEnv { move_data: move_data, param_env: param_env }; let dead_unwinds = IdxSetBuf::new_empty(mir.basic_blocks().len()); let flow_borrows = do_dataflow(tcx, mir, id, &attributes, &dead_unwinds, @@ -173,14 +200,23 @@ impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> DataflowResultsConsumer<'b, 'gcx> let span = stmt.source_info.span; match stmt.kind { StatementKind::Assign(ref lhs, ref rhs) => { + // NOTE: NLL RFC calls for *shallow* write; using Deep + // for short-term compat w/ AST-borrowck. Also, switch + // to shallow requires to dataflow: "if this is an + // assignment `lv = `, then any loan for some + // path P of which `lv` is a prefix is killed." 
self.mutate_lvalue(ContextKind::AssignLhs.new(location), - (lhs, span), JustWrite, flow_state); + (lhs, span), Deep, JustWrite, flow_state); + self.consume_rvalue(ContextKind::AssignRhs.new(location), (rhs, span), location, flow_state); } StatementKind::SetDiscriminant { ref lvalue, variant_index: _ } => { self.mutate_lvalue(ContextKind::SetDiscrim.new(location), - (lvalue, span), JustWrite, flow_state); + (lvalue, span), + Shallow(Some(ArtificialField::Discriminant)), + JustWrite, + flow_state); } StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => { for (o, output) in asm.outputs.iter().zip(outputs) { @@ -192,6 +228,7 @@ impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> DataflowResultsConsumer<'b, 'gcx> } else { self.mutate_lvalue(ContextKind::InlineAsm.new(location), (output, span), + Deep, if o.is_rw { WriteAndRead } else { JustWrite }, flow_state); } @@ -209,15 +246,15 @@ impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> DataflowResultsConsumer<'b, 'gcx> StatementKind::Nop | StatementKind::Validate(..) | StatementKind::StorageLive(..) => { - // ignored by borrowck + // `Nop`, `Validate`, and `StorageLive` are irrelevant + // to borrow check. } - StatementKind::StorageDead(ref lvalue) => { - // causes non-drop values to be dropped. - self.consume_lvalue(ContextKind::StorageDead.new(location), - ConsumeKind::Consume, - (lvalue, span), - flow_state) + StatementKind::StorageDead(local) => { + self.access_lvalue(ContextKind::StorageDead.new(location), + (&Lvalue::Local(local), span), + (Shallow(None), Write(WriteKind::StorageDead)), + flow_state); } } } @@ -246,7 +283,10 @@ impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> DataflowResultsConsumer<'b, 'gcx> target: _, unwind: _ } => { self.mutate_lvalue(ContextKind::DropAndReplace.new(loc), - (drop_lvalue, span), JustWrite, flow_state); + (drop_lvalue, span), + Deep, + JustWrite, + flow_state); self.consume_operand(ContextKind::DropAndReplace.new(loc), ConsumeKind::Drop, (new_value, span), flow_state); @@ -262,7 +302,10 @@ impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> DataflowResultsConsumer<'b, 'gcx> } if let Some((ref dest, _/*bb*/)) = *destination { self.mutate_lvalue(ContextKind::CallDest.new(loc), - (dest, span), JustWrite, flow_state); + (dest, span), + Deep, + JustWrite, + flow_state); } } TerminatorKind::Assert { ref cond, expected: _, ref msg, target: _, cleanup: _ } => { @@ -279,12 +322,20 @@ impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> DataflowResultsConsumer<'b, 'gcx> (index, span), flow_state); } AssertMessage::Math(_/*const_math_err*/) => {} + AssertMessage::GeneratorResumedAfterReturn => {} + AssertMessage::GeneratorResumedAfterPanic => {} } } + TerminatorKind::Yield { ref value, resume: _, drop: _} => { + self.consume_operand(ContextKind::Yield.new(loc), + Consume, (value, span), flow_state); + } + TerminatorKind::Goto { target: _ } | TerminatorKind::Resume | TerminatorKind::Return | + TerminatorKind::GeneratorDrop | TerminatorKind::Unreachable => { // no data used, thus irrelevant to borrowck } @@ -301,29 +352,129 @@ enum ConsumeKind { Drop, Consume } #[derive(Copy, Clone, PartialEq, Eq, Debug)] enum Control { Continue, Break } +use self::ShallowOrDeep::{Shallow, Deep}; +use self::ReadOrWrite::{Read, Write}; + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +enum ArtificialField { + Discriminant, + ArrayLength, +} + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +enum ShallowOrDeep { + /// From the RFC: "A *shallow* access means that the immediate + /// fields reached at LV are accessed, but references or pointers + /// found within are not dereferenced. 
Right now, the only access + /// that is shallow is an assignment like `x = ...;`, which would + /// be a *shallow write* of `x`." + Shallow(Option), + + /// From the RFC: "A *deep* access means that all data reachable + /// through the given lvalue may be invalidated or accesses by + /// this action." + Deep, +} + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +enum ReadOrWrite { + /// From the RFC: "A *read* means that the existing data may be + /// read, but will not be changed." + Read(ReadKind), + + /// From the RFC: "A *write* means that the data may be mutated to + /// new values or otherwise invalidated (for example, it could be + /// de-initialized, as in a move operation). + Write(WriteKind), +} + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +enum ReadKind { + Borrow(BorrowKind), + Copy, +} + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +enum WriteKind { + StorageDead, + MutableBorrow(BorrowKind), + Mutate, + Move, +} + impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> MirBorrowckCtxt<'c, 'b, 'a, 'gcx, 'tcx> { + fn access_lvalue(&mut self, + context: Context, + lvalue_span: (&Lvalue<'gcx>, Span), + kind: (ShallowOrDeep, ReadOrWrite), + flow_state: &InProgress<'b, 'gcx>) { + // FIXME: also need to check permissions (e.g. reject mut + // borrow of immutable ref, moves through non-`Box`-ref) + let (sd, rw) = kind; + self.each_borrow_involving_path( + context, (sd, lvalue_span.0), flow_state, |this, _index, borrow, common_prefix| { + match (rw, borrow.kind) { + (Read(_), BorrowKind::Shared) => { + Control::Continue + } + (Read(kind), BorrowKind::Unique) | + (Read(kind), BorrowKind::Mut) => { + match kind { + ReadKind::Copy => + this.report_use_while_mutably_borrowed( + context, lvalue_span, borrow), + ReadKind::Borrow(bk) => { + let end_issued_loan_span = + flow_state.borrows.base_results.operator().region_span( + &borrow.region).end_point(); + this.report_conflicting_borrow( + context, common_prefix, lvalue_span, bk, + &borrow, end_issued_loan_span) + } + } + Control::Break + } + (Write(kind), _) => { + match kind { + WriteKind::MutableBorrow(bk) => { + let end_issued_loan_span = + flow_state.borrows.base_results.operator().region_span( + &borrow.region).end_point(); + this.report_conflicting_borrow( + context, common_prefix, lvalue_span, bk, + &borrow, end_issued_loan_span) + } + WriteKind::StorageDead | + WriteKind::Mutate => + this.report_illegal_mutation_of_borrowed( + context, lvalue_span, borrow), + WriteKind::Move => + this.report_move_out_while_borrowed( + context, lvalue_span, &borrow), + } + Control::Break + } + } + }); + } + fn mutate_lvalue(&mut self, context: Context, lvalue_span: (&Lvalue<'gcx>, Span), + kind: ShallowOrDeep, mode: MutateMode, flow_state: &InProgress<'b, 'gcx>) { // Write of P[i] or *P, or WriteAndRead of any P, requires P init'd. 
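The `access_lvalue` helper above reduces borrow checking of a single access to a small matrix over `ReadOrWrite` and the kind of each outstanding borrow: a read of a path can coexist with a shared borrow of it, while any write (a mutation, a move, a mutable borrow, or `StorageDead`) conflicts with every outstanding borrow. A minimal model of that matrix, using stand-in enums:

```rust
#[derive(Debug, Clone, Copy)]
enum BorrowKind {
    Shared,
    Unique,
    Mut,
}

#[derive(Debug, Clone, Copy)]
enum Access {
    Read,
    Write,
}

fn conflicts(access: Access, existing_borrow: BorrowKind) -> bool {
    match (access, existing_borrow) {
        // Shared reads may overlap with shared borrows of the same path.
        (Access::Read, BorrowKind::Shared) => false,
        // Reads conflict with unique or mutable borrows; writes conflict with all borrows.
        (Access::Read, _) | (Access::Write, _) => true,
    }
}

fn main() {
    assert!(!conflicts(Access::Read, BorrowKind::Shared));
    assert!(conflicts(Access::Read, BorrowKind::Mut));
    assert!(conflicts(Access::Write, BorrowKind::Shared));
    assert!(conflicts(Access::Write, BorrowKind::Unique));
    println!("all conflict checks passed");
}
```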
match mode { MutateMode::WriteAndRead => { - self.check_if_path_is_moved(context, lvalue_span, flow_state); + self.check_if_path_is_moved(context, "update", lvalue_span, flow_state); } MutateMode::JustWrite => { self.check_if_assigned_path_is_moved(context, lvalue_span, flow_state); } } - // check we don't invalidate any outstanding loans - self.each_borrow_involving_path(context, - lvalue_span.0, flow_state, |this, _index, _data| { - this.report_illegal_mutation_of_borrowed(context, - lvalue_span); - Control::Break - }); + self.access_lvalue(context, lvalue_span, (kind, Write(WriteKind::Mutate)), flow_state); // check for reassignments to immutable local variables self.check_if_reassignment_to_immutable_state(context, lvalue_span, flow_state); @@ -332,11 +483,17 @@ impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> MirBorrowckCtxt<'c, 'b, 'a, 'gcx, 'tcx> fn consume_rvalue(&mut self, context: Context, (rvalue, span): (&Rvalue<'gcx>, Span), - location: Location, + _location: Location, flow_state: &InProgress<'b, 'gcx>) { match *rvalue { Rvalue::Ref(_/*rgn*/, bk, ref lvalue) => { - self.borrow(context, location, bk, (lvalue, span), flow_state) + let access_kind = match bk { + BorrowKind::Shared => (Deep, Read(ReadKind::Borrow(bk))), + BorrowKind::Unique | + BorrowKind::Mut => (Deep, Write(WriteKind::MutableBorrow(bk))), + }; + self.access_lvalue(context, (lvalue, span), access_kind, flow_state); + self.check_if_path_is_moved(context, "borrow", (lvalue, span), flow_state); } Rvalue::Use(ref operand) | @@ -348,8 +505,14 @@ impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> MirBorrowckCtxt<'c, 'b, 'a, 'gcx, 'tcx> Rvalue::Len(ref lvalue) | Rvalue::Discriminant(ref lvalue) => { - // len(_)/discriminant(_) merely read, not consume. - self.check_if_path_is_moved(context, (lvalue, span), flow_state); + let af = match *rvalue { + Rvalue::Len(..) => ArtificialField::ArrayLength, + Rvalue::Discriminant(..) 
=> ArtificialField::Discriminant, + _ => unreachable!(), + }; + self.access_lvalue( + context, (lvalue, span), (Shallow(Some(af)), Read(ReadKind::Copy)), flow_state); + self.check_if_path_is_moved(context, "use", (lvalue, span), flow_state); } Rvalue::BinaryOp(_bin_op, ref operand1, ref operand2) | @@ -380,8 +543,9 @@ impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> MirBorrowckCtxt<'c, 'b, 'a, 'gcx, 'tcx> (operand, span): (&Operand<'gcx>, Span), flow_state: &InProgress<'b, 'gcx>) { match *operand { - Operand::Consume(ref lvalue) => - self.consume_lvalue(context, consume_via_drop, (lvalue, span), flow_state), + Operand::Consume(ref lvalue) => { + self.consume_lvalue(context, consume_via_drop, (lvalue, span), flow_state) + } Operand::Constant(_) => {} } } @@ -397,26 +561,10 @@ impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> MirBorrowckCtxt<'c, 'b, 'a, 'gcx, 'tcx> self.fake_infer_ctxt.type_moves_by_default(self.param_env, ty, DUMMY_SP); if moves_by_default { // move of lvalue: check if this is move of already borrowed path - self.each_borrow_involving_path( - context, lvalue_span.0, flow_state, |this, _idx, borrow| { - if !borrow.compatible_with(BorrowKind::Mut) { - this.report_move_out_while_borrowed(context, lvalue_span); - Control::Break - } else { - Control::Continue - } - }); + self.access_lvalue(context, lvalue_span, (Deep, Write(WriteKind::Move)), flow_state); } else { // copy of lvalue: check if this is "copy of frozen path" (FIXME: see check_loans.rs) - self.each_borrow_involving_path( - context, lvalue_span.0, flow_state, |this, _idx, borrow| { - if !borrow.compatible_with(BorrowKind::Shared) { - this.report_use_while_mutably_borrowed(context, lvalue_span); - Control::Break - } else { - Control::Continue - } - }); + self.access_lvalue(context, lvalue_span, (Deep, Read(ReadKind::Copy)), flow_state); } // Finally, check if path was already moved. @@ -427,22 +575,10 @@ impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> MirBorrowckCtxt<'c, 'b, 'a, 'gcx, 'tcx> // skip this check in that case). } ConsumeKind::Consume => { - self.check_if_path_is_moved(context, lvalue_span, flow_state); + self.check_if_path_is_moved(context, "use", lvalue_span, flow_state); } } } - - fn borrow(&mut self, - context: Context, - location: Location, - bk: BorrowKind, - lvalue_span: (&Lvalue<'gcx>, Span), - flow_state: &InProgress<'b, 'gcx>) { - debug!("borrow location: {:?} lvalue: {:?} span: {:?}", - location, lvalue_span.0, lvalue_span.1); - self.check_if_path_is_moved(context, lvalue_span, flow_state); - self.check_for_conflicting_loans(context, location, bk, lvalue_span, flow_state); - } } impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> MirBorrowckCtxt<'c, 'b, 'a, 'gcx, 'tcx> { @@ -479,13 +615,26 @@ impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> MirBorrowckCtxt<'c, 'b, 'a, 'gcx, 'tcx> if flow_state.inits.curr_state.contains(&mpi) { // may already be assigned before reaching this statement; // report error. 
- self.report_illegal_reassignment(context, (lvalue, span)); + // FIXME: Not ideal, it only finds the assignment that lexically comes first + let assigned_lvalue = &move_data.move_paths[mpi].lvalue; + let assignment_stmt = self.mir.basic_blocks().iter().filter_map(|bb| { + bb.statements.iter().find(|stmt| { + if let StatementKind::Assign(ref lv, _) = stmt.kind { + *lv == *assigned_lvalue + } else { + false + } + }) + }).next().unwrap(); + self.report_illegal_reassignment( + context, (lvalue, span), assignment_stmt.source_info.span); } } } fn check_if_path_is_moved(&mut self, context: Context, + desired_action: &str, lvalue_span: (&Lvalue<'gcx>, Span), flow_state: &InProgress<'b, 'gcx>) { // FIXME: analogous code in check_loans first maps `lvalue` to @@ -497,7 +646,7 @@ impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> MirBorrowckCtxt<'c, 'b, 'a, 'gcx, 'tcx> if let Some(mpi) = self.move_path_for_lvalue(context, move_data, lvalue) { if maybe_uninits.curr_state.contains(&mpi) { // find and report move(s) that could cause this to be uninitialized - self.report_use_of_moved(context, lvalue_span); + self.report_use_of_moved(context, desired_action, lvalue_span); } else { // sanity check: initialized on *some* path, right? assert!(flow_state.inits.curr_state.contains(&mpi)); @@ -564,8 +713,8 @@ impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> MirBorrowckCtxt<'c, 'b, 'a, 'gcx, 'tcx> // check_loans.rs first maps // `base` to its base_path. - self.check_if_path_is_moved(context, - (base, span), flow_state); + self.check_if_path_is_moved( + context, "assignment", (base, span), flow_state); // (base initialized; no need to // recur further) @@ -582,72 +731,18 @@ impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> MirBorrowckCtxt<'c, 'b, 'a, 'gcx, 'tcx> } } } - - fn check_for_conflicting_loans(&mut self, - context: Context, - _location: Location, - _bk: BorrowKind, - lvalue_span: (&Lvalue<'gcx>, Span), - flow_state: &InProgress<'b, 'gcx>) { - // NOTE FIXME: The analogous code in old borrowck - // check_loans.rs is careful to iterate over every *issued* - // loan, as opposed to just the in scope ones. - // - // (Or if you prefer, all the *other* iterations over loans - // only consider loans that are in scope of some given - // CodeExtent) - // - // The (currently skeletal) code here does not encode such a - // distinction, which means it is almost certainly over - // looking something. - // - // (It is probably going to reject code that should be - // accepted, I suspect, by treated issued-but-out-of-scope - // loans as issued-and-in-scope, and thus causing them to - // interfere with other loans.) - // - // However, I just want to get something running, especially - // since I am trying to move into new territory with NLL, so - // lets get this going first, and then address the issued vs - // in-scope distinction later. - - let state = &flow_state.borrows; - let data = &state.base_results.operator().borrows(); - - debug!("check_for_conflicting_loans location: {:?}", _location); - - // does any loan generated here conflict with a previously issued loan? 
- let mut loans_generated = 0; - for (g, gen) in state.elems_generated().map(|g| (g, &data[g])) { - loans_generated += 1; - for (i, issued) in state.elems_incoming().map(|i| (i, &data[i])) { - debug!("check_for_conflicting_loans gen: {:?} issued: {:?} conflicts: {}", - (g, gen, self.base_path(&gen.lvalue), - self.restrictions(&gen.lvalue).collect::>()), - (i, issued, self.base_path(&issued.lvalue), - self.restrictions(&issued.lvalue).collect::>()), - self.conflicts_with(gen, issued)); - if self.conflicts_with(gen, issued) { - self.report_conflicting_borrow(context, lvalue_span, gen, issued); - } - } - } - - // MIR statically ensures each statement gens *at most one* - // loan; mutual conflict (within a statement) can't arise. - // - // As safe-guard, assert that above property actually holds. - assert!(loans_generated <= 1); - } } +} impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> MirBorrowckCtxt<'c, 'b, 'a, 'gcx, 'tcx> { fn each_borrow_involving_path(&mut self, _context: Context, - lvalue: &Lvalue<'gcx>, + access_lvalue: (ShallowOrDeep, &Lvalue<'gcx>), flow_state: &InProgress<'b, 'gcx>, mut op: F) - where F: FnMut(&mut Self, BorrowIndex, &BorrowData<'gcx>) -> Control + where F: FnMut(&mut Self, BorrowIndex, &BorrowData<'gcx>, &Lvalue) -> Control { + let (access, lvalue) = access_lvalue; + // FIXME: analogous code in check_loans first maps `lvalue` to // its base_path. @@ -656,233 +751,204 @@ impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> MirBorrowckCtxt<'c, 'b, 'a, 'gcx, 'tcx> // check for loan restricting path P being used. Accounts for // borrows of P, P.a.b, etc. - for i in flow_state.borrows.elems_incoming() { - // FIXME: check_loans.rs filtered this to "in scope" - // loans; i.e. it took a scope S and checked that each - // restriction's kill_scope was a superscope of S. + 'next_borrow: for i in flow_state.borrows.elems_incoming() { let borrowed = &data[i]; - for restricted in self.restrictions(&borrowed.lvalue) { - if restricted == lvalue { - let ctrl = op(self, i, borrowed); + + // Is `lvalue` (or a prefix of it) already borrowed? If + // so, that's relevant. + // + // FIXME: Differs from AST-borrowck; includes drive-by fix + // to #38899. Will probably need back-compat mode flag. + for accessed_prefix in self.prefixes(lvalue, PrefixSet::All) { + if *accessed_prefix == borrowed.lvalue { + // FIXME: pass in enum describing case we are in? + let ctrl = op(self, i, borrowed, accessed_prefix); if ctrl == Control::Break { return; } } } - } - // check for loans (not restrictions) on any base path. - // e.g. Rejects `{ let x = &mut a.b; let y = a.b.c; }`, - // since that moves out of borrowed path `a.b`. - // - // Limiting to loans (not restrictions) keeps this one - // working: `{ let x = &mut a.b; let y = a.c; }` - let mut cursor = lvalue; - loop { - // FIXME: check_loans.rs invoked `op` *before* cursor - // shift here. Might just work (and even avoid redundant - // errors?) given code above? But for now, I want to try - // doing what I think is more "natural" check. - for i in flow_state.borrows.elems_incoming() { - let borrowed = &data[i]; - if borrowed.lvalue == *cursor { - let ctrl = op(self, i, borrowed); - if ctrl == Control::Break { return; } + // Is `lvalue` a prefix (modulo access type) of the + // `borrowed.lvalue`? If so, that's relevant. 
+ + let prefix_kind = match access { + Shallow(Some(ArtificialField::Discriminant)) | + Shallow(Some(ArtificialField::ArrayLength)) => { + // The discriminant and array length are like + // additional fields on the type; they do not + // overlap any existing data there. Furthermore, + // they cannot actually be a prefix of any + // borrowed lvalue (at least in MIR as it is + // currently.) + continue 'next_borrow; } - } + Shallow(None) => PrefixSet::Shallow, + Deep => PrefixSet::Supporting, + }; - match *cursor { - Lvalue::Local(_) | Lvalue::Static(_) => break, - Lvalue::Projection(ref proj) => cursor = &proj.base, + for borrowed_prefix in self.prefixes(&borrowed.lvalue, prefix_kind) { + if borrowed_prefix == lvalue { + // FIXME: pass in enum describing case we are in? + let ctrl = op(self, i, borrowed, borrowed_prefix); + if ctrl == Control::Break { return; } + } } } } } -mod restrictions { - use super::MirBorrowckCtxt; +use self::prefixes::PrefixSet; + +/// From the NLL RFC: "The deep [aka 'supporting'] prefixes for an +/// lvalue are formed by stripping away fields and derefs, except that +/// we stop when we reach the deref of a shared reference. [...] " +/// +/// "Shallow prefixes are found by stripping away fields, but stop at +/// any dereference. So: writing a path like `a` is illegal if `a.b` +/// is borrowed. But: writing `a` is legal if `*a` is borrowed, +/// whether or not `a` is a shared or mutable reference. [...] " +mod prefixes { + use super::{MirBorrowckCtxt}; use rustc::hir; use rustc::ty::{self, TyCtxt}; - use rustc::mir::{Lvalue, Mir, Operand, ProjectionElem}; + use rustc::mir::{Lvalue, Mir, ProjectionElem}; + + pub trait IsPrefixOf<'tcx> { + fn is_prefix_of(&self, other: &Lvalue<'tcx>) -> bool; + } + + impl<'tcx> IsPrefixOf<'tcx> for Lvalue<'tcx> { + fn is_prefix_of(&self, other: &Lvalue<'tcx>) -> bool { + let mut cursor = other; + loop { + if self == cursor { + return true; + } + + match *cursor { + Lvalue::Local(_) | + Lvalue::Static(_) => return false, + Lvalue::Projection(ref proj) => { + cursor = &proj.base; + } + } + } + } + } + - pub(super) struct Restrictions<'c, 'tcx: 'c> { + pub(super) struct Prefixes<'c, 'tcx: 'c> { mir: &'c Mir<'tcx>, tcx: TyCtxt<'c, 'tcx, 'tcx>, - lvalue_stack: Vec<&'c Lvalue<'tcx>>, + kind: PrefixSet, + next: Option<&'c Lvalue<'tcx>>, + } + + #[derive(Copy, Clone, PartialEq, Eq, Debug)] + pub(super) enum PrefixSet { + All, + Shallow, + Supporting, } impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> MirBorrowckCtxt<'c, 'b, 'a, 'gcx, 'tcx> { - pub(super) fn restrictions<'d>(&self, - lvalue: &'d Lvalue<'gcx>) - -> Restrictions<'d, 'gcx> where 'b: 'd + pub(super) fn prefixes<'d>(&self, + lvalue: &'d Lvalue<'gcx>, + kind: PrefixSet) + -> Prefixes<'d, 'gcx> where 'b: 'd { - let lvalue_stack = if self.has_restrictions(lvalue) { vec![lvalue] } else { vec![] }; - Restrictions { lvalue_stack: lvalue_stack, mir: self.mir, tcx: self.tcx } + Prefixes { next: Some(lvalue), kind, mir: self.mir, tcx: self.tcx } } + } - fn has_restrictions(&self, lvalue: &Lvalue<'gcx>) -> bool { - let mut cursor = lvalue; - loop { + impl<'c, 'tcx> Iterator for Prefixes<'c, 'tcx> { + type Item = &'c Lvalue<'tcx>; + fn next(&mut self) -> Option { + let mut cursor = match self.next { + None => return None, + Some(lvalue) => lvalue, + }; + + // Post-processing `lvalue`: Enqueue any remaining + // work. Also, `lvalue` may not be a prefix itself, but + // may hold one further down (e.g. we never return + // downcasts here, but may return a base of a downcast). 
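`is_prefix_of` above walks the projection chain of one lvalue looking for the other. A self-contained sketch of the same relation over a simplified place representation (a local plus an ordered list of projections), not rustc's `Lvalue`: `a` is a prefix of `a.b.c`, but `a.b.c` is not a prefix of `a`, and places rooted in different locals never relate.

```rust
#[derive(Debug)]
struct Place {
    local: &'static str,
    // Projection names applied from the base outwards, e.g. field names or "*".
    projections: Vec<&'static str>,
}

impl Place {
    fn is_prefix_of(&self, other: &Place) -> bool {
        self.local == other.local
            && self.projections.len() <= other.projections.len()
            && other.projections[..self.projections.len()] == self.projections[..]
    }
}

fn main() {
    let a = Place { local: "a", projections: vec![] };
    let a_b_c = Place { local: "a", projections: vec!["b", "c"] };
    let x_b = Place { local: "x", projections: vec!["b"] };

    assert!(a.is_prefix_of(&a_b_c));
    assert!(!a_b_c.is_prefix_of(&a));
    assert!(!a.is_prefix_of(&x_b));
    println!("prefix checks passed");
}
```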
+ + 'cursor: loop { let proj = match *cursor { - Lvalue::Local(_) => return true, - Lvalue::Static(_) => return false, + Lvalue::Local(_) | // search yielded this leaf + Lvalue::Static(_) => { + self.next = None; + return Some(cursor); + } + Lvalue::Projection(ref proj) => proj, }; + match proj.elem { - ProjectionElem::Index(..) | - ProjectionElem::ConstantIndex { .. } | + ProjectionElem::Field(_/*field*/, _/*ty*/) => { + // FIXME: add union handling + self.next = Some(&proj.base); + return Some(cursor); + } ProjectionElem::Downcast(..) | ProjectionElem::Subslice { .. } | - ProjectionElem::Field(_/*field*/, _/*ty*/) => { + ProjectionElem::ConstantIndex { .. } | + ProjectionElem::Index(_) => { cursor = &proj.base; - continue; + continue 'cursor; } ProjectionElem::Deref => { - let ty = proj.base.ty(self.mir, self.tcx).to_ty(self.tcx); - match ty.sty { - ty::TyRawPtr(_) => { - return false; - } - ty::TyRef(_, ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) => { - // FIXME: do I need to check validity of - // region here though? (I think the original - // check_loans code did, like readme says) - return false; - } - ty::TyRef(_, ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => { - cursor = &proj.base; - continue; - } - ty::TyAdt(..) if ty.is_box() => { - cursor = &proj.base; - continue; - } - _ => { - panic!("unknown type fed to Projection Deref."); - } - } + // (handled below) } } - } - } - } - impl<'c, 'tcx> Iterator for Restrictions<'c, 'tcx> { - type Item = &'c Lvalue<'tcx>; - fn next(&mut self) -> Option { - 'pop: loop { - let lvalue = match self.lvalue_stack.pop() { - None => return None, - Some(lvalue) => lvalue, - }; - - // `lvalue` may not be a restriction itself, but may - // hold one further down (e.g. we never return - // downcasts here, but may return a base of a - // downcast). - // - // Also, we need to enqueue any additional - // subrestrictions that it implies, since we can only - // return from from this call alone. - - let mut cursor = lvalue; - 'cursor: loop { - let proj = match *cursor { - Lvalue::Local(_) => return Some(cursor), // search yielded this leaf - Lvalue::Static(_) => continue 'pop, // fruitless leaf; try next on stack - Lvalue::Projection(ref proj) => proj, - }; + assert_eq!(proj.elem, ProjectionElem::Deref); - match proj.elem { - ProjectionElem::Field(_/*field*/, _/*ty*/) => { - // FIXME: add union handling - self.lvalue_stack.push(&proj.base); - return Some(cursor); - } - ProjectionElem::Downcast(..) | - ProjectionElem::Subslice { .. } | - ProjectionElem::ConstantIndex { .. } | - ProjectionElem::Index(Operand::Constant(..)) => { - cursor = &proj.base; - continue 'cursor; - } - ProjectionElem::Index(Operand::Consume(ref index)) => { - self.lvalue_stack.push(index); // FIXME: did old borrowck do this? - cursor = &proj.base; - continue 'cursor; - } - ProjectionElem::Deref => { - // (handled below) - } + match self.kind { + PrefixSet::Shallow => { + // shallow prefixes are found by stripping away + // fields, but stop at *any* dereference. + // So we can just stop the traversal now. + self.next = None; + return Some(cursor); } + PrefixSet::All => { + // all prefixes: just blindly enqueue the base + // of the projection + self.next = Some(&proj.base); + return Some(cursor); + } + PrefixSet::Supporting => { + // fall through! 
+ } + } - assert_eq!(proj.elem, ProjectionElem::Deref); - - let ty = proj.base.ty(self.mir, self.tcx).to_ty(self.tcx); - match ty.sty { - ty::TyRawPtr(_) => { - // borrowck ignores raw ptrs; treat analogous to imm borrow - continue 'pop; - } - // R-Deref-Imm-Borrowed - ty::TyRef(_/*rgn*/, ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) => { - // immutably-borrowed referents do not - // have recursively-implied restrictions - // (because preventing actions on `*LV` - // does nothing about aliases like `*LV1`) - - // FIXME: do I need to check validity of - // `_r` here though? (I think the original - // check_loans code did, like the readme - // says) - - // (And do I *really* not have to - // recursively process the `base` as a - // further search here? Leaving this `if - // false` here as a hint to look at this - // again later. - // - // Ah, it might be because the - // restrictions are distinct from the path - // substructure. Note that there is a - // separate loop over the path - // substructure in fn - // each_borrow_involving_path, for better - // or for worse. - - if false { - cursor = &proj.base; - continue 'cursor; - } else { - continue 'pop; - } - } - - // R-Deref-Mut-Borrowed - ty::TyRef(_/*rgn*/, ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => { - // mutably-borrowed referents are - // themselves restricted. - - // FIXME: do I need to check validity of - // `_r` here though? (I think the original - // check_loans code did, like the readme - // says) - - // schedule base for future iteration. - self.lvalue_stack.push(&proj.base); - return Some(cursor); // search yielded interior node - } + assert_eq!(self.kind, PrefixSet::Supporting); + // supporting prefixes: strip away fields and + // derefs, except we stop at the deref of a shared + // reference. + + let ty = proj.base.ty(self.mir, self.tcx).to_ty(self.tcx); + match ty.sty { + ty::TyRawPtr(_) | + ty::TyRef(_/*rgn*/, ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) => { + // don't continue traversing over derefs of raw pointers or shared borrows. + self.next = None; + return Some(cursor); + } - // R-Deref-Send-Pointer - ty::TyAdt(..) if ty.is_box() => { - // borrowing interior of a box implies that - // its base can no longer be mutated (o/w box - // storage would be freed) - self.lvalue_stack.push(&proj.base); - return Some(cursor); // search yielded interior node - } + ty::TyRef(_/*rgn*/, ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => { + self.next = Some(&proj.base); + return Some(cursor); + } - _ => panic!("unknown type fed to Projection Deref."), + ty::TyAdt(..) 
if ty.is_box() => { + self.next = Some(&proj.base); + return Some(cursor); } + + _ => panic!("unknown type fed to Projection Deref."), } } } @@ -892,77 +958,120 @@ mod restrictions { impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> MirBorrowckCtxt<'c, 'b, 'a, 'gcx, 'tcx> { fn report_use_of_moved(&mut self, _context: Context, + desired_action: &str, (lvalue, span): (&Lvalue, Span)) { - let mut err = self.tcx.cannot_act_on_uninitialized_variable( - span, "use", &self.describe_lvalue(lvalue), Origin::Mir); - // FIXME: add span_label for use of uninitialized variable - err.emit(); + self.tcx.cannot_act_on_uninitialized_variable(span, + desired_action, + &self.describe_lvalue(lvalue), + Origin::Mir) + .span_label(span, format!("use of possibly uninitialized `{}`", + self.describe_lvalue(lvalue))) + .emit(); } fn report_move_out_while_borrowed(&mut self, _context: Context, - (lvalue, span): (&Lvalue, Span)) { - let mut err = self.tcx.cannot_move_when_borrowed( - span, &self.describe_lvalue(lvalue), Origin::Mir); - // FIXME 1: add span_label for "borrow of `()` occurs here" - // FIXME 2: add span_label for "move out of `{}` occurs here" - err.emit(); + (lvalue, span): (&Lvalue, Span), + borrow: &BorrowData) { + self.tcx.cannot_move_when_borrowed(span, + &self.describe_lvalue(lvalue), + Origin::Mir) + .span_label(self.retrieve_borrow_span(borrow), + format!("borrow of `{}` occurs here", + self.describe_lvalue(&borrow.lvalue))) + .span_label(span, format!("move out of `{}` occurs here", + self.describe_lvalue(lvalue))) + .emit(); } fn report_use_while_mutably_borrowed(&mut self, _context: Context, - (lvalue, span): (&Lvalue, Span)) { + (lvalue, span): (&Lvalue, Span), + borrow : &BorrowData) { + let mut err = self.tcx.cannot_use_when_mutably_borrowed( - span, &self.describe_lvalue(lvalue), Origin::Mir); - // FIXME 1: add span_label for "borrow of `()` occurs here" - // FIXME 2: add span_label for "use of `{}` occurs here" + span, &self.describe_lvalue(lvalue), + self.retrieve_borrow_span(borrow), &self.describe_lvalue(&borrow.lvalue), + Origin::Mir); + err.emit(); } fn report_conflicting_borrow(&mut self, _context: Context, + common_prefix: &Lvalue, (lvalue, span): (&Lvalue, Span), - loan1: &BorrowData, - loan2: &BorrowData) { - // FIXME: obviously falsifiable. Generalize for non-eq lvalues later. - assert_eq!(loan1.lvalue, loan2.lvalue); + gen_borrow_kind: BorrowKind, + issued_borrow: &BorrowData, + end_issued_loan_span: Span) { + use self::prefixes::IsPrefixOf; + + assert!(common_prefix.is_prefix_of(lvalue)); + assert!(common_prefix.is_prefix_of(&issued_borrow.lvalue)); + + let issued_span = self.retrieve_borrow_span(issued_borrow); // FIXME: supply non-"" `opt_via` when appropriate - let mut err = match (loan1.kind, "immutable", "mutable", - loan2.kind, "immutable", "mutable") { + let mut err = match (gen_borrow_kind, "immutable", "mutable", + issued_borrow.kind, "immutable", "mutable") { (BorrowKind::Shared, lft, _, BorrowKind::Mut, _, rgt) | - (BorrowKind::Mut, _, lft, BorrowKind::Shared, rgt, _) | - (BorrowKind::Mut, _, lft, BorrowKind::Mut, _, rgt) => + (BorrowKind::Mut, _, lft, BorrowKind::Shared, rgt, _) => self.tcx.cannot_reborrow_already_borrowed( - span, &self.describe_lvalue(lvalue), - "", lft, "it", rgt, "", Origin::Mir), - - _ => self.tcx.cannot_mutably_borrow_multiply( - span, &self.describe_lvalue(lvalue), "", Origin::Mir), - // FIXME: add span labels for first and second mutable borrows, as well as - // end point for first. 
+ span, &self.describe_lvalue(lvalue), "", lft, issued_span, + "it", rgt, "", end_issued_loan_span, Origin::Mir), + + (BorrowKind::Mut, _, _, BorrowKind::Mut, _, _) => + self.tcx.cannot_mutably_borrow_multiply( + span, &self.describe_lvalue(lvalue), "", issued_span, + "", end_issued_loan_span, Origin::Mir), + + (BorrowKind::Unique, _, _, BorrowKind::Unique, _, _) => + self.tcx.cannot_uniquely_borrow_by_two_closures( + span, &self.describe_lvalue(lvalue), issued_span, + end_issued_loan_span, Origin::Mir), + + (BorrowKind::Unique, _, _, _, _, _) => + self.tcx.cannot_uniquely_borrow_by_one_closure( + span, &self.describe_lvalue(lvalue), "", + issued_span, "it", "", end_issued_loan_span, Origin::Mir), + + (_, _, _, BorrowKind::Unique, _, _) => + self.tcx.cannot_reborrow_already_uniquely_borrowed( + span, &self.describe_lvalue(lvalue), "it", "", + issued_span, "", end_issued_loan_span, Origin::Mir), + + (BorrowKind::Shared, _, _, BorrowKind::Shared, _, _) => + unreachable!(), }; err.emit(); } - fn report_illegal_mutation_of_borrowed(&mut self, _: Context, (lvalue, span): (&Lvalue, Span)) { + fn report_illegal_mutation_of_borrowed(&mut self, + _: Context, + (lvalue, span): (&Lvalue, Span), + loan: &BorrowData) { let mut err = self.tcx.cannot_assign_to_borrowed( - span, &self.describe_lvalue(lvalue), Origin::Mir); - // FIXME: add span labels for borrow and assignment points + span, self.retrieve_borrow_span(loan), &self.describe_lvalue(lvalue), Origin::Mir); + err.emit(); } - fn report_illegal_reassignment(&mut self, _context: Context, (lvalue, span): (&Lvalue, Span)) { - let mut err = self.tcx.cannot_reassign_immutable( - span, &self.describe_lvalue(lvalue), Origin::Mir); - // FIXME: add span labels for borrow and assignment points - err.emit(); + fn report_illegal_reassignment(&mut self, + _context: Context, + (lvalue, span): (&Lvalue, Span), + assigned_span: Span) { + self.tcx.cannot_reassign_immutable(span, + &self.describe_lvalue(lvalue), + Origin::Mir) + .span_label(span, "re-assignment of immutable variable") + .span_label(assigned_span, format!("first assignment to `{}`", + self.describe_lvalue(lvalue))) + .emit(); } fn report_assignment_to_static(&mut self, _context: Context, (lvalue, span): (&Lvalue, Span)) { let mut err = self.tcx.cannot_assign_static( span, &self.describe_lvalue(lvalue), Origin::Mir); - // FIXME: add span labels for borrow and assignment points err.emit(); } } @@ -995,8 +1104,8 @@ impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> MirBorrowckCtxt<'c, 'b, 'a, 'gcx, 'tcx> ProjectionElem::Downcast(..) 
=> ("", format!(""), None), // (dont emit downcast info) ProjectionElem::Field(field, _ty) => - ("", format!(".{}", field.index()), None), - ProjectionElem::Index(ref index) => + ("", format!(".{}", field.index()), None), // FIXME: report name of field + ProjectionElem::Index(index) => ("", format!(""), Some(index)), ProjectionElem::ConstantIndex { offset, min_length, from_end: true } => ("", format!("[{} of {}]", offset, min_length), None), @@ -1013,48 +1122,22 @@ impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> MirBorrowckCtxt<'c, 'b, 'a, 'gcx, 'tcx> self.append_lvalue_to_string(&proj.base, buf); if let Some(index) = index_operand { buf.push_str("["); - self.append_operand_to_string(index, buf); + self.append_lvalue_to_string(&Lvalue::Local(index), buf); buf.push_str("]"); } else { buf.push_str(&suffix); } - } } } - fn append_operand_to_string(&self, operand: &Operand, buf: &mut String) { - match *operand { - Operand::Consume(ref lvalue) => { - self.append_lvalue_to_string(lvalue, buf); - } - Operand::Constant(ref constant) => { - buf.push_str(&format!("{:?}", constant)); - } - } + // Retrieve span of given borrow from the current MIR representation + fn retrieve_borrow_span(&self, borrow: &BorrowData) -> Span { + self.mir.source_info(borrow.location).span } } impl<'c, 'b, 'a: 'b+'c, 'gcx, 'tcx: 'a> MirBorrowckCtxt<'c, 'b, 'a, 'gcx, 'tcx> { - // FIXME: needs to be able to express errors analogous to check_loans.rs - fn conflicts_with(&self, loan1: &BorrowData<'gcx>, loan2: &BorrowData<'gcx>) -> bool { - if loan1.compatible_with(loan2.kind) { return false; } - - let loan2_base_path = self.base_path(&loan2.lvalue); - for restricted in self.restrictions(&loan1.lvalue) { - if restricted != loan2_base_path { continue; } - return true; - } - - let loan1_base_path = self.base_path(&loan1.lvalue); - for restricted in self.restrictions(&loan2.lvalue) { - if restricted != loan1_base_path { continue; } - return true; - } - - return false; - } - // FIXME (#16118): function intended to allow the borrow checker // to be less precise in its handling of Box while still allowing // moves out of a Box. 
They should be removed when/if we stop @@ -1101,6 +1184,7 @@ enum ContextKind { CallOperand, CallDest, Assert, + Yield, StorageDead, } @@ -1233,26 +1317,8 @@ impl FlowInProgress where BD: BitDenotation { self.curr_state.subtract(&self.stmt_kill); } - fn elems_generated(&self) -> indexed_set::Elems { - let univ = self.base_results.sets().bits_per_block(); - self.stmt_gen.elems(univ) - } - fn elems_incoming(&self) -> indexed_set::Elems { let univ = self.base_results.sets().bits_per_block(); self.curr_state.elems(univ) } } - -impl<'tcx> BorrowData<'tcx> { - fn compatible_with(&self, bk: BorrowKind) -> bool { - match (self.kind, bk) { - (BorrowKind::Shared, BorrowKind::Shared) => true, - - (BorrowKind::Mut, _) | - (BorrowKind::Unique, _) | - (_, BorrowKind::Mut) | - (_, BorrowKind::Unique) => false, - } - } -} diff --git a/src/librustc_mir/build/block.rs b/src/librustc_mir/build/block.rs index 4583d80b83..1fc96dbf45 100644 --- a/src/librustc_mir/build/block.rs +++ b/src/librustc_mir/build/block.rs @@ -21,22 +21,32 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ast_block: &'tcx hir::Block, source_info: SourceInfo) -> BlockAnd<()> { - let Block { extent, opt_destruction_extent, span, stmts, expr, targeted_by_break } = + let Block { + region_scope, + opt_destruction_scope, + span, + stmts, + expr, + targeted_by_break, + safety_mode + } = self.hir.mirror(ast_block); - self.in_opt_scope(opt_destruction_extent.map(|de|(de, source_info)), block, move |this| { - this.in_scope((extent, source_info), block, move |this| { + self.in_opt_scope(opt_destruction_scope.map(|de|(de, source_info)), block, move |this| { + this.in_scope((region_scope, source_info), LintLevel::Inherited, block, move |this| { if targeted_by_break { // This is a `break`-able block (currently only `catch { ... }`) let exit_block = this.cfg.start_new_block(); let block_exit = this.in_breakable_scope( None, exit_block, destination.clone(), |this| { - this.ast_block_stmts(destination, block, span, stmts, expr) + this.ast_block_stmts(destination, block, span, stmts, expr, + safety_mode) }); this.cfg.terminate(unpack!(block_exit), source_info, TerminatorKind::Goto { target: exit_block }); exit_block.unit() } else { - this.ast_block_stmts(destination, block, span, stmts, expr) + this.ast_block_stmts(destination, block, span, stmts, expr, + safety_mode) } }) }) @@ -47,7 +57,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { mut block: BasicBlock, span: Span, stmts: Vec>, - expr: Option>) + expr: Option>, + safety_mode: BlockSafety) -> BlockAnd<()> { let this = self; @@ -67,38 +78,48 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // the let-scopes at the end. // // First we build all the statements in the block. 
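The statement loop that follows pushes one remainder scope per `let` (via `let_scope_stack`) and the block epilogue pops them back-to-front. As a hedged, surface-level illustration in plain Rust (not rustc internals) of why the pop order is reversed: bindings declared later in a block drop first.

struct Announce(&'static str);

impl Drop for Announce {
    fn drop(&mut self) {
        // Runs when the binding's scope is popped.
        println!("dropping {}", self.0);
    }
}

fn main() {
    let _first = Announce("first");
    let _second = Announce("second");
    // Scopes unwind in reverse declaration order at the end of `main`,
    // printing "dropping second" and then "dropping first".
}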
- let mut let_extent_stack = Vec::with_capacity(8); + let mut let_scope_stack = Vec::with_capacity(8); let outer_visibility_scope = this.visibility_scope; + let outer_push_unsafe_count = this.push_unsafe_count; + let outer_unpushed_unsafe = this.unpushed_unsafe; + this.update_visibility_scope_for_safety_mode(span, safety_mode); + let source_info = this.source_info(span); for stmt in stmts { - let Stmt { span, kind, opt_destruction_extent } = this.hir.mirror(stmt); + let Stmt { kind, opt_destruction_scope } = this.hir.mirror(stmt); match kind { StmtKind::Expr { scope, expr } => { unpack!(block = this.in_opt_scope( - opt_destruction_extent.map(|de|(de, source_info)), block, |this| { - this.in_scope((scope, source_info), block, |this| { + opt_destruction_scope.map(|de|(de, source_info)), block, |this| { + let si = (scope, source_info); + this.in_scope(si, LintLevel::Inherited, block, |this| { let expr = this.hir.mirror(expr); this.stmt_expr(block, expr) }) })); } - StmtKind::Let { remainder_scope, init_scope, pattern, initializer } => { - let tcx = this.hir.tcx(); - + StmtKind::Let { + remainder_scope, + init_scope, + pattern, + initializer, + lint_level + } => { // Enter the remainder scope, i.e. the bindings' destruction scope. this.push_scope((remainder_scope, source_info)); - let_extent_stack.push(remainder_scope); + let_scope_stack.push(remainder_scope); // Declare the bindings, which may create a visibility scope. - let remainder_span = remainder_scope.span(&tcx.hir); - let remainder_span = remainder_span.unwrap_or(span); - let scope = this.declare_bindings(None, remainder_span, &pattern); + let remainder_span = remainder_scope.span(this.hir.tcx(), + &this.hir.region_scope_tree); + let scope = this.declare_bindings(None, remainder_span, lint_level, &pattern); // Evaluate the initializer, if present. if let Some(init) = initializer { unpack!(block = this.in_opt_scope( - opt_destruction_extent.map(|de|(de, source_info)), block, move |this| { - this.in_scope((init_scope, source_info), block, move |this| { + opt_destruction_scope.map(|de|(de, source_info)), block, move |this| { + let scope = (init_scope, source_info); + this.in_scope(scope, lint_level, block, move |this| { // FIXME #30046 ^~~~ this.expr_into_pattern(block, pattern, init) }) @@ -126,11 +147,53 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } // Finally, we pop all the let scopes before exiting out from the scope of block // itself. - for extent in let_extent_stack.into_iter().rev() { - unpack!(block = this.pop_scope((extent, source_info), block)); + for scope in let_scope_stack.into_iter().rev() { + unpack!(block = this.pop_scope((scope, source_info), block)); } // Restore the original visibility scope. 
this.visibility_scope = outer_visibility_scope; + this.push_unsafe_count = outer_push_unsafe_count; + this.unpushed_unsafe = outer_unpushed_unsafe; block.unit() } + + /// If we are changing the safety mode, create a new visibility scope + fn update_visibility_scope_for_safety_mode(&mut self, + span: Span, + safety_mode: BlockSafety) + { + debug!("update_visibility_scope_for({:?}, {:?})", span, safety_mode); + let new_unsafety = match safety_mode { + BlockSafety::Safe => None, + BlockSafety::ExplicitUnsafe(node_id) => { + assert_eq!(self.push_unsafe_count, 0); + match self.unpushed_unsafe { + Safety::Safe => {} + _ => return + } + self.unpushed_unsafe = Safety::ExplicitUnsafe(node_id); + Some(Safety::ExplicitUnsafe(node_id)) + } + BlockSafety::PushUnsafe => { + self.push_unsafe_count += 1; + Some(Safety::BuiltinUnsafe) + } + BlockSafety::PopUnsafe => { + self.push_unsafe_count = + self.push_unsafe_count.checked_sub(1).unwrap_or_else(|| { + span_bug!(span, "unsafe count underflow") + }); + if self.push_unsafe_count == 0 { + Some(self.unpushed_unsafe) + } else { + None + } + } + }; + + if let Some(unsafety) = new_unsafety { + self.visibility_scope = self.new_visibility_scope( + span, LintLevel::Inherited, Some(unsafety)); + } + } } diff --git a/src/librustc_mir/build/cfg.rs b/src/librustc_mir/build/cfg.rs index b390e2888f..dfddbfe485 100644 --- a/src/librustc_mir/build/cfg.rs +++ b/src/librustc_mir/build/cfg.rs @@ -14,8 +14,9 @@ //! Routines for manipulating the control-flow graph. use build::CFG; -use rustc::middle::region::CodeExtent; +use rustc::middle::region; use rustc::mir::*; +use rustc::ty::TyCtxt; impl<'tcx> CFG<'tcx> { pub fn block_data(&self, blk: BasicBlock) -> &BasicBlockData<'tcx> { @@ -44,14 +45,17 @@ impl<'tcx> CFG<'tcx> { self.block_data_mut(block).statements.push(statement); } - pub fn push_end_region(&mut self, - block: BasicBlock, - source_info: SourceInfo, - extent: CodeExtent) { - self.push(block, Statement { - source_info, - kind: StatementKind::EndRegion(extent), - }); + pub fn push_end_region<'a, 'gcx:'a+'tcx>(&mut self, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + block: BasicBlock, + source_info: SourceInfo, + region_scope: region::Scope) { + if tcx.sess.emit_end_regions() { + self.push(block, Statement { + source_info, + kind: StatementKind::EndRegion(region_scope), + }); + } } pub fn push_assign(&mut self, diff --git a/src/librustc_mir/build/expr/as_constant.rs b/src/librustc_mir/build/expr/as_constant.rs index 6d15f0a2e5..a57f1b9549 100644 --- a/src/librustc_mir/build/expr/as_constant.rs +++ b/src/librustc_mir/build/expr/as_constant.rs @@ -29,7 +29,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let Expr { ty, temp_lifetime: _, span, kind } = expr; match kind { - ExprKind::Scope { extent: _, value } => + ExprKind::Scope { region_scope: _, lint_level: _, value } => this.as_constant(value), ExprKind::Literal { literal } => Constant { span: span, ty: ty, literal: literal }, diff --git a/src/librustc_mir/build/expr/as_lvalue.rs b/src/librustc_mir/build/expr/as_lvalue.rs index 04c2321546..69d0dd9922 100644 --- a/src/librustc_mir/build/expr/as_lvalue.rs +++ b/src/librustc_mir/build/expr/as_lvalue.rs @@ -39,8 +39,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let expr_span = expr.span; let source_info = this.source_info(expr_span); match expr.kind { - ExprKind::Scope { extent, value } => { - this.in_scope((extent, source_info), block, |this| this.as_lvalue(block, value)) + ExprKind::Scope { region_scope, lint_level, value } => { + this.in_scope((region_scope, 
source_info), lint_level, block, |this| { + this.as_lvalue(block, value) + }) } ExprKind::Field { lhs, name } => { let lvalue = unpack!(block = this.as_lvalue(block, lhs)); @@ -56,10 +58,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let (usize_ty, bool_ty) = (this.hir.usize_ty(), this.hir.bool_ty()); let slice = unpack!(block = this.as_lvalue(block, lhs)); - // extent=None so lvalue indexes live forever. They are scalars so they + // region_scope=None so lvalue indexes live forever. They are scalars so they // do not need storage annotations, and they are often copied between // places. - let idx = unpack!(block = this.as_operand(block, None, index)); + let idx = unpack!(block = this.as_temp(block, None, index)); // bounds check: let (len, lt) = (this.temp(usize_ty.clone(), expr_span), @@ -68,12 +70,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { &len, Rvalue::Len(slice.clone())); this.cfg.push_assign(block, source_info, // lt = idx < len <, Rvalue::BinaryOp(BinOp::Lt, - idx.clone(), + Operand::Consume(Lvalue::Local(idx)), Operand::Consume(len.clone()))); let msg = AssertMessage::BoundsCheck { len: Operand::Consume(len), - index: idx.clone() + index: Operand::Consume(Lvalue::Local(idx)) }; let success = this.assert(block, Operand::Consume(lt), true, msg, expr_span); @@ -118,13 +120,15 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ExprKind::Return { .. } | ExprKind::Literal { .. } | ExprKind::InlineAsm { .. } | + ExprKind::Yield { .. } | ExprKind::Call { .. } => { // these are not lvalues, so we need to make a temporary. debug_assert!(match Category::of(&expr.kind) { Some(Category::Lvalue) => false, _ => true, }); - this.as_temp(block, expr.temp_lifetime, expr) + let temp = unpack!(block = this.as_temp(block, expr.temp_lifetime, expr)); + block.and(Lvalue::Local(temp)) } } } diff --git a/src/librustc_mir/build/expr/as_operand.rs b/src/librustc_mir/build/expr/as_operand.rs index 4679e0bb0a..ea6e434209 100644 --- a/src/librustc_mir/build/expr/as_operand.rs +++ b/src/librustc_mir/build/expr/as_operand.rs @@ -13,7 +13,7 @@ use build::{BlockAnd, BlockAndExtension, Builder}; use build::expr::category::Category; use hair::*; -use rustc::middle::region::CodeExtent; +use rustc::middle::region; use rustc::mir::*; impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { @@ -39,7 +39,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// The operand is known to be live until the end of `scope`. pub fn as_operand(&mut self, block: BasicBlock, - scope: Option, + scope: Option, expr: M) -> BlockAnd> where M: Mirror<'tcx, Output = Expr<'tcx>> { @@ -49,16 +49,16 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { fn expr_as_operand(&mut self, mut block: BasicBlock, - scope: Option, + scope: Option, expr: Expr<'tcx>) -> BlockAnd> { debug!("expr_as_operand(block={:?}, expr={:?})", block, expr); let this = self; - if let ExprKind::Scope { extent, value } = expr.kind { + if let ExprKind::Scope { region_scope, lint_level, value } = expr.kind { let source_info = this.source_info(expr.span); - let extent = (extent, source_info); - return this.in_scope(extent, block, |this| { + let region_scope = (region_scope, source_info); + return this.in_scope(region_scope, lint_level, block, |this| { this.as_operand(block, scope, value) }); } @@ -74,7 +74,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { Category::Rvalue(..) 
=> { let operand = unpack!(block = this.as_temp(block, scope, expr)); - block.and(Operand::Consume(operand)) + block.and(Operand::Consume(Lvalue::Local(operand))) } } } diff --git a/src/librustc_mir/build/expr/as_rvalue.rs b/src/librustc_mir/build/expr/as_rvalue.rs index c74378bdad..d17f00b489 100644 --- a/src/librustc_mir/build/expr/as_rvalue.rs +++ b/src/librustc_mir/build/expr/as_rvalue.rs @@ -21,8 +21,8 @@ use build::expr::category::{Category, RvalueFunc}; use hair::*; use rustc_const_math::{ConstInt, ConstIsize}; use rustc::middle::const_val::ConstVal; -use rustc::middle::region::CodeExtent; -use rustc::ty; +use rustc::middle::region; +use rustc::ty::{self, Ty}; use rustc::mir::*; use syntax::ast; use syntax_pos::Span; @@ -38,7 +38,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } /// Compile `expr`, yielding an rvalue. - pub fn as_rvalue(&mut self, block: BasicBlock, scope: Option, expr: M) + pub fn as_rvalue(&mut self, block: BasicBlock, scope: Option, expr: M) -> BlockAnd> where M: Mirror<'tcx, Output = Expr<'tcx>> { @@ -48,7 +48,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { fn expr_as_rvalue(&mut self, mut block: BasicBlock, - scope: Option, + scope: Option, expr: Expr<'tcx>) -> BlockAnd> { debug!("expr_as_rvalue(block={:?}, scope={:?}, expr={:?})", block, scope, expr); @@ -58,9 +58,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let source_info = this.source_info(expr_span); match expr.kind { - ExprKind::Scope { extent, value } => { - let extent = (extent, source_info); - this.in_scope(extent, block, |this| this.as_rvalue(block, scope, value)) + ExprKind::Scope { region_scope, lint_level, value } => { + let region_scope = (region_scope, source_info); + this.in_scope(region_scope, lint_level, block, + |this| this.as_rvalue(block, scope, value)) } ExprKind::Repeat { value, count } => { let value_operand = unpack!(block = this.as_operand(block, scope, value)); @@ -96,23 +97,27 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } ExprKind::Box { value } => { let value = this.hir.mirror(value); - let result = this.temp(expr.ty, expr_span); + // The `Box` temporary created here is not a part of the HIR, + // and therefore is not considered during generator OIBIT + // determination. See the comment about `box` at `yield_in_scope`. 
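The comment above concerns which values count as live across a `yield`. The following sketch is only a period illustration, assuming the era's unstable `generators` and `box_syntax` features (the latter has since been removed), and is not guaranteed to build on current compilers:

#![feature(generators, box_syntax)]

fn main() {
    // Era-specific sketch, not a definitive example.
    let _g = || {
        let b: Box<i32> = box 1;
        // Only HIR-visible values live across the yield (here just `b`) take
        // part in the generator's auto-trait ("OIBIT") determination; the
        // compiler-internal temporary created for `box 1` does not.
        yield;
        drop(b);
    };
}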
+ let result = this.local_decls.push( + LocalDecl::new_internal(expr.ty, expr_span)); + this.cfg.push(block, Statement { + source_info, + kind: StatementKind::StorageLive(result) + }); if let Some(scope) = scope { // schedule a shallow free of that memory, lest we unwind: - this.cfg.push(block, Statement { - source_info, - kind: StatementKind::StorageLive(result.clone()) - }); - this.schedule_drop(expr_span, scope, &result, value.ty); + this.schedule_drop(expr_span, scope, &Lvalue::Local(result), value.ty); } // malloc some memory of suitable type (thus far, uninitialized): let box_ = Rvalue::NullaryOp(NullOp::Box, value.ty); - this.cfg.push_assign(block, source_info, &result, box_); + this.cfg.push_assign(block, source_info, &Lvalue::Local(result), box_); // initialize the box contents: - unpack!(block = this.into(&result.clone().deref(), block, value)); - block.and(Rvalue::Use(Operand::Consume(result))) + unpack!(block = this.into(&Lvalue::Local(result).deref(), block, value)); + block.and(Rvalue::Use(Operand::Consume(Lvalue::Local(result)))) } ExprKind::Cast { source } => { let source = this.hir.mirror(source); @@ -185,12 +190,29 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { block.and(Rvalue::Aggregate(box AggregateKind::Tuple, fields)) } - ExprKind::Closure { closure_id, substs, upvars } => { // see (*) above - let upvars = + ExprKind::Closure { closure_id, substs, upvars, interior } => { // see (*) above + let mut operands: Vec<_> = upvars.into_iter() .map(|upvar| unpack!(block = this.as_operand(block, scope, upvar))) .collect(); - block.and(Rvalue::Aggregate(box AggregateKind::Closure(closure_id, substs), upvars)) + let result = if let Some(interior) = interior { + // Add the state operand since it follows the upvars in the generator + // struct. See librustc_mir/transform/generator.rs for more details. + operands.push(Operand::Constant(box Constant { + span: expr_span, + ty: this.hir.tcx().types.u32, + literal: Literal::Value { + value: this.hir.tcx().mk_const(ty::Const { + val: ConstVal::Integral(ConstInt::U32(0)), + ty: this.hir.tcx().types.u32 + }), + }, + })); + box AggregateKind::Generator(closure_id, substs, interior) + } else { + box AggregateKind::Closure(closure_id, substs) + }; + block.and(Rvalue::Aggregate(result, operands)) } ExprKind::Adt { adt_def, variant_index, substs, fields, base @@ -232,6 +254,17 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { block = unpack!(this.stmt_expr(block, expr)); block.and(this.unit_rvalue()) } + ExprKind::Yield { value } => { + let value = unpack!(block = this.as_operand(block, scope, value)); + let resume = this.cfg.start_new_block(); + let cleanup = this.generator_drop_cleanup(); + this.cfg.terminate(block, source_info, TerminatorKind::Yield { + value: value, + resume: resume, + drop: cleanup, + }); + resume.and(this.unit_rvalue()) + } ExprKind::Literal { .. } | ExprKind::Block { .. } | ExprKind::Match { .. 
} | @@ -263,7 +296,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } pub fn build_binary_op(&mut self, mut block: BasicBlock, - op: BinOp, span: Span, ty: ty::Ty<'tcx>, + op: BinOp, span: Span, ty: Ty<'tcx>, lhs: Operand<'tcx>, rhs: Operand<'tcx>) -> BlockAnd> { let source_info = self.source_info(span); let bool_ty = self.hir.bool_ty(); @@ -350,7 +383,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } // Helper to get a `-1` value of the appropriate type - fn neg_1_literal(&mut self, span: Span, ty: ty::Ty<'tcx>) -> Operand<'tcx> { + fn neg_1_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> { let literal = match ty.sty { ty::TyInt(ity) => { let val = match ity { @@ -360,13 +393,18 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ast::IntTy::I64 => ConstInt::I64(-1), ast::IntTy::I128 => ConstInt::I128(-1), ast::IntTy::Is => { - let int_ty = self.hir.tcx().sess.target.int_type; + let int_ty = self.hir.tcx().sess.target.isize_ty; let val = ConstIsize::new(-1, int_ty).unwrap(); ConstInt::Isize(val) } }; - Literal::Value { value: ConstVal::Integral(val) } + Literal::Value { + value: self.hir.tcx().mk_const(ty::Const { + val: ConstVal::Integral(val), + ty + }) + } } _ => { span_bug!(span, "Invalid type for neg_1_literal: `{:?}`", ty) @@ -377,7 +415,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } // Helper to get the minimum value of the appropriate type - fn minval_literal(&mut self, span: Span, ty: ty::Ty<'tcx>) -> Operand<'tcx> { + fn minval_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> { let literal = match ty.sty { ty::TyInt(ity) => { let val = match ity { @@ -387,7 +425,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ast::IntTy::I64 => ConstInt::I64(i64::min_value()), ast::IntTy::I128 => ConstInt::I128(i128::min_value()), ast::IntTy::Is => { - let int_ty = self.hir.tcx().sess.target.int_type; + let int_ty = self.hir.tcx().sess.target.isize_ty; let min = match int_ty { ast::IntTy::I16 => std::i16::MIN as i64, ast::IntTy::I32 => std::i32::MIN as i64, @@ -399,7 +437,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } }; - Literal::Value { value: ConstVal::Integral(val) } + Literal::Value { + value: self.hir.tcx().mk_const(ty::Const { + val: ConstVal::Integral(val), + ty + }) + } } _ => { span_bug!(span, "Invalid type for minval_literal: `{:?}`", ty) diff --git a/src/librustc_mir/build/expr/as_temp.rs b/src/librustc_mir/build/expr/as_temp.rs index 4f248ddb0e..ba422a8183 100644 --- a/src/librustc_mir/build/expr/as_temp.rs +++ b/src/librustc_mir/build/expr/as_temp.rs @@ -13,7 +13,7 @@ use build::{BlockAnd, BlockAndExtension, Builder}; use build::expr::category::Category; use hair::*; -use rustc::middle::region::CodeExtent; +use rustc::middle::region; use rustc::mir::*; impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { @@ -21,9 +21,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// up rvalues so as to freeze the value that will be consumed. 
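The doc comment above says `as_temp` exists to freeze the value an operand will consume. A hedged, plain-Rust illustration of the observable effect at the surface level (no compiler internals involved):

fn observe(a: i32, b: i32) -> (i32, i32) {
    (a, b)
}

fn main() {
    let mut x = 0;
    // `x` is read into a temporary for the first argument before the block
    // that builds the second argument mutates it.
    let pair = observe(x, { x += 1; x });
    assert_eq!(pair, (0, 1));
}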
pub fn as_temp(&mut self, block: BasicBlock, - temp_lifetime: Option, + temp_lifetime: Option, expr: M) - -> BlockAnd> + -> BlockAnd where M: Mirror<'tcx, Output = Expr<'tcx>> { let expr = self.hir.mirror(expr); @@ -32,28 +32,28 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { fn expr_as_temp(&mut self, mut block: BasicBlock, - temp_lifetime: Option, + temp_lifetime: Option, expr: Expr<'tcx>) - -> BlockAnd> { + -> BlockAnd { debug!("expr_as_temp(block={:?}, temp_lifetime={:?}, expr={:?})", block, temp_lifetime, expr); let this = self; let expr_span = expr.span; let source_info = this.source_info(expr_span); - if let ExprKind::Scope { extent, value } = expr.kind { - return this.in_scope((extent, source_info), block, |this| { + if let ExprKind::Scope { region_scope, lint_level, value } = expr.kind { + return this.in_scope((region_scope, source_info), lint_level, block, |this| { this.as_temp(block, temp_lifetime, value) }); } - let expr_ty = expr.ty.clone(); - let temp = this.temp(expr_ty.clone(), expr_span); + let expr_ty = expr.ty; + let temp = this.local_decls.push(LocalDecl::new_temp(expr_ty, expr_span)); - if !expr_ty.is_never() && temp_lifetime.is_some() { + if !expr_ty.is_never() { this.cfg.push(block, Statement { source_info, - kind: StatementKind::StorageLive(temp.clone()) + kind: StatementKind::StorageLive(temp) }); } @@ -68,10 +68,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { Category::Lvalue => { let lvalue = unpack!(block = this.as_lvalue(block, expr)); let rvalue = Rvalue::Use(Operand::Consume(lvalue)); - this.cfg.push_assign(block, source_info, &temp, rvalue); + this.cfg.push_assign(block, source_info, &Lvalue::Local(temp), rvalue); } _ => { - unpack!(block = this.into(&temp, block, expr)); + unpack!(block = this.into(&Lvalue::Local(temp), block, expr)); } } @@ -79,7 +79,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // anything because no values with a destructor can be created in // a constant at this time, even if the type may need dropping. if let Some(temp_lifetime) = temp_lifetime { - this.schedule_drop(expr_span, temp_lifetime, &temp, expr_ty); + this.schedule_drop(expr_span, temp_lifetime, &Lvalue::Local(temp), expr_ty); } block.and(temp) diff --git a/src/librustc_mir/build/expr/category.rs b/src/librustc_mir/build/expr/category.rs index 35173bb598..f05411aaca 100644 --- a/src/librustc_mir/build/expr/category.rs +++ b/src/librustc_mir/build/expr/category.rs @@ -77,6 +77,7 @@ impl Category { ExprKind::Borrow { .. } | ExprKind::Assign { .. } | ExprKind::AssignOp { .. } | + ExprKind::Yield { .. } | ExprKind::InlineAsm { .. 
} => Some(Category::Rvalue(RvalueFunc::AsRvalue)), diff --git a/src/librustc_mir/build/expr/into.rs b/src/librustc_mir/build/expr/into.rs index 576b1059e5..cdbcb43370 100644 --- a/src/librustc_mir/build/expr/into.rs +++ b/src/librustc_mir/build/expr/into.rs @@ -38,9 +38,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let source_info = this.source_info(expr_span); match expr.kind { - ExprKind::Scope { extent, value } => { - let extent = (extent, source_info); - this.in_scope(extent, block, |this| this.into(destination, block, value)) + ExprKind::Scope { region_scope, lint_level, value } => { + let region_scope = (region_scope, source_info); + this.in_scope(region_scope, lint_level, block, + |this| this.into(destination, block, value)) } ExprKind::Block { body: ast_block } => { this.ast_block(destination, block, ast_block, source_info) @@ -209,7 +210,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let f = ty.fn_sig(this.hir.tcx()); if f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic { - Some(this.hir.tcx().item_name(def_id).as_str()) + Some(this.hir.tcx().item_name(def_id)) } else { None } @@ -227,9 +228,22 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let val = args.next().expect("1 argument to `move_val_init`"); assert!(args.next().is_none(), ">2 arguments to `move_val_init`"); - let topmost_scope = this.topmost_scope(); - let ptr = unpack!(block = this.as_temp(block, Some(topmost_scope), ptr)); - this.into(&ptr.deref(), block, val) + let ptr = this.hir.mirror(ptr); + let ptr_ty = ptr.ty; + // Create an *internal* temp for the pointer, so that unsafety + // checking won't complain about the raw pointer assignment. + let ptr_temp = this.local_decls.push(LocalDecl { + mutability: Mutability::Mut, + ty: ptr_ty, + name: None, + source_info, + lexical_scope: source_info.scope, + internal: true, + is_user_variable: false + }); + let ptr_temp = Lvalue::Local(ptr_temp); + let block = unpack!(this.into(&ptr_temp, block, ptr)); + this.into(&ptr_temp.deref(), block, val) } else { let args: Vec<_> = args.into_iter() @@ -284,6 +298,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ExprKind::Index { .. } | ExprKind::Deref { .. } | ExprKind::Literal { .. } | + ExprKind::Yield { .. } | ExprKind::Field { .. } => { debug_assert!(match Category::of(&expr.kind).unwrap() { Category::Rvalue(RvalueFunc::Into) => false, diff --git a/src/librustc_mir/build/expr/stmt.rs b/src/librustc_mir/build/expr/stmt.rs index 0da722f72a..3cfb0ff401 100644 --- a/src/librustc_mir/build/expr/stmt.rs +++ b/src/librustc_mir/build/expr/stmt.rs @@ -22,9 +22,11 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // Handle a number of expressions that don't need a destination at all. This // avoids needing a mountain of temporary `()` variables. match expr.kind { - ExprKind::Scope { extent, value } => { + ExprKind::Scope { region_scope, lint_level, value } => { let value = this.hir.mirror(value); - this.in_scope((extent, source_info), block, |this| this.stmt_expr(block, value)) + this.in_scope((region_scope, source_info), lint_level, block, |this| { + this.stmt_expr(block, value) + }) } ExprKind::Assign { lhs, rhs } => { let lhs = this.hir.mirror(lhs); @@ -77,29 +79,29 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { block.unit() } ExprKind::Continue { label } => { - let BreakableScope { continue_block, extent, .. } = + let BreakableScope { continue_block, region_scope, .. 
} = *this.find_breakable_scope(expr_span, label); let continue_block = continue_block.expect( "Attempted to continue in non-continuable breakable block"); - this.exit_scope(expr_span, (extent, source_info), block, continue_block); + this.exit_scope(expr_span, (region_scope, source_info), block, continue_block); this.cfg.start_new_block().unit() } ExprKind::Break { label, value } => { - let (break_block, extent, destination) = { + let (break_block, region_scope, destination) = { let BreakableScope { break_block, - extent, + region_scope, ref break_destination, .. } = *this.find_breakable_scope(expr_span, label); - (break_block, extent, break_destination.clone()) + (break_block, region_scope, break_destination.clone()) }; if let Some(value) = value { unpack!(block = this.into(&destination, block, value)) } else { this.cfg.push_assign_unit(block, source_info, &destination) } - this.exit_scope(expr_span, (extent, source_info), block, break_block); + this.exit_scope(expr_span, (region_scope, source_info), block, break_block); this.cfg.start_new_block().unit() } ExprKind::Return { value } => { @@ -114,9 +116,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { block } }; - let extent = this.extent_of_return_scope(); + let region_scope = this.region_scope_of_return_scope(); let return_block = this.return_block(); - this.exit_scope(expr_span, (extent, source_info), block, return_block); + this.exit_scope(expr_span, (region_scope, source_info), block, return_block); this.cfg.start_new_block().unit() } ExprKind::InlineAsm { asm, outputs, inputs } => { diff --git a/src/librustc_mir/build/matches/mod.rs b/src/librustc_mir/build/matches/mod.rs index 78805ba87e..f04dede6e4 100644 --- a/src/librustc_mir/build/matches/mod.rs +++ b/src/librustc_mir/build/matches/mod.rs @@ -16,8 +16,7 @@ use build::{BlockAnd, BlockAndExtension, Builder}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::bitvec::BitVector; -use rustc::middle::const_val::ConstVal; -use rustc::ty::{AdtDef, Ty}; +use rustc::ty::{self, Ty}; use rustc::mir::*; use rustc::hir; use hair::*; @@ -47,8 +46,11 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // Get the arm bodies and their scopes, while declaring bindings. let arm_bodies: Vec<_> = arms.iter().map(|arm| { + // BUG: use arm lint level let body = self.hir.mirror(arm.body.clone()); - let scope = self.declare_bindings(None, body.span, &arm.patterns[0]); + let scope = self.declare_bindings(None, body.span, + LintLevel::Inherited, + &arm.patterns[0]); (body, scope.unwrap_or(self.visibility_scope)) }).collect(); @@ -172,11 +174,22 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { pub fn declare_bindings(&mut self, mut var_scope: Option, scope_span: Span, + lint_level: LintLevel, pattern: &Pattern<'tcx>) -> Option { + assert!(!(var_scope.is_some() && lint_level.is_explicit()), + "can't have both a var and a lint scope at the same time"); self.visit_bindings(pattern, &mut |this, mutability, name, var, span, ty| { if var_scope.is_none() { - var_scope = Some(this.new_visibility_scope(scope_span)); + var_scope = Some(this.new_visibility_scope(scope_span, + LintLevel::Inherited, + None)); + // If we have lints, create a new visibility scope + // that marks the lints for the locals. 
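The `declare_bindings` changes around this point record an explicit lint level for the locals a pattern introduces. A hedged example of the surface pattern this supports, using an ordinary stable lint:

fn main() {
    // This binding carries its own lint level, so it produces no
    // `unused_variables` warning even though it is never used.
    #[allow(unused_variables)]
    let quiet = 42;

    // This one is checked at the enclosing scope's lint level and does warn.
    let noisy = 7;
}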
+ if lint_level.is_explicit() { + this.visibility_scope = + this.new_visibility_scope(scope_span, lint_level, None); + } } let source_info = SourceInfo { span, @@ -184,6 +197,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { }; this.declare_binding(source_info, mutability, name, var, ty); }); + // Pop any scope we created for the locals. + if let Some(var_scope) = var_scope { + self.visibility_scope = var_scope; + } var_scope } @@ -194,7 +211,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let source_info = self.source_info(span); self.cfg.push(block, Statement { source_info, - kind: StatementKind::StorageLive(Lvalue::Local(local_id)) + kind: StatementKind::StorageLive(local_id) }); Lvalue::Local(local_id) } @@ -202,8 +219,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { pub fn schedule_drop_for_binding(&mut self, var: NodeId, span: Span) { let local_id = self.var_indices[&var]; let var_ty = self.local_decls[local_id].ty; - let extent = self.hir.region_maps.var_scope(var); - self.schedule_drop(span, extent, &Lvalue::Local(local_id), var_ty); + let hir_id = self.hir.tcx().hir.node_to_hir_id(var); + let region_scope = self.hir.region_scope_tree.var_scope(hir_id.local_id); + self.schedule_drop(span, region_scope, &Lvalue::Local(local_id), var_ty); } pub fn visit_bindings(&mut self, pattern: &Pattern<'tcx>, f: &mut F) @@ -293,20 +311,20 @@ pub struct MatchPair<'pat, 'tcx:'pat> { enum TestKind<'tcx> { // test the branches of enum Switch { - adt_def: &'tcx AdtDef, + adt_def: &'tcx ty::AdtDef, variants: BitVector, }, // test the branches of enum SwitchInt { switch_ty: Ty<'tcx>, - options: Vec>, - indices: FxHashMap, usize>, + options: Vec<&'tcx ty::Const<'tcx>>, + indices: FxHashMap<&'tcx ty::Const<'tcx>, usize>, }, // test for equality Eq { - value: ConstVal<'tcx>, + value: &'tcx ty::Const<'tcx>, ty: Ty<'tcx>, }, @@ -712,6 +730,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ty: var_ty.clone(), name: Some(name), source_info, + lexical_scope: self.visibility_scope, + internal: false, is_user_variable: true, }); self.var_indices.insert(var_id, var); diff --git a/src/librustc_mir/build/matches/test.rs b/src/librustc_mir/build/matches/test.rs index dc15163ecc..7b91c43aa3 100644 --- a/src/librustc_mir/build/matches/test.rs +++ b/src/librustc_mir/build/matches/test.rs @@ -61,24 +61,24 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } } - PatternKind::Constant { ref value } => { + PatternKind::Constant { value } => { Test { span: match_pair.pattern.span, kind: TestKind::Eq { - value: value.clone(), + value, ty: match_pair.pattern.ty.clone() } } } - PatternKind::Range { ref lo, ref hi, ref end } => { + PatternKind::Range { lo, hi, end } => { Test { span: match_pair.pattern.span, kind: TestKind::Range { - lo: Literal::Value { value: lo.clone() }, - hi: Literal::Value { value: hi.clone() }, + lo: Literal::Value { value: lo }, + hi: Literal::Value { value: hi }, ty: match_pair.pattern.ty.clone(), - end: end.clone(), + end, }, } } @@ -112,8 +112,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { test_lvalue: &Lvalue<'tcx>, candidate: &Candidate<'pat, 'tcx>, switch_ty: Ty<'tcx>, - options: &mut Vec>, - indices: &mut FxHashMap, usize>) + options: &mut Vec<&'tcx ty::Const<'tcx>>, + indices: &mut FxHashMap<&'tcx ty::Const<'tcx>, usize>) -> bool { let match_pair = match candidate.match_pairs.iter().find(|mp| mp.lvalue == *test_lvalue) { @@ -122,13 +122,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { }; match *match_pair.pattern.kind { - PatternKind::Constant { ref value } => { + 
PatternKind::Constant { value } => { // if the lvalues match, the type should match assert_eq!(match_pair.pattern.ty, switch_ty); - indices.entry(value.clone()) + indices.entry(value) .or_insert_with(|| { - options.push(value.clone()); + options.push(value); options.len() - 1 }); true @@ -228,9 +228,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { assert!(options.len() > 0 && options.len() <= 2); let (true_bb, false_bb) = (self.cfg.start_new_block(), self.cfg.start_new_block()); - let ret = match &options[0] { - &ConstVal::Bool(true) => vec![true_bb, false_bb], - &ConstVal::Bool(false) => vec![false_bb, true_bb], + let ret = match options[0].val { + ConstVal::Bool(true) => vec![true_bb, false_bb], + ConstVal::Bool(false) => vec![false_bb, true_bb], v => span_bug!(test.span, "expected boolean value but got {:?}", v) }; (ret, TerminatorKind::if_(self.hir.tcx(), Operand::Consume(lvalue.clone()), @@ -245,7 +245,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { .chain(Some(otherwise)) .collect(); let values: Vec<_> = options.iter().map(|v| - v.to_const_int().expect("switching on integral") + v.val.to_const_int().expect("switching on integral") ).collect(); (targets.clone(), TerminatorKind::SwitchInt { discr: Operand::Consume(lvalue.clone()), @@ -258,12 +258,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ret } - TestKind::Eq { ref value, mut ty } => { + TestKind::Eq { value, mut ty } => { let mut val = Operand::Consume(lvalue.clone()); // If we're using b"..." as a pattern, we need to insert an // unsizing coercion, as the byte string has the type &[u8; N]. - let expect = if let ConstVal::ByteStr(ref bytes) = *value { + let expect = if let ConstVal::ByteStr(bytes) = value.val { let tcx = self.hir.tcx(); // Unsize the lvalue to &[u8], too, if necessary. 
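The unsizing coercion described in the comment above is what lets a byte-string literal pattern, whose type is `&[u8; N]`, match an unsized `&[u8]` scrutinee. A hedged, plain-Rust illustration of that behaviour:

fn classify(bytes: &[u8]) -> &'static str {
    match bytes {
        // The literal pattern `b"GET"` has type `&[u8; 3]`; after the
        // coercion both sides are compared as `&[u8]`.
        b"GET" => "get",
        b"PUT" => "put",
        _ => "other",
    }
}

fn main() {
    assert_eq!(classify(b"GET"), "get");
    assert_eq!(classify(&[0x50, 0x55, 0x54]), "put");
}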
@@ -279,10 +279,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { assert!(ty.is_slice()); - let array_ty = tcx.mk_array(tcx.types.u8, bytes.len()); + let array_ty = tcx.mk_array(tcx.types.u8, bytes.data.len() as u64); let array_ref = tcx.mk_imm_ref(tcx.types.re_static, array_ty); let array = self.literal_operand(test.span, array_ref, Literal::Value { - value: value.clone() + value }); let slice = self.temp(ty, test.span); @@ -291,7 +291,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { Operand::Consume(slice) } else { self.literal_operand(test.span, ty, Literal::Value { - value: value.clone() + value }) }; @@ -299,7 +299,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let fail = self.cfg.start_new_block(); if let ty::TyRef(_, mt) = ty.sty { assert!(ty.is_slice()); - let eq_def_id = self.hir.tcx().lang_items.eq_trait().unwrap(); + let eq_def_id = self.hir.tcx().lang_items().eq_trait().unwrap(); let ty = mt.ty; let (mty, method) = self.hir.trait_method(eq_def_id, "eq", ty, &[ty]); diff --git a/src/librustc_mir/build/misc.rs b/src/librustc_mir/build/misc.rs index 0e4aac4299..1976b70ac0 100644 --- a/src/librustc_mir/build/misc.rs +++ b/src/librustc_mir/build/misc.rs @@ -59,7 +59,14 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ty::TyBool => { self.hir.false_literal() } - ty::TyChar => Literal::Value { value: ConstVal::Char('\0') }, + ty::TyChar => { + Literal::Value { + value: self.hir.tcx().mk_const(ty::Const { + val: ConstVal::Char('\0'), + ty + }) + } + } ty::TyUint(ity) => { let val = match ity { ast::UintTy::U8 => ConstInt::U8(0), @@ -68,13 +75,18 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ast::UintTy::U64 => ConstInt::U64(0), ast::UintTy::U128 => ConstInt::U128(0), ast::UintTy::Us => { - let uint_ty = self.hir.tcx().sess.target.uint_type; + let uint_ty = self.hir.tcx().sess.target.usize_ty; let val = ConstUsize::new(0, uint_ty).unwrap(); ConstInt::Usize(val) } }; - Literal::Value { value: ConstVal::Integral(val) } + Literal::Value { + value: self.hir.tcx().mk_const(ty::Const { + val: ConstVal::Integral(val), + ty + }) + } } ty::TyInt(ity) => { let val = match ity { @@ -84,13 +96,18 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ast::IntTy::I64 => ConstInt::I64(0), ast::IntTy::I128 => ConstInt::I128(0), ast::IntTy::Is => { - let int_ty = self.hir.tcx().sess.target.int_type; + let int_ty = self.hir.tcx().sess.target.isize_ty; let val = ConstIsize::new(0, int_ty).unwrap(); ConstInt::Isize(val) } }; - Literal::Value { value: ConstVal::Integral(val) } + Literal::Value { + value: self.hir.tcx().mk_const(ty::Const { + val: ConstVal::Integral(val), + ty + }) + } } _ => { span_bug!(span, "Invalid type for zero_literal: `{:?}`", ty) diff --git a/src/librustc_mir/build/mod.rs b/src/librustc_mir/build/mod.rs index d7a295a1c3..46a5e5abbd 100644 --- a/src/librustc_mir/build/mod.rs +++ b/src/librustc_mir/build/mod.rs @@ -11,10 +11,10 @@ use build; use hair::cx::Cx; -use hair::Pattern; +use hair::LintLevel; use rustc::hir; use rustc::hir::def_id::DefId; -use rustc::middle::region::CodeExtent; +use rustc::middle::region; use rustc::mir::*; use rustc::mir::transform::MirSource; use rustc::mir::visit::{MutVisitor, Lookup}; @@ -71,15 +71,15 @@ pub fn mir_build<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Mir<'t // Assume that everything other than closures // is a constant "initializer" expression. 
match expr.node { - hir::ExprClosure(_, _, body, _) => body, - _ => hir::BodyId { node_id: expr.id } + hir::ExprClosure(_, _, body, _, _) => body, + _ => hir::BodyId { node_id: expr.id }, } } hir::map::NodeVariant(variant) => return create_constructor_shim(tcx, id, &variant.node.data), hir::map::NodeStructCtor(ctor) => return create_constructor_shim(tcx, id, ctor), - _ => unsupported() + _ => unsupported(), }; let src = MirSource::from_node(tcx, id); @@ -95,13 +95,24 @@ pub fn mir_build<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Mir<'t let ty = tcx.type_of(tcx.hir.local_def_id(id)); let mut abi = fn_sig.abi; - let implicit_argument = if let ty::TyClosure(..) = ty.sty { - // HACK(eddyb) Avoid having RustCall on closures, - // as it adds unnecessary (and wrong) auto-tupling. - abi = Abi::Rust; - Some((closure_self_ty(tcx, id, body_id), None)) - } else { - None + let implicit_argument = match ty.sty { + ty::TyClosure(..) => { + // HACK(eddyb) Avoid having RustCall on closures, + // as it adds unnecessary (and wrong) auto-tupling. + abi = Abi::Rust; + Some((closure_self_ty(tcx, id, body_id), None)) + } + ty::TyGenerator(..) => { + let gen_ty = tcx.body_tables(body_id).node_id_to_type(fn_hir_id); + Some((gen_ty, None)) + } + _ => None, + }; + + // FIXME: safety in closures + let safety = match fn_sig.unsafety { + hir::Unsafety::Normal => Safety::Safe, + hir::Unsafety::Unsafe => Safety::FnUnsafe, }; let body = tcx.hir.body(body_id); @@ -114,7 +125,16 @@ pub fn mir_build<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Mir<'t }); let arguments = implicit_argument.into_iter().chain(explicit_arguments); - build::construct_fn(cx, id, arguments, abi, fn_sig.output(), body) + + let (yield_ty, return_ty) = if body.is_generator { + let gen_sig = cx.tables().generator_sigs()[fn_hir_id].clone().unwrap(); + (Some(gen_sig.yield_ty), gen_sig.return_ty) + } else { + (None, fn_sig.output()) + }; + + build::construct_fn(cx, id, arguments, safety, abi, + return_ty, yield_ty, body) } else { build::construct_const(cx, body_id) }; @@ -154,6 +174,26 @@ impl<'a, 'gcx: 'tcx, 'tcx> MutVisitor<'tcx> for GlobalizeMir<'a, 'gcx> { } } + fn visit_region(&mut self, region: &mut ty::Region<'tcx>, _: Location) { + if let Some(lifted) = self.tcx.lift(region) { + *region = lifted; + } else { + span_bug!(self.span, + "found region `{:?}` with inference types/regions in MIR", + region); + } + } + + fn visit_const(&mut self, constant: &mut &'tcx ty::Const<'tcx>, _: Location) { + if let Some(lifted) = self.tcx.lift(constant) { + *constant = lifted; + } else { + span_bug!(self.span, + "found constant `{:?}` with inference types/regions in MIR", + constant); + } + } + fn visit_substs(&mut self, substs: &mut &'tcx Substs<'tcx>, _: Location) { if let Some(lifted) = self.tcx.lift(substs) { *substs = lifted; @@ -199,7 +239,7 @@ fn create_constructor_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, /////////////////////////////////////////////////////////////////////////// // BuildMir -- walks a crate, looking for fn items and methods to build MIR from -fn closure_self_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, +pub fn closure_self_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, closure_expr_id: ast::NodeId, body_id: hir::BodyId) -> Ty<'tcx> { @@ -238,6 +278,13 @@ struct Builder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { /// see the `scope` module for more details scopes: Vec>, + /// The current unsafe block in scope, even if it is hidden by + /// a PushUnsafeBlock + unpushed_unsafe: Safety, + + /// The number of `push_unsafe_block` levels 
in scope + push_unsafe_count: usize, + /// the current set of breakables; see the `scope` module for more /// details breakable_scopes: Vec>, @@ -245,6 +292,7 @@ struct Builder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { /// the vector of all scopes that we have created thus far; /// we track this for debuginfo later visibility_scopes: IndexVec, + visibility_scope_info: IndexVec, visibility_scope: VisibilityScope, /// Maps node ids of variable bindings to the `Local`s created for them. @@ -263,19 +311,7 @@ struct CFG<'tcx> { basic_blocks: IndexVec>, } -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub struct ScopeId(u32); - -impl Idx for ScopeId { - fn new(index: usize) -> ScopeId { - assert!(index < (u32::MAX as usize)); - ScopeId(index as u32) - } - - fn index(self) -> usize { - self.0 as usize - } -} +newtype_index!(ScopeId); /////////////////////////////////////////////////////////////////////////// /// The `BlockAnd` "monad" packages up the new basic block along with a @@ -326,8 +362,10 @@ macro_rules! unpack { fn construct_fn<'a, 'gcx, 'tcx, A>(hir: Cx<'a, 'gcx, 'tcx>, fn_id: ast::NodeId, arguments: A, + safety: Safety, abi: Abi, return_ty: Ty<'gcx>, + yield_ty: Option>, body: &'gcx hir::Body) -> Mir<'tcx> where A: Iterator, Option<&'gcx hir::Pat>)> @@ -336,18 +374,24 @@ fn construct_fn<'a, 'gcx, 'tcx, A>(hir: Cx<'a, 'gcx, 'tcx>, let tcx = hir.tcx(); let span = tcx.hir.span(fn_id); - let mut builder = Builder::new(hir.clone(), span, arguments.len(), return_ty); - - let call_site_extent = CodeExtent::CallSiteScope(body.id()); - let arg_extent = CodeExtent::ParameterScope(body.id()); + let mut builder = Builder::new(hir.clone(), + span, + arguments.len(), + safety, + return_ty); + + let call_site_scope = region::Scope::CallSite(body.value.hir_id.local_id); + let arg_scope = region::Scope::Arguments(body.value.hir_id.local_id); let mut block = START_BLOCK; let source_info = builder.source_info(span); - unpack!(block = builder.in_scope((call_site_extent, source_info), block, |builder| { - unpack!(block = builder.in_scope((arg_extent, source_info), block, |builder| { - builder.args_and_body(block, &arguments, arg_extent, &body.value) + let call_site_s = (call_site_scope, source_info); + unpack!(block = builder.in_scope(call_site_s, LintLevel::Inherited, block, |builder| { + let arg_scope_s = (arg_scope, source_info); + unpack!(block = builder.in_scope(arg_scope_s, LintLevel::Inherited, block, |builder| { + builder.args_and_body(block, &arguments, arg_scope, &body.value) })); // Attribute epilogue to function's closing brace - let fn_end = Span { lo: span.hi, ..span }; + let fn_end = span.with_lo(span.hi()); let source_info = builder.source_info(fn_end); let return_block = builder.return_block(); builder.cfg.terminate(block, source_info, @@ -367,11 +411,11 @@ fn construct_fn<'a, 'gcx, 'tcx, A>(hir: Cx<'a, 'gcx, 'tcx>, // Gather the upvars of a closure, if any. 
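The code that follows walks a closure's free variables and records how each one is captured. A hedged, plain-Rust reminder of what those upvars are at the source level:

fn main() {
    let shared = 10;
    let mut counter = 0;
    {
        // Both `shared` and `counter` are free variables of the closure and
        // become its upvars; `counter` is captured by mutable reference.
        let mut bump = |n: i32| counter += n + shared;
        bump(1);
        bump(2);
    }
    assert_eq!(counter, 23);
}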
let upvar_decls: Vec<_> = tcx.with_freevars(fn_id, |freevars| { freevars.iter().map(|fv| { - let var_def_id = fv.def.def_id(); - let var_node_id = tcx.hir.as_local_node_id(var_def_id).unwrap(); + let var_id = fv.var_id(); + let var_hir_id = tcx.hir.node_to_hir_id(var_id); let closure_expr_id = tcx.hir.local_def_id(fn_id).index; let capture = hir.tables().upvar_capture(ty::UpvarId { - var_id: var_def_id.index, + var_id: var_hir_id, closure_expr_id, }); let by_ref = match capture { @@ -382,7 +426,7 @@ fn construct_fn<'a, 'gcx, 'tcx, A>(hir: Cx<'a, 'gcx, 'tcx>, debug_name: keywords::Invalid.name(), by_ref, }; - if let Some(hir::map::NodeBinding(pat)) = tcx.hir.find(var_node_id) { + if let Some(hir::map::NodeBinding(pat)) = tcx.hir.find(var_id) { if let hir::PatKind::Binding(_, _, ref ident, _) = pat.node { decl.debug_name = ident.node; } @@ -391,7 +435,7 @@ fn construct_fn<'a, 'gcx, 'tcx, A>(hir: Cx<'a, 'gcx, 'tcx>, }).collect() }); - let mut mir = builder.finish(upvar_decls, return_ty); + let mut mir = builder.finish(upvar_decls, return_ty, yield_ty); mir.spread_arg = spread_arg; mir } @@ -404,7 +448,7 @@ fn construct_const<'a, 'gcx, 'tcx>(hir: Cx<'a, 'gcx, 'tcx>, let ty = hir.tables().expr_ty_adjusted(ast_expr); let owner_id = tcx.hir.body_owner(body_id); let span = tcx.hir.span(owner_id); - let mut builder = Builder::new(hir.clone(), span, 0, ty); + let mut builder = Builder::new(hir.clone(), span, 0, Safety::Safe, ty); let mut block = START_BLOCK; let expr = builder.hir.mirror(ast_expr); @@ -416,26 +460,29 @@ fn construct_const<'a, 'gcx, 'tcx>(hir: Cx<'a, 'gcx, 'tcx>, // Constants can't `return` so a return block should not be created. assert_eq!(builder.cached_return_block, None); - builder.finish(vec![], ty) + builder.finish(vec![], ty, None) } fn construct_error<'a, 'gcx, 'tcx>(hir: Cx<'a, 'gcx, 'tcx>, - body_id: hir::BodyId) - -> Mir<'tcx> { - let span = hir.tcx().hir.span(hir.tcx().hir.body_owner(body_id)); + body_id: hir::BodyId) + -> Mir<'tcx> { + let owner_id = hir.tcx().hir.body_owner(body_id); + let span = hir.tcx().hir.span(owner_id); let ty = hir.tcx().types.err; - let mut builder = Builder::new(hir, span, 0, ty); + let mut builder = Builder::new(hir, span, 0, Safety::Safe, ty); let source_info = builder.source_info(span); builder.cfg.terminate(START_BLOCK, source_info, TerminatorKind::Unreachable); - builder.finish(vec![], ty) + builder.finish(vec![], ty, None) } impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { fn new(hir: Cx<'a, 'gcx, 'tcx>, span: Span, arg_count: usize, + safety: Safety, return_ty: Ty<'tcx>) -> Builder<'a, 'gcx, 'tcx> { + let lint_level = LintLevel::Explicit(hir.root_lint_level); let mut builder = Builder { hir, cfg: CFG { basic_blocks: IndexVec::new() }, @@ -444,6 +491,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { scopes: vec![], visibility_scopes: IndexVec::new(), visibility_scope: ARGUMENT_VISIBILITY_SCOPE, + visibility_scope_info: IndexVec::new(), + push_unsafe_count: 0, + unpushed_unsafe: safety, breakable_scopes: vec![], local_decls: IndexVec::from_elem_n(LocalDecl::new_return_pointer(return_ty, span), 1), @@ -454,7 +504,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { }; assert_eq!(builder.cfg.start_new_block(), START_BLOCK); - assert_eq!(builder.new_visibility_scope(span), ARGUMENT_VISIBILITY_SCOPE); + assert_eq!( + builder.new_visibility_scope(span, lint_level, Some(safety)), + ARGUMENT_VISIBILITY_SCOPE); builder.visibility_scopes[ARGUMENT_VISIBILITY_SCOPE].parent_scope = None; builder @@ -462,7 +514,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 
'gcx, 'tcx> { fn finish(self, upvar_decls: Vec, - return_ty: Ty<'tcx>) + return_ty: Ty<'tcx>, + yield_ty: Option>) -> Mir<'tcx> { for (index, block) in self.cfg.basic_blocks.iter().enumerate() { if block.terminator.is_none() { @@ -472,8 +525,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { Mir::new(self.cfg.basic_blocks, self.visibility_scopes, + ClearOnDecode::Set(self.visibility_scope_info), IndexVec::new(), return_ty, + yield_ty, self.local_decls, self.arg_count, upvar_decls, @@ -484,7 +539,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { fn args_and_body(&mut self, mut block: BasicBlock, arguments: &[(Ty<'gcx>, Option<&'gcx hir::Pat>)], - argument_extent: CodeExtent, + argument_scope: region::Scope, ast_body: &'gcx hir::Expr) -> BlockAnd<()> { @@ -505,7 +560,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { scope: ARGUMENT_VISIBILITY_SCOPE, span: pattern.map_or(self.fn_span, |pat| pat.span) }, + lexical_scope: ARGUMENT_VISIBILITY_SCOPE, name, + internal: false, is_user_variable: false, }); } @@ -517,17 +574,15 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let lvalue = Lvalue::Local(Local::new(index + 1)); if let Some(pattern) = pattern { - let pattern = Pattern::from_hir(self.hir.tcx().global_tcx(), - self.hir.param_env.and(self.hir.identity_substs), - self.hir.tables(), - pattern); - scope = self.declare_bindings(scope, ast_body.span, &pattern); + let pattern = self.hir.pattern_from_hir(pattern); + scope = self.declare_bindings(scope, ast_body.span, + LintLevel::Inherited, &pattern); unpack!(block = self.lvalue_into_pattern(block, pattern, &lvalue)); } // Make sure we drop (parts of) the argument even when not matched on. self.schedule_drop(pattern.as_ref().map_or(ast_body.span, |pat| pat.span), - argument_extent, &lvalue, ty); + argument_scope, &lvalue, ty); } diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index c36da9410f..0327341943 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -12,7 +12,7 @@ Managing the scope stack. The scopes are tied to lexical scopes, so as we descend the HAIR, we push a scope on the stack, translate ite contents, and then pop it off. Every scope is named by a -`CodeExtent`. +`region::Scope`. ### SEME Regions @@ -23,7 +23,7 @@ via a `break` or `return` or just by fallthrough, that marks an exit from the scope. Each lexical scope thus corresponds to a single-entry, multiple-exit (SEME) region in the control-flow graph. -For now, we keep a mapping from each `CodeExtent` to its +For now, we keep a mapping from each `region::Scope` to its corresponding SEME region for later reference (see caveat in next paragraph). This is because region scopes are tied to them. Eventually, when we shift to non-lexical lifetimes, there should @@ -88,8 +88,10 @@ should go to. */ use build::{BlockAnd, BlockAndExtension, Builder, CFG}; -use rustc::middle::region::CodeExtent; -use rustc::ty::Ty; +use hair::LintLevel; +use rustc::middle::region; +use rustc::ty::{Ty, TyCtxt}; +use rustc::hir::def_id::LOCAL_CRATE; use rustc::mir::*; use rustc::mir::transform::MirSource; use syntax_pos::{Span}; @@ -101,11 +103,11 @@ pub struct Scope<'tcx> { /// The visibility scope this scope was created in. visibility_scope: VisibilityScope, - /// the extent of this scope within source code. - extent: CodeExtent, + /// the region span of this scope within source code. 
+ region_scope: region::Scope, - /// the span of that extent - extent_span: Span, + /// the span of that region_scope + region_scope_span: Span, /// Whether there's anything to do for the cleanup path, that is, /// when unwinding through this scope. This includes destructors, @@ -125,7 +127,10 @@ pub struct Scope<'tcx> { drops: Vec>, /// The cache for drop chain on “normal” exit into a particular BasicBlock. - cached_exits: FxHashMap<(BasicBlock, CodeExtent), BasicBlock>, + cached_exits: FxHashMap<(BasicBlock, region::Scope), BasicBlock>, + + /// The cache for drop chain on "generator drop" exit. + cached_generator_drop: Option, } #[derive(Debug)] @@ -140,22 +145,30 @@ struct DropData<'tcx> { kind: DropKind } +#[derive(Debug, Default, Clone, Copy)] +struct CachedBlock { + /// The cached block for the cleanups-on-diverge path. This block + /// contains code to run the current drop and all the preceding + /// drops (i.e. those having lower index in Drop’s Scope drop + /// array) + unwind: Option, + + /// The cached block for unwinds during cleanups-on-generator-drop path + generator_drop: Option, +} + #[derive(Debug)] enum DropKind { Value { - /// The cached block for the cleanups-on-diverge path. This block - /// contains code to run the current drop and all the preceding - /// drops (i.e. those having lower index in Drop’s Scope drop - /// array) - cached_block: Option + cached_block: CachedBlock, }, Storage } #[derive(Clone, Debug)] pub struct BreakableScope<'tcx> { - /// Extent of the loop - pub extent: CodeExtent, + /// Region scope of the loop + pub region_scope: region::Scope, /// Where the body of the loop begins. `None` if block pub continue_block: Option, /// Block to branch into when the loop or block terminates (either by being `break`-en out @@ -166,6 +179,29 @@ pub struct BreakableScope<'tcx> { pub break_destination: Lvalue<'tcx>, } +impl CachedBlock { + fn invalidate(&mut self) { + self.generator_drop = None; + self.unwind = None; + } + + fn get(&self, generator_drop: bool) -> Option { + if generator_drop { + self.generator_drop + } else { + self.unwind + } + } + + fn ref_mut(&mut self, generator_drop: bool) -> &mut Option { + if generator_drop { + &mut self.generator_drop + } else { + &mut self.unwind + } + } +} + impl DropKind { fn may_panic(&self) -> bool { match *self { @@ -187,7 +223,7 @@ impl<'tcx> Scope<'tcx> { if !unwind { return; } for dropdata in &mut self.drops { if let DropKind::Value { ref mut cached_block } = dropdata.kind { - *cached_block = None; + cached_block.invalidate(); } } } @@ -196,10 +232,12 @@ impl<'tcx> Scope<'tcx> { /// /// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order for /// this method to work correctly. 
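The `CachedBlock` comments above separate the cleanups-on-diverge (unwind) path from the generator-drop path. A hedged, plain-Rust illustration of the former: unwinding out of a scope still runs its drops.

struct Cleanup;

impl Drop for Cleanup {
    fn drop(&mut self) {
        println!("cleanup ran on the unwind path");
    }
}

fn main() {
    let result = std::panic::catch_unwind(|| {
        let _c = Cleanup;
        panic!("diverge"); // unwinding out of this scope still drops `_c`
    });
    assert!(result.is_err());
}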
- fn cached_block(&self) -> Option { + fn cached_block(&self, generator_drop: bool) -> Option { let mut drops = self.drops.iter().rev().filter_map(|data| { match data.kind { - DropKind::Value { cached_block } => Some(cached_block), + DropKind::Value { cached_block } => { + Some(cached_block.get(generator_drop)) + } DropKind::Storage => None } }); @@ -233,9 +271,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { f: F) -> R where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> R { - let extent = self.topmost_scope(); + let region_scope = self.topmost_scope(); let scope = BreakableScope { - extent, + region_scope, continue_block: loop_block, break_block, break_destination, @@ -243,41 +281,64 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { self.breakable_scopes.push(scope); let res = f(self); let breakable_scope = self.breakable_scopes.pop().unwrap(); - assert!(breakable_scope.extent == extent); + assert!(breakable_scope.region_scope == region_scope); res } pub fn in_opt_scope(&mut self, - opt_extent: Option<(CodeExtent, SourceInfo)>, + opt_scope: Option<(region::Scope, SourceInfo)>, mut block: BasicBlock, f: F) -> BlockAnd where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd { - debug!("in_opt_scope(opt_extent={:?}, block={:?})", opt_extent, block); - if let Some(extent) = opt_extent { self.push_scope(extent); } + debug!("in_opt_scope(opt_scope={:?}, block={:?})", opt_scope, block); + if let Some(region_scope) = opt_scope { self.push_scope(region_scope); } let rv = unpack!(block = f(self)); - if let Some(extent) = opt_extent { - unpack!(block = self.pop_scope(extent, block)); + if let Some(region_scope) = opt_scope { + unpack!(block = self.pop_scope(region_scope, block)); } - debug!("in_scope: exiting opt_extent={:?} block={:?}", opt_extent, block); + debug!("in_scope: exiting opt_scope={:?} block={:?}", opt_scope, block); block.and(rv) } /// Convenience wrapper that pushes a scope and then executes `f` /// to build its contents, popping the scope afterwards. pub fn in_scope(&mut self, - extent: (CodeExtent, SourceInfo), + region_scope: (region::Scope, SourceInfo), + lint_level: LintLevel, mut block: BasicBlock, f: F) -> BlockAnd where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd { - debug!("in_scope(extent={:?}, block={:?})", extent, block); - self.push_scope(extent); + debug!("in_scope(region_scope={:?}, block={:?})", region_scope, block); + let visibility_scope = self.visibility_scope; + let tcx = self.hir.tcx(); + if let LintLevel::Explicit(node_id) = lint_level { + let same_lint_scopes = tcx.dep_graph.with_ignore(|| { + let sets = tcx.lint_levels(LOCAL_CRATE); + let parent_hir_id = + tcx.hir.definitions().node_to_hir_id( + self.visibility_scope_info[visibility_scope].lint_root + ); + let current_hir_id = + tcx.hir.definitions().node_to_hir_id(node_id); + sets.lint_level_set(parent_hir_id) == + sets.lint_level_set(current_hir_id) + }); + + if !same_lint_scopes { + self.visibility_scope = + self.new_visibility_scope(region_scope.1.span, lint_level, + None); + } + } + self.push_scope(region_scope); let rv = unpack!(block = f(self)); - unpack!(block = self.pop_scope(extent, block)); - debug!("in_scope: exiting extent={:?} block={:?}", extent, block); + unpack!(block = self.pop_scope(region_scope, block)); + self.visibility_scope = visibility_scope; + debug!("in_scope: exiting region_scope={:?} block={:?}", region_scope, block); block.and(rv) } @@ -285,27 +346,28 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// scope and call `pop_scope` afterwards. 
Note that these two /// calls must be paired; using `in_scope` as a convenience /// wrapper maybe preferable. - pub fn push_scope(&mut self, extent: (CodeExtent, SourceInfo)) { - debug!("push_scope({:?})", extent); + pub fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo)) { + debug!("push_scope({:?})", region_scope); let vis_scope = self.visibility_scope; self.scopes.push(Scope { visibility_scope: vis_scope, - extent: extent.0, - extent_span: extent.1.span, + region_scope: region_scope.0, + region_scope_span: region_scope.1.span, needs_cleanup: false, drops: vec![], + cached_generator_drop: None, cached_exits: FxHashMap() }); } - /// Pops a scope, which should have extent `extent`, adding any - /// drops onto the end of `block` that are needed. This must - /// match 1-to-1 with `push_scope`. + /// Pops a scope, which should have region scope `region_scope`, + /// adding any drops onto the end of `block` that are needed. + /// This must match 1-to-1 with `push_scope`. pub fn pop_scope(&mut self, - extent: (CodeExtent, SourceInfo), + region_scope: (region::Scope, SourceInfo), mut block: BasicBlock) -> BlockAnd<()> { - debug!("pop_scope({:?}, {:?})", extent, block); + debug!("pop_scope({:?}, {:?})", region_scope, block); // If we are emitting a `drop` statement, we need to have the cached // diverge cleanup pads ready in case that drop panics. let may_panic = @@ -314,32 +376,36 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { self.diverge_cleanup(); } let scope = self.scopes.pop().unwrap(); - assert_eq!(scope.extent, extent.0); + assert_eq!(scope.region_scope, region_scope.0); + + self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope); unpack!(block = build_scope_drops(&mut self.cfg, &scope, &self.scopes, block, - self.arg_count)); + self.arg_count, + false)); - self.cfg.push_end_region(block, extent.1, scope.extent); block.unit() } /// Branch out of `block` to `target`, exiting all scopes up to - /// and including `extent`. This will insert whatever drops are + /// and including `region_scope`. This will insert whatever drops are /// needed, as well as tracking this exit for the SEME region. See /// module comment for details. 
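// A minimal standalone sketch of the push/pop pairing contract described
// above, with hypothetical stand-in types rather than the real Builder:
// the scope pushed on entry must be the scope popped on exit, and the
// `in_scope`-style wrapper keeps the two calls paired around a closure.
#[derive(Debug, Clone, Copy, PartialEq)]
struct ScopeId(usize);

struct ScopeStack {
    scopes: Vec<ScopeId>,
}

impl ScopeStack {
    fn push_scope(&mut self, scope: ScopeId) {
        self.scopes.push(scope);
    }

    fn pop_scope(&mut self, scope: ScopeId) {
        let popped = self.scopes.pop().expect("pop_scope: no scope to pop");
        assert_eq!(popped, scope, "push_scope/pop_scope must match 1-to-1");
    }

    // Convenience wrapper: push, run the body, then pop the same scope.
    fn in_scope<F, R>(&mut self, scope: ScopeId, f: F) -> R
        where F: FnOnce(&mut Self) -> R
    {
        self.push_scope(scope);
        let result = f(&mut *self);
        self.pop_scope(scope);
        result
    }
}

fn main() {
    let mut stack = ScopeStack { scopes: vec![] };
    let answer = stack.in_scope(ScopeId(0), |outer| {
        outer.in_scope(ScopeId(1), |_inner| 42)
    });
    assert_eq!(answer, 42);
    assert!(stack.scopes.is_empty());
}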
pub fn exit_scope(&mut self, span: Span, - extent: (CodeExtent, SourceInfo), + region_scope: (region::Scope, SourceInfo), mut block: BasicBlock, target: BasicBlock) { - debug!("exit_scope(extent={:?}, block={:?}, target={:?})", extent, block, target); - let scope_count = 1 + self.scopes.iter().rev().position(|scope| scope.extent == extent.0) - .unwrap_or_else(||{ - span_bug!(span, "extent {:?} does not enclose", extent) - }); + debug!("exit_scope(region_scope={:?}, block={:?}, target={:?})", + region_scope, block, target); + let scope_count = 1 + self.scopes.iter().rev() + .position(|scope| scope.region_scope == region_scope.0) + .unwrap_or_else(|| { + span_bug!(span, "region_scope {:?} does not enclose", region_scope) + }); let len = self.scopes.len(); assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes"); @@ -355,7 +421,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let mut rest = &mut self.scopes[(len - scope_count)..]; while let Some((scope, rest_)) = {rest}.split_last_mut() { rest = rest_; - block = if let Some(&e) = scope.cached_exits.get(&(target, extent.0)) { + block = if let Some(&e) = scope.cached_exits.get(&(target, region_scope.0)) { self.cfg.terminate(block, scope.source_info(span), TerminatorKind::Goto { target: e }); return; @@ -363,17 +429,19 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let b = self.cfg.start_new_block(); self.cfg.terminate(block, scope.source_info(span), TerminatorKind::Goto { target: b }); - scope.cached_exits.insert((target, extent.0), b); + scope.cached_exits.insert((target, region_scope.0), b); b }; + + // End all regions for scopes out of which we are breaking. + self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope); + unpack!(block = build_scope_drops(&mut self.cfg, scope, rest, block, - self.arg_count)); - - // End all regions for scopes out of which we are breaking. - self.cfg.push_end_region(block, extent.1, scope.extent); + self.arg_count, + false)); } } let scope = &self.scopes[len - scope_count]; @@ -381,14 +449,79 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { TerminatorKind::Goto { target: target }); } + /// Creates a path that performs all required cleanup for dropping a generator. + /// + /// This path terminates in GeneratorDrop. Returns the start of the path. + /// None indicates there’s no cleanup to do at this point. + pub fn generator_drop_cleanup(&mut self) -> Option { + if !self.scopes.iter().any(|scope| scope.needs_cleanup) { + return None; + } + + // Fill in the cache + self.diverge_cleanup_gen(true); + + let src_info = self.scopes[0].source_info(self.fn_span); + let mut block = self.cfg.start_new_block(); + let result = block; + let mut rest = &mut self.scopes[..]; + + while let Some((scope, rest_)) = {rest}.split_last_mut() { + rest = rest_; + if !scope.needs_cleanup { + continue; + } + block = if let Some(b) = scope.cached_generator_drop { + self.cfg.terminate(block, src_info, + TerminatorKind::Goto { target: b }); + return Some(result); + } else { + let b = self.cfg.start_new_block(); + scope.cached_generator_drop = Some(b); + self.cfg.terminate(block, src_info, + TerminatorKind::Goto { target: b }); + b + }; + unpack!(block = build_scope_drops(&mut self.cfg, + scope, + rest, + block, + self.arg_count, + true)); + + // End all regions for scopes out of which we are breaking. 
+ self.cfg.push_end_region(self.hir.tcx(), block, src_info, scope.region_scope); + } + + self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop); + + Some(result) + } + /// Creates a new visibility scope, nested in the current one. - pub fn new_visibility_scope(&mut self, span: Span) -> VisibilityScope { + pub fn new_visibility_scope(&mut self, + span: Span, + lint_level: LintLevel, + safety: Option) -> VisibilityScope { let parent = self.visibility_scope; - let scope = VisibilityScope::new(self.visibility_scopes.len()); - self.visibility_scopes.push(VisibilityScopeData { + debug!("new_visibility_scope({:?}, {:?}, {:?}) - parent({:?})={:?}", + span, lint_level, safety, + parent, self.visibility_scope_info.get(parent)); + let scope = self.visibility_scopes.push(VisibilityScopeData { span, parent_scope: Some(parent), }); + let scope_info = VisibilityScopeInfo { + lint_root: if let LintLevel::Explicit(lint_root) = lint_level { + lint_root + } else { + self.visibility_scope_info[parent].lint_root + }, + safety: safety.unwrap_or_else(|| { + self.visibility_scope_info[parent].safety + }) + }; + self.visibility_scope_info.push(scope_info); scope } @@ -398,12 +531,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// resolving `break` and `continue`. pub fn find_breakable_scope(&mut self, span: Span, - label: CodeExtent) + label: region::Scope) -> &mut BreakableScope<'tcx> { // find the loop-scope with the correct id self.breakable_scopes.iter_mut() .rev() - .filter(|breakable_scope| breakable_scope.extent == label) + .filter(|breakable_scope| breakable_scope.region_scope == label) .next() .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found")) } @@ -416,23 +549,23 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } } - /// Returns the extent of the scope which should be exited by a + /// Returns the `region::Scope` of the scope which should be exited by a /// return. - pub fn extent_of_return_scope(&self) -> CodeExtent { + pub fn region_scope_of_return_scope(&self) -> region::Scope { // The outermost scope (`scopes[0]`) will be the `CallSiteScope`. // We want `scopes[1]`, which is the `ParameterScope`. assert!(self.scopes.len() >= 2); - assert!(match self.scopes[1].extent { - CodeExtent::ParameterScope(_) => true, + assert!(match self.scopes[1].region_scope.data() { + region::ScopeData::Arguments(_) => true, _ => false, }); - self.scopes[1].extent + self.scopes[1].region_scope } /// Returns the topmost active scope, which is known to be alive until /// the next scope expression. - pub fn topmost_scope(&self) -> CodeExtent { - self.scopes.last().expect("topmost_scope: no scopes present").extent + pub fn topmost_scope(&self) -> region::Scope { + self.scopes.last().expect("topmost_scope: no scopes present").region_scope } /// Returns the scope that we should use as the lifetime of an @@ -457,7 +590,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// /// When building statics/constants, returns `None` since /// intermediate values do not have to be dropped in that case. - pub fn local_scope(&self) -> Option { + pub fn local_scope(&self) -> Option { match self.hir.src { MirSource::Const(_) | MirSource::Static(..) => @@ -465,7 +598,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { None, MirSource::Fn(_) => Some(self.topmost_scope()), - MirSource::Promoted(..) => + MirSource::Promoted(..) | + MirSource::GeneratorDrop(..) 
=> bug!(), } } @@ -473,15 +607,15 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // Scheduling drops // ================ /// Indicates that `lvalue` should be dropped on exit from - /// `extent`. + /// `region_scope`. pub fn schedule_drop(&mut self, span: Span, - extent: CodeExtent, + region_scope: region::Scope, lvalue: &Lvalue<'tcx>, lvalue_ty: Ty<'tcx>) { let needs_drop = self.hir.needs_drop(lvalue_ty); let drop_kind = if needs_drop { - DropKind::Value { cached_block: None } + DropKind::Value { cached_block: CachedBlock::default() } } else { // Only temps and vars need their storage dead. match *lvalue { @@ -491,7 +625,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { }; for scope in self.scopes.iter_mut().rev() { - let this_scope = scope.extent == extent; + let this_scope = scope.region_scope == region_scope; // When building drops, we try to cache chains of drops in such a way so these drops // could be reused by the drops which would branch into the cached (already built) // blocks. This, however, means that whenever we add a drop into a scope which already @@ -544,10 +678,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { if let DropKind::Value { .. } = drop_kind { scope.needs_cleanup = true; } - let tcx = self.hir.tcx(); - let extent_span = extent.span(&tcx.hir).unwrap(); + let region_scope_span = region_scope.span(self.hir.tcx(), + &self.hir.region_scope_tree); // Attribute scope exit drops to scope's closing brace - let scope_end = Span { lo: extent_span.hi, .. extent_span}; + let scope_end = region_scope_span.with_lo(region_scope_span.hi()); scope.drops.push(DropData { span: scope_end, location: lvalue.clone(), @@ -556,7 +690,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { return; } } - span_bug!(span, "extent {:?} not in scope to drop {:?}", extent, lvalue); + span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, lvalue); } // Other @@ -567,6 +701,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// See module comment for more details. None indicates there’s no /// cleanup to do at this point. pub fn diverge_cleanup(&mut self) -> Option { + self.diverge_cleanup_gen(false) + } + + fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> Option { if !self.scopes.iter().any(|scope| scope.needs_cleanup) { return None; } @@ -599,7 +737,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { }; for scope in scopes.iter_mut() { - target = build_diverge_scope(cfg, scope.extent_span, scope, target); + target = build_diverge_scope(self.hir.tcx(), cfg, scope.region_scope_span, + scope, target, generator_drop); } Some(target) } @@ -676,7 +815,8 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>, scope: &Scope<'tcx>, earlier_scopes: &[Scope<'tcx>], mut block: BasicBlock, - arg_count: usize) + arg_count: usize, + generator_drop: bool) -> BlockAnd<()> { debug!("build_scope_drops({:?} -> {:?})", block, scope); let mut iter = scope.drops.iter().rev().peekable(); @@ -688,16 +828,20 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>, // for us to diverge into in case the drop panics. 
let on_diverge = iter.peek().iter().filter_map(|dd| { match dd.kind { - DropKind::Value { cached_block: None } => - span_bug!(drop_data.span, "cached block not present?"), - DropKind::Value { cached_block } => cached_block, + DropKind::Value { cached_block } => { + let result = cached_block.get(generator_drop); + if result.is_none() { + span_bug!(drop_data.span, "cached block not present?") + } + result + }, DropKind::Storage => None } }).next(); // If there’s no `cached_block`s within current scope, // we must look for one in the enclosing scope. let on_diverge = on_diverge.or_else(|| { - earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next() + earlier_scopes.iter().rev().flat_map(|s| s.cached_block(generator_drop)).next() }); let next = cfg.start_new_block(); cfg.terminate(block, source_info, TerminatorKind::Drop { @@ -710,13 +854,18 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>, DropKind::Storage => {} } + // We do not need to emit StorageDead for generator drops + if generator_drop { + continue + } + // Drop the storage for both value and storage drops. // Only temps and vars need their storage dead. match drop_data.location { Lvalue::Local(index) if index.index() > arg_count => { cfg.push(block, Statement { source_info, - kind: StatementKind::StorageDead(drop_data.location.clone()) + kind: StatementKind::StorageDead(index) }); } _ => continue @@ -725,10 +874,12 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>, block.unit() } -fn build_diverge_scope<'a, 'gcx, 'tcx>(cfg: &mut CFG<'tcx>, +fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + cfg: &mut CFG<'tcx>, span: Span, scope: &mut Scope<'tcx>, - mut target: BasicBlock) + mut target: BasicBlock, + generator_drop: bool) -> BasicBlock { // Build up the drops in **reverse** order. The end result will @@ -742,7 +893,7 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(cfg: &mut CFG<'tcx>, // The code in this function reads from right to left. At each // point, we check for cached blocks representing the // remainder. If everything is cached, we'll just walk right to - // left reading the cached results but never created anything. + // left reading the cached results but never create anything. let visibility_scope = scope.visibility_scope; let source_info = |span| SourceInfo { @@ -764,7 +915,7 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(cfg: &mut CFG<'tcx>, // match the behavior of clang, but on inspection eddyb says // this is not what clang does. let cached_block = match drop_data.kind { - DropKind::Value { ref mut cached_block } => cached_block, + DropKind::Value { ref mut cached_block } => cached_block.ref_mut(generator_drop), DropKind::Storage => continue }; target = if let Some(cached_block) = *cached_block { @@ -786,7 +937,7 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(cfg: &mut CFG<'tcx>, // becomes trivial goto after pass that removes all EndRegions.) 
{ let block = cfg.start_new_cleanup_block(); - cfg.push_end_region(block, source_info(span), scope.extent); + cfg.push_end_region(tcx, block, source_info(span), scope.region_scope); cfg.terminate(block, source_info(span), TerminatorKind::Goto { target: target }); target = block } diff --git a/src/librustc_mir/dataflow/impls/borrows.rs b/src/librustc_mir/dataflow/impls/borrows.rs index ab62342e60..9321121fe1 100644 --- a/src/librustc_mir/dataflow/impls/borrows.rs +++ b/src/librustc_mir/dataflow/impls/borrows.rs @@ -11,6 +11,7 @@ use rustc::mir::{self, Location, Mir}; use rustc::mir::visit::Visitor; use rustc::ty::{Region, TyCtxt}; +use rustc::ty::RegionKind; use rustc::ty::RegionKind::ReScope; use rustc::util::nodemap::{FxHashMap, FxHashSet}; @@ -21,6 +22,8 @@ use rustc_data_structures::indexed_vec::{IndexVec}; use dataflow::{BitDenotation, BlockSets, DataflowOperator}; pub use dataflow::indexes::BorrowIndex; +use syntax_pos::Span; + use std::fmt; // `Borrows` maps each dataflow bit to an `Rvalue::Ref`, which can be @@ -32,6 +35,7 @@ pub struct Borrows<'a, 'tcx: 'a> { borrows: IndexVec>, location_map: FxHashMap, region_map: FxHashMap, FxHashSet>, + region_span_map: FxHashMap, } // temporarily allow some dead fields: `kind` and `region` will be @@ -63,18 +67,21 @@ impl<'a, 'tcx> Borrows<'a, 'tcx> { pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &'a Mir<'tcx>) -> Self { let mut visitor = GatherBorrows { idx_vec: IndexVec::new(), location_map: FxHashMap(), - region_map: FxHashMap(), }; + region_map: FxHashMap(), + region_span_map: FxHashMap()}; visitor.visit_mir(mir); return Borrows { tcx: tcx, mir: mir, borrows: visitor.idx_vec, location_map: visitor.location_map, - region_map: visitor.region_map, }; + region_map: visitor.region_map, + region_span_map: visitor.region_span_map}; struct GatherBorrows<'tcx> { idx_vec: IndexVec>, location_map: FxHashMap, region_map: FxHashMap, FxHashSet>, + region_span_map: FxHashMap, } impl<'tcx> Visitor<'tcx> for GatherBorrows<'tcx> { fn visit_rvalue(&mut self, @@ -90,6 +97,16 @@ impl<'a, 'tcx> Borrows<'a, 'tcx> { borrows.insert(idx); } } + + fn visit_statement(&mut self, + block: mir::BasicBlock, + statement: &mir::Statement<'tcx>, + location: Location) { + if let mir::StatementKind::EndRegion(region_scope) = statement.kind { + self.region_span_map.insert(ReScope(region_scope), statement.source_info.span); + } + self.super_statement(block, statement, location); + } } } @@ -98,6 +115,12 @@ impl<'a, 'tcx> Borrows<'a, 'tcx> { pub fn location(&self, idx: BorrowIndex) -> &Location { &self.borrows[idx].location } + + pub fn region_span(&self, region: &Region) -> Span { + let opt_span = self.region_span_map.get(region); + assert!(opt_span.is_some(), "end region not found for {:?}", region); + *opt_span.unwrap() + } } impl<'a, 'tcx> BitDenotation for Borrows<'a, 'tcx> { @@ -107,7 +130,7 @@ impl<'a, 'tcx> BitDenotation for Borrows<'a, 'tcx> { self.borrows.len() } fn start_block_effect(&self, _sets: &mut BlockSets) { - // no borrows of code extents have been taken prior to + // no borrows of code region_scopes have been taken prior to // function execution, so this method has no effect on // `_sets`. 
} @@ -121,12 +144,12 @@ impl<'a, 'tcx> BitDenotation for Borrows<'a, 'tcx> { panic!("could not find statement at location {:?}"); }); match stmt.kind { - mir::StatementKind::EndRegion(extent) => { - let borrow_indexes = self.region_map.get(&ReScope(extent)).unwrap_or_else(|| { - panic!("could not find BorrowIndexs for code-extent {:?}", extent); - }); - - for idx in borrow_indexes { sets.kill(&idx); } + mir::StatementKind::EndRegion(region_scope) => { + if let Some(borrow_indexes) = self.region_map.get(&ReScope(region_scope)) { + for idx in borrow_indexes { sets.kill(&idx); } + } else { + // (if there is no entry, then there are no borrows to be tracked) + } } mir::StatementKind::Assign(_, ref rhs) => { @@ -153,7 +176,7 @@ impl<'a, 'tcx> BitDenotation for Borrows<'a, 'tcx> { fn terminator_effect(&self, _sets: &mut BlockSets, _location: Location) { - // no terminators start nor end code extents. + // no terminators start nor end region scopes. } fn propagate_call_return(&self, @@ -161,7 +184,7 @@ impl<'a, 'tcx> BitDenotation for Borrows<'a, 'tcx> { _call_bb: mir::BasicBlock, _dest_bb: mir::BasicBlock, _dest_lval: &mir::Lvalue) { - // there are no effects on the extents from method calls. + // there are no effects on the region scopes from method calls. } } diff --git a/src/librustc_mir/dataflow/impls/mod.rs b/src/librustc_mir/dataflow/impls/mod.rs index a4421b216c..19a595622b 100644 --- a/src/librustc_mir/dataflow/impls/mod.rs +++ b/src/librustc_mir/dataflow/impls/mod.rs @@ -27,6 +27,10 @@ use super::drop_flag_effects_for_function_entry; use super::drop_flag_effects_for_location; use super::on_lookup_result_bits; +mod storage_liveness; + +pub use self::storage_liveness::*; + #[allow(dead_code)] pub(super) mod borrows; diff --git a/src/librustc_mir/dataflow/impls/storage_liveness.rs b/src/librustc_mir/dataflow/impls/storage_liveness.rs new file mode 100644 index 0000000000..98615c6b26 --- /dev/null +++ b/src/librustc_mir/dataflow/impls/storage_liveness.rs @@ -0,0 +1,82 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
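// A minimal standalone sketch of the gen/kill scheme this analysis uses,
// over a plain bit mask instead of the real dataflow framework:
// `StorageLive(l)` gens the bit for local `l`, `StorageDead(l)` kills it,
// the join of predecessor states is bitwise OR ("maybe" live), and the
// bottom value is all-dead. `Stmt` is a hypothetical stand-in for the
// relevant MIR statement kinds, not the actual `StatementKind`.
#[derive(Debug, Clone, Copy)]
enum Stmt {
    StorageLive(usize),
    StorageDead(usize),
    Other,
}

fn statement_effect(state: &mut u64, stmt: Stmt) {
    match stmt {
        Stmt::StorageLive(l) => *state |= 1 << l,    // gen
        Stmt::StorageDead(l) => *state &= !(1 << l), // kill
        Stmt::Other => {}
    }
}

fn join(pred1: u64, pred2: u64) -> u64 {
    pred1 | pred2 // "maybe" means we union the effects of both preds
}

fn main() {
    let bottom = 0u64; // nothing is live on function entry
    let mut block_a = bottom;
    statement_effect(&mut block_a, Stmt::StorageLive(0));
    statement_effect(&mut block_a, Stmt::StorageLive(1));
    statement_effect(&mut block_a, Stmt::Other);
    statement_effect(&mut block_a, Stmt::StorageDead(1));

    let mut block_b = bottom;
    statement_effect(&mut block_b, Stmt::StorageLive(2));

    // At a join point reached from both blocks, locals 0 and 2 may be live.
    assert_eq!(join(block_a, block_b), 0b101);
}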
+ +pub use super::*; + +use rustc::mir::*; +use dataflow::BitDenotation; + +#[derive(Copy, Clone)] +pub struct MaybeStorageLive<'a, 'tcx: 'a> { + mir: &'a Mir<'tcx>, +} + +impl<'a, 'tcx: 'a> MaybeStorageLive<'a, 'tcx> { + pub fn new(mir: &'a Mir<'tcx>) + -> Self { + MaybeStorageLive { mir: mir } + } + + pub fn mir(&self) -> &Mir<'tcx> { + self.mir + } +} + +impl<'a, 'tcx> BitDenotation for MaybeStorageLive<'a, 'tcx> { + type Idx = Local; + fn name() -> &'static str { "maybe_storage_live" } + fn bits_per_block(&self) -> usize { + self.mir.local_decls.len() + } + + fn start_block_effect(&self, _sets: &mut BlockSets) { + // Nothing is live on function entry + } + + fn statement_effect(&self, + sets: &mut BlockSets, + loc: Location) { + let stmt = &self.mir[loc.block].statements[loc.statement_index]; + + match stmt.kind { + StatementKind::StorageLive(l) => sets.gen(&l), + StatementKind::StorageDead(l) => sets.kill(&l), + _ => (), + } + } + + fn terminator_effect(&self, + _sets: &mut BlockSets, + _loc: Location) { + // Terminators have no effect + } + + fn propagate_call_return(&self, + _in_out: &mut IdxSet, + _call_bb: mir::BasicBlock, + _dest_bb: mir::BasicBlock, + _dest_lval: &mir::Lvalue) { + // Nothing to do when a call returns successfully + } +} + +impl<'a, 'tcx> BitwiseOperator for MaybeStorageLive<'a, 'tcx> { + #[inline] + fn join(&self, pred1: usize, pred2: usize) -> usize { + pred1 | pred2 // "maybe" means we union effects of both preds + } +} + +impl<'a, 'tcx> DataflowOperator for MaybeStorageLive<'a, 'tcx> { + #[inline] + fn bottom_value() -> bool { + false // bottom = dead + } +} diff --git a/src/librustc_mir/dataflow/mod.rs b/src/librustc_mir/dataflow/mod.rs index 237795491b..9fa5691d64 100644 --- a/src/librustc_mir/dataflow/mod.rs +++ b/src/librustc_mir/dataflow/mod.rs @@ -24,6 +24,7 @@ use std::mem; use std::path::PathBuf; use std::usize; +pub use self::impls::{MaybeStorageLive}; pub use self::impls::{MaybeInitializedLvals, MaybeUninitializedLvals}; pub use self::impls::{DefinitelyInitializedLvals}; pub use self::impls::borrows::{Borrows, BorrowData, BorrowIndex}; @@ -351,6 +352,29 @@ pub trait DataflowResultsConsumer<'a, 'tcx: 'a> { flow_state: &mut Self::FlowState); } +pub fn state_for_location(loc: Location, + analysis: &T, + result: &DataflowResults) + -> IdxSetBuf { + let mut entry = result.sets().on_entry_set_for(loc.block.index()).to_owned(); + + { + let mut sets = BlockSets { + on_entry: &mut entry.clone(), + kill_set: &mut entry.clone(), + gen_set: &mut entry, + }; + + for stmt in 0..loc.statement_index { + let mut stmt_loc = loc; + stmt_loc.statement_index = stmt; + analysis.statement_effect(&mut sets, stmt_loc); + } + } + + entry +} + pub struct DataflowAnalysis<'a, 'tcx: 'a, O> where O: BitDenotation { flow_state: DataflowState, @@ -653,15 +677,21 @@ impl<'a, 'tcx: 'a, D> DataflowAnalysis<'a, 'tcx, D> where D: BitDenotation match bb_data.terminator().kind { mir::TerminatorKind::Return | mir::TerminatorKind::Resume | + mir::TerminatorKind::GeneratorDrop | mir::TerminatorKind::Unreachable => {} mir::TerminatorKind::Goto { ref target } | mir::TerminatorKind::Assert { ref target, cleanup: None, .. } | + mir::TerminatorKind::Yield { resume: ref target, drop: None, .. } | mir::TerminatorKind::Drop { ref target, location: _, unwind: None } | mir::TerminatorKind::DropAndReplace { ref target, value: _, location: _, unwind: None } => { self.propagate_bits_into_entry_set_for(in_out, changed, target); } + mir::TerminatorKind::Yield { resume: ref target, drop: Some(ref drop), .. 
} => { + self.propagate_bits_into_entry_set_for(in_out, changed, target); + self.propagate_bits_into_entry_set_for(in_out, changed, drop); + } mir::TerminatorKind::Assert { ref target, cleanup: Some(ref unwind), .. } | mir::TerminatorKind::Drop { ref target, location: _, unwind: Some(ref unwind) } | mir::TerminatorKind::DropAndReplace { diff --git a/src/librustc_mir/dataflow/move_paths/abs_domain.rs b/src/librustc_mir/dataflow/move_paths/abs_domain.rs index 173396f224..00825c7a88 100644 --- a/src/librustc_mir/dataflow/move_paths/abs_domain.rs +++ b/src/librustc_mir/dataflow/move_paths/abs_domain.rs @@ -21,8 +21,7 @@ //! `a[x]` would still overlap them both. But that is not this //! representation does today.) -use rustc::mir::LvalueElem; -use rustc::mir::{Operand, ProjectionElem}; +use rustc::mir::{Local, LvalueElem, Operand, ProjectionElem}; use rustc::ty::Ty; #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] @@ -40,6 +39,10 @@ impl<'tcx> Lift for Operand<'tcx> { type Abstract = AbstractOperand; fn lift(&self) -> Self::Abstract { AbstractOperand } } +impl Lift for Local { + type Abstract = AbstractOperand; + fn lift(&self) -> Self::Abstract { AbstractOperand } +} impl<'tcx> Lift for Ty<'tcx> { type Abstract = AbstractType; fn lift(&self) -> Self::Abstract { AbstractType } diff --git a/src/librustc_mir/dataflow/move_paths/builder.rs b/src/librustc_mir/dataflow/move_paths/builder.rs index c45c91011d..0790d937ce 100644 --- a/src/librustc_mir/dataflow/move_paths/builder.rs +++ b/src/librustc_mir/dataflow/move_paths/builder.rs @@ -22,17 +22,15 @@ use std::mem; use super::abs_domain::Lift; use super::{LocationMap, MoveData, MovePath, MovePathLookup, MovePathIndex, MoveOut, MoveOutIndex}; +use super::{MoveError}; +use super::IllegalMoveOriginKind::*; -pub(super) struct MoveDataBuilder<'a, 'tcx: 'a> { +struct MoveDataBuilder<'a, 'tcx: 'a> { mir: &'a Mir<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, data: MoveData<'tcx>, -} - -pub enum MovePathError { - IllegalMove, - UnionMove { path: MovePathIndex }, + errors: Vec>, } impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { @@ -47,6 +45,7 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { mir, tcx, param_env, + errors: Vec::new(), data: MoveData { moves: IndexVec::new(), loc_map: LocationMap::new(mir), @@ -85,7 +84,9 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { assert_eq!(path_map_ent, move_path); move_path } +} +impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> { /// This creates a MovePath for a given lvalue, returning an `MovePathError` /// if that lvalue can't be moved from. /// @@ -94,13 +95,15 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { /// /// Maybe we should have separate "borrowck" and "moveck" modes. fn move_path_for(&mut self, lval: &Lvalue<'tcx>) - -> Result + -> Result> { debug!("lookup({:?})", lval); match *lval { - Lvalue::Local(local) => Ok(self.data.rev_lookup.locals[local]), - // error: can't move out of a static - Lvalue::Static(..) => Err(MovePathError::IllegalMove), + Lvalue::Local(local) => Ok(self.builder.data.rev_lookup.locals[local]), + Lvalue::Static(..) 
=> { + let span = self.builder.mir.source_info(self.loc).span; + Err(MoveError::cannot_move_out_of(span, Static)) + } Lvalue::Projection(ref proj) => { self.move_path_for_projection(lval, proj) } @@ -116,37 +119,52 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { fn move_path_for_projection(&mut self, lval: &Lvalue<'tcx>, proj: &LvalueProjection<'tcx>) - -> Result + -> Result> { let base = try!(self.move_path_for(&proj.base)); - let lv_ty = proj.base.ty(self.mir, self.tcx).to_ty(self.tcx); + let mir = self.builder.mir; + let tcx = self.builder.tcx; + let lv_ty = proj.base.ty(mir, tcx).to_ty(tcx); match lv_ty.sty { - // error: can't move out of borrowed content - ty::TyRef(..) | ty::TyRawPtr(..) => return Err(MovePathError::IllegalMove), - // error: can't move out of struct with destructor - ty::TyAdt(adt, _) if adt.has_dtor(self.tcx) && !adt.is_box() => - return Err(MovePathError::IllegalMove), + ty::TyRef(..) | ty::TyRawPtr(..) => + return Err(MoveError::cannot_move_out_of(mir.source_info(self.loc).span, + BorrowedContent)), + ty::TyAdt(adt, _) if adt.has_dtor(tcx) && !adt.is_box() => + return Err(MoveError::cannot_move_out_of(mir.source_info(self.loc).span, + InteriorOfTypeWithDestructor { + container_ty: lv_ty + })), // move out of union - always move the entire union ty::TyAdt(adt, _) if adt.is_union() => - return Err(MovePathError::UnionMove { path: base }), - // error: can't move out of a slice - ty::TySlice(..) => - return Err(MovePathError::IllegalMove), - ty::TyArray(..) => match proj.elem { - // error: can't move out of an array - ProjectionElem::Index(..) => return Err(MovePathError::IllegalMove), + return Err(MoveError::UnionMove { path: base }), + ty::TySlice(elem_ty) => + return Err(MoveError::cannot_move_out_of( + mir.source_info(self.loc).span, + InteriorOfSlice { + elem_ty, is_index: match proj.elem { + ProjectionElem::Index(..) => true, + _ => false + }, + })), + ty::TyArray(elem_ty, _num_elems) => match proj.elem { + ProjectionElem::Index(..) 
=> + return Err(MoveError::cannot_move_out_of( + mir.source_info(self.loc).span, + InteriorOfArray { + elem_ty, is_index: true + })), _ => { // FIXME: still badly broken } }, _ => {} }; - match self.data.rev_lookup.projections.entry((base, proj.elem.lift())) { + match self.builder.data.rev_lookup.projections.entry((base, proj.elem.lift())) { Entry::Occupied(ent) => Ok(*ent.get()), Entry::Vacant(ent) => { - let path = Self::new_move_path( - &mut self.data.move_paths, - &mut self.data.path_map, + let path = MoveDataBuilder::new_move_path( + &mut self.builder.data.move_paths, + &mut self.builder.data.path_map, Some(base), lval.clone() ); @@ -155,8 +173,10 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { } } } +} - fn finalize(self) -> MoveData<'tcx> { +impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { + fn finalize(self) -> Result, (MoveData<'tcx>, Vec>)> { debug!("{}", { debug!("moves for {:?}:", self.mir.span); for (j, mo) in self.data.moves.iter_enumerated() { @@ -168,14 +188,20 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { } "done dumping moves" }); - self.data + + if self.errors.len() > 0 { + Err((self.data, self.errors)) + } else { + Ok(self.data) + } } } pub(super) fn gather_moves<'a, 'tcx>(mir: &Mir<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>) - -> MoveData<'tcx> { + -> Result, + (MoveData<'tcx>, Vec>)> { let mut builder = MoveDataBuilder::new(mir, tcx, param_env); for (bb, block) in mir.basic_blocks().iter_enumerated() { @@ -197,6 +223,22 @@ pub(super) fn gather_moves<'a, 'tcx>(mir: &Mir<'tcx>, impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { fn gather_statement(&mut self, loc: Location, stmt: &Statement<'tcx>) { debug!("gather_statement({:?}, {:?})", loc, stmt); + (Gatherer { builder: self, loc }).gather_statement(stmt); + } + + fn gather_terminator(&mut self, loc: Location, term: &Terminator<'tcx>) { + debug!("gather_terminator({:?}, {:?})", loc, term); + (Gatherer { builder: self, loc }).gather_terminator(term); + } +} + +struct Gatherer<'b, 'a: 'b, 'tcx: 'a> { + builder: &'b mut MoveDataBuilder<'a, 'tcx>, + loc: Location, +} + +impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> { + fn gather_statement(&mut self, stmt: &Statement<'tcx>) { match stmt.kind { StatementKind::Assign(ref lval, ref rval) => { self.create_move_path(lval); @@ -206,7 +248,7 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { // the exterior. self.create_move_path(&lval.clone().deref()); } - self.gather_rvalue(loc, rval); + self.gather_rvalue(rval); } StatementKind::StorageLive(_) | StatementKind::StorageDead(_) => {} @@ -221,22 +263,22 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { } } - fn gather_rvalue(&mut self, loc: Location, rvalue: &Rvalue<'tcx>) { + fn gather_rvalue(&mut self, rvalue: &Rvalue<'tcx>) { match *rvalue { Rvalue::Use(ref operand) | Rvalue::Repeat(ref operand, _) | Rvalue::Cast(_, ref operand, _) | Rvalue::UnaryOp(_, ref operand) => { - self.gather_operand(loc, operand) + self.gather_operand(operand) } Rvalue::BinaryOp(ref _binop, ref lhs, ref rhs) | Rvalue::CheckedBinaryOp(ref _binop, ref lhs, ref rhs) => { - self.gather_operand(loc, lhs); - self.gather_operand(loc, rhs); + self.gather_operand(lhs); + self.gather_operand(rhs); } Rvalue::Aggregate(ref _kind, ref operands) => { for operand in operands { - self.gather_operand(loc, operand); + self.gather_operand(operand); } } Rvalue::Ref(..) 
| @@ -258,15 +300,15 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { } } - fn gather_terminator(&mut self, loc: Location, term: &Terminator<'tcx>) { - debug!("gather_terminator({:?}, {:?})", loc, term); + fn gather_terminator(&mut self, term: &Terminator<'tcx>) { match term.kind { TerminatorKind::Goto { target: _ } | TerminatorKind::Resume | + TerminatorKind::GeneratorDrop | TerminatorKind::Unreachable => { } TerminatorKind::Return => { - self.gather_move(loc, &Lvalue::Local(RETURN_POINTER)); + self.gather_move(&Lvalue::Local(RETURN_POINTER)); } TerminatorKind::Assert { .. } | @@ -274,17 +316,21 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { // branching terminators - these don't move anything } + TerminatorKind::Yield { ref value, .. } => { + self.gather_operand(value); + } + TerminatorKind::Drop { ref location, target: _, unwind: _ } => { - self.gather_move(loc, location); + self.gather_move(location); } TerminatorKind::DropAndReplace { ref location, ref value, .. } => { self.create_move_path(location); - self.gather_operand(loc, value); + self.gather_operand(value); } TerminatorKind::Call { ref func, ref args, ref destination, cleanup: _ } => { - self.gather_operand(loc, func); + self.gather_operand(func); for arg in args { - self.gather_operand(loc, arg); + self.gather_operand(arg); } if let Some((ref destination, _bb)) = *destination { self.create_move_path(destination); @@ -293,40 +339,38 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { } } - fn gather_operand(&mut self, loc: Location, operand: &Operand<'tcx>) { + fn gather_operand(&mut self, operand: &Operand<'tcx>) { match *operand { Operand::Constant(..) => {} // not-a-move Operand::Consume(ref lval) => { // a move - self.gather_move(loc, lval); + self.gather_move(lval); } } } - fn gather_move(&mut self, loc: Location, lval: &Lvalue<'tcx>) { - debug!("gather_move({:?}, {:?})", loc, lval); + fn gather_move(&mut self, lval: &Lvalue<'tcx>) { + debug!("gather_move({:?}, {:?})", self.loc, lval); - let lv_ty = lval.ty(self.mir, self.tcx).to_ty(self.tcx); - if !lv_ty.moves_by_default(self.tcx, self.param_env, DUMMY_SP) { - debug!("gather_move({:?}, {:?}) - {:?} is Copy. skipping", loc, lval, lv_ty); + let tcx = self.builder.tcx; + let lv_ty = lval.ty(self.builder.mir, tcx).to_ty(tcx); + if !lv_ty.moves_by_default(tcx, self.builder.param_env, DUMMY_SP) { + debug!("gather_move({:?}, {:?}) - {:?} is Copy. skipping", self.loc, lval, lv_ty); return } let path = match self.move_path_for(lval) { - Ok(path) | Err(MovePathError::UnionMove { path }) => path, - Err(MovePathError::IllegalMove) => { - // Moving out of a bad path. Eventually, this should be a MIR - // borrowck error instead of a bug. - span_bug!(self.mir.span, - "Broken MIR: moving out of lvalue {:?}: {:?} at {:?}", - lval, lv_ty, loc); + Ok(path) | Err(MoveError::UnionMove { path }) => path, + Err(error @ MoveError::IllegalMove { .. 
}) => { + self.builder.errors.push(error); + return; } }; - let move_out = self.data.moves.push(MoveOut { path: path, source: loc }); + let move_out = self.builder.data.moves.push(MoveOut { path: path, source: self.loc }); debug!("gather_move({:?}, {:?}): adding move {:?} of {:?}", - loc, lval, move_out, path); + self.loc, lval, move_out, path); - self.data.path_map[path].push(move_out); - self.data.loc_map[loc].push(move_out); + self.builder.data.path_map[path].push(move_out); + self.builder.data.loc_map[self.loc].push(move_out); } } diff --git a/src/librustc_mir/dataflow/move_paths/mod.rs b/src/librustc_mir/dataflow/move_paths/mod.rs index d2d8064984..9369156a22 100644 --- a/src/librustc_mir/dataflow/move_paths/mod.rs +++ b/src/librustc_mir/dataflow/move_paths/mod.rs @@ -13,6 +13,7 @@ use rustc::ty::{self, TyCtxt}; use rustc::mir::*; use rustc::util::nodemap::FxHashMap; use rustc_data_structures::indexed_vec::{IndexVec}; +use syntax_pos::{Span}; use std::fmt; use std::ops::{Index, IndexMut}; @@ -227,11 +228,39 @@ impl<'tcx> MovePathLookup<'tcx> { } } +#[derive(Debug)] +pub struct IllegalMoveOrigin<'tcx> { + pub(crate) span: Span, + pub(crate) kind: IllegalMoveOriginKind<'tcx>, +} + +#[derive(Debug)] +pub(crate) enum IllegalMoveOriginKind<'tcx> { + Static, + BorrowedContent, + InteriorOfTypeWithDestructor { container_ty: ty::Ty<'tcx> }, + InteriorOfSlice { elem_ty: ty::Ty<'tcx>, is_index: bool, }, + InteriorOfArray { elem_ty: ty::Ty<'tcx>, is_index: bool, }, +} + +#[derive(Debug)] +pub enum MoveError<'tcx> { + IllegalMove { cannot_move_out_of: IllegalMoveOrigin<'tcx> }, + UnionMove { path: MovePathIndex }, +} + +impl<'tcx> MoveError<'tcx> { + fn cannot_move_out_of(span: Span, kind: IllegalMoveOriginKind<'tcx>) -> Self { + let origin = IllegalMoveOrigin { span, kind }; + MoveError::IllegalMove { cannot_move_out_of: origin } + } +} + impl<'a, 'tcx> MoveData<'tcx> { pub fn gather_moves(mir: &Mir<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>) - -> Self { + -> Result>)> { builder::gather_moves(mir, tcx, param_env) } } diff --git a/src/librustc_mir/diagnostics.rs b/src/librustc_mir/diagnostics.rs index 83a8ce34c6..645af0bff6 100644 --- a/src/librustc_mir/diagnostics.rs +++ b/src/librustc_mir/diagnostics.rs @@ -195,6 +195,40 @@ instead of using a `const fn`, or refactoring the code to a functional style to avoid mutation if possible. "##, +E0133: r##" +Unsafe code was used outside of an unsafe function or block. + +Erroneous code example: + +```compile_fail,E0133 +unsafe fn f() { return; } // This is the unsafe code + +fn main() { + f(); // error: call to unsafe function requires unsafe function or block +} +``` + +Using unsafe functionality is potentially dangerous and disallowed by safety +checks. Examples: + +* Dereferencing raw pointers +* Calling functions via FFI +* Calling functions marked unsafe + +These safety checks can be relaxed for a section of the code by wrapping the +unsafe instructions with an `unsafe` block. For instance: + +``` +unsafe fn f() { return; } + +fn main() { + unsafe { f(); } // ok! +} +``` + +See also https://doc.rust-lang.org/book/first-edition/unsafe.html +"##, + E0381: r##" It is not allowed to use or capture an uninitialized variable. For example: @@ -384,7 +418,7 @@ static B: &'static AtomicUsize = &A; // ok! 
You can also have this error while using a cell type: ```compile_fail,E0492 -#![feature(const_fn)] +#![feature(const_cell_new)] use std::cell::Cell; @@ -412,7 +446,7 @@ However, if you still wish to use these types, you can achieve this by an unsafe wrapper: ``` -#![feature(const_fn)] +#![feature(const_cell_new)] use std::cell::Cell; use std::marker::Sync; @@ -431,29 +465,6 @@ Remember this solution is unsafe! You will have to ensure that accesses to the cell are synchronized. "##, -E0493: r##" -A type with a destructor was assigned to an invalid type of variable. Erroneous -code example: - -```compile_fail,E0493 -struct Foo { - a: u32 -} - -impl Drop for Foo { - fn drop(&mut self) {} -} - -const F : Foo = Foo { a : 0 }; -// error: constants are not allowed to have destructors -static S : Foo = Foo { a : 0 }; -// error: destructors in statics are an unstable feature -``` - -To solve this issue, please use a type which does allow the usage of type with -destructors. -"##, - E0494: r##" A reference of an interior static was assigned to another const/static. Erroneous code example: @@ -988,10 +999,278 @@ fn print_fancy_ref(fancy_ref: &FancyNum){ ``` "##, +E0507: r##" +You tried to move out of a value which was borrowed. Erroneous code example: + +```compile_fail,E0507 +use std::cell::RefCell; + +struct TheDarkKnight; + +impl TheDarkKnight { + fn nothing_is_true(self) {} +} + +fn main() { + let x = RefCell::new(TheDarkKnight); + + x.borrow().nothing_is_true(); // error: cannot move out of borrowed content +} +``` + +Here, the `nothing_is_true` method takes the ownership of `self`. However, +`self` cannot be moved because `.borrow()` only provides an `&TheDarkKnight`, +which is a borrow of the content owned by the `RefCell`. To fix this error, +you have three choices: + +* Try to avoid moving the variable. +* Somehow reclaim the ownership. +* Implement the `Copy` trait on the type. + +Examples: + +``` +use std::cell::RefCell; + +struct TheDarkKnight; + +impl TheDarkKnight { + fn nothing_is_true(&self) {} // First case, we don't take ownership +} + +fn main() { + let x = RefCell::new(TheDarkKnight); + + x.borrow().nothing_is_true(); // ok! +} +``` + +Or: + +``` +use std::cell::RefCell; + +struct TheDarkKnight; + +impl TheDarkKnight { + fn nothing_is_true(self) {} +} + +fn main() { + let x = RefCell::new(TheDarkKnight); + let x = x.into_inner(); // we get back ownership + + x.nothing_is_true(); // ok! +} +``` + +Or: + +``` +use std::cell::RefCell; + +#[derive(Clone, Copy)] // we implement the Copy trait +struct TheDarkKnight; + +impl TheDarkKnight { + fn nothing_is_true(self) {} +} + +fn main() { + let x = RefCell::new(TheDarkKnight); + + x.borrow().nothing_is_true(); // ok! +} +``` + +Moving a member out of a mutably borrowed struct will also cause E0507 error: + +```compile_fail,E0507 +struct TheDarkKnight; + +impl TheDarkKnight { + fn nothing_is_true(self) {} +} + +struct Batcave { + knight: TheDarkKnight +} + +fn main() { + let mut cave = Batcave { + knight: TheDarkKnight + }; + let borrowed = &mut cave; + + borrowed.knight.nothing_is_true(); // E0507 +} +``` + +It is fine only if you put something back. `mem::replace` can be used for that: + +``` +# struct TheDarkKnight; +# impl TheDarkKnight { fn nothing_is_true(self) {} } +# struct Batcave { knight: TheDarkKnight } +use std::mem; + +let mut cave = Batcave { + knight: TheDarkKnight +}; +let borrowed = &mut cave; + +mem::replace(&mut borrowed.knight, TheDarkKnight).nothing_is_true(); // ok! 
+``` + +You can find more information about borrowing in the rust-book: +http://doc.rust-lang.org/book/first-edition/references-and-borrowing.html +"##, + +E0508: r##" +A value was moved out of a non-copy fixed-size array. + +Example of erroneous code: + +```compile_fail,E0508 +struct NonCopy; + +fn main() { + let array = [NonCopy; 1]; + let _value = array[0]; // error: cannot move out of type `[NonCopy; 1]`, + // a non-copy fixed-size array +} +``` + +The first element was moved out of the array, but this is not +possible because `NonCopy` does not implement the `Copy` trait. + +Consider borrowing the element instead of moving it: + +``` +struct NonCopy; + +fn main() { + let array = [NonCopy; 1]; + let _value = &array[0]; // Borrowing is allowed, unlike moving. +} +``` + +Alternatively, if your type implements `Clone` and you need to own the value, +consider borrowing and then cloning: + +``` +#[derive(Clone)] +struct NonCopy; + +fn main() { + let array = [NonCopy; 1]; + // Now you can clone the array element. + let _value = array[0].clone(); +} +``` +"##, + +E0509: r##" +This error occurs when an attempt is made to move out of a value whose type +implements the `Drop` trait. + +Example of erroneous code: + +```compile_fail,E0509 +struct FancyNum { + num: usize +} + +struct DropStruct { + fancy: FancyNum +} + +impl Drop for DropStruct { + fn drop(&mut self) { + // Destruct DropStruct, possibly using FancyNum + } +} + +fn main() { + let drop_struct = DropStruct{fancy: FancyNum{num: 5}}; + let fancy_field = drop_struct.fancy; // Error E0509 + println!("Fancy: {}", fancy_field.num); + // implicit call to `drop_struct.drop()` as drop_struct goes out of scope +} +``` + +Here, we tried to move a field out of a struct of type `DropStruct` which +implements the `Drop` trait. However, a struct cannot be dropped if one or +more of its fields have been moved. + +Structs implementing the `Drop` trait have an implicit destructor that gets +called when they go out of scope. This destructor may use the fields of the +struct, so moving out of the struct could make it impossible to run the +destructor. Therefore, we must think of all values whose type implements the +`Drop` trait as single units whose fields cannot be moved. + +This error can be fixed by creating a reference to the fields of a struct, +enum, or tuple using the `ref` keyword: + +``` +struct FancyNum { + num: usize +} + +struct DropStruct { + fancy: FancyNum +} + +impl Drop for DropStruct { + fn drop(&mut self) { + // Destruct DropStruct, possibly using FancyNum + } +} + +fn main() { + let drop_struct = DropStruct{fancy: FancyNum{num: 5}}; + let ref fancy_field = drop_struct.fancy; // No more errors! + println!("Fancy: {}", fancy_field.num); + // implicit call to `drop_struct.drop()` as drop_struct goes out of scope +} +``` + +Note that this technique can also be used in the arms of a match expression: + +``` +struct FancyNum { + num: usize +} + +enum DropEnum { + Fancy(FancyNum) +} + +impl Drop for DropEnum { + fn drop(&mut self) { + // Destruct DropEnum, possibly using FancyNum + } +} + +fn main() { + // Creates and enum of type `DropEnum`, which implements `Drop` + let drop_enum = DropEnum::Fancy(FancyNum{num: 10}); + match drop_enum { + // Creates a reference to the inside of `DropEnum::Fancy` + DropEnum::Fancy(ref fancy_field) => // No error! + println!("It was fancy-- {}!", fancy_field.num), + } + // implicit call to `drop_enum.drop()` as drop_enum goes out of scope +} +``` +"##, + } register_diagnostics! 
{ + E0493, // destructors cannot be evaluated at compile-time E0524, // two closures require unique access to `..` at the same time E0526, // shuffle indices are not constant + E0594, // cannot assign to {} E0625, // thread-local statics cannot be accessed at compile-time } diff --git a/src/librustc_mir/hair/cx/block.rs b/src/librustc_mir/hair/cx/block.rs index 61d128fc84..6f6258f52f 100644 --- a/src/librustc_mir/hair/cx/block.rs +++ b/src/librustc_mir/hair/cx/block.rs @@ -11,9 +11,10 @@ use hair::*; use hair::cx::Cx; use hair::cx::to_ref::ToRef; -use rustc::middle::region::{BlockRemainder, CodeExtent}; +use rustc::middle::region::{self, BlockRemainder}; use rustc::hir; -use syntax::ast; + +use rustc_data_structures::indexed_vec::Idx; impl<'tcx> Mirror<'tcx> for &'tcx hir::Block { type Output = Block<'tcx>; @@ -21,62 +22,70 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Block { fn make_mirror<'a, 'gcx>(self, cx: &mut Cx<'a, 'gcx, 'tcx>) -> Block<'tcx> { // We have to eagerly translate the "spine" of the statements // in order to get the lexical scoping correctly. - let stmts = mirror_stmts(cx, self.id, &*self.stmts); - let opt_destruction_extent = cx.region_maps.opt_destruction_extent(self.id); + let stmts = mirror_stmts(cx, self.hir_id.local_id, &*self.stmts); + let opt_destruction_scope = + cx.region_scope_tree.opt_destruction_scope(self.hir_id.local_id); Block { targeted_by_break: self.targeted_by_break, - extent: CodeExtent::Misc(self.id), - opt_destruction_extent, + region_scope: region::Scope::Node(self.hir_id.local_id), + opt_destruction_scope, span: self.span, stmts, expr: self.expr.to_ref(), + safety_mode: match self.rules { + hir::BlockCheckMode::DefaultBlock => + BlockSafety::Safe, + hir::BlockCheckMode::UnsafeBlock(..) => + BlockSafety::ExplicitUnsafe(self.id), + hir::BlockCheckMode::PushUnsafeBlock(..) => + BlockSafety::PushUnsafe, + hir::BlockCheckMode::PopUnsafeBlock(..) => + BlockSafety::PopUnsafe + }, } } } fn mirror_stmts<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, - block_id: ast::NodeId, + block_id: hir::ItemLocalId, stmts: &'tcx [hir::Stmt]) -> Vec> { let mut result = vec![]; for (index, stmt) in stmts.iter().enumerate() { - let opt_dxn_ext = cx.region_maps.opt_destruction_extent(stmt.node.id()); + let hir_id = cx.tcx.hir.node_to_hir_id(stmt.node.id()); + let opt_dxn_ext = cx.region_scope_tree.opt_destruction_scope(hir_id.local_id); match stmt.node { - hir::StmtExpr(ref expr, id) | - hir::StmtSemi(ref expr, id) => { + hir::StmtExpr(ref expr, _) | + hir::StmtSemi(ref expr, _) => { result.push(StmtRef::Mirror(Box::new(Stmt { - span: stmt.span, kind: StmtKind::Expr { - scope: CodeExtent::Misc(id), + scope: region::Scope::Node(hir_id.local_id), expr: expr.to_ref(), }, - opt_destruction_extent: opt_dxn_ext, + opt_destruction_scope: opt_dxn_ext, }))) } - hir::StmtDecl(ref decl, id) => { + hir::StmtDecl(ref decl, _) => { match decl.node { hir::DeclItem(..) 
=> { // ignore for purposes of the MIR } hir::DeclLocal(ref local) => { - let remainder_extent = CodeExtent::Remainder(BlockRemainder { + let remainder_scope = region::Scope::Remainder(BlockRemainder { block: block_id, - first_statement_index: index as u32, + first_statement_index: region::FirstStatementIndex::new(index), }); - let pattern = Pattern::from_hir(cx.tcx.global_tcx(), - cx.param_env.and(cx.identity_substs), - cx.tables(), - &local.pat); + let pattern = cx.pattern_from_hir(&local.pat); result.push(StmtRef::Mirror(Box::new(Stmt { - span: stmt.span, kind: StmtKind::Let { - remainder_scope: remainder_extent, - init_scope: CodeExtent::Misc(id), + remainder_scope: remainder_scope, + init_scope: region::Scope::Node(hir_id.local_id), pattern, initializer: local.init.to_ref(), + lint_level: cx.lint_level_of(local.id), }, - opt_destruction_extent: opt_dxn_ext, + opt_destruction_scope: opt_dxn_ext, }))); } } @@ -90,7 +99,7 @@ pub fn to_expr_ref<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, block: &'tcx hir::Block) -> ExprRef<'tcx> { let block_ty = cx.tables().node_id_to_type(block.hir_id); - let temp_lifetime = cx.region_maps.temporary_scope(block.id); + let temp_lifetime = cx.region_scope_tree.temporary_scope(block.hir_id.local_id); let expr = Expr { ty: block_ty, temp_lifetime, diff --git a/src/librustc_mir/hair/cx/expr.rs b/src/librustc_mir/hair/cx/expr.rs index 944fb8e833..f5a53e2aa8 100644 --- a/src/librustc_mir/hair/cx/expr.rs +++ b/src/librustc_mir/hair/cx/expr.rs @@ -25,8 +25,8 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { type Output = Expr<'tcx>; fn make_mirror<'a, 'gcx>(self, cx: &mut Cx<'a, 'gcx, 'tcx>) -> Expr<'tcx> { - let temp_lifetime = cx.region_maps.temporary_scope(self.id); - let expr_extent = CodeExtent::Misc(self.id); + let temp_lifetime = cx.region_scope_tree.temporary_scope(self.hir_id.local_id); + let expr_scope = region::Scope::Node(self.hir_id.local_id); debug!("Expr::make_mirror(): id={}, span={:?}", self.id, self.span); @@ -46,23 +46,26 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { ty: expr.ty, span: self.span, kind: ExprKind::Scope { - extent: expr_extent, + region_scope: expr_scope, value: expr.to_ref(), + lint_level: cx.lint_level_of(self.id), }, }; // Finally, create a destruction scope, if any. - if let Some(extent) = cx.region_maps.opt_destruction_extent(self.id) { - expr = Expr { - temp_lifetime, - ty: expr.ty, - span: self.span, - kind: ExprKind::Scope { - extent, - value: expr.to_ref(), - }, - }; - } + if let Some(region_scope) = + cx.region_scope_tree.opt_destruction_scope(self.hir_id.local_id) { + expr = Expr { + temp_lifetime, + ty: expr.ty, + span: self.span, + kind: ExprKind::Scope { + region_scope, + value: expr.to_ref(), + lint_level: LintLevel::Inherited, + }, + }; + } // OK, all done! expr @@ -125,7 +128,7 @@ fn apply_adjustment<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, // Convert this to a suitable `&foo` and // then an unsafe coercion. Limit the region to be just this // expression. 
- let region = ty::ReScope(CodeExtent::Misc(hir_expr.id)); + let region = ty::ReScope(region::Scope::Node(hir_expr.hir_id.local_id)); let region = cx.tcx.mk_region(region); expr = Expr { temp_lifetime, @@ -160,7 +163,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, expr: &'tcx hir::Expr) -> Expr<'tcx> { let expr_ty = cx.tables().expr_ty(expr); - let temp_lifetime = cx.region_maps.temporary_scope(expr.id); + let temp_lifetime = cx.region_scope_tree.temporary_scope(expr.hir_id.local_id); let kind = match expr.node { // Here comes the interesting stuff: @@ -432,8 +435,9 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, hir::ExprClosure(..) => { let closure_ty = cx.tables().expr_ty(expr); - let (def_id, substs) = match closure_ty.sty { - ty::TyClosure(def_id, substs) => (def_id, substs), + let (def_id, substs, interior) = match closure_ty.sty { + ty::TyClosure(def_id, substs) => (def_id, substs, None), + ty::TyGenerator(def_id, substs, interior) => (def_id, substs, Some(interior)), _ => { span_bug!(expr.span, "closure expr w/o closure type: {:?}", closure_ty); } @@ -448,6 +452,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, closure_id: def_id, substs, upvars, + interior, } } @@ -470,7 +475,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, let def_id = cx.tcx.hir.body_owner_def_id(count); let substs = Substs::identity_for_item(cx.tcx.global_tcx(), def_id); let count = match cx.tcx.at(c.span).const_eval(cx.param_env.and((def_id, substs))) { - Ok(ConstVal::Integral(ConstInt::Usize(u))) => u, + Ok(&ty::Const { val: ConstVal::Integral(ConstInt::Usize(u)), .. }) => u, Ok(other) => bug!("constant evaluation of repeat count yielded {:?}", other), Err(s) => cx.fatal_const_eval_err(&s, c.span, "expression") }; @@ -485,7 +490,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, match dest.target_id { hir::ScopeTarget::Block(target_id) | hir::ScopeTarget::Loop(hir::LoopIdResult::Ok(target_id)) => ExprKind::Break { - label: CodeExtent::Misc(target_id), + label: region::Scope::Node(cx.tcx.hir.node_to_hir_id(target_id).local_id), value: value.to_ref(), }, hir::ScopeTarget::Loop(hir::LoopIdResult::Err(err)) => @@ -496,7 +501,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, match dest.target_id { hir::ScopeTarget::Block(_) => bug!("cannot continue to blocks"), hir::ScopeTarget::Loop(hir::LoopIdResult::Ok(loop_id)) => ExprKind::Continue { - label: CodeExtent::Misc(loop_id), + label: region::Scope::Node(cx.tcx.hir.node_to_hir_id(loop_id).local_id), }, hir::ScopeTarget::Loop(hir::LoopIdResult::Err(err)) => bug!("invalid loop id for continue: {}", err) @@ -567,6 +572,8 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, } hir::ExprArray(ref fields) => ExprKind::Array { fields: fields.to_ref() }, hir::ExprTup(ref fields) => ExprKind::Tuple { fields: fields.to_ref() }, + + hir::ExprYield(ref v) => ExprKind::Yield { value: v.to_ref() }, }; Expr { @@ -581,18 +588,22 @@ fn method_callee<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, expr: &hir::Expr, custom_callee: Option<(DefId, &'tcx Substs<'tcx>)>) -> Expr<'tcx> { - let temp_lifetime = cx.region_maps.temporary_scope(expr.id); + let temp_lifetime = cx.region_scope_tree.temporary_scope(expr.hir_id.local_id); let (def_id, substs) = custom_callee.unwrap_or_else(|| { (cx.tables().type_dependent_defs()[expr.hir_id].def_id(), cx.tables().node_substs(expr.hir_id)) }); + let ty = 
cx.tcx().mk_fn_def(def_id, substs); Expr { temp_lifetime, - ty: cx.tcx().mk_fn_def(def_id, substs), + ty, span: expr.span, kind: ExprKind::Literal { literal: Literal::Value { - value: ConstVal::Function(def_id, substs), + value: cx.tcx.mk_const(ty::Const { + val: ConstVal::Function(def_id, substs), + ty + }), }, }, } @@ -607,14 +618,11 @@ fn to_borrow_kind(m: hir::Mutability) -> BorrowKind { fn convert_arm<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, arm: &'tcx hir::Arm) -> Arm<'tcx> { Arm { - patterns: arm.pats.iter().map(|p| { - Pattern::from_hir(cx.tcx.global_tcx(), - cx.param_env.and(cx.identity_substs), - cx.tables(), - p) - }).collect(), + patterns: arm.pats.iter().map(|p| cx.pattern_from_hir(p)).collect(), guard: arm.guard.to_ref(), body: arm.body.to_ref(), + // BUG: fix this + lint_level: LintLevel::Inherited, } } @@ -630,15 +638,20 @@ fn convert_path_expr<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, Def::StructCtor(def_id, CtorKind::Fn) | Def::VariantCtor(def_id, CtorKind::Fn) => ExprKind::Literal { literal: Literal::Value { - value: ConstVal::Function(def_id, substs), + value: cx.tcx.mk_const(ty::Const { + val: ConstVal::Function(def_id, substs), + ty: cx.tables().node_id_to_type(expr.hir_id) + }), }, }, Def::Const(def_id) | Def::AssociatedConst(def_id) => ExprKind::Literal { - literal: Literal::Item { - def_id, - substs, + literal: Literal::Value { + value: cx.tcx.mk_const(ty::Const { + val: ConstVal::Unevaluated(def_id, substs), + ty: cx.tables().node_id_to_type(expr.hir_id) + }), }, }, @@ -672,22 +685,18 @@ fn convert_var<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, expr: &'tcx hir::Expr, def: Def) -> ExprKind<'tcx> { - let temp_lifetime = cx.region_maps.temporary_scope(expr.id); + let temp_lifetime = cx.region_scope_tree.temporary_scope(expr.hir_id.local_id); match def { - Def::Local(def_id) => { - let node_id = cx.tcx.hir.as_local_node_id(def_id).unwrap(); - ExprKind::VarRef { id: node_id } - } + Def::Local(id) => ExprKind::VarRef { id }, - Def::Upvar(var_def_id, index, closure_expr_id) => { - let id_var = cx.tcx.hir.as_local_node_id(var_def_id).unwrap(); + Def::Upvar(var_id, index, closure_expr_id) => { debug!("convert_var(upvar({:?}, {:?}, {:?}))", - id_var, + var_id, index, closure_expr_id); - let var_ty = cx.tables() - .node_id_to_type(cx.tcx.hir.node_to_hir_id(id_var)); + let var_hir_id = cx.tcx.hir.node_to_hir_id(var_id); + let var_ty = cx.tables().node_id_to_type(var_hir_id); // FIXME free regions in closures are not right let closure_ty = cx.tables() @@ -703,56 +712,65 @@ fn convert_var<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, }); let region = cx.tcx.mk_region(region); - let self_expr = match cx.tcx.closure_kind(closure_def_id) { - ty::ClosureKind::Fn => { - let ref_closure_ty = cx.tcx.mk_ref(region, - ty::TypeAndMut { - ty: closure_ty, - mutbl: hir::MutImmutable, - }); - Expr { - ty: closure_ty, - temp_lifetime, - span: expr.span, - kind: ExprKind::Deref { - arg: Expr { - ty: ref_closure_ty, - temp_lifetime, - span: expr.span, - kind: ExprKind::SelfRef, - } - .to_ref(), - }, + let self_expr = if let ty::TyClosure(..) 
= closure_ty.sty { + match cx.tcx.closure_kind(closure_def_id) { + ty::ClosureKind::Fn => { + let ref_closure_ty = cx.tcx.mk_ref(region, + ty::TypeAndMut { + ty: closure_ty, + mutbl: hir::MutImmutable, + }); + Expr { + ty: closure_ty, + temp_lifetime: temp_lifetime, + span: expr.span, + kind: ExprKind::Deref { + arg: Expr { + ty: ref_closure_ty, + temp_lifetime, + span: expr.span, + kind: ExprKind::SelfRef, + } + .to_ref(), + }, + } } - } - ty::ClosureKind::FnMut => { - let ref_closure_ty = cx.tcx.mk_ref(region, - ty::TypeAndMut { - ty: closure_ty, - mutbl: hir::MutMutable, - }); - Expr { - ty: closure_ty, - temp_lifetime, - span: expr.span, - kind: ExprKind::Deref { - arg: Expr { - ty: ref_closure_ty, - temp_lifetime, - span: expr.span, - kind: ExprKind::SelfRef, - }.to_ref(), - }, + ty::ClosureKind::FnMut => { + let ref_closure_ty = cx.tcx.mk_ref(region, + ty::TypeAndMut { + ty: closure_ty, + mutbl: hir::MutMutable, + }); + Expr { + ty: closure_ty, + temp_lifetime, + span: expr.span, + kind: ExprKind::Deref { + arg: Expr { + ty: ref_closure_ty, + temp_lifetime, + span: expr.span, + kind: ExprKind::SelfRef, + }.to_ref(), + }, + } } - } - ty::ClosureKind::FnOnce => { - Expr { - ty: closure_ty, - temp_lifetime, - span: expr.span, - kind: ExprKind::SelfRef, + ty::ClosureKind::FnOnce => { + Expr { + ty: closure_ty, + temp_lifetime, + span: expr.span, + kind: ExprKind::SelfRef, + } } } + } else { + Expr { + ty: closure_ty, + temp_lifetime, + span: expr.span, + kind: ExprKind::SelfRef, + } }; // at this point we have `self.n`, which loads up the upvar @@ -764,7 +782,7 @@ fn convert_var<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, // ...but the upvar might be an `&T` or `&mut T` capture, at which // point we need an implicit deref let upvar_id = ty::UpvarId { - var_id: var_def_id.index, + var_id: var_hir_id, closure_expr_id: closure_def_id.index, }; match cx.tables().upvar_capture(upvar_id) { @@ -854,7 +872,7 @@ fn overloaded_lvalue<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, // construct the complete expression `foo()` for the overloaded call, // which will yield the &T type - let temp_lifetime = cx.region_maps.temporary_scope(expr.id); + let temp_lifetime = cx.region_scope_tree.temporary_scope(expr.hir_id.local_id); let fun = method_callee(cx, expr, custom_callee); let ref_expr = Expr { temp_lifetime, @@ -876,16 +894,14 @@ fn capture_freevar<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, freevar: &hir::Freevar, freevar_ty: Ty<'tcx>) -> ExprRef<'tcx> { - let var_def_id = freevar.def.def_id(); - let var_node_id = cx.tcx.hir.as_local_node_id(var_def_id).unwrap(); + let var_hir_id = cx.tcx.hir.node_to_hir_id(freevar.var_id()); let upvar_id = ty::UpvarId { - var_id: var_def_id.index, + var_id: var_hir_id, closure_expr_id: cx.tcx.hir.local_def_id(closure_expr.id).index, }; let upvar_capture = cx.tables().upvar_capture(upvar_id); - let temp_lifetime = cx.region_maps.temporary_scope(closure_expr.id); - let var_ty = cx.tables() - .node_id_to_type(cx.tcx.hir.node_to_hir_id(var_node_id)); + let temp_lifetime = cx.region_scope_tree.temporary_scope(closure_expr.hir_id.local_id); + let var_ty = cx.tables().node_id_to_type(var_hir_id); let captured_var = Expr { temp_lifetime, ty: var_ty, diff --git a/src/librustc_mir/hair/cx/mod.rs b/src/librustc_mir/hair/cx/mod.rs index 2f4ab36d39..4434df0ac3 100644 --- a/src/librustc_mir/hair/cx/mod.rs +++ b/src/librustc_mir/hair/cx/mod.rs @@ -20,13 +20,14 @@ use rustc::mir::transform::MirSource; use rustc::middle::const_val::{ConstEvalErr, ConstVal}; use 
rustc_const_eval::ConstContext; use rustc_data_structures::indexed_vec::Idx; -use rustc::hir::def_id::DefId; +use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use rustc::hir::map::blocks::FnLikeNode; -use rustc::middle::region::RegionMaps; +use rustc::middle::region; use rustc::infer::InferCtxt; use rustc::ty::subst::Subst; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::subst::Substs; +use syntax::ast; use syntax::symbol::Symbol; use rustc::hir; use rustc_const_math::{ConstInt, ConstUsize}; @@ -37,12 +38,13 @@ pub struct Cx<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { tcx: TyCtxt<'a, 'gcx, 'tcx>, infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, + pub root_lint_level: ast::NodeId, pub param_env: ty::ParamEnv<'gcx>, /// Identity `Substs` for use with const-evaluation. pub identity_substs: &'gcx Substs<'gcx>, - pub region_maps: Rc, + pub region_scope_tree: Rc, pub tables: &'a ty::TypeckTables<'gcx>, /// This is `Constness::Const` if we are compiling a `static`, @@ -57,10 +59,12 @@ pub struct Cx<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { } impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { - pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, src: MirSource) -> Cx<'a, 'gcx, 'tcx> { + pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, + src: MirSource) -> Cx<'a, 'gcx, 'tcx> { let constness = match src { MirSource::Const(_) | MirSource::Static(..) => hir::Constness::Const, + MirSource::GeneratorDrop(..) => hir::Constness::NotConst, MirSource::Fn(id) => { let fn_like = FnLikeNode::from_node(infcx.tcx.hir.get(id)); fn_like.map_or(hir::Constness::NotConst, |f| f.constness()) @@ -86,18 +90,21 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { // Constants and const fn's always need overflow checks. check_overflow |= constness == hir::Constness::Const; + let lint_level = lint_level_for_hir_id(tcx, src_id); Cx { tcx, infcx, + root_lint_level: lint_level, param_env: tcx.param_env(src_def_id), identity_substs: Substs::identity_for_item(tcx.global_tcx(), src_def_id), - region_maps: tcx.region_maps(src_def_id), + region_scope_tree: tcx.region_scope_tree(src_def_id), tables: tcx.typeck_tables_of(src_def_id), constness, src, check_overflow, } } + } impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { @@ -111,8 +118,15 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { } pub fn usize_literal(&mut self, value: u64) -> Literal<'tcx> { - match ConstUsize::new(value, self.tcx.sess.target.uint_type) { - Ok(val) => Literal::Value { value: ConstVal::Integral(ConstInt::Usize(val)) }, + match ConstUsize::new(value, self.tcx.sess.target.usize_ty) { + Ok(val) => { + Literal::Value { + value: self.tcx.mk_const(ty::Const { + val: ConstVal::Integral(ConstInt::Usize(val)), + ty: self.tcx.types.usize + }) + } + } Err(_) => bug!("usize literal out of range for target"), } } @@ -126,11 +140,21 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { } pub fn true_literal(&mut self) -> Literal<'tcx> { - Literal::Value { value: ConstVal::Bool(true) } + Literal::Value { + value: self.tcx.mk_const(ty::Const { + val: ConstVal::Bool(true), + ty: self.tcx.types.bool + }) + } } pub fn false_literal(&mut self) -> Literal<'tcx> { - Literal::Value { value: ConstVal::Bool(false) } + Literal::Value { + value: self.tcx.mk_const(ty::Const { + val: ConstVal::Bool(false), + ty: self.tcx.types.bool + }) + } } pub fn const_eval_literal(&mut self, e: &hir::Expr) -> Literal<'tcx> { @@ -138,12 +162,24 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { let const_cx = ConstContext::new(tcx, self.param_env.and(self.identity_substs), self.tables()); - match const_cx.eval(e) { - Ok(value) => Literal::Value { value: value }, + match 
const_cx.eval(tcx.hir.expect_expr(e.id)) { + Ok(value) => Literal::Value { value }, Err(s) => self.fatal_const_eval_err(&s, e.span, "expression") } } + pub fn pattern_from_hir(&mut self, p: &hir::Pat) -> Pattern<'tcx> { + let tcx = self.tcx.global_tcx(); + let p = match tcx.hir.get(p.id) { + hir::map::NodePat(p) | hir::map::NodeBinding(p) => p, + node => bug!("pattern became {:?}", node) + }; + Pattern::from_hir(tcx, + self.param_env.and(self.identity_substs), + self.tables(), + p) + } + pub fn fatal_const_eval_err(&mut self, err: &ConstEvalErr<'tcx>, primary_span: Span, @@ -169,7 +205,10 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { let method_ty = method_ty.subst(self.tcx, substs); return (method_ty, Literal::Value { - value: ConstVal::Function(item.def_id, substs), + value: self.tcx.mk_const(ty::Const { + val: ConstVal::Function(item.def_id, substs), + ty: method_ty + }), }); } } @@ -196,6 +235,19 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { ty.needs_drop(self.tcx.global_tcx(), param_env) } + fn lint_level_of(&self, node_id: ast::NodeId) -> LintLevel { + let hir_id = self.tcx.hir.definitions().node_to_hir_id(node_id); + let has_lint_level = self.tcx.dep_graph.with_ignore(|| { + self.tcx.lint_levels(LOCAL_CRATE).lint_level_set(hir_id).is_some() + }); + + if has_lint_level { + LintLevel::Explicit(node_id) + } else { + LintLevel::Inherited + } + } + pub fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> { self.tcx } @@ -209,6 +261,31 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { } } +fn lint_level_for_hir_id(tcx: TyCtxt, mut id: ast::NodeId) -> ast::NodeId { + // Right now we insert a `with_ignore` node in the dep graph here to + // ignore the fact that `lint_levels` below depends on the entire crate. + // For now this'll prevent false positives of recompiling too much when + // anything changes. + // + // Once red/green incremental compilation lands we should be able to + // remove this because while the crate changes often the lint level map + // will change rarely. 
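The traversal that follows climbs from `id` toward the crate root and stops at the first node whose lint level is set explicitly. As a standalone sketch of that walk (the `parent_of` and `explicit_levels` maps and the function name are illustrative stand-ins, not rustc's own API):

    use std::collections::{HashMap, HashSet};

    fn nearest_explicit_lint_level(
        mut id: u32,
        parent_of: &HashMap<u32, u32>,   // child -> parent; the root maps to itself
        explicit_levels: &HashSet<u32>,  // nodes that set a lint level themselves
    ) -> u32 {
        loop {
            if explicit_levels.contains(&id) {
                return id;
            }
            let next = *parent_of.get(&id).unwrap_or(&id);
            if next == id {
                panic!("lint traversal reached the root of the crate");
            }
            id = next;
        }
    }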
+ tcx.dep_graph.with_ignore(|| { + let sets = tcx.lint_levels(LOCAL_CRATE); + loop { + let hir_id = tcx.hir.definitions().node_to_hir_id(id); + if sets.lint_level_set(hir_id).is_some() { + return id + } + let next = tcx.hir.get_parent_node(id); + if next == id { + bug!("lint traversal reached the root of the crate"); + } + id = next; + } + }) +} + mod block; mod expr; mod to_ref; diff --git a/src/librustc_mir/hair/mod.rs b/src/librustc_mir/hair/mod.rs index 01faa61d09..09a31f9ab8 100644 --- a/src/librustc_mir/hair/mod.rs +++ b/src/librustc_mir/hair/mod.rs @@ -17,9 +17,9 @@ use rustc_const_math::ConstUsize; use rustc::mir::{BinOp, BorrowKind, Field, Literal, UnOp}; use rustc::hir::def_id::DefId; -use rustc::middle::region::CodeExtent; +use rustc::middle::region; use rustc::ty::subst::Substs; -use rustc::ty::{self, AdtDef, ClosureSubsts, Region, Ty}; +use rustc::ty::{AdtDef, ClosureSubsts, Region, Ty, GeneratorInterior}; use rustc::hir; use syntax::ast; use syntax_pos::Span; @@ -29,14 +29,38 @@ pub mod cx; pub use rustc_const_eval::pattern::{BindingMode, Pattern, PatternKind, FieldPattern}; +#[derive(Copy, Clone, Debug)] +pub enum LintLevel { + Inherited, + Explicit(ast::NodeId) +} + +impl LintLevel { + pub fn is_explicit(self) -> bool { + match self { + LintLevel::Inherited => false, + LintLevel::Explicit(_) => true + } + } +} + #[derive(Clone, Debug)] pub struct Block<'tcx> { pub targeted_by_break: bool, - pub extent: CodeExtent, - pub opt_destruction_extent: Option, + pub region_scope: region::Scope, + pub opt_destruction_scope: Option, pub span: Span, pub stmts: Vec>, pub expr: Option>, + pub safety_mode: BlockSafety, +} + +#[derive(Copy, Clone, Debug)] +pub enum BlockSafety { + Safe, + ExplicitUnsafe(ast::NodeId), + PushUnsafe, + PopUnsafe } #[derive(Clone, Debug)] @@ -46,16 +70,15 @@ pub enum StmtRef<'tcx> { #[derive(Clone, Debug)] pub struct Stmt<'tcx> { - pub span: Span, pub kind: StmtKind<'tcx>, - pub opt_destruction_extent: Option, + pub opt_destruction_scope: Option, } #[derive(Clone, Debug)] pub enum StmtKind<'tcx> { Expr { /// scope for this statement; may be used as lifetime of temporaries - scope: CodeExtent, + scope: region::Scope, /// expression being evaluated in this statement expr: ExprRef<'tcx>, @@ -64,17 +87,20 @@ pub enum StmtKind<'tcx> { Let { /// scope for variables bound in this let; covers this and /// remaining statements in block - remainder_scope: CodeExtent, + remainder_scope: region::Scope, /// scope for the initialization itself; might be used as /// lifetime of temporaries - init_scope: CodeExtent, + init_scope: region::Scope, /// let = ... pattern: Pattern<'tcx>, /// let pat = ... 
- initializer: Option> + initializer: Option>, + + /// the lint level for this let-statement + lint_level: LintLevel, }, } @@ -99,7 +125,7 @@ pub struct Expr<'tcx> { /// lifetime of this expression if it should be spilled into a /// temporary; should be None only if in a constant context - pub temp_lifetime: Option, + pub temp_lifetime: Option, /// span of the expression in the source pub span: Span, @@ -111,14 +137,15 @@ pub struct Expr<'tcx> { #[derive(Clone, Debug)] pub enum ExprKind<'tcx> { Scope { - extent: CodeExtent, + region_scope: region::Scope, + lint_level: LintLevel, value: ExprRef<'tcx>, }, Box { value: ExprRef<'tcx>, }, Call { - ty: ty::Ty<'tcx>, + ty: Ty<'tcx>, fun: ExprRef<'tcx>, args: Vec>, }, @@ -208,11 +235,11 @@ pub enum ExprKind<'tcx> { arg: ExprRef<'tcx>, }, Break { - label: CodeExtent, + label: region::Scope, value: Option>, }, Continue { - label: CodeExtent, + label: region::Scope, }, Return { value: Option>, @@ -238,6 +265,7 @@ pub enum ExprKind<'tcx> { closure_id: DefId, substs: ClosureSubsts<'tcx>, upvars: Vec>, + interior: Option>, }, Literal { literal: Literal<'tcx>, @@ -247,6 +275,9 @@ pub enum ExprKind<'tcx> { outputs: Vec>, inputs: Vec> }, + Yield { + value: ExprRef<'tcx>, + }, } #[derive(Clone, Debug)] @@ -272,6 +303,7 @@ pub struct Arm<'tcx> { pub patterns: Vec>, pub guard: Option>, pub body: ExprRef<'tcx>, + pub lint_level: LintLevel, } #[derive(Copy, Clone, Debug)] diff --git a/src/librustc_mir/lib.rs b/src/librustc_mir/lib.rs index dba625e98f..7e4206e14c 100644 --- a/src/librustc_mir/lib.rs +++ b/src/librustc_mir/lib.rs @@ -18,22 +18,24 @@ Rust MIR: a lowered representation of Rust. Also: an experiment! #![feature(box_patterns)] #![feature(box_syntax)] +#![feature(const_fn)] +#![feature(core_intrinsics)] #![feature(i128_type)] #![feature(rustc_diagnostic_macros)] #![feature(placement_in_syntax)] #![feature(collection_placement)] #![feature(nonzero)] +#[macro_use] +extern crate bitflags; #[macro_use] extern crate log; extern crate graphviz as dot; #[macro_use] extern crate rustc; -extern crate rustc_data_structures; +#[macro_use] extern crate rustc_data_structures; +extern crate serialize as rustc_serialize; extern crate rustc_errors; #[macro_use] -#[no_link] -extern crate rustc_bitflags; -#[macro_use] extern crate syntax; extern crate syntax_pos; extern crate rustc_const_math; diff --git a/src/librustc_mir/shim.rs b/src/librustc_mir/shim.rs index 6bee1ceff8..a3a986918a 100644 --- a/src/librustc_mir/shim.rs +++ b/src/librustc_mir/shim.rs @@ -14,7 +14,7 @@ use rustc::infer; use rustc::middle::const_val::ConstVal; use rustc::mir::*; use rustc::mir::transform::MirSource; -use rustc::ty::{self, Ty}; +use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::subst::{Kind, Subst, Substs}; use rustc::ty::maps::Providers; use rustc_const_math::{ConstInt, ConstUsize}; @@ -36,7 +36,7 @@ pub fn provide(providers: &mut Providers) { providers.mir_shims = make_shim; } -fn make_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, +fn make_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: ty::InstanceDef<'tcx>) -> &'tcx Mir<'tcx> { @@ -47,7 +47,7 @@ fn make_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, bug!("item {:?} passed to make_shim", instance), ty::InstanceDef::FnPtrShim(def_id, ty) => { let trait_ = tcx.trait_of_item(def_id).unwrap(); - let adjustment = match tcx.lang_items.fn_trait_kind(trait_) { + let adjustment = match tcx.lang_items().fn_trait_kind(trait_) { Some(ty::ClosureKind::FnOnce) => Adjustment::Identity, Some(ty::ClosureKind::FnMut) | 
Some(ty::ClosureKind::Fn) => Adjustment::Deref, @@ -82,7 +82,7 @@ fn make_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, ) } ty::InstanceDef::ClosureOnceShim { call_once } => { - let fn_mut = tcx.lang_items.fn_mut_trait().unwrap(); + let fn_mut = tcx.lang_items().fn_mut_trait().unwrap(); let call_mut = tcx.global_tcx() .associated_items(fn_mut) .find(|it| it.kind == ty::AssociatedKind::Method) @@ -100,7 +100,7 @@ fn make_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, build_drop_shim(tcx, def_id, ty) } ty::InstanceDef::CloneShim(def_id, ty) => { - let name = tcx.item_name(def_id).as_str(); + let name = tcx.item_name(def_id); if name == "clone" { build_clone_shim(tcx, def_id, ty) } else if name == "clone_from" { @@ -140,6 +140,8 @@ fn temp_decl(mutability: Mutability, ty: Ty, span: Span) -> LocalDecl { LocalDecl { mutability, ty, name: None, source_info: SourceInfo { scope: ARGUMENT_VISIBILITY_SCOPE, span }, + lexical_scope: ARGUMENT_VISIBILITY_SCOPE, + internal: false, is_user_variable: false } } @@ -153,13 +155,19 @@ fn local_decls_for_sig<'tcx>(sig: &ty::FnSig<'tcx>, span: Span) .collect() } -fn build_drop_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, +fn build_drop_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, ty: Option>) -> Mir<'tcx> { debug!("build_drop_shim(def_id={:?}, ty={:?})", def_id, ty); + // Check if this is a generator, if so, return the drop glue for it + if let Some(&ty::TyS { sty: ty::TyGenerator(gen_def_id, substs, _), .. }) = ty { + let mir = &**tcx.optimized_mir(gen_def_id).generator_drop.as_ref().unwrap(); + return mir.subst(tcx, substs.substs); + } + let substs = if let Some(ty) = ty { tcx.mk_substs(iter::once(Kind::from(ty))) } else { @@ -188,8 +196,10 @@ fn build_drop_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, IndexVec::from_elem_n( VisibilityScopeData { span: span, parent_scope: None }, 1 ), + ClearOnDecode::Clear, IndexVec::new(), sig.output(), + None, local_decls_for_sig(&sig, span), sig.inputs().len(), vec![], @@ -225,10 +235,10 @@ fn build_drop_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, } pub struct DropShimElaborator<'a, 'tcx: 'a> { - mir: &'a Mir<'tcx>, - patch: MirPatch<'tcx>, - tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, + pub mir: &'a Mir<'tcx>, + pub patch: MirPatch<'tcx>, + pub tcx: TyCtxt<'a, 'tcx, 'tcx>, + pub param_env: ty::ParamEnv<'tcx>, } impl<'a, 'tcx> fmt::Debug for DropShimElaborator<'a, 'tcx> { @@ -242,7 +252,7 @@ impl<'a, 'tcx> DropElaborator<'a, 'tcx> for DropShimElaborator<'a, 'tcx> { fn patch(&mut self) -> &mut MirPatch<'tcx> { &mut self.patch } fn mir(&self) -> &'a Mir<'tcx> { self.mir } - fn tcx(&self) -> ty::TyCtxt<'a, 'tcx, 'tcx> { self.tcx } + fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { self.tcx } fn param_env(&self) -> ty::ParamEnv<'tcx> { self.param_env } fn drop_style(&self, _path: Self::Path, mode: DropFlagMode) -> DropStyle { @@ -272,9 +282,9 @@ impl<'a, 'tcx> DropElaborator<'a, 'tcx> for DropShimElaborator<'a, 'tcx> { } /// Build a `Clone::clone` shim for `self_ty`. Here, `def_id` is `Clone::clone`. 
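The clone shim built below is emitted directly as MIR, but for arrays and tuple-like types it amounts to an element-by-element call to `Clone::clone`. A rough surface-level sketch of the array case, happy path only (the real shim also wires up cleanup blocks that drop the already-cloned elements on unwind; the function name and fixed length here are illustrative):

    fn clone_array_like<T: Clone>(src: &[T; 4]) -> [T; 4] {
        // Tuple-like shims (tuples and, with this change, closures) do the
        // same thing field by field instead of element by element.
        [
            src[0].clone(),
            src[1].clone(),
            src[2].clone(),
            src[3].clone(),
        ]
    }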
-fn build_clone_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, +fn build_clone_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, - self_ty: ty::Ty<'tcx>) + self_ty: Ty<'tcx>) -> Mir<'tcx> { debug!("build_clone_shim(def_id={:?})", def_id); @@ -284,10 +294,19 @@ fn build_clone_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, match self_ty.sty { _ if is_copy => builder.copy_shim(), - ty::TyArray(ty, len) => builder.array_shim(ty, len), - ty::TyTuple(tys, _) => builder.tuple_shim(tys), + ty::TyArray(ty, len) => { + let len = len.val.to_const_int().unwrap().to_u64().unwrap(); + builder.array_shim(ty, len) + } + ty::TyClosure(def_id, substs) => { + builder.tuple_like_shim( + &substs.upvar_tys(def_id, tcx).collect::>(), + AggregateKind::Closure(def_id, substs) + ) + } + ty::TyTuple(tys, _) => builder.tuple_like_shim(&**tys, AggregateKind::Tuple), _ => { - bug!("clone shim for `{:?}` which is not `Copy` and is not an aggregate", self_ty); + bug!("clone shim for `{:?}` which is not `Copy` and is not an aggregate", self_ty) } }; @@ -295,7 +314,7 @@ fn build_clone_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, } struct CloneShimBuilder<'a, 'tcx: 'a> { - tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, local_decls: IndexVec>, blocks: IndexVec>, @@ -304,7 +323,7 @@ struct CloneShimBuilder<'a, 'tcx: 'a> { } impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { - fn new(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Self { + fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Self { let sig = tcx.fn_sig(def_id); let sig = tcx.erase_late_bound_regions(&sig); let span = tcx.def_span(def_id); @@ -325,8 +344,10 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { IndexVec::from_elem_n( VisibilityScopeData { span: self.span, parent_scope: None }, 1 ), + ClearOnDecode::Clear, IndexVec::new(), self.sig.output(), + None, self.local_decls, self.sig.inputs().len(), vec![], @@ -370,7 +391,7 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { self.block(vec![ret_statement], TerminatorKind::Return, false); } - fn make_lvalue(&mut self, mutability: Mutability, ty: ty::Ty<'tcx>) -> Lvalue<'tcx> { + fn make_lvalue(&mut self, mutability: Mutability, ty: Ty<'tcx>) -> Lvalue<'tcx> { let span = self.span; Lvalue::Local( self.local_decls.push(temp_decl(mutability, ty, span)) @@ -379,7 +400,7 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { fn make_clone_call( &mut self, - ty: ty::Ty<'tcx>, + ty: Ty<'tcx>, rcvr_field: Lvalue<'tcx>, next: BasicBlock, cleanup: BasicBlock @@ -394,11 +415,15 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { ); // `func == Clone::clone(&ty) -> ty` + let func_ty = tcx.mk_fn_def(self.def_id, substs); let func = Operand::Constant(box Constant { span: self.span, - ty: tcx.mk_fn_def(self.def_id, substs), + ty: func_ty, literal: Literal::Value { - value: ConstVal::Function(self.def_id, substs), + value: tcx.mk_const(ty::Const { + val: ConstVal::Function(self.def_id, substs), + ty: func_ty + }), }, }); @@ -457,22 +482,26 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { ); } - fn make_usize(&self, value: usize) -> Box> { - let value = ConstUsize::new(value as u64, self.tcx.sess.target.uint_type).unwrap(); + fn make_usize(&self, value: u64) -> Box> { + let value = ConstUsize::new(value, self.tcx.sess.target.usize_ty).unwrap(); box Constant { span: self.span, ty: self.tcx.types.usize, literal: Literal::Value { - value: ConstVal::Integral(ConstInt::Usize(value)) + value: self.tcx.mk_const(ty::Const { + val: ConstVal::Integral(ConstInt::Usize(value)), + ty: self.tcx.types.usize, + }) } } } - 
fn array_shim(&mut self, ty: ty::Ty<'tcx>, len: usize) { + fn array_shim(&mut self, ty: Ty<'tcx>, len: u64) { let tcx = self.tcx; + let span = self.span; let rcvr = Lvalue::Local(Local::new(1+0)).deref(); - let beg = self.make_lvalue(Mutability::Mut, tcx.types.usize); + let beg = self.local_decls.push(temp_decl(Mutability::Mut, tcx.types.usize, span)); let end = self.make_lvalue(Mutability::Not, tcx.types.usize); let ret = self.make_lvalue(Mutability::Mut, tcx.mk_array(ty, len)); @@ -483,7 +512,7 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { let inits = vec![ self.make_statement( StatementKind::Assign( - beg.clone(), + Lvalue::Local(beg), Rvalue::Use(Operand::Constant(self.make_usize(0))) ) ), @@ -501,19 +530,19 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { // BB #3; // } // BB #4; - self.loop_header(beg.clone(), end, BasicBlock::new(2), BasicBlock::new(4), false); + self.loop_header(Lvalue::Local(beg), end, BasicBlock::new(2), BasicBlock::new(4), false); // BB #2 // `let cloned = Clone::clone(rcvr[beg])`; // Goto #3 if ok, #5 if unwinding happens. - let rcvr_field = rcvr.clone().index(Operand::Consume(beg.clone())); + let rcvr_field = rcvr.clone().index(beg); let cloned = self.make_clone_call(ty, rcvr_field, BasicBlock::new(3), BasicBlock::new(5)); // BB #3 // `ret[beg] = cloned;` // `beg = beg + 1;` // `goto #1`; - let ret_field = ret.clone().index(Operand::Consume(beg.clone())); + let ret_field = ret.clone().index(beg); let statements = vec![ self.make_statement( StatementKind::Assign( @@ -523,10 +552,10 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { ), self.make_statement( StatementKind::Assign( - beg.clone(), + Lvalue::Local(beg), Rvalue::BinaryOp( BinOp::Add, - Operand::Consume(beg.clone()), + Operand::Consume(Lvalue::Local(beg)), Operand::Constant(self.make_usize(1)) ) ) @@ -549,10 +578,10 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { // `let mut beg = 0;` // goto #6; let end = beg; - let beg = self.make_lvalue(Mutability::Mut, tcx.types.usize); + let beg = self.local_decls.push(temp_decl(Mutability::Mut, tcx.types.usize, span)); let init = self.make_statement( StatementKind::Assign( - beg.clone(), + Lvalue::Local(beg), Rvalue::Use(Operand::Constant(self.make_usize(0))) ) ); @@ -563,12 +592,13 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { // BB #8; // } // BB #9; - self.loop_header(beg.clone(), end, BasicBlock::new(7), BasicBlock::new(9), true); + self.loop_header(Lvalue::Local(beg), Lvalue::Local(end), + BasicBlock::new(7), BasicBlock::new(9), true); // BB #7 (cleanup) // `drop(ret[beg])`; self.block(vec![], TerminatorKind::Drop { - location: ret.index(Operand::Consume(beg.clone())), + location: ret.index(beg), target: BasicBlock::new(8), unwind: None, }, true); @@ -578,10 +608,10 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { // `goto #6;` let statement = self.make_statement( StatementKind::Assign( - beg.clone(), + Lvalue::Local(beg), Rvalue::BinaryOp( BinOp::Add, - Operand::Consume(beg.clone()), + Operand::Consume(Lvalue::Local(beg)), Operand::Constant(self.make_usize(1)) ) ) @@ -592,7 +622,12 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { self.block(vec![], TerminatorKind::Resume, true); } - fn tuple_shim(&mut self, tys: &ty::Slice>) { + fn tuple_like_shim(&mut self, tys: &[ty::Ty<'tcx>], kind: AggregateKind<'tcx>) { + match kind { + AggregateKind::Tuple | AggregateKind::Closure(..) 
=> (), + _ => bug!("only tuples and closures are accepted"), + }; + let rcvr = Lvalue::Local(Local::new(1+0)).deref(); let mut returns = Vec::new(); @@ -625,17 +660,17 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { } } - // `return (returns[0], returns[1], ..., returns[tys.len() - 1]);` + // `return kind(returns[0], returns[1], ..., returns[tys.len() - 1]);` let ret_statement = self.make_statement( StatementKind::Assign( Lvalue::Local(RETURN_POINTER), Rvalue::Aggregate( - box AggregateKind::Tuple, + box kind, returns.into_iter().map(Operand::Consume).collect() ) ) ); - self.block(vec![ret_statement], TerminatorKind::Return, false); + self.block(vec![ret_statement], TerminatorKind::Return, false); } } @@ -645,7 +680,7 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { /// /// If `untuple_args` is a vec of types, the second argument of the /// function will be untupled as these types. -fn build_call_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, +fn build_call_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, rcvr_adjustment: Adjustment, call_kind: CallKind, @@ -695,17 +730,21 @@ fn build_call_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, let (callee, mut args) = match call_kind { CallKind::Indirect => (rcvr, vec![]), - CallKind::Direct(def_id) => ( - Operand::Constant(box Constant { + CallKind::Direct(def_id) => { + let ty = tcx.type_of(def_id); + (Operand::Constant(box Constant { span, - ty: tcx.type_of(def_id), + ty, literal: Literal::Value { - value: ConstVal::Function(def_id, - Substs::identity_for_item(tcx, def_id)), + value: tcx.mk_const(ty::Const { + val: ConstVal::Function(def_id, + Substs::identity_for_item(tcx, def_id)), + ty + }), }, - }), - vec![rcvr] - ) + }), + vec![rcvr]) + } }; if let Some(untuple_args) = untuple_args { @@ -768,8 +807,10 @@ fn build_call_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, IndexVec::from_elem_n( VisibilityScopeData { span: span, parent_scope: None }, 1 ), + ClearOnDecode::Clear, IndexVec::new(), sig.output(), + None, local_decls, sig.inputs().len(), vec![], @@ -839,8 +880,10 @@ pub fn build_adt_ctor<'a, 'gcx, 'tcx>(infcx: &infer::InferCtxt<'a, 'gcx, 'tcx>, IndexVec::from_elem_n( VisibilityScopeData { span: span, parent_scope: None }, 1 ), + ClearOnDecode::Clear, IndexVec::new(), sig.output(), + None, local_decls, sig.inputs().len(), vec![], diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index 52c2eaa7cb..8fad538af9 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ b/src/librustc_mir/transform/add_validation.rs @@ -18,7 +18,7 @@ use rustc::ty::{self, TyCtxt, RegionKind}; use rustc::hir; use rustc::mir::*; use rustc::mir::transform::{MirPass, MirSource}; -use rustc::middle::region::CodeExtent; +use rustc::middle::region; pub struct AddValidation; @@ -27,7 +27,7 @@ fn lval_context<'a, 'tcx, D>( lval: &Lvalue<'tcx>, local_decls: &D, tcx: TyCtxt<'a, 'tcx, 'tcx> -) -> (Option, hir::Mutability) +) -> (Option, hir::Mutability) where D: HasLocalDecls<'tcx> { use rustc::mir::Lvalue::*; diff --git a/src/librustc_mir/transform/check_unsafety.rs b/src/librustc_mir/transform/check_unsafety.rs new file mode 100644 index 0000000000..ac76c8e95d --- /dev/null +++ b/src/librustc_mir/transform/check_unsafety.rs @@ -0,0 +1,422 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc_data_structures::fx::FxHashSet; +use rustc_data_structures::indexed_vec::IndexVec; + +use rustc::ty::maps::Providers; +use rustc::ty::{self, TyCtxt}; +use rustc::hir; +use rustc::hir::def::Def; +use rustc::hir::def_id::DefId; +use rustc::hir::map::{DefPathData, Node}; +use rustc::lint::builtin::{SAFE_EXTERN_STATICS, UNUSED_UNSAFE}; +use rustc::mir::*; +use rustc::mir::visit::{LvalueContext, Visitor}; + +use syntax::ast; + +use std::rc::Rc; + + +pub struct UnsafetyChecker<'a, 'tcx: 'a> { + mir: &'a Mir<'tcx>, + visibility_scope_info: &'a IndexVec, + violations: Vec, + source_info: SourceInfo, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + used_unsafe: FxHashSet, + inherited_blocks: Vec<(ast::NodeId, bool)>, +} + +impl<'a, 'gcx, 'tcx> UnsafetyChecker<'a, 'tcx> { + fn new(mir: &'a Mir<'tcx>, + visibility_scope_info: &'a IndexVec, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>) -> Self { + Self { + mir, + visibility_scope_info, + violations: vec![], + source_info: SourceInfo { + span: mir.span, + scope: ARGUMENT_VISIBILITY_SCOPE + }, + tcx, + param_env, + used_unsafe: FxHashSet(), + inherited_blocks: vec![], + } + } +} + +impl<'a, 'tcx> Visitor<'tcx> for UnsafetyChecker<'a, 'tcx> { + fn visit_terminator(&mut self, + block: BasicBlock, + terminator: &Terminator<'tcx>, + location: Location) + { + self.source_info = terminator.source_info; + match terminator.kind { + TerminatorKind::Goto { .. } | + TerminatorKind::SwitchInt { .. } | + TerminatorKind::Drop { .. } | + TerminatorKind::Yield { .. } | + TerminatorKind::Assert { .. } | + TerminatorKind::DropAndReplace { .. } | + TerminatorKind::GeneratorDrop | + TerminatorKind::Resume | + TerminatorKind::Return | + TerminatorKind::Unreachable => { + // safe (at least as emitted during MIR construction) + } + + TerminatorKind::Call { ref func, .. } => { + let func_ty = func.ty(self.mir, self.tcx); + let sig = func_ty.fn_sig(self.tcx); + if let hir::Unsafety::Unsafe = sig.unsafety() { + self.require_unsafe("call to unsafe function") + } + } + } + self.super_terminator(block, terminator, location); + } + + fn visit_statement(&mut self, + block: BasicBlock, + statement: &Statement<'tcx>, + location: Location) + { + self.source_info = statement.source_info; + match statement.kind { + StatementKind::Assign(..) | + StatementKind::SetDiscriminant { .. } | + StatementKind::StorageLive(..) | + StatementKind::StorageDead(..) | + StatementKind::EndRegion(..) | + StatementKind::Validate(..) | + StatementKind::Nop => { + // safe (at least as emitted during MIR construction) + } + + StatementKind::InlineAsm { .. } => { + self.require_unsafe("use of inline assembly") + }, + } + self.super_statement(block, statement, location); + } + + fn visit_rvalue(&mut self, + rvalue: &Rvalue<'tcx>, + location: Location) + { + if let &Rvalue::Aggregate(box ref aggregate, _) = rvalue { + match aggregate { + &AggregateKind::Array(..) | + &AggregateKind::Tuple | + &AggregateKind::Adt(..) 
=> {} + &AggregateKind::Closure(def_id, _) | + &AggregateKind::Generator(def_id, _, _) => { + let UnsafetyCheckResult { + violations, unsafe_blocks + } = self.tcx.unsafety_check_result(def_id); + self.register_violations(&violations, &unsafe_blocks); + } + } + } + self.super_rvalue(rvalue, location); + } + + fn visit_lvalue(&mut self, + lvalue: &Lvalue<'tcx>, + context: LvalueContext<'tcx>, + location: Location) { + match lvalue { + &Lvalue::Projection(box Projection { + ref base, ref elem + }) => { + let old_source_info = self.source_info; + if let &Lvalue::Local(local) = base { + if self.mir.local_decls[local].internal { + // Internal locals are used in the `move_val_init` desugaring. + // We want to check unsafety against the source info of the + // desugaring, rather than the source info of the RHS. + self.source_info = self.mir.local_decls[local].source_info; + } + } + let base_ty = base.ty(self.mir, self.tcx).to_ty(self.tcx); + match base_ty.sty { + ty::TyRawPtr(..) => { + self.require_unsafe("dereference of raw pointer") + } + ty::TyAdt(adt, _) if adt.is_union() => { + if context == LvalueContext::Store || + context == LvalueContext::Drop + { + let elem_ty = match elem { + &ProjectionElem::Field(_, ty) => ty, + _ => span_bug!( + self.source_info.span, + "non-field projection {:?} from union?", + lvalue) + }; + if elem_ty.moves_by_default(self.tcx, self.param_env, + self.source_info.span) { + self.require_unsafe( + "assignment to non-`Copy` union field") + } else { + // write to non-move union, safe + } + } else { + self.require_unsafe("access to union field") + } + } + _ => {} + } + self.source_info = old_source_info; + } + &Lvalue::Local(..) => { + // locals are safe + } + &Lvalue::Static(box Static { def_id, ty: _ }) => { + if self.is_static_mut(def_id) { + self.require_unsafe("use of mutable static"); + } else if self.tcx.is_foreign_item(def_id) { + let source_info = self.source_info; + let lint_root = + self.visibility_scope_info[source_info.scope].lint_root; + self.register_violations(&[UnsafetyViolation { + source_info, + description: "use of extern static", + lint_node_id: Some(lint_root) + }], &[]); + } + } + } + self.super_lvalue(lvalue, context, location); + } +} + +impl<'a, 'tcx> UnsafetyChecker<'a, 'tcx> { + fn is_static_mut(&self, def_id: DefId) -> bool { + if let Some(node) = self.tcx.hir.get_if_local(def_id) { + match node { + Node::NodeItem(&hir::Item { + node: hir::ItemStatic(_, hir::MutMutable, _), .. + }) => true, + Node::NodeForeignItem(&hir::ForeignItem { + node: hir::ForeignItemStatic(_, mutbl), .. 
+ }) => mutbl, + _ => false + } + } else { + match self.tcx.describe_def(def_id) { + Some(Def::Static(_, mutbl)) => mutbl, + _ => false + } + } + } + fn require_unsafe(&mut self, + description: &'static str) + { + let source_info = self.source_info; + self.register_violations(&[UnsafetyViolation { + source_info, description, lint_node_id: None + }], &[]); + } + + fn register_violations(&mut self, + violations: &[UnsafetyViolation], + unsafe_blocks: &[(ast::NodeId, bool)]) { + let within_unsafe = match self.visibility_scope_info[self.source_info.scope].safety { + Safety::Safe => { + for violation in violations { + if !self.violations.contains(violation) { + self.violations.push(violation.clone()) + } + } + + false + } + Safety::BuiltinUnsafe | Safety::FnUnsafe => true, + Safety::ExplicitUnsafe(node_id) => { + if !violations.is_empty() { + self.used_unsafe.insert(node_id); + } + true + } + }; + self.inherited_blocks.extend(unsafe_blocks.iter().map(|&(node_id, is_used)| { + (node_id, is_used && !within_unsafe) + })); + } +} + +pub(crate) fn provide(providers: &mut Providers) { + *providers = Providers { + unsafety_check_result, + ..*providers + }; +} + +struct UnusedUnsafeVisitor<'a> { + used_unsafe: &'a FxHashSet, + unsafe_blocks: &'a mut Vec<(ast::NodeId, bool)>, +} + +impl<'a, 'tcx> hir::intravisit::Visitor<'tcx> for UnusedUnsafeVisitor<'a> { + fn nested_visit_map<'this>(&'this mut self) -> + hir::intravisit::NestedVisitorMap<'this, 'tcx> + { + hir::intravisit::NestedVisitorMap::None + } + + fn visit_block(&mut self, block: &'tcx hir::Block) { + hir::intravisit::walk_block(self, block); + + if let hir::UnsafeBlock(hir::UserProvided) = block.rules { + self.unsafe_blocks.push((block.id, self.used_unsafe.contains(&block.id))); + } + } +} + +fn check_unused_unsafe<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId, + used_unsafe: &FxHashSet, + unsafe_blocks: &'a mut Vec<(ast::NodeId, bool)>) +{ + let body_id = + tcx.hir.as_local_node_id(def_id).and_then(|node_id| { + tcx.hir.maybe_body_owned_by(node_id) + }); + + let body_id = match body_id { + Some(body) => body, + None => { + debug!("check_unused_unsafe({:?}) - no body found", def_id); + return + } + }; + let body = tcx.hir.body(body_id); + debug!("check_unused_unsafe({:?}, body={:?}, used_unsafe={:?})", + def_id, body, used_unsafe); + + let mut visitor = UnusedUnsafeVisitor { used_unsafe, unsafe_blocks }; + hir::intravisit::Visitor::visit_body(&mut visitor, body); +} + +fn unsafety_check_result<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) + -> UnsafetyCheckResult +{ + debug!("unsafety_violations({:?})", def_id); + + // NB: this borrow is valid because all the consumers of + // `mir_built` force this. 
+ let mir = &tcx.mir_built(def_id).borrow(); + + let visibility_scope_info = match mir.visibility_scope_info { + ClearOnDecode::Set(ref data) => data, + ClearOnDecode::Clear => { + debug!("unsafety_violations: {:?} - remote, skipping", def_id); + return UnsafetyCheckResult { + violations: Rc::new([]), + unsafe_blocks: Rc::new([]) + } + } + }; + + let param_env = tcx.param_env(def_id); + let mut checker = UnsafetyChecker::new( + mir, visibility_scope_info, tcx, param_env); + checker.visit_mir(mir); + + check_unused_unsafe(tcx, def_id, &checker.used_unsafe, &mut checker.inherited_blocks); + UnsafetyCheckResult { + violations: checker.violations.into(), + unsafe_blocks: checker.inherited_blocks.into() + } +} + +/// Return the NodeId for an enclosing scope that is also `unsafe` +fn is_enclosed(tcx: TyCtxt, + used_unsafe: &FxHashSet, + id: ast::NodeId) -> Option<(String, ast::NodeId)> { + let parent_id = tcx.hir.get_parent_node(id); + if parent_id != id { + if used_unsafe.contains(&parent_id) { + Some(("block".to_string(), parent_id)) + } else if let Some(hir::map::NodeItem(&hir::Item { + node: hir::ItemFn(_, hir::Unsafety::Unsafe, _, _, _, _), + .. + })) = tcx.hir.find(parent_id) { + Some(("fn".to_string(), parent_id)) + } else { + is_enclosed(tcx, used_unsafe, parent_id) + } + } else { + None + } +} + +fn report_unused_unsafe(tcx: TyCtxt, used_unsafe: &FxHashSet, id: ast::NodeId) { + let span = tcx.hir.span(id); + let mut db = tcx.struct_span_lint_node(UNUSED_UNSAFE, id, span, "unnecessary `unsafe` block"); + db.span_label(span, "unnecessary `unsafe` block"); + if let Some((kind, id)) = is_enclosed(tcx, used_unsafe, id) { + db.span_note(tcx.hir.span(id), + &format!("because it's nested under this `unsafe` {}", kind)); + } + db.emit(); +} + +pub fn check_unsafety<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) { + debug!("check_unsafety({:?})", def_id); + match tcx.def_key(def_id).disambiguated_data.data { + // closures are handled by their parent fn. + DefPathData::ClosureExpr => return, + _ => {} + }; + + let UnsafetyCheckResult { + violations, + unsafe_blocks + } = tcx.unsafety_check_result(def_id); + + for &UnsafetyViolation { + source_info, description, lint_node_id + } in violations.iter() { + // Report an error. 
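Concretely, the violations being reported here come from code like the hypothetical example below: most of them surface as hard E0133 errors, the extern-static case is routed through the SAFE_EXTERN_STATICS lint, and `unsafe` blocks whose contents never needed them are reported separately through `unused_unsafe`:

    static mut COUNTER: u32 = 0;

    fn main() {
        // COUNTER += 1;        // error[E0133]: use of mutable static requires
        //                      //               unsafe function or block
        unsafe { COUNTER += 1 } // fine: the operation sits inside an `unsafe` block
        unsafe {}               // flagged by `unused_unsafe`: nothing inside it
                                // actually needs `unsafe`
    }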
+ if let Some(lint_node_id) = lint_node_id { + tcx.lint_node(SAFE_EXTERN_STATICS, + lint_node_id, + source_info.span, + &format!("{} requires unsafe function or \ + block (error E0133)", description)); + } else { + struct_span_err!( + tcx.sess, source_info.span, E0133, + "{} requires unsafe function or block", description) + .span_label(source_info.span, description) + .emit(); + } + } + + let mut unsafe_blocks: Vec<_> = unsafe_blocks.into_iter().collect(); + unsafe_blocks.sort(); + let used_unsafe: FxHashSet<_> = unsafe_blocks.iter() + .flat_map(|&&(id, used)| if used { Some(id) } else { None }) + .collect(); + for &(block_id, is_used) in unsafe_blocks { + if !is_used { + report_unused_unsafe(tcx, &used_unsafe, block_id); + } + } +} diff --git a/src/librustc_mir/transform/clean_end_regions.rs b/src/librustc_mir/transform/clean_end_regions.rs index f06b88551d..a6750f400b 100644 --- a/src/librustc_mir/transform/clean_end_regions.rs +++ b/src/librustc_mir/transform/clean_end_regions.rs @@ -21,7 +21,7 @@ use rustc_data_structures::fx::FxHashSet; -use rustc::middle::region::CodeExtent; +use rustc::middle::region; use rustc::mir::transform::{MirPass, MirSource}; use rustc::mir::{BasicBlock, Location, Mir, Rvalue, Statement, StatementKind}; use rustc::mir::visit::{MutVisitor, Visitor, Lookup}; @@ -30,18 +30,20 @@ use rustc::ty::{Ty, RegionKind, TyCtxt}; pub struct CleanEndRegions; struct GatherBorrowedRegions { - seen_regions: FxHashSet, + seen_regions: FxHashSet, } struct DeleteTrivialEndRegions<'a> { - seen_regions: &'a FxHashSet, + seen_regions: &'a FxHashSet, } impl MirPass for CleanEndRegions { fn run_pass<'a, 'tcx>(&self, - _tcx: TyCtxt<'a, 'tcx, 'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, _source: MirSource, mir: &mut Mir<'tcx>) { + if !tcx.sess.emit_end_regions() { return; } + let mut gather = GatherBorrowedRegions { seen_regions: FxHashSet() }; @@ -84,8 +86,8 @@ impl<'a, 'tcx> MutVisitor<'tcx> for DeleteTrivialEndRegions<'a> { location: Location) { let mut delete_it = false; - if let StatementKind::EndRegion(ref extent) = statement.kind { - if !self.seen_regions.contains(extent) { + if let StatementKind::EndRegion(ref region_scope) = statement.kind { + if !self.seen_regions.contains(region_scope) { delete_it = true; } } diff --git a/src/librustc_mir/transform/copy_prop.rs b/src/librustc_mir/transform/copy_prop.rs index 59b81f7e77..ac8ebd306d 100644 --- a/src/librustc_mir/transform/copy_prop.rs +++ b/src/librustc_mir/transform/copy_prop.rs @@ -60,6 +60,7 @@ impl MirPass for CopyPropagation { return } } + MirSource::GeneratorDrop(_) => (), } // We only run when the MIR optimization level is > 1. @@ -235,8 +236,7 @@ impl<'tcx> Action<'tcx> { } // Replace all uses of the destination local with the source local. - let src_lvalue = Lvalue::Local(src_local); - def_use_analysis.replace_all_defs_and_uses_with(dest_local, mir, src_lvalue); + def_use_analysis.replace_all_defs_and_uses_with(dest_local, mir, src_local); // Finally, zap the now-useless assignment instruction. 
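The replacement above is plain copy propagation: every definition and use of the destination local is rewritten to the source local (now passed as a `Local` rather than an `Lvalue`), after which the trivial copy can be removed. A minimal source-level analogue (the `before`/`after` functions are illustrative; the pass itself works on MIR locals such as `_1` and `_2`):

    fn before(x: u32) -> u32 {
        let tmp = x; // the trivial copy: in MIR terms, `_2 = _1`
        tmp + 1      // a use of the copy, `_2`
    }

    fn after(x: u32) -> u32 {
        x + 1        // uses rewritten to `_1`; the dead copy is deleted
    }

    fn main() {
        assert_eq!(before(41), after(41));
    }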
debug!(" Deleting assignment"); diff --git a/src/librustc_mir/transform/elaborate_drops.rs b/src/librustc_mir/transform/elaborate_drops.rs index 97391452e5..be1b794ecd 100644 --- a/src/librustc_mir/transform/elaborate_drops.rs +++ b/src/librustc_mir/transform/elaborate_drops.rs @@ -29,7 +29,6 @@ use syntax::ast; use syntax_pos::Span; use std::fmt; -use std::u32; pub struct ElaborateDrops; @@ -46,7 +45,7 @@ impl MirPass for ElaborateDrops { } let id = src.item_id(); let param_env = tcx.param_env(tcx.hir.local_def_id(id)); - let move_data = MoveData::gather_moves(mir, tcx, param_env); + let move_data = MoveData::gather_moves(mir, tcx, param_env).unwrap(); let elaborate_patch = { let mir = &*mir; let env = MoveDataParamEnv { @@ -96,42 +95,42 @@ fn find_dead_unwinds<'a, 'tcx>( MaybeInitializedLvals::new(tcx, mir, &env), |bd, p| &bd.move_data().move_paths[p]); for (bb, bb_data) in mir.basic_blocks().iter_enumerated() { - match bb_data.terminator().kind { + let location = match bb_data.terminator().kind { TerminatorKind::Drop { ref location, unwind: Some(_), .. } | - TerminatorKind::DropAndReplace { ref location, unwind: Some(_), .. } => { - let mut init_data = InitializationData { - live: flow_inits.sets().on_entry_set_for(bb.index()).to_owned(), - dead: IdxSetBuf::new_empty(env.move_data.move_paths.len()), - }; - debug!("find_dead_unwinds @ {:?}: {:?}; init_data={:?}", - bb, bb_data, init_data.live); - for stmt in 0..bb_data.statements.len() { - let loc = Location { block: bb, statement_index: stmt }; - init_data.apply_location(tcx, mir, env, loc); - } + TerminatorKind::DropAndReplace { ref location, unwind: Some(_), .. } => location, + _ => continue, + }; - let path = match env.move_data.rev_lookup.find(location) { - LookupResult::Exact(e) => e, - LookupResult::Parent(..) => { - debug!("find_dead_unwinds: has parent; skipping"); - continue - } - }; + let mut init_data = InitializationData { + live: flow_inits.sets().on_entry_set_for(bb.index()).to_owned(), + dead: IdxSetBuf::new_empty(env.move_data.move_paths.len()), + }; + debug!("find_dead_unwinds @ {:?}: {:?}; init_data={:?}", + bb, bb_data, init_data.live); + for stmt in 0..bb_data.statements.len() { + let loc = Location { block: bb, statement_index: stmt }; + init_data.apply_location(tcx, mir, env, loc); + } - debug!("find_dead_unwinds @ {:?}: path({:?})={:?}", bb, location, path); + let path = match env.move_data.rev_lookup.find(location) { + LookupResult::Exact(e) => e, + LookupResult::Parent(..) 
=> { + debug!("find_dead_unwinds: has parent; skipping"); + continue + } + }; - let mut maybe_live = false; - on_all_drop_children_bits(tcx, mir, &env, path, |child| { - let (child_maybe_live, _) = init_data.state(child); - maybe_live |= child_maybe_live; - }); + debug!("find_dead_unwinds @ {:?}: path({:?})={:?}", bb, location, path); - debug!("find_dead_unwinds @ {:?}: maybe_live={}", bb, maybe_live); - if !maybe_live { - dead_unwinds.add(&bb); - } - } - _ => {} + let mut maybe_live = false; + on_all_drop_children_bits(tcx, mir, &env, path, |child| { + let (child_maybe_live, _) = init_data.state(child); + maybe_live |= child_maybe_live; + }); + + debug!("find_dead_unwinds @ {:?}: maybe_live={}", bb, maybe_live); + if !maybe_live { + dead_unwinds.add(&bb); } } @@ -193,7 +192,7 @@ impl<'a, 'b, 'tcx> DropElaborator<'a, 'tcx> for Elaborator<'a, 'b, 'tcx> { self.ctxt.mir } - fn tcx(&self) -> ty::TyCtxt<'a, 'tcx, 'tcx> { + fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { self.ctxt.tcx } @@ -314,7 +313,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { let patch = &mut self.patch; debug!("create_drop_flag({:?})", self.mir.span); self.drop_flags.entry(index).or_insert_with(|| { - patch.new_temp(tcx.types.bool, span) + patch.new_internal(tcx.types.bool, span) }); } @@ -521,7 +520,12 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { Rvalue::Use(Operand::Constant(Box::new(Constant { span, ty: self.tcx.types.bool, - literal: Literal::Value { value: ConstVal::Bool(val) } + literal: Literal::Value { + value: self.tcx.mk_const(ty::Const { + val: ConstVal::Bool(val), + ty: self.tcx.types.bool + }) + } }))) } diff --git a/src/librustc_mir/transform/erase_regions.rs b/src/librustc_mir/transform/erase_regions.rs index fa51cd91be..dc18cdd8f0 100644 --- a/src/librustc_mir/transform/erase_regions.rs +++ b/src/librustc_mir/transform/erase_regions.rs @@ -15,7 +15,7 @@ //! "types-as-contracts"-validation, namely, AcquireValid, ReleaseValid, and EndRegion. use rustc::ty::subst::Substs; -use rustc::ty::{Ty, TyCtxt, ClosureSubsts}; +use rustc::ty::{self, Ty, TyCtxt}; use rustc::mir::*; use rustc::mir::visit::{MutVisitor, Lookup}; use rustc::mir::transform::{MirPass, MirSource}; @@ -37,38 +37,25 @@ impl<'a, 'tcx> EraseRegionsVisitor<'a, 'tcx> { impl<'a, 'tcx> MutVisitor<'tcx> for EraseRegionsVisitor<'a, 'tcx> { fn visit_ty(&mut self, ty: &mut Ty<'tcx>, _: Lookup) { if !self.in_validation_statement { - *ty = self.tcx.erase_regions(&{*ty}); + *ty = self.tcx.erase_regions(ty); } self.super_ty(ty); } - fn visit_substs(&mut self, substs: &mut &'tcx Substs<'tcx>, _: Location) { - *substs = self.tcx.erase_regions(&{*substs}); + fn visit_region(&mut self, region: &mut ty::Region<'tcx>, _: Location) { + *region = self.tcx.types.re_erased; } - fn visit_rvalue(&mut self, rvalue: &mut Rvalue<'tcx>, location: Location) { - match *rvalue { - Rvalue::Ref(ref mut r, _, _) => { - *r = self.tcx.types.re_erased; - } - Rvalue::Use(..) | - Rvalue::Repeat(..) | - Rvalue::Len(..) | - Rvalue::Cast(..) | - Rvalue::BinaryOp(..) | - Rvalue::CheckedBinaryOp(..) | - Rvalue::UnaryOp(..) | - Rvalue::Discriminant(..) | - Rvalue::NullaryOp(..) | - Rvalue::Aggregate(..) => { - // These variants don't contain regions. 
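Erasing a region simply replaces it with the single erased region, so for example `&'a mut Vec<&'b str>` and `&'static mut Vec<&'static str>` become the same type. Once type checking and borrow checking are done, lifetimes carry no information that affects code generation, which is why the rewritten visitor below can erase regions uniformly in types, substs, and constants instead of special-casing `Rvalue::Ref`. A small standalone illustration of that runtime irrelevance (illustrative function names):

    fn first_byte<'a>(s: &'a str) -> Option<u8> {
        // The generated code is identical for every choice of 'a: after
        // erasure, every caller passes the same type, `&'erased str`.
        s.bytes().next()
    }

    fn main() {
        let owned = String::from("hi");
        let static_str: &'static str = "hi";
        assert_eq!(first_byte(&owned), first_byte(static_str));
    }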
- } - } - self.super_rvalue(rvalue, location); + fn visit_const(&mut self, constant: &mut &'tcx ty::Const<'tcx>, _: Location) { + *constant = self.tcx.erase_regions(constant); + } + + fn visit_substs(&mut self, substs: &mut &'tcx Substs<'tcx>, _: Location) { + *substs = self.tcx.erase_regions(substs); } fn visit_closure_substs(&mut self, - substs: &mut ClosureSubsts<'tcx>, + substs: &mut ty::ClosureSubsts<'tcx>, _: Location) { *substs = self.tcx.erase_regions(substs); } diff --git a/src/librustc_mir/transform/generator.rs b/src/librustc_mir/transform/generator.rs new file mode 100644 index 0000000000..729fe46ef3 --- /dev/null +++ b/src/librustc_mir/transform/generator.rs @@ -0,0 +1,842 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This is the implementation of the pass which transforms generators into state machines. +//! +//! MIR generation for generators creates a function which has a self argument which +//! passes by value. This argument is effectively a generator type which only contains upvars and +//! is only used for this argument inside the MIR for the generator. +//! It is passed by value to enable upvars to be moved out of it. Drop elaboration runs on that +//! MIR before this pass and creates drop flags for MIR locals. +//! It will also drop the generator argument (which only consists of upvars) if any of the upvars +//! are moved out of. This pass elaborates the drops of upvars / generator argument in the case +//! that none of the upvars were moved out of. This is because we cannot have any drops of this +//! generator in the MIR, since it is used to create the drop glue for the generator. We'd get +//! infinite recursion otherwise. +//! +//! This pass creates the implementation for the Generator::resume function and the drop shim +//! for the generator based on the MIR input. It converts the generator argument from Self to +//! &mut Self adding derefs in the MIR as needed. It computes the final layout of the generator +//! struct which looks like this: +//! First upvars are stored +//! It is followed by the generator state field. +//! Then finally the MIR locals which are live across a suspension point are stored. +//! +//! struct Generator { +//! upvars..., +//! state: u32, +//! mir_locals..., +//! } +//! +//! This pass computes the meaning of the state field and the MIR locals which are live +//! across a suspension point. There are however two hardcoded generator states: +//! 0 - Generator have not been resumed yet +//! 1 - Generator has returned / is completed +//! 2 - Generator has been poisoned +//! +//! It also rewrites `return x` and `yield y` as setting a new generator state and returning +//! GeneratorState::Complete(x) and GeneratorState::Yielded(y) respectively. +//! MIR locals which are live across a suspension point are moved to the generator struct +//! with references to them being updated with references to the generator struct. +//! +//! The pass creates two functions which have a switch on the generator state giving +//! the action to take. +//! +//! One of them is the implementation of Generator::resume. +//! For generators with state 0 (unresumed) it starts the execution of the generator. +//! 
For generators with state 1 (returned) and state 2 (poisoned) it panics. +//! Otherwise it continues the execution from the last suspension point. +//! +//! The other function is the drop glue for the generator. +//! For generators with state 0 (unresumed) it drops the upvars of the generator. +//! For generators with state 1 (returned) and state 2 (poisoned) it does nothing. +//! Otherwise it drops all the values in scope at the last suspension point. + +use rustc::hir; +use rustc::hir::def_id::DefId; +use rustc::middle::const_val::ConstVal; +use rustc::mir::*; +use rustc::mir::transform::{MirPass, MirSource}; +use rustc::mir::visit::{LvalueContext, Visitor, MutVisitor}; +use rustc::ty::{self, TyCtxt, AdtDef, Ty, GeneratorInterior}; +use rustc::ty::subst::{Kind, Substs}; +use util::dump_mir; +use util::liveness; +use rustc_const_math::ConstInt; +use rustc_data_structures::indexed_vec::Idx; +use rustc_data_structures::indexed_set::IdxSetBuf; +use std::collections::HashMap; +use std::borrow::Cow; +use std::iter::once; +use std::mem; +use transform::simplify; +use transform::no_landing_pads::no_landing_pads; +use dataflow::{self, MaybeStorageLive, state_for_location}; + +pub struct StateTransform; + +struct RenameLocalVisitor { + from: Local, + to: Local, +} + +impl<'tcx> MutVisitor<'tcx> for RenameLocalVisitor { + fn visit_local(&mut self, + local: &mut Local, + _: LvalueContext<'tcx>, + _: Location) { + if *local == self.from { + *local = self.to; + } + } +} + +struct DerefArgVisitor; + +impl<'tcx> MutVisitor<'tcx> for DerefArgVisitor { + fn visit_local(&mut self, + local: &mut Local, + _: LvalueContext<'tcx>, + _: Location) { + assert_ne!(*local, self_arg()); + } + + fn visit_lvalue(&mut self, + lvalue: &mut Lvalue<'tcx>, + context: LvalueContext<'tcx>, + location: Location) { + if *lvalue == Lvalue::Local(self_arg()) { + *lvalue = Lvalue::Projection(Box::new(Projection { + base: lvalue.clone(), + elem: ProjectionElem::Deref, + })); + } else { + self.super_lvalue(lvalue, context, location); + } + } +} + +fn self_arg() -> Local { + Local::new(1) +} + +struct SuspensionPoint { + state: u32, + resume: BasicBlock, + drop: Option, + storage_liveness: liveness::LocalSet, +} + +struct TransformVisitor<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + state_adt_ref: &'tcx AdtDef, + state_substs: &'tcx Substs<'tcx>, + + // The index of the generator state in the generator struct + state_field: usize, + + // Mapping from Local to (type of local, generator struct index) + remap: HashMap, usize)>, + + // A map from a suspension point in a block to the locals which have live storage at that point + storage_liveness: HashMap, + + // A list of suspension points, generated during the transform + suspension_points: Vec, + + // The original RETURN_POINTER local + new_ret_local: Local, +} + +impl<'a, 'tcx> TransformVisitor<'a, 'tcx> { + // Make a GeneratorState rvalue + fn make_state(&self, idx: usize, val: Operand<'tcx>) -> Rvalue<'tcx> { + let adt = AggregateKind::Adt(self.state_adt_ref, idx, self.state_substs, None); + Rvalue::Aggregate(box adt, vec![val]) + } + + // Create a Lvalue referencing a generator struct field + fn make_field(&self, idx: usize, ty: Ty<'tcx>) -> Lvalue<'tcx> { + let base = Lvalue::Local(self_arg()); + let field = Projection { + base: base, + elem: ProjectionElem::Field(Field::new(idx), ty), + }; + Lvalue::Projection(Box::new(field)) + } + + // Create a statement which changes the generator state + fn set_state(&self, state_disc: u32, source_info: SourceInfo) -> Statement<'tcx> { + 
let state = self.make_field(self.state_field, self.tcx.types.u32); + let val = Operand::Constant(box Constant { + span: source_info.span, + ty: self.tcx.types.u32, + literal: Literal::Value { + value: self.tcx.mk_const(ty::Const { + val: ConstVal::Integral(ConstInt::U32(state_disc)), + ty: self.tcx.types.u32 + }), + }, + }); + Statement { + source_info, + kind: StatementKind::Assign(state, Rvalue::Use(val)), + } + } +} + +impl<'a, 'tcx> MutVisitor<'tcx> for TransformVisitor<'a, 'tcx> { + fn visit_local(&mut self, + local: &mut Local, + _: LvalueContext<'tcx>, + _: Location) { + assert_eq!(self.remap.get(local), None); + } + + fn visit_lvalue(&mut self, + lvalue: &mut Lvalue<'tcx>, + context: LvalueContext<'tcx>, + location: Location) { + if let Lvalue::Local(l) = *lvalue { + // Replace an Local in the remap with a generator struct access + if let Some(&(ty, idx)) = self.remap.get(&l) { + *lvalue = self.make_field(idx, ty); + } + } else { + self.super_lvalue(lvalue, context, location); + } + } + + fn visit_basic_block_data(&mut self, + block: BasicBlock, + data: &mut BasicBlockData<'tcx>) { + // Remove StorageLive and StorageDead statements for remapped locals + data.retain_statements(|s| { + match s.kind { + StatementKind::StorageLive(l) | StatementKind::StorageDead(l) => { + !self.remap.contains_key(&l) + } + _ => true + } + }); + + let ret_val = match data.terminator().kind { + TerminatorKind::Return => Some((1, + None, + Operand::Consume(Lvalue::Local(self.new_ret_local)), + None)), + TerminatorKind::Yield { ref value, resume, drop } => Some((0, + Some(resume), + value.clone(), + drop)), + _ => None + }; + + if let Some((state_idx, resume, v, drop)) = ret_val { + let source_info = data.terminator().source_info; + // We must assign the value first in case it gets declared dead below + data.statements.push(Statement { + source_info, + kind: StatementKind::Assign(Lvalue::Local(RETURN_POINTER), + self.make_state(state_idx, v)), + }); + let state = if let Some(resume) = resume { // Yield + let state = 3 + self.suspension_points.len() as u32; + + self.suspension_points.push(SuspensionPoint { + state, + resume, + drop, + storage_liveness: self.storage_liveness.get(&block).unwrap().clone(), + }); + + state + } else { // Return + 1 // state for returned + }; + data.statements.push(self.set_state(state, source_info)); + data.terminator.as_mut().unwrap().kind = TerminatorKind::Return; + } + + self.super_basic_block_data(block, data); + } +} + +fn make_generator_state_argument_indirect<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId, + mir: &mut Mir<'tcx>) { + let gen_ty = mir.local_decls.raw[1].ty; + + let region = ty::ReFree(ty::FreeRegion { + scope: def_id, + bound_region: ty::BoundRegion::BrEnv, + }); + + let region = tcx.mk_region(region); + + let ref_gen_ty = tcx.mk_ref(region, ty::TypeAndMut { + ty: gen_ty, + mutbl: hir::MutMutable + }); + + // Replace the by value generator argument + mir.local_decls.raw[1].ty = ref_gen_ty; + + // Add a deref to accesses of the generator state + DerefArgVisitor.visit_mir(mir); +} + +fn replace_result_variable<'tcx>(ret_ty: Ty<'tcx>, + mir: &mut Mir<'tcx>) -> Local { + let new_ret = LocalDecl { + mutability: Mutability::Mut, + ty: ret_ty, + name: None, + source_info: source_info(mir), + lexical_scope: ARGUMENT_VISIBILITY_SCOPE, + internal: false, + is_user_variable: false, + }; + let new_ret_local = Local::new(mir.local_decls.len()); + mir.local_decls.push(new_ret); + mir.local_decls.swap(0, new_ret_local.index()); + + RenameLocalVisitor { + from: 
RETURN_POINTER, + to: new_ret_local, + }.visit_mir(mir); + + new_ret_local +} + +struct StorageIgnored(liveness::LocalSet); + +impl<'tcx> Visitor<'tcx> for StorageIgnored { + fn visit_statement(&mut self, + _block: BasicBlock, + statement: &Statement<'tcx>, + _location: Location) { + match statement.kind { + StatementKind::StorageLive(l) | + StatementKind::StorageDead(l) => { self.0.remove(&l); } + _ => (), + } + } +} + +fn locals_live_across_suspend_points<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &Mir<'tcx>, + source: MirSource) -> + (liveness::LocalSet, + HashMap) { + let dead_unwinds = IdxSetBuf::new_empty(mir.basic_blocks().len()); + let node_id = source.item_id(); + let analysis = MaybeStorageLive::new(mir); + let storage_live = + dataflow::do_dataflow(tcx, mir, node_id, &[], &dead_unwinds, analysis, + |bd, p| &bd.mir().local_decls[p]); + + let mut ignored = StorageIgnored(IdxSetBuf::new_filled(mir.local_decls.len())); + ignored.visit_mir(mir); + + let mut set = liveness::LocalSet::new_empty(mir.local_decls.len()); + let liveness = liveness::liveness_of_locals(mir); + liveness::dump_mir(tcx, "generator_liveness", source, mir, &liveness); + + let mut storage_liveness_map = HashMap::new(); + + for (block, data) in mir.basic_blocks().iter_enumerated() { + if let TerminatorKind::Yield { .. } = data.terminator().kind { + let loc = Location { + block: block, + statement_index: data.statements.len(), + }; + + let storage_liveness = state_for_location(loc, &analysis, &storage_live); + + storage_liveness_map.insert(block, storage_liveness.clone()); + + let mut live_locals = storage_liveness; + + // Mark locals without storage statements as always having live storage + live_locals.union(&ignored.0); + + // Locals live are live at this point only if they are used across suspension points + // and their storage is live + live_locals.intersect(&liveness.outs[block]); + + // Add the locals life at this suspension point to the set of locals which live across + // any suspension points + set.union(&live_locals); + } + } + + // The generator argument is ignored + set.remove(&self_arg()); + + (set, storage_liveness_map) +} + +fn compute_layout<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + source: MirSource, + interior: GeneratorInterior<'tcx>, + mir: &mut Mir<'tcx>) + -> (HashMap, usize)>, + GeneratorLayout<'tcx>, + HashMap) +{ + // Use a liveness analysis to compute locals which are live across a suspension point + let (live_locals, storage_liveness) = locals_live_across_suspend_points(tcx, mir, source); + + // Erase regions from the types passed in from typeck so we can compare them with + // MIR types + let allowed = tcx.erase_regions(&interior.as_slice()); + + for (local, decl) in mir.local_decls.iter_enumerated() { + // Ignore locals which are internal or not live + if !live_locals.contains(&local) || decl.internal { + continue; + } + + // Sanity check that typeck knows about the type of locals which are + // live across a suspension point + if !allowed.contains(&decl.ty) { + span_bug!(mir.span, + "Broken MIR: generator contains type {} in MIR, \ + but typeck only knows about {}", + decl.ty, + interior); + } + } + + let upvar_len = mir.upvar_decls.len(); + let dummy_local = LocalDecl::new_internal(tcx.mk_nil(), mir.span); + + // Gather live locals and their indices replacing values in mir.local_decls with a dummy + // to avoid changing local indices + let live_decls = live_locals.iter().map(|local| { + let var = mem::replace(&mut mir.local_decls[local], dummy_local.clone()); + (local, var) + }); + 
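
As an aside on the liveness pass above: `locals_live_across_suspend_points` keeps a local only if, at a given `yield`, its storage is live (or it has no storage markers at all) *and* ordinary use-liveness says it is still needed afterwards. A minimal set-based sketch of that filtering, with invented local names (illustration only, not rustc code):

    use std::collections::HashSet;

    fn main() {
        // Locals whose storage is live at one particular `yield` (the real pass
        // also unions in every local that has no StorageLive/StorageDead at all).
        let storage_live: HashSet<&str> = ["a", "b", "tmp"].iter().cloned().collect();
        // Locals that use-liveness says are still needed after this yield.
        let live_after_yield: HashSet<&str> = ["b", "c"].iter().cloned().collect();

        // Only locals in both sets have to be stored in the generator struct.
        let saved: HashSet<&str> =
            storage_live.intersection(&live_after_yield).cloned().collect();

        let expected: HashSet<&str> = ["b"].iter().cloned().collect();
        assert_eq!(saved, expected);
    }
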
+ // Create a map from local indices to generator struct indices. + // These are offset by (upvar_len + 1) because of fields which comes before locals. + // We also create a vector of the LocalDecls of these locals. + let (remap, vars) = live_decls.enumerate().map(|(idx, (local, var))| { + ((local, (var.ty, upvar_len + 1 + idx)), var) + }).unzip(); + + let layout = GeneratorLayout { + fields: vars + }; + + (remap, layout, storage_liveness) +} + +fn insert_switch<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &mut Mir<'tcx>, + cases: Vec<(u32, BasicBlock)>, + transform: &TransformVisitor<'a, 'tcx>, + default: TerminatorKind<'tcx>) { + let default_block = insert_term_block(mir, default); + + let switch = TerminatorKind::SwitchInt { + discr: Operand::Consume(transform.make_field(transform.state_field, tcx.types.u32)), + switch_ty: tcx.types.u32, + values: Cow::from(cases.iter().map(|&(i, _)| ConstInt::U32(i)).collect::>()), + targets: cases.iter().map(|&(_, d)| d).chain(once(default_block)).collect(), + }; + + let source_info = source_info(mir); + mir.basic_blocks_mut().raw.insert(0, BasicBlockData { + statements: Vec::new(), + terminator: Some(Terminator { + source_info, + kind: switch, + }), + is_cleanup: false, + }); + + let blocks = mir.basic_blocks_mut().iter_mut(); + + for target in blocks.flat_map(|b| b.terminator_mut().successors_mut()) { + *target = BasicBlock::new(target.index() + 1); + } +} + +fn elaborate_generator_drops<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId, + mir: &mut Mir<'tcx>) { + use util::elaborate_drops::{elaborate_drop, Unwind}; + use util::patch::MirPatch; + use shim::DropShimElaborator; + + // Note that `elaborate_drops` only drops the upvars of a generator, and + // this is ok because `open_drop` can only be reached within that own + // generator's resume function. 
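
To make the remap arithmetic above concrete: `compute_layout` turns each saved local into a field of the generator type placed after the captured upvars and the state discriminant, which is why the remapped index is `upvar_len + 1 + idx`, and `insert_switch` then prepends a block that does a `SwitchInt` on that state field, shifting every existing block index up by one. A schematic layout for one upvar and two saved locals (field names are invented; real generator layouts are computed by the compiler, not written by hand):

    struct GeneratorRepr {
        upvar_0: String, // captured upvars occupy indices 0..upvar_len
        state: u32,      // the state discriminant sits at index upvar_len
        saved_0: u32,    // saved locals follow at upvar_len + 1 + idx
        saved_1: bool,
    }

    fn main() {
        let g = GeneratorRepr {
            upvar_0: String::from("captured"),
            state: 0,
            saved_0: 7,
            saved_1: true,
        };
        // After the pass, a remapped local access such as `_3` is rewritten into
        // a field projection on the generator struct, e.g. the `saved_0` field.
        assert_eq!(g.state, 0);
        assert_eq!(g.saved_0, 7);
        assert!(g.saved_1);
        assert_eq!(g.upvar_0, "captured");
    }
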
+ + let param_env = tcx.param_env(def_id); + let gen = self_arg(); + + for block in mir.basic_blocks().indices() { + let (target, unwind, source_info) = match mir.basic_blocks()[block].terminator() { + &Terminator { + source_info, + kind: TerminatorKind::Drop { + location: Lvalue::Local(local), + target, + unwind + } + } if local == gen => (target, unwind, source_info), + _ => continue, + }; + let unwind = if let Some(unwind) = unwind { + Unwind::To(unwind) + } else { + Unwind::InCleanup + }; + let patch = { + let mut elaborator = DropShimElaborator { + mir: &mir, + patch: MirPatch::new(mir), + tcx, + param_env + }; + elaborate_drop( + &mut elaborator, + source_info, + &Lvalue::Local(gen), + (), + target, + unwind, + block + ); + elaborator.patch + }; + patch.apply(mir); + } +} + +fn create_generator_drop_shim<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + transform: &TransformVisitor<'a, 'tcx>, + def_id: DefId, + source: MirSource, + gen_ty: Ty<'tcx>, + mir: &Mir<'tcx>, + drop_clean: BasicBlock) -> Mir<'tcx> { + let mut mir = mir.clone(); + + let source_info = source_info(&mir); + + let mut cases = create_cases(&mut mir, transform, |point| point.drop); + + cases.insert(0, (0, drop_clean)); + + // The returned state (1) and the poisoned state (2) falls through to + // the default case which is just to return + + insert_switch(tcx, &mut mir, cases, &transform, TerminatorKind::Return); + + for block in mir.basic_blocks_mut() { + let kind = &mut block.terminator_mut().kind; + if let TerminatorKind::GeneratorDrop = *kind { + *kind = TerminatorKind::Return; + } + } + + // Replace the return variable + mir.return_ty = tcx.mk_nil(); + mir.local_decls[RETURN_POINTER] = LocalDecl { + mutability: Mutability::Mut, + ty: tcx.mk_nil(), + name: None, + source_info, + lexical_scope: ARGUMENT_VISIBILITY_SCOPE, + internal: false, + is_user_variable: false, + }; + + make_generator_state_argument_indirect(tcx, def_id, &mut mir); + + // Change the generator argument from &mut to *mut + mir.local_decls[self_arg()] = LocalDecl { + mutability: Mutability::Mut, + ty: tcx.mk_ptr(ty::TypeAndMut { + ty: gen_ty, + mutbl: hir::Mutability::MutMutable, + }), + name: None, + source_info, + lexical_scope: ARGUMENT_VISIBILITY_SCOPE, + internal: false, + is_user_variable: false, + }; + + no_landing_pads(tcx, &mut mir); + + // Make sure we remove dead blocks to remove + // unrelated code from the resume part of the function + simplify::remove_dead_blocks(&mut mir); + + dump_mir(tcx, None, "generator_drop", &0, source, &mut mir); + + mir +} + +fn insert_term_block<'tcx>(mir: &mut Mir<'tcx>, kind: TerminatorKind<'tcx>) -> BasicBlock { + let term_block = BasicBlock::new(mir.basic_blocks().len()); + let source_info = source_info(mir); + mir.basic_blocks_mut().push(BasicBlockData { + statements: Vec::new(), + terminator: Some(Terminator { + source_info, + kind, + }), + is_cleanup: false, + }); + term_block +} + +fn insert_panic_block<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &mut Mir<'tcx>, + message: AssertMessage<'tcx>) -> BasicBlock { + let assert_block = BasicBlock::new(mir.basic_blocks().len()); + let term = TerminatorKind::Assert { + cond: Operand::Constant(box Constant { + span: mir.span, + ty: tcx.types.bool, + literal: Literal::Value { + value: tcx.mk_const(ty::Const { + val: ConstVal::Bool(false), + ty: tcx.types.bool + }), + }, + }), + expected: true, + msg: message, + target: assert_block, + cleanup: None, + }; + + let source_info = source_info(mir); + mir.basic_blocks_mut().push(BasicBlockData { + statements: 
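
The drop shim built by `create_generator_drop_shim` is a clone of the transformed MIR repurposed as a destructor: state 0 branches to the `drop_clean` block that drops only the upvars, each suspension state runs its drop ladder, and the returned (1) and poisoned (2) states fall through to a plain return; `GeneratorDrop` terminators become `Return`, the return type becomes `()`, and the generator argument is turned into a raw `*mut`. A hand-written sketch of that dispatch for a single suspension point (state 3); the names are illustrative and a safe `&mut` stands in for the raw pointer:

    struct Gen {
        state: u32,
        saved: Option<String>, // something that actually needs dropping
    }

    fn generator_drop(gen: &mut Gen) {
        match gen.state {
            0 => {
                // Unresumed: only the upvars need dropping (the drop_clean block).
            }
            3 => {
                // Suspended: run the drop ladder for locals saved at this point.
                gen.saved = None;
            }
            // Returned (1) and poisoned (2) fall through to the default: return.
            _ => {}
        }
    }

    fn main() {
        let mut g = Gen { state: 3, saved: Some(String::from("buf")) };
        generator_drop(&mut g);
        assert!(g.saved.is_none());
    }
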
Vec::new(), + terminator: Some(Terminator { + source_info, + kind: term, + }), + is_cleanup: false, + }); + + assert_block +} + +fn create_generator_resume_function<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + transform: TransformVisitor<'a, 'tcx>, + def_id: DefId, + source: MirSource, + mir: &mut Mir<'tcx>) { + // Poison the generator when it unwinds + for block in mir.basic_blocks_mut() { + let source_info = block.terminator().source_info; + if let &TerminatorKind::Resume = &block.terminator().kind { + block.statements.push(transform.set_state(1, source_info)); + } + } + + let mut cases = create_cases(mir, &transform, |point| Some(point.resume)); + + // Jump to the entry point on the 0 state + cases.insert(0, (0, BasicBlock::new(0))); + // Panic when resumed on the returned (1) state + cases.insert(1, (1, insert_panic_block(tcx, mir, AssertMessage::GeneratorResumedAfterReturn))); + // Panic when resumed on the poisoned (2) state + cases.insert(2, (2, insert_panic_block(tcx, mir, AssertMessage::GeneratorResumedAfterPanic))); + + insert_switch(tcx, mir, cases, &transform, TerminatorKind::Unreachable); + + make_generator_state_argument_indirect(tcx, def_id, mir); + + no_landing_pads(tcx, mir); + + // Make sure we remove dead blocks to remove + // unrelated code from the drop part of the function + simplify::remove_dead_blocks(mir); + + dump_mir(tcx, None, "generator_resume", &0, source, mir); +} + +fn source_info<'a, 'tcx>(mir: &Mir<'tcx>) -> SourceInfo { + SourceInfo { + span: mir.span, + scope: ARGUMENT_VISIBILITY_SCOPE, + } +} + +fn insert_clean_drop<'a, 'tcx>(mir: &mut Mir<'tcx>) -> BasicBlock { + let return_block = insert_term_block(mir, TerminatorKind::Return); + + // Create a block to destroy an unresumed generators. This can only destroy upvars. 
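
For the resume shim above, the key behaviours are the state dispatch (0 re-enters the original entry block, the returned and poisoned states hit the panic blocks from `insert_panic_block`, each suspension state jumps to its resume block) and the poisoning write appended in front of unwind (`Resume`) paths so that a later resume panics cleanly instead of re-entering torn state. A rough plain-Rust analogue of the poisoning half, assuming the "2 = poisoned" numbering used in the comments; `catch_unwind`/`resume_unwind` stand in for the MIR cleanup edge:

    use std::panic::{self, AssertUnwindSafe};

    struct Gen {
        state: u32, // 0 = unresumed, 1 = returned, 2 = poisoned
    }

    impl Gen {
        fn resume(&mut self) {
            match self.state {
                0 => {
                    // If the body unwinds, record the poisoned state before the
                    // panic continues -- the analogue of the statement appended
                    // in front of Resume terminators.
                    if let Err(payload) = panic::catch_unwind(AssertUnwindSafe(do_work)) {
                        self.state = 2;
                        panic::resume_unwind(payload);
                    }
                    self.state = 1;
                }
                1 => panic!("generator resumed after completion"),
                2 => panic!("generator resumed after panicking"),
                _ => unreachable!(),
            }
        }
    }

    fn do_work() {
        panic!("body panicked");
    }

    fn main() {
        let mut g = Gen { state: 0 };
        let first = panic::catch_unwind(AssertUnwindSafe(|| g.resume()));
        assert!(first.is_err());
        assert_eq!(g.state, 2); // poisoned: a second resume would report it
    }
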
+ let drop_clean = BasicBlock::new(mir.basic_blocks().len()); + let term = TerminatorKind::Drop { + location: Lvalue::Local(self_arg()), + target: return_block, + unwind: None, + }; + let source_info = source_info(mir); + mir.basic_blocks_mut().push(BasicBlockData { + statements: Vec::new(), + terminator: Some(Terminator { + source_info, + kind: term, + }), + is_cleanup: false, + }); + + drop_clean +} + +fn create_cases<'a, 'tcx, F>(mir: &mut Mir<'tcx>, + transform: &TransformVisitor<'a, 'tcx>, + target: F) -> Vec<(u32, BasicBlock)> + where F: Fn(&SuspensionPoint) -> Option { + let source_info = source_info(mir); + + transform.suspension_points.iter().filter_map(|point| { + // Find the target for this suspension point, if applicable + target(point).map(|target| { + let block = BasicBlock::new(mir.basic_blocks().len()); + let mut statements = Vec::new(); + + // Create StorageLive instructions for locals with live storage + for i in 0..(mir.local_decls.len()) { + let l = Local::new(i); + if point.storage_liveness.contains(&l) && !transform.remap.contains_key(&l) { + statements.push(Statement { + source_info, + kind: StatementKind::StorageLive(l), + }); + } + } + + // Then jump to the real target + mir.basic_blocks_mut().push(BasicBlockData { + statements, + terminator: Some(Terminator { + source_info, + kind: TerminatorKind::Goto { + target, + }, + }), + is_cleanup: false, + }); + + (point.state, block) + }) + }).collect() +} + +impl MirPass for StateTransform { + fn run_pass<'a, 'tcx>(&self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + source: MirSource, + mir: &mut Mir<'tcx>) { + let yield_ty = if let Some(yield_ty) = mir.yield_ty { + yield_ty + } else { + // This only applies to generators + return + }; + + assert!(mir.generator_drop.is_none()); + + let node_id = source.item_id(); + let def_id = tcx.hir.local_def_id(source.item_id()); + let hir_id = tcx.hir.node_to_hir_id(node_id); + + // Get the interior types which typeck computed + let interior = *tcx.typeck_tables_of(def_id).generator_interiors().get(hir_id).unwrap(); + + // The first argument is the generator type passed by value + let gen_ty = mir.local_decls.raw[1].ty; + + // Compute GeneratorState + let state_did = tcx.lang_items().gen_state().unwrap(); + let state_adt_ref = tcx.adt_def(state_did); + let state_substs = tcx.mk_substs([Kind::from(yield_ty), + Kind::from(mir.return_ty)].iter()); + let ret_ty = tcx.mk_adt(state_adt_ref, state_substs); + + // We rename RETURN_POINTER which has type mir.return_ty to new_ret_local + // RETURN_POINTER then is a fresh unused local with type ret_ty. + let new_ret_local = replace_result_variable(ret_ty, mir); + + // Extract locals which are live across suspension point into `layout` + // `remap` gives a mapping from local indices onto generator struct indices + // `storage_liveness` tells us which locals have live storage at suspension points + let (remap, layout, storage_liveness) = compute_layout(tcx, source, interior, mir); + + let state_field = mir.upvar_decls.len(); + + // Run the transformation which converts Lvalues from Local to generator struct + // accesses for locals in `remap`. + // It also rewrites `return x` and `yield y` as writing a new generator state and returning + // GeneratorState::Complete(x) and GeneratorState::Yielded(y) respectively. 
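
As a reference point for the rewrite described in the comment above, here is a minimal hand-written equivalent of a transformed generator body, where `yield` and `return` have become a write of `GeneratorState::Yielded`/`Complete`, a state update, and a plain return. The type, the single suspension point, and all names are invented for illustration; the state numbering (0 unresumed, 1 returned, 2 poisoned, 3+ suspended) follows the comments in this pass:

    enum GeneratorState<Y, R> {
        Yielded(Y),
        Complete(R),
    }

    struct Counter {
        state: u32, // 0 = unresumed, 1 = returned, 2 = poisoned, 3.. = suspended
        n: u32,     // a local that lives across the suspension point
    }

    impl Counter {
        fn resume(&mut self) -> GeneratorState<u32, &'static str> {
            match self.state {
                0 => {
                    self.n = 1;
                    // `yield self.n` becomes: write Yielded, set state, return.
                    self.state = 3;
                    GeneratorState::Yielded(self.n)
                }
                3 => {
                    // Resumption continues after the yield point...
                    self.state = 1; // `return` sets the "returned" state
                    GeneratorState::Complete("done")
                }
                1 => panic!("generator resumed after completion"),
                2 => panic!("generator resumed after panicking"),
                _ => unreachable!(),
            }
        }
    }

    fn main() {
        let mut g = Counter { state: 0, n: 0 };
        match g.resume() {
            GeneratorState::Yielded(1) => {}
            _ => panic!("expected Yielded(1)"),
        }
        match g.resume() {
            GeneratorState::Complete("done") => {}
            _ => panic!("expected Complete"),
        }
    }
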
+ let mut transform = TransformVisitor { + tcx, + state_adt_ref, + state_substs, + remap, + storage_liveness, + suspension_points: Vec::new(), + new_ret_local, + state_field, + }; + transform.visit_mir(mir); + + // Update our MIR struct to reflect the changed we've made + mir.return_ty = ret_ty; + mir.yield_ty = None; + mir.arg_count = 1; + mir.spread_arg = None; + mir.generator_layout = Some(layout); + + // Insert `drop(generator_struct)` which is used to drop upvars for generators in + // the unresumed (0) state. + // This is expanded to a drop ladder in `elaborate_generator_drops`. + let drop_clean = insert_clean_drop(mir); + + dump_mir(tcx, None, "generator_pre-elab", &0, source, mir); + + // Expand `drop(generator_struct)` to a drop ladder which destroys upvars. + // If any upvars are moved out of, drop elaboration will handle upvar destruction. + // However we need to also elaborate the code generated by `insert_clean_drop`. + elaborate_generator_drops(tcx, def_id, mir); + + dump_mir(tcx, None, "generator_post-transform", &0, source, mir); + + // Create a copy of our MIR and use it to create the drop shim for the generator + let drop_shim = create_generator_drop_shim(tcx, + &transform, + def_id, + source, + gen_ty, + &mir, + drop_clean); + + mir.generator_drop = Some(box drop_shim); + + // Create the Generator::resume function + create_generator_resume_function(tcx, transform, def_id, source, mir); + } +} diff --git a/src/librustc_mir/transform/inline.rs b/src/librustc_mir/transform/inline.rs index 53b46dd268..9d32861aed 100644 --- a/src/librustc_mir/transform/inline.rs +++ b/src/librustc_mir/transform/inline.rs @@ -18,7 +18,7 @@ use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use rustc::mir::*; use rustc::mir::transform::{MirPass, MirSource}; use rustc::mir::visit::*; -use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::{self, Ty, TyCtxt, Instance}; use rustc::ty::subst::{Subst,Substs}; use std::collections::VecDeque; @@ -78,7 +78,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { let mut callsites = VecDeque::new(); // Only do inlining into fn bodies. - if let MirSource::Fn(_) = self.source { + if let MirSource::Fn(caller_id) = self.source { for (bb, bb_data) in caller_mir.basic_blocks().iter_enumerated() { // Don't inline calls that are in cleanup blocks. if bb_data.is_cleanup { continue; } @@ -87,15 +87,23 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { let terminator = bb_data.terminator(); if let TerminatorKind::Call { func: Operand::Constant(ref f), .. } = terminator.kind { - if let ty::TyFnDef(callee_def_id, substs) = f.ty.sty { - callsites.push_back(CallSite { - callee: callee_def_id, - substs, - bb, - location: terminator.source_info - }); + if let ty::TyFnDef(callee_def_id, substs) = f.ty.sty { + let caller_def_id = self.tcx.hir.local_def_id(caller_id); + let param_env = self.tcx.param_env(caller_def_id); + + if let Some(instance) = Instance::resolve(self.tcx, + param_env, + callee_def_id, + substs) { + callsites.push_back(CallSite { + callee: instance.def_id(), + substs: instance.substs, + bb, + location: terminator.source_info + }); + } + } } - } } } @@ -180,6 +188,10 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { return false; } + // Cannot inline generators which haven't been transformed yet + if callee_mir.yield_ty.is_some() { + return false; + } let attrs = tcx.get_attrs(callsite.callee); let hint = attr::find_inline_attr(None, &attrs[..]); @@ -334,7 +346,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { TerminatorKind::Call { args, destination: Some(destination), cleanup, .. 
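
The inliner hunk above now resolves each constant callee through `Instance::resolve` with the caller's parameter environment before recording a call site, so calls whose concrete target only becomes known after substitution (trait methods in particular) can still be inlined, and it refuses to inline callees that still carry a `yield_ty`, i.e. generators that have not been transformed yet. A standalone illustration of what resolution buys, in plain Rust with invented names and no compiler APIs:

    trait Describe {
        fn describe(&self) -> String;
    }

    struct Point;

    impl Describe for Point {
        fn describe(&self) -> String {
            String::from("a point")
        }
    }

    fn describe_twice<T: Describe>(value: &T) -> String {
        // Inside this generic body the callee is only "some impl of Describe";
        // once T = Point is substituted, the call resolves to Point's impl,
        // which is the kind of concrete target the inliner now records.
        format!("{}, {}", value.describe(), value.describe())
    }

    fn main() {
        assert_eq!(describe_twice(&Point), "a point, a point");
    }
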
} => { debug!("Inlined {:?} into {:?}", callsite.callee, self.source); - let is_box_free = Some(callsite.callee) == self.tcx.lang_items.box_free_fn(); + let is_box_free = Some(callsite.callee) == self.tcx.lang_items().box_free_fn(); let mut local_map = IndexVec::with_capacity(callee_mir.local_decls.len()); let mut scope_map = IndexVec::with_capacity(callee_mir.visibility_scopes.len()); @@ -585,16 +597,6 @@ impl<'a, 'tcx> Integrator<'a, 'tcx> { new } - fn update_local(&self, local: Local) -> Option { - let idx = local.index(); - if idx < (self.args.len() + 1) { - return None; - } - let idx = idx - (self.args.len() + 1); - let local = Local::new(idx); - self.local_map.get(local).cloned() - } - fn arg_index(&self, arg: Local) -> Option { let idx = arg.index(); if idx > 0 && idx <= self.args.len() { @@ -606,32 +608,41 @@ impl<'a, 'tcx> Integrator<'a, 'tcx> { } impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> { + fn visit_local(&mut self, + local: &mut Local, + _ctxt: LvalueContext<'tcx>, + _location: Location) { + if *local == RETURN_POINTER { + match self.destination { + Lvalue::Local(l) => { + *local = l; + return; + }, + ref lval => bug!("Return lvalue is {:?}, not local", lval) + } + } + let idx = local.index() - 1; + if idx < self.args.len() { + match self.args[idx] { + Operand::Consume(Lvalue::Local(l)) => { + *local = l; + return; + }, + ref op => bug!("Arg operand `{:?}` is {:?}, not local", idx, op) + } + } + *local = self.local_map[Local::new(idx - self.args.len())]; + } + fn visit_lvalue(&mut self, lvalue: &mut Lvalue<'tcx>, _ctxt: LvalueContext<'tcx>, _location: Location) { - if let Lvalue::Local(ref mut local) = *lvalue { - if let Some(l) = self.update_local(*local) { - // Temp or Var; update the local reference - *local = l; - return; - } - } - if let Lvalue::Local(local) = *lvalue { - if local == RETURN_POINTER { - // Return pointer; update the lvalue itself - *lvalue = self.destination.clone(); - } else if local.index() < (self.args.len() + 1) { - // Argument, once again update the the lvalue itself - let idx = local.index() - 1; - if let Operand::Consume(ref lval) = self.args[idx] { - *lvalue = lval.clone(); - } else { - bug!("Arg operand `{:?}` is not an Lvalue use.", idx) - } - } + if let Lvalue::Local(RETURN_POINTER) = *lvalue { + // Return pointer; update the lvalue itself + *lvalue = self.destination.clone(); } else { - self.super_lvalue(lvalue, _ctxt, _location) + self.super_lvalue(lvalue, _ctxt, _location); } } @@ -657,6 +668,8 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> { self.super_terminator_kind(block, kind, loc); match *kind { + TerminatorKind::GeneratorDrop | + TerminatorKind::Yield { .. 
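
The new `Integrator::visit_local` above folds the old lvalue fix-ups into one index calculation: the callee's return local becomes the call's destination local, callee argument locals map to the caller locals passed as operands, and every remaining local goes through `local_map`. A small sketch of just that arithmetic with plain `usize` indices (the numbers are hypothetical, not real MIR locals):

    // Local 0 is the return place, locals 1..=args.len() are arguments, and the
    // remaining callee locals have been appended to the caller via `local_map`.
    fn remap_local(local: usize, destination: usize, args: &[usize], local_map: &[usize]) -> usize {
        if local == 0 {
            return destination; // RETURN_POINTER -> the call's destination local
        }
        let idx = local - 1;
        if idx < args.len() {
            return args[idx]; // argument -> the caller local passed as that operand
        }
        local_map[idx - args.len()] // temp/var -> its newly allocated caller local
    }

    fn main() {
        let destination = 7;      // caller local receiving the return value
        let args = [3, 4];        // caller locals passed as the two arguments
        let local_map = [10, 11]; // where the callee's temps landed in the caller
        assert_eq!(remap_local(0, destination, &args, &local_map), 7);
        assert_eq!(remap_local(1, destination, &args, &local_map), 3);
        assert_eq!(remap_local(2, destination, &args, &local_map), 4);
        assert_eq!(remap_local(3, destination, &args, &local_map), 10);
    }
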
} => bug!(), TerminatorKind::Goto { ref mut target} => { *target = self.update_target(*target); } diff --git a/src/librustc_mir/transform/mod.rs b/src/librustc_mir/transform/mod.rs index d8dffa0366..34cc3a289d 100644 --- a/src/librustc_mir/transform/mod.rs +++ b/src/librustc_mir/transform/mod.rs @@ -13,7 +13,7 @@ use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; use rustc::mir::Mir; use rustc::mir::transform::{MirPassIndex, MirSuite, MirSource, MIR_CONST, MIR_VALIDATED, MIR_OPTIMIZED}; -use rustc::ty::{self, TyCtxt}; +use rustc::ty::TyCtxt; use rustc::ty::maps::Providers; use rustc::ty::steal::Steal; use rustc::hir; @@ -21,11 +21,12 @@ use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; use rustc::util::nodemap::DefIdSet; use std::rc::Rc; use syntax::ast; -use syntax_pos::{DUMMY_SP, Span}; +use syntax_pos::Span; use transform; pub mod add_validation; pub mod clean_end_regions; +pub mod check_unsafety; pub mod simplify_branches; pub mod simplify; pub mod erase_regions; @@ -40,13 +41,16 @@ pub mod dump_mir; pub mod deaggregator; pub mod instcombine; pub mod copy_prop; +pub mod generator; pub mod inline; pub mod nll; pub(crate) fn provide(providers: &mut Providers) { self::qualify_consts::provide(providers); + self::check_unsafety::provide(providers); *providers = Providers { mir_keys, + mir_built, mir_const, mir_validated, optimized_mir, @@ -100,9 +104,17 @@ fn mir_keys<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, krate: CrateNum) Rc::new(set) } +fn mir_built<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx Steal> { + let mir = build::mir_build(tcx, def_id); + tcx.alloc_steal_mir(mir) +} + fn mir_const<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx Steal> { - let mut mir = build::mir_build(tcx, def_id); + // Unsafety check uses the raw mir, so make sure it is run + let _ = tcx.unsafety_check_result(def_id); + let source = MirSource::from_local_def_id(tcx, def_id); + let mut mir = tcx.mir_built(def_id).steal(); transform::run_suite(tcx, source, MIR_CONST, &mut mir); tcx.alloc_steal_mir(mir) } @@ -111,9 +123,8 @@ fn mir_validated<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx let source = MirSource::from_local_def_id(tcx, def_id); if let MirSource::Const(_) = source { // Ensure that we compute the `mir_const_qualif` for constants at - // this point, before we steal the mir-const result. We don't - // directly need the result or `mir_const_qualif`, so we can just force it. - ty::queries::mir_const_qualif::force(tcx, DUMMY_SP, def_id); + // this point, before we steal the mir-const result. + let _ = tcx.mir_const_qualif(def_id); } let mut mir = tcx.mir_const(def_id).steal(); @@ -124,8 +135,8 @@ fn mir_validated<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx fn optimized_mir<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx Mir<'tcx> { // (Mir-)Borrowck uses `mir_validated`, so we have to force it to // execute before we can steal. 
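
The query refactoring above splits MIR construction into a `mir_built` query and has each later stage *steal* the previous stage's result, which is why `mir_const` first forces `unsafety_check_result`, and `mir_validated`/`optimized_mir` force `mir_const_qualif` and `mir_borrowck`/`borrowck`: anything that still needs the earlier MIR must run before it is stolen. A toy model of that steal-once discipline (not the real `Steal` type):

    use std::cell::RefCell;

    // Readers may borrow until some later pass takes ownership; touching the
    // cell after that is a bug, which the real compiler turns into an ICE.
    struct Steal<T>(RefCell<Option<T>>);

    impl<T> Steal<T> {
        fn new(value: T) -> Self {
            Steal(RefCell::new(Some(value)))
        }
        fn with<R>(&self, f: impl FnOnce(&T) -> R) -> R {
            f(self.0.borrow().as_ref().expect("value already stolen"))
        }
        fn steal(&self) -> T {
            self.0.borrow_mut().take().expect("value already stolen")
        }
    }

    fn main() {
        let mir_built = Steal::new(String::from("raw MIR"));
        // The unsafety check must read the raw MIR *before* mir_const steals it.
        let unsafety_report = mir_built.with(|mir| mir.len());
        let mir_const = mir_built.steal(); // any later borrow of mir_built panics
        assert_eq!(unsafety_report, 7);
        assert_eq!(mir_const, "raw MIR");
    }
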
- ty::queries::mir_borrowck::force(tcx, DUMMY_SP, def_id); - ty::queries::borrowck::force(tcx, DUMMY_SP, def_id); + let _ = tcx.mir_borrowck(def_id); + let _ = tcx.borrowck(def_id); let mut mir = tcx.mir_validated(def_id).steal(); let source = MirSource::from_local_def_id(tcx, def_id); diff --git a/src/librustc_mir/transform/nll.rs b/src/librustc_mir/transform/nll/mod.rs similarity index 91% rename from src/librustc_mir/transform/nll.rs rename to src/librustc_mir/transform/nll/mod.rs index bd02788df1..d4a5354c78 100644 --- a/src/librustc_mir/transform/nll.rs +++ b/src/librustc_mir/transform/nll/mod.rs @@ -15,12 +15,15 @@ use rustc::mir::{Mir, Location, Rvalue, BasicBlock, Statement, StatementKind}; use rustc::mir::visit::{MutVisitor, Lookup}; use rustc::mir::transform::{MirPass, MirSource}; use rustc::infer::{self, InferCtxt}; +use rustc::util::nodemap::FxHashSet; +use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use syntax_pos::DUMMY_SP; use std::collections::HashMap; #[allow(dead_code)] struct NLLVisitor<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { lookup_map: HashMap, + regions: IndexVec, infcx: InferCtxt<'a, 'gcx, 'tcx>, } @@ -29,6 +32,7 @@ impl<'a, 'gcx, 'tcx> NLLVisitor<'a, 'gcx, 'tcx> { NLLVisitor { infcx, lookup_map: HashMap::new(), + regions: IndexVec::new(), } } @@ -36,8 +40,9 @@ impl<'a, 'gcx, 'tcx> NLLVisitor<'a, 'gcx, 'tcx> { self.lookup_map } - fn renumber_regions(&self, value: &T) -> T where T: TypeFoldable<'tcx> { + fn renumber_regions(&mut self, value: &T) -> T where T: TypeFoldable<'tcx> { self.infcx.tcx.fold_regions(value, &mut false, |_region, _depth| { + self.regions.push(Region::default()); self.infcx.next_region_var(infer::MiscVariable(DUMMY_SP)) }) } @@ -143,4 +148,11 @@ impl MirPass for NLL { let _results = visitor.into_results(); }) } -} \ No newline at end of file +} + +#[derive(Clone, Debug, Default, PartialEq, Eq)] +struct Region { + points: FxHashSet, +} + +newtype_index!(RegionIndex); diff --git a/src/librustc_mir/transform/no_landing_pads.rs b/src/librustc_mir/transform/no_landing_pads.rs index 8595663ba1..fa6bb64487 100644 --- a/src/librustc_mir/transform/no_landing_pads.rs +++ b/src/librustc_mir/transform/no_landing_pads.rs @@ -43,6 +43,8 @@ impl<'tcx> MutVisitor<'tcx> for NoLandingPads { TerminatorKind::Resume | TerminatorKind::Return | TerminatorKind::Unreachable | + TerminatorKind::GeneratorDrop | + TerminatorKind::Yield { .. } | TerminatorKind::SwitchInt { .. } => { /* nothing to do */ }, diff --git a/src/librustc_mir/transform/promote_consts.rs b/src/librustc_mir/transform/promote_consts.rs index 1665cb2f15..339ea8a414 100644 --- a/src/librustc_mir/transform/promote_consts.rs +++ b/src/librustc_mir/transform/promote_consts.rs @@ -83,52 +83,49 @@ struct TempCollector<'tcx> { } impl<'tcx> Visitor<'tcx> for TempCollector<'tcx> { - fn visit_lvalue(&mut self, - lvalue: &Lvalue<'tcx>, - context: LvalueContext<'tcx>, - location: Location) { - self.super_lvalue(lvalue, context, location); - if let Lvalue::Local(index) = *lvalue { - // We're only interested in temporaries - if self.mir.local_kind(index) != LocalKind::Temp { - return; - } + fn visit_local(&mut self, + &index: &Local, + context: LvalueContext<'tcx>, + location: Location) { + // We're only interested in temporaries + if self.mir.local_kind(index) != LocalKind::Temp { + return; + } - // Ignore drops, if the temp gets promoted, - // then it's constant and thus drop is noop. - // Storage live ranges are also irrelevant. 
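
For the `nll/mod.rs` change earlier in this hunk: the visitor now mints one inference `Region` per renumbered region variable, and at this stage a region is simply the set of MIR `Location`s it must contain, addressed by a dense `RegionIndex`. A plain-Rust sketch of that representation (the real code uses `FxHashSet` and the `newtype_index!` macro; everything here is illustrative):

    use std::collections::HashSet;

    #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
    struct Location {
        block: usize,
        statement_index: usize,
    }

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    struct RegionIndex(usize); // stands in for newtype_index!(RegionIndex)

    #[derive(Default, Debug)]
    struct Region {
        points: HashSet<Location>, // the locations this region is live at
    }

    fn main() {
        let mut regions: Vec<Region> = Vec::new();
        // renumber_regions pushes a fresh, empty Region per region variable.
        regions.push(Region::default());
        let r0 = RegionIndex(regions.len() - 1);
        regions[r0.0].points.insert(Location { block: 0, statement_index: 2 });
        assert_eq!(regions[r0.0].points.len(), 1);
        assert_eq!(regions[r0.0].points.iter().next().unwrap().block, 0);
    }
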
- if context.is_drop() || context.is_storage_marker() { - return; - } + // Ignore drops, if the temp gets promoted, + // then it's constant and thus drop is noop. + // Storage live ranges are also irrelevant. + if context.is_drop() || context.is_storage_marker() { + return; + } - let temp = &mut self.temps[index]; - if *temp == TempState::Undefined { - match context { - LvalueContext::Store | - LvalueContext::Call => { - *temp = TempState::Defined { - location, - uses: 0 - }; - return; - } - _ => { /* mark as unpromotable below */ } - } - } else if let TempState::Defined { ref mut uses, .. } = *temp { - // We always allow borrows, even mutable ones, as we need - // to promote mutable borrows of some ZSTs e.g. `&mut []`. - let allowed_use = match context { - LvalueContext::Borrow {..} => true, - _ => context.is_nonmutating_use() - }; - if allowed_use { - *uses += 1; + let temp = &mut self.temps[index]; + if *temp == TempState::Undefined { + match context { + LvalueContext::Store | + LvalueContext::Call => { + *temp = TempState::Defined { + location, + uses: 0 + }; return; } - /* mark as unpromotable below */ + _ => { /* mark as unpromotable below */ } } - *temp = TempState::Unpromotable; + } else if let TempState::Defined { ref mut uses, .. } = *temp { + // We always allow borrows, even mutable ones, as we need + // to promote mutable borrows of some ZSTs e.g. `&mut []`. + let allowed_use = match context { + LvalueContext::Borrow {..} => true, + _ => context.is_nonmutating_use() + }; + if allowed_use { + *uses += 1; + return; + } + /* mark as unpromotable below */ } + *temp = TempState::Unpromotable; } fn visit_source_info(&mut self, source_info: &SourceInfo) { @@ -326,16 +323,13 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { /// Replaces all temporaries with their promoted counterparts. impl<'a, 'tcx> MutVisitor<'tcx> for Promoter<'a, 'tcx> { - fn visit_lvalue(&mut self, - lvalue: &mut Lvalue<'tcx>, - context: LvalueContext<'tcx>, - location: Location) { - if let Lvalue::Local(ref mut temp) = *lvalue { - if self.source.local_kind(*temp) == LocalKind::Temp { - *temp = self.promote_temp(*temp); - } + fn visit_local(&mut self, + local: &mut Local, + _: LvalueContext<'tcx>, + _: Location) { + if self.source.local_kind(*local) == LocalKind::Temp { + *local = self.promote_temp(*local); } - self.super_lvalue(lvalue, context, location); } } @@ -386,12 +380,13 @@ pub fn promote_candidates<'a, 'tcx>(mir: &mut Mir<'tcx>, let mut promoter = Promoter { promoted: Mir::new( IndexVec::new(), - Some(VisibilityScopeData { - span, - parent_scope: None - }).into_iter().collect(), + // FIXME: maybe try to filter this to avoid blowing up + // memory usage? + mir.visibility_scopes.clone(), + mir.visibility_scope_info.clone(), IndexVec::new(), ty, + None, initial_locals, 0, vec![], @@ -411,8 +406,8 @@ pub fn promote_candidates<'a, 'tcx>(mir: &mut Mir<'tcx>, block.statements.retain(|statement| { match statement.kind { StatementKind::Assign(Lvalue::Local(index), _) | - StatementKind::StorageLive(Lvalue::Local(index)) | - StatementKind::StorageDead(Lvalue::Local(index)) => { + StatementKind::StorageLive(index) | + StatementKind::StorageDead(index) => { !promoted(index) } _ => true diff --git a/src/librustc_mir/transform/qualify_consts.rs b/src/librustc_mir/transform/qualify_consts.rs index ee99bb7d9d..3f3b9d177d 100644 --- a/src/librustc_mir/transform/qualify_consts.rs +++ b/src/librustc_mir/transform/qualify_consts.rs @@ -15,10 +15,11 @@ //! diagnostics as to why a constant rvalue wasn't promoted. 
use rustc_data_structures::bitvec::BitVector; +use rustc_data_structures::indexed_set::IdxSetBuf; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use rustc::hir; -use rustc::hir::map as hir_map; use rustc::hir::def_id::DefId; +use rustc::middle::const_val::ConstVal; use rustc::traits::{self, Reveal}; use rustc::ty::{self, TyCtxt, Ty, TypeFoldable}; use rustc::ty::cast::CastTy; @@ -29,47 +30,49 @@ use rustc::mir::transform::{MirPass, MirSource}; use rustc::mir::visit::{LvalueContext, Visitor}; use rustc::middle::lang_items; use syntax::abi::Abi; +use syntax::attr; use syntax::feature_gate::UnstableFeatures; use syntax_pos::{Span, DUMMY_SP}; use std::fmt; +use std::rc::Rc; use std::usize; use super::promote_consts::{self, Candidate, TempState}; bitflags! { - flags Qualif: u8 { + struct Qualif: u8 { // Constant containing interior mutability (UnsafeCell). - const MUTABLE_INTERIOR = 1 << 0, + const MUTABLE_INTERIOR = 1 << 0; // Constant containing an ADT that implements Drop. - const NEEDS_DROP = 1 << 1, + const NEEDS_DROP = 1 << 1; // Function argument. - const FN_ARGUMENT = 1 << 2, + const FN_ARGUMENT = 1 << 2; // Static lvalue or move from a static. - const STATIC = 1 << 3, + const STATIC = 1 << 3; // Reference to a static. - const STATIC_REF = 1 << 4, + const STATIC_REF = 1 << 4; // Not constant at all - non-`const fn` calls, asm!, // pointer comparisons, ptr-to-int casts, etc. - const NOT_CONST = 1 << 5, + const NOT_CONST = 1 << 5; // Refers to temporaries which cannot be promoted as // promote_consts decided they weren't simple enough. - const NOT_PROMOTABLE = 1 << 6, + const NOT_PROMOTABLE = 1 << 6; // Borrows of temporaries can be promoted only // if they have none of the above qualifications. - const NEVER_PROMOTE = 0b111_1111, + const NEVER_PROMOTE = 0b111_1111; // Const items can only have MUTABLE_INTERIOR // and NOT_PROMOTABLE without producing an error. const CONST_ERROR = !Qualif::MUTABLE_INTERIOR.bits & - !Qualif::NOT_PROMOTABLE.bits + !Qualif::NOT_PROMOTABLE.bits; } } @@ -120,13 +123,13 @@ struct Qualifier<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { return_qualif: Option, qualif: Qualif, const_fn_arg_vars: BitVector, + local_needs_drop: IndexVec>, temp_promotion_state: IndexVec, promotion_candidates: Vec } impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, def_id: DefId, mir: &'a Mir<'tcx>, mode: Mode) @@ -141,11 +144,12 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { mir, rpo, tcx, - param_env, + param_env: tcx.param_env(def_id), temp_qualif: IndexVec::from_elem(None, &mir.local_decls), return_qualif: None, qualif: Qualif::empty(), const_fn_arg_vars: BitVector::new(mir.local_decls.len()), + local_needs_drop: IndexVec::from_elem(None, &mir.local_decls), temp_promotion_state: temps, promotion_candidates: vec![] } @@ -193,78 +197,6 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { self.add(original); } - /// Check for NEEDS_DROP (from an ADT or const fn call) and - /// error, unless we're in a function, or the feature-gate - /// for globals with destructors is enabled. - fn deny_drop(&self) { - if self.mode == Mode::Fn || !self.qualif.intersects(Qualif::NEEDS_DROP) { - return; - } - - // Static and const fn's allow destructors, but they're feature-gated. - let msg = if self.mode != Mode::Const { - // Feature-gate for globals with destructors is enabled. - if self.tcx.sess.features.borrow().drop_types_in_const { - return; - } - - // This comes from a macro that has #[allow_internal_unstable]. 
- if self.span.allows_unstable() { - return; - } - - format!("destructors in {}s are an unstable feature", - self.mode) - } else { - format!("{}s are not allowed to have destructors", - self.mode) - }; - - let mut err = - struct_span_err!(self.tcx.sess, self.span, E0493, "{}", msg); - - if self.mode != Mode::Const { - help!(&mut err, - "in Nightly builds, add `#![feature(drop_types_in_const)]` \ - to the crate attributes to enable"); - } else { - self.find_drop_implementation_method_span() - .map(|span| err.span_label(span, "destructor defined here")); - - err.span_label(self.span, "constants cannot have destructors"); - } - - err.emit(); - } - - fn find_drop_implementation_method_span(&self) -> Option { - self.tcx.lang_items - .drop_trait() - .and_then(|drop_trait_id| { - let mut span = None; - - self.tcx - .for_each_relevant_impl(drop_trait_id, self.mir.return_ty, |impl_did| { - self.tcx.hir - .as_local_node_id(impl_did) - .and_then(|impl_node_id| self.tcx.hir.find(impl_node_id)) - .map(|node| { - if let hir_map::NodeItem(item) = node { - if let hir::ItemImpl(.., ref impl_item_refs) = item.node { - span = impl_item_refs.first() - .map(|iiref| { - self.tcx.hir.impl_item(iiref.id) - .span - }); - } - } - }); - }); - - span - }) - } - /// Check if an Lvalue with the current qualifications could /// be consumed, by either an operand or a Deref projection. fn try_consume(&mut self) -> bool { @@ -314,6 +246,15 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { return; } + // When initializing a local, record whether the *value* being + // stored in it needs dropping, which it may not, even if its + // type does, e.g. `None::`. + if let Lvalue::Local(local) = *dest { + if qualif.intersects(Qualif::NEEDS_DROP) { + self.local_needs_drop[local] = Some(self.span); + } + } + match *dest { Lvalue::Local(index) if self.mir.local_kind(index) == LocalKind::Temp => { debug!("store to temp {:?}", index); @@ -346,7 +287,7 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { } /// Qualify a whole const, static initializer or const fn. - fn qualify_const(&mut self) -> Qualif { + fn qualify_const(&mut self) -> (Qualif, Rc>) { debug!("qualifying {} {:?}", self.mode, self.def_id); let mir = self.mir; @@ -360,7 +301,6 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { let target = match mir[bb].terminator().kind { TerminatorKind::Goto { target } | - // Drops are considered noops. TerminatorKind::Drop { target, .. } | TerminatorKind::Assert { target, .. } | TerminatorKind::Call { destination: Some((_, target)), .. } => { @@ -369,12 +309,14 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { // Non-terminating calls cannot produce any value. TerminatorKind::Call { destination: None, .. } => { - return Qualif::empty(); + break; } TerminatorKind::SwitchInt {..} | TerminatorKind::DropAndReplace { .. } | TerminatorKind::Resume | + TerminatorKind::GeneratorDrop | + TerminatorKind::Yield { .. } | TerminatorKind::Unreachable => None, TerminatorKind::Return => { @@ -431,25 +373,35 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { } } - let return_ty = mir.return_ty; self.qualif = self.return_qualif.unwrap_or(Qualif::NOT_CONST); - match self.mode { - Mode::StaticMut => { - // Check for destructors in static mut. - self.add_type(return_ty); - self.deny_drop(); - } - _ => { - // Account for errors in consts by using the - // conservative type qualification instead. 
- if self.qualif.intersects(Qualif::CONST_ERROR) { - self.qualif = Qualif::empty(); - self.add_type(return_ty); + // Account for errors in consts by using the + // conservative type qualification instead. + if self.qualif.intersects(Qualif::CONST_ERROR) { + self.qualif = Qualif::empty(); + let return_ty = mir.return_ty; + self.add_type(return_ty); + } + + + // Collect all the temps we need to promote. + let mut promoted_temps = IdxSetBuf::new_empty(self.temp_promotion_state.len()); + + for candidate in &self.promotion_candidates { + match *candidate { + Candidate::Ref(Location { block: bb, statement_index: stmt_idx }) => { + match self.mir[bb].statements[stmt_idx].kind { + StatementKind::Assign(_, Rvalue::Ref(_, _, Lvalue::Local(index))) => { + promoted_temps.add(&index); + } + _ => {} + } } + Candidate::ShuffleIndices(_) => {} } } - self.qualif + + (self.qualif, Rc::new(promoted_temps)) } } @@ -457,33 +409,40 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { /// For functions (constant or not), it also records /// candidates for promotion in promotion_candidates. impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { + fn visit_local(&mut self, + &local: &Local, + _: LvalueContext<'tcx>, + _: Location) { + match self.mir.local_kind(local) { + LocalKind::ReturnPointer => { + self.not_const(); + } + LocalKind::Arg => { + self.add(Qualif::FN_ARGUMENT); + } + LocalKind::Var => { + self.add(Qualif::NOT_CONST); + } + LocalKind::Temp => { + if !self.temp_promotion_state[local].is_promotable() { + self.add(Qualif::NOT_PROMOTABLE); + } + + if let Some(qualif) = self.temp_qualif[local] { + self.add(qualif); + } else { + self.not_const(); + } + } + } + } + fn visit_lvalue(&mut self, lvalue: &Lvalue<'tcx>, context: LvalueContext<'tcx>, location: Location) { match *lvalue { - Lvalue::Local(local) => match self.mir.local_kind(local) { - LocalKind::ReturnPointer => { - self.not_const(); - } - LocalKind::Arg => { - self.add(Qualif::FN_ARGUMENT); - } - LocalKind::Var => { - self.add(Qualif::NOT_CONST); - } - LocalKind::Temp => { - if !self.temp_promotion_state[local].is_promotable() { - self.add(Qualif::NOT_PROMOTABLE); - } - - if let Some(qualif) = self.temp_qualif[local] { - self.add(qualif); - } else { - self.not_const(); - } - } - }, + Lvalue::Local(ref local) => self.visit_local(local, context, location), Lvalue::Static(ref global) => { self.add(Qualif::STATIC); @@ -493,6 +452,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { span_err!(self.tcx.sess, self.span, E0625, "thread-local statics cannot be \ accessed at compile-time"); + self.add(Qualif::NOT_CONST); return; } } @@ -558,28 +518,34 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) { match *operand { - Operand::Consume(_) => { + Operand::Consume(ref lvalue) => { self.nest(|this| { this.super_operand(operand, location); this.try_consume(); }); + + // Mark the consumed locals to indicate later drops are noops. + if let Lvalue::Local(local) = *lvalue { + self.local_needs_drop[local] = None; + } } Operand::Constant(ref constant) => { - if let Literal::Item { def_id, substs } = constant.literal { - // Don't peek inside generic (associated) constants. - if substs.types().next().is_some() { - self.add_type(constant.ty); + if let Literal::Value { + value: &ty::Const { val: ConstVal::Unevaluated(def_id, _), ty } + } = constant.literal { + // Don't peek inside trait associated constants. 
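
The "don't peek inside trait associated constants" branch above exists because in generic code the constant's value is unknown, so the qualifier can only fall back to judging it by its type. A small user-level illustration of that situation (names invented):

    trait Bits {
        const WIDTH: u32;
    }

    impl Bits for u8 {
        const WIDTH: u32 = 8;
    }

    impl Bits for u32 {
        const WIDTH: u32 = 32;
    }

    // Inside this generic function the *value* of T::WIDTH is not known, so a
    // const-qualification pass can only reason about its type (u32).
    fn bytes<T: Bits>() -> u32 {
        T::WIDTH / 8
    }

    fn main() {
        assert_eq!(bytes::<u8>(), 1);
        assert_eq!(bytes::<u32>(), 4);
    }
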
+ if self.tcx.trait_of_item(def_id).is_some() { + self.add_type(ty); } else { - let bits = self.tcx.at(constant.span).mir_const_qualif(def_id); + let (bits, _) = self.tcx.at(constant.span).mir_const_qualif(def_id); let qualif = Qualif::from_bits(bits).expect("invalid mir_const_qualif"); self.add(qualif); - } - // Let `const fn` transitively have destructors, - // but they do get stopped in `const` or `static`. - if self.mode != Mode::ConstFn { - self.deny_drop(); + // Just in case the type is more specific than + // the definition, e.g. impl associated const + // with type parameters, take it into account. + self.qualif.restrict(ty, self.tcx, self.param_env); } } } @@ -625,16 +591,12 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { let allow = if self.mode == Mode::StaticMut { // Inside a `static mut`, &mut [...] is also allowed. match ty.sty { - ty::TyArray(..) | ty::TySlice(_) => { - // Mutating can expose drops, be conservative. - self.add_type(ty); - self.deny_drop(); - true - } + ty::TyArray(..) | ty::TySlice(_) => true, _ => false } - } else if let ty::TyArray(_, 0) = ty.sty { - self.mode == Mode::Fn + } else if let ty::TyArray(_, len) = ty.sty { + len.val.to_const_int().unwrap().to_u64().unwrap() == 0 && + self.mode == Mode::Fn } else { false }; @@ -669,13 +631,11 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { // We might have a candidate for promotion. let candidate = Candidate::Ref(location); - if self.mode == Mode::Fn || self.mode == Mode::ConstFn { - if !self.qualif.intersects(Qualif::NEVER_PROMOTE) { - // We can only promote direct borrows of temps. - if let Lvalue::Local(local) = *lvalue { - if self.mir.local_kind(local) == LocalKind::Temp { - self.promotion_candidates.push(candidate); - } + if !self.qualif.intersects(Qualif::NEVER_PROMOTE) { + // We can only promote direct borrows of temps. + if let Lvalue::Local(local) = *lvalue { + if self.mir.local_kind(local) == LocalKind::Temp { + self.promotion_candidates.push(candidate); } } } @@ -734,18 +694,12 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { if let AggregateKind::Adt(def, ..) = **kind { if def.has_dtor(self.tcx) { self.add(Qualif::NEEDS_DROP); - self.deny_drop(); } - if Some(def.did) == self.tcx.lang_items.unsafe_cell_type() { + if Some(def.did) == self.tcx.lang_items().unsafe_cell_type() { let ty = rvalue.ty(self.mir, self.tcx); self.add_type(ty); assert!(self.qualif.intersects(Qualif::MUTABLE_INTERIOR)); - // Even if the value inside may not need dropping, - // mutating it would change that. - if !self.qualif.intersects(Qualif::NOT_CONST) { - self.deny_drop(); - } } } } @@ -760,14 +714,14 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { self.visit_operand(func, location); let fn_ty = func.ty(self.mir, self.tcx); - let (mut is_shuffle, mut is_const_fn) = (false, false); + let (mut is_shuffle, mut is_const_fn) = (false, None); if let ty::TyFnDef(def_id, _) = fn_ty.sty { match self.tcx.fn_sig(def_id).abi() { Abi::RustIntrinsic | Abi::PlatformIntrinsic => { assert!(!self.tcx.is_const_fn(def_id)); - match &self.tcx.item_name(def_id).as_str()[..] { - "size_of" | "min_align_of" => is_const_fn = true, + match &self.tcx.item_name(def_id)[..] 
{ + "size_of" | "min_align_of" => is_const_fn = Some(def_id), name if name.starts_with("simd_shuffle") => { is_shuffle = true; @@ -777,7 +731,9 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { } } _ => { - is_const_fn = self.tcx.is_const_fn(def_id); + if self.tcx.is_const_fn(def_id) { + is_const_fn = Some(def_id); + } } } } @@ -798,25 +754,38 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { } // Const fn calls. - if is_const_fn { - // We are in a const or static initializer, - if self.mode != Mode::Fn && - - // feature-gate is not enabled, - !self.tcx.sess.features.borrow().const_fn && - - // this doesn't come from a crate with the feature-gate enabled, - self.def_id.is_local() && - - // this doesn't come from a macro that has #[allow_internal_unstable] - !self.span.allows_unstable() - { - let mut err = self.tcx.sess.struct_span_err(self.span, - "const fns are an unstable feature"); - help!(&mut err, - "in Nightly builds, add `#![feature(const_fn)]` \ - to the crate attributes to enable"); - err.emit(); + if let Some(def_id) = is_const_fn { + // find corresponding rustc_const_unstable feature + if let Some(&attr::Stability { + rustc_const_unstable: Some(attr::RustcConstUnstable { + feature: ref feature_name + }), + .. }) = self.tcx.lookup_stability(def_id) { + + // We are in a const or static initializer, + if self.mode != Mode::Fn && + + // feature-gate is not enabled, + !self.tcx.sess.features.borrow() + .declared_lib_features + .iter() + .any(|&(ref sym, _)| sym == feature_name) && + + // this doesn't come from a crate with the feature-gate enabled, + self.def_id.is_local() && + + // this doesn't come from a macro that has #[allow_internal_unstable] + !self.span.allows_unstable() + { + let mut err = self.tcx.sess.struct_span_err(self.span, + &format!("`{}` is not yet stable as a const fn", + self.tcx.item_path_str(def_id))); + help!(&mut err, + "in Nightly builds, add `#![feature({})]` \ + to the crate attributes to enable", + feature_name); + err.emit(); + } } } else { self.qualif = Qualif::NOT_CONST; @@ -855,14 +824,33 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { let ty = dest.ty(self.mir, tcx).to_ty(tcx); self.qualif = Qualif::empty(); self.add_type(ty); + } + self.assign(dest, location); + } + } else if let TerminatorKind::Drop { location: ref lvalue, .. } = *kind { + self.super_terminator_kind(bb, kind, location); - // Let `const fn` transitively have destructors, - // but they do get stopped in `const` or `static`. - if self.mode != Mode::ConstFn { - self.deny_drop(); + // Deny *any* live drops anywhere other than functions. + if self.mode != Mode::Fn { + // HACK(eddyb) Emulate a bit of dataflow analysis, + // conservatively, that drop elaboration will do. + let needs_drop = if let Lvalue::Local(local) = *lvalue { + self.local_needs_drop[local] + } else { + None + }; + + if let Some(span) = needs_drop { + // Double-check the type being dropped, to minimize false positives. + let ty = lvalue.ty(self.mir, self.tcx).to_ty(self.tcx); + if ty.needs_drop(self.tcx, self.param_env) { + struct_span_err!(self.tcx.sess, span, E0493, + "destructors cannot be evaluated at compile-time") + .span_label(span, format!("{}s cannot evaluate destructors", + self.mode)) + .emit(); } } - self.assign(dest, location); } } else { // Qualify any operands inside other terminators. 
@@ -942,7 +930,7 @@ pub fn provide(providers: &mut Providers) { fn mir_const_qualif<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) - -> u8 { + -> (u8, Rc>) { // NB: This `borrow()` is guaranteed to be valid (i.e., the value // cannot yet be stolen), because `mir_validated()`, which steals // from `mir_const(), forces this query to execute before @@ -950,13 +938,13 @@ fn mir_const_qualif<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let mir = &tcx.mir_const(def_id).borrow(); if mir.return_ty.references_error() { - return Qualif::NOT_CONST.bits(); + tcx.sess.delay_span_bug(mir.span, "mir_const_qualif: Mir had errors"); + return (Qualif::NOT_CONST.bits(), Rc::new(IdxSetBuf::new_empty(0))); } - let param_env = tcx.param_env(def_id); - - let mut qualifier = Qualifier::new(tcx, param_env, def_id, mir, Mode::Const); - qualifier.qualify_const().bits() + let mut qualifier = Qualifier::new(tcx, def_id, mir, Mode::Const); + let (qualif, promoted_temps) = qualifier.qualify_const(); + (qualif.bits(), promoted_temps) } pub struct QualifyAndPromoteConstants; @@ -966,8 +954,15 @@ impl MirPass for QualifyAndPromoteConstants { tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource, mir: &mut Mir<'tcx>) { + // There's not really any point in promoting errorful MIR. + if mir.return_ty.references_error() { + tcx.sess.delay_span_bug(mir.span, "QualifyAndPromoteConstants: Mir had errors"); + return; + } + let id = src.item_id(); let def_id = tcx.hir.local_def_id(id); + let mut const_promoted_temps = None; let mode = match src { MirSource::Fn(_) => { if tcx.is_const_fn(def_id) { @@ -976,19 +971,21 @@ impl MirPass for QualifyAndPromoteConstants { Mode::Fn } } + MirSource::Const(_) => { + const_promoted_temps = Some(tcx.mir_const_qualif(def_id).1); + Mode::Const + } MirSource::Static(_, hir::MutImmutable) => Mode::Static, MirSource::Static(_, hir::MutMutable) => Mode::StaticMut, - MirSource::Const(_) | + MirSource::GeneratorDrop(_) | MirSource::Promoted(..) => return }; - let param_env = tcx.param_env(def_id); if mode == Mode::Fn || mode == Mode::ConstFn { // This is ugly because Qualifier holds onto mir, // which can't be mutated until its scope ends. let (temps, candidates) = { - let mut qualifier = Qualifier::new(tcx, param_env, - def_id, mir, mode); + let mut qualifier = Qualifier::new(tcx, def_id, mir, mode); if mode == Mode::ConstFn { // Enforce a constant-like CFG for `const fn`. qualifier.qualify_const(); @@ -1004,8 +1001,37 @@ impl MirPass for QualifyAndPromoteConstants { // Do the actual promotion, now that we know what's viable. promote_consts::promote_candidates(mir, tcx, temps, candidates); } else { - let mut qualifier = Qualifier::new(tcx, param_env, def_id, mir, mode); - qualifier.qualify_const(); + let promoted_temps = if mode == Mode::Const { + // Already computed by `mir_const_qualif`. + const_promoted_temps.unwrap() + } else { + Qualifier::new(tcx, def_id, mir, mode).qualify_const().1 + }; + + // In `const` and `static` everything without `StorageDead` + // is `'static`, we don't have to create promoted MIR fragments, + // just remove `Drop` and `StorageDead` on "promoted" locals. + for block in mir.basic_blocks_mut() { + block.statements.retain(|statement| { + match statement.kind { + StatementKind::StorageDead(index) => { + !promoted_temps.contains(&index) + } + _ => true + } + }); + let terminator = block.terminator_mut(); + match terminator.kind { + TerminatorKind::Drop { location: Lvalue::Local(index), target, .. 
} => { + if promoted_temps.contains(&index) { + terminator.kind = TerminatorKind::Goto { + target, + }; + } + } + _ => {} + } + } } // Statics must be Sync. diff --git a/src/librustc_mir/transform/rustc_peek.rs b/src/librustc_mir/transform/rustc_peek.rs index ceff52409b..8d6458d793 100644 --- a/src/librustc_mir/transform/rustc_peek.rs +++ b/src/librustc_mir/transform/rustc_peek.rs @@ -45,7 +45,7 @@ impl MirPass for SanityCheck { let attributes = tcx.get_attrs(def_id); let param_env = tcx.param_env(def_id); - let move_data = MoveData::gather_moves(mir, tcx, param_env); + let move_data = MoveData::gather_moves(mir, tcx, param_env).unwrap(); let mdpe = MoveDataParamEnv { move_data: move_data, param_env: param_env }; let dead_unwinds = IdxSetBuf::new_empty(mir.basic_blocks().len()); let flow_inits = diff --git a/src/librustc_mir/transform/simplify.rs b/src/librustc_mir/transform/simplify.rs index 070250cda4..89828cf375 100644 --- a/src/librustc_mir/transform/simplify.rs +++ b/src/librustc_mir/transform/simplify.rs @@ -352,15 +352,11 @@ struct DeclMarker { } impl<'tcx> Visitor<'tcx> for DeclMarker { - fn visit_lvalue(&mut self, lval: &Lvalue<'tcx>, ctx: LvalueContext<'tcx>, loc: Location) { - if ctx == LvalueContext::StorageLive || ctx == LvalueContext::StorageDead { - // ignore these altogether, they get removed along with their otherwise unused decls. - return; + fn visit_local(&mut self, local: &Local, ctx: LvalueContext<'tcx>, _: Location) { + // ignore these altogether, they get removed along with their otherwise unused decls. + if ctx != LvalueContext::StorageLive && ctx != LvalueContext::StorageDead { + self.locals.insert(local.index()); } - if let Lvalue::Local(ref v) = *lval { - self.locals.insert(v.index()); - } - self.super_lvalue(lval, ctx, loc); } } @@ -373,22 +369,15 @@ impl<'tcx> MutVisitor<'tcx> for LocalUpdater { // Remove unnecessary StorageLive and StorageDead annotations. data.statements.retain(|stmt| { match stmt.kind { - StatementKind::StorageLive(ref lval) | StatementKind::StorageDead(ref lval) => { - match *lval { - Lvalue::Local(l) => self.map[l.index()] != !0, - _ => true - } + StatementKind::StorageLive(l) | StatementKind::StorageDead(l) => { + self.map[l.index()] != !0 } _ => true } }); self.super_basic_block_data(block, data); } - fn visit_lvalue(&mut self, lval: &mut Lvalue<'tcx>, ctx: LvalueContext<'tcx>, loc: Location) { - match *lval { - Lvalue::Local(ref mut l) => *l = Local::new(self.map[l.index()]), - _ => (), - }; - self.super_lvalue(lval, ctx, loc); + fn visit_local(&mut self, l: &mut Local, _: LvalueContext<'tcx>, _: Location) { + *l = Local::new(self.map[l.index()]); } } diff --git a/src/librustc_mir/transform/simplify_branches.rs b/src/librustc_mir/transform/simplify_branches.rs index 1dcacb29c3..0dff145ecb 100644 --- a/src/librustc_mir/transform/simplify_branches.rs +++ b/src/librustc_mir/transform/simplify_branches.rs @@ -10,7 +10,7 @@ //! A pass that simplifies branches when their condition is known. -use rustc::ty::TyCtxt; +use rustc::ty::{self, TyCtxt}; use rustc::middle::const_val::ConstVal; use rustc::mir::transform::{MirPass, MirSource}; use rustc::mir::*; @@ -40,7 +40,7 @@ impl MirPass for SimplifyBranches { TerminatorKind::SwitchInt { discr: Operand::Constant(box Constant { literal: Literal::Value { ref value }, .. }), ref values, ref targets, .. 
} => { - if let Some(ref constint) = value.to_const_int() { + if let Some(ref constint) = value.val.to_const_int() { let (otherwise, targets) = targets.split_last().unwrap(); let mut ret = TerminatorKind::Goto { target: *otherwise }; for (v, t) in values.iter().zip(targets.iter()) { @@ -56,7 +56,7 @@ impl MirPass for SimplifyBranches { }, TerminatorKind::Assert { target, cond: Operand::Constant(box Constant { literal: Literal::Value { - value: ConstVal::Bool(cond) + value: &ty::Const { val: ConstVal::Bool(cond), .. } }, .. }), expected, .. } if cond == expected => { TerminatorKind::Goto { target: target } diff --git a/src/librustc_mir/transform/type_check.rs b/src/librustc_mir/transform/type_check.rs index 72092042f8..ab5998a348 100644 --- a/src/librustc_mir/transform/type_check.rs +++ b/src/librustc_mir/transform/type_check.rs @@ -34,7 +34,10 @@ fn mirbug(tcx: TyCtxt, span: Span, msg: &str) { macro_rules! span_mirbug { ($context:expr, $elem:expr, $($message:tt)*) => ({ mirbug($context.tcx(), $context.last_span, - &format!("broken MIR ({:?}): {}", $elem, format!($($message)*))) + &format!("broken MIR in {:?} ({:?}): {}", + $context.body_id, + $elem, + format_args!($($message)*))) }) } @@ -60,6 +63,7 @@ struct TypeVerifier<'a, 'b: 'a, 'gcx: 'b+'tcx, 'tcx: 'b> { cx: &'a mut TypeChecker<'b, 'gcx, 'tcx>, mir: &'a Mir<'tcx>, last_span: Span, + body_id: ast::NodeId, errors_reported: bool } @@ -108,8 +112,9 @@ impl<'a, 'b, 'gcx, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'gcx, 'tcx> { impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { fn new(cx: &'a mut TypeChecker<'b, 'gcx, 'tcx>, mir: &'a Mir<'tcx>) -> Self { TypeVerifier { - cx, mir, + body_id: cx.body_id, + cx, last_span: mir.span, errors_reported: false } @@ -160,7 +165,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { base: LvalueTy<'tcx>, pi: &LvalueElem<'tcx>, lvalue: &Lvalue<'tcx>, - location: Location) + _: Location) -> LvalueTy<'tcx> { debug!("sanitize_projection: {:?} {:?} {:?}", base, pi, lvalue); let tcx = self.tcx(); @@ -176,9 +181,8 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { }) } } - ProjectionElem::Index(ref i) => { - self.visit_operand(i, location); - let index_ty = i.ty(self.mir, tcx); + ProjectionElem::Index(i) => { + let index_ty = Lvalue::Local(i).ty(self.mir, tcx).to_ty(tcx); if index_ty != tcx.types.usize { LvalueTy::Ty { ty: span_mirbug_and_err!(self, i, "index by non-usize {:?}", i) @@ -205,7 +209,8 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { LvalueTy::Ty { ty: match base_ty.sty { ty::TyArray(inner, size) => { - let min_size = (from as usize) + (to as usize); + let size = size.val.to_const_int().unwrap().to_u64().unwrap(); + let min_size = (from as u64) + (to as u64); if let Some(rest_size) = size.checked_sub(min_size) { tcx.mk_array(inner, rest_size) } else { @@ -297,6 +302,19 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { }) } } + ty::TyGenerator(def_id, substs, _) => { + // Try upvars first. `field_tys` requires final optimized MIR. 
+ if let Some(ty) = substs.upvar_tys(def_id, tcx).nth(field.index()) { + return Ok(ty); + } + + return match substs.field_tys(def_id, tcx).nth(field.index()) { + Some(ty) => Ok(ty), + None => Err(FieldAccessError::OutOfRange { + field_count: substs.field_tys(def_id, tcx).count() + 1 + }) + } + } ty::TyTuple(tys, _) => { return match tys.get(field.index()) { Some(&ty) => Ok(ty), @@ -403,15 +421,8 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { variant_index); }; } - StatementKind::StorageLive(ref lv) | - StatementKind::StorageDead(ref lv) => { - match *lv { - Lvalue::Local(_) => {} - _ => { - span_mirbug!(self, stmt, "bad lvalue: expected local"); - } - } - } + StatementKind::StorageLive(_) | + StatementKind::StorageDead(_) | StatementKind::InlineAsm { .. } | StatementKind::EndRegion(_) | StatementKind::Validate(..) | @@ -428,6 +439,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { TerminatorKind::Goto { .. } | TerminatorKind::Resume | TerminatorKind::Return | + TerminatorKind::GeneratorDrop | TerminatorKind::Unreachable | TerminatorKind::Drop { .. } => { // no checks needed for these @@ -494,6 +506,22 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { } } } + TerminatorKind::Yield { ref value, .. } => { + let value_ty = value.ty(mir, tcx); + match mir.yield_ty { + None => span_mirbug!(self, term, "yield in non-generator"), + Some(ty) => { + if let Err(terr) = self.sub_types(value_ty, ty) { + span_mirbug!(self, + term, + "type of yield value is {:?}, but the yield type is {:?}: {:?}", + value_ty, + ty, + terr); + } + } + } + } } } @@ -545,10 +573,10 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { match operand { &Operand::Constant(box Constant { literal: Literal::Value { - value: ConstVal::Function(def_id, _), .. + value: &ty::Const { val: ConstVal::Function(def_id, _), .. }, .. }, .. }) => { - Some(def_id) == self.tcx().lang_items.box_free_fn() + Some(def_id) == self.tcx().lang_items().box_free_fn() } _ => false, } @@ -620,6 +648,20 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { span_mirbug!(self, block, "return on cleanup block") } } + TerminatorKind::GeneratorDrop { .. } => { + if is_cleanup { + span_mirbug!(self, block, "generator_drop in cleanup block") + } + } + TerminatorKind::Yield { resume, drop, .. } => { + if is_cleanup { + span_mirbug!(self, block, "yield in cleanup block") + } + self.assert_iscleanup(mir, block, resume, is_cleanup); + if let Some(drop) = drop { + self.assert_iscleanup(mir, block, drop, is_cleanup); + } + } TerminatorKind::Unreachable => {} TerminatorKind::Drop { target, unwind, .. } | TerminatorKind::DropAndReplace { target, unwind, .. 
} | diff --git a/src/librustc_mir/util/borrowck_errors.rs b/src/librustc_mir/util/borrowck_errors.rs index 9de3072658..37d53ca829 100644 --- a/src/librustc_mir/util/borrowck_errors.rs +++ b/src/librustc_mir/util/borrowck_errors.rs @@ -57,12 +57,22 @@ pub trait BorrowckErrors { desc, OGN=o) } - fn cannot_use_when_mutably_borrowed(&self, span: Span, desc: &str, o: Origin) + fn cannot_use_when_mutably_borrowed(&self, + span: Span, + desc: &str, + borrow_span: Span, + borrow_desc: &str, + o: Origin) -> DiagnosticBuilder { - struct_span_err!(self, span, E0503, + let mut err = struct_span_err!(self, span, E0503, "cannot use `{}` because it was mutably borrowed{OGN}", - desc, OGN=o) + desc, OGN=o); + + err.span_label(borrow_span, format!("borrow of `{}` occurs here", borrow_desc)); + err.span_label(span, format!("use of borrowed `{}`", borrow_desc)); + + err } fn cannot_act_on_uninitialized_variable(&self, @@ -78,50 +88,101 @@ pub trait BorrowckErrors { } fn cannot_mutably_borrow_multiply(&self, - span: Span, + new_loan_span: Span, desc: &str, opt_via: &str, + old_loan_span: Span, + old_opt_via: &str, + old_load_end_span:Span, o: Origin) -> DiagnosticBuilder { - struct_span_err!(self, span, E0499, + let mut err = struct_span_err!(self, new_loan_span, E0499, "cannot borrow `{}`{} as mutable more than once at a time{OGN}", - desc, opt_via, OGN=o) + desc, opt_via, OGN=o); + if old_loan_span == new_loan_span { + // Both borrows are happening in the same place + // Meaning the borrow is occurring in a loop + err.span_label(new_loan_span, + format!("mutable borrow starts here in previous \ + iteration of loop{}", opt_via)); + err.span_label(old_load_end_span, "mutable borrow ends here"); + } else { + err.span_label(old_loan_span, + format!("first mutable borrow occurs here{}", old_opt_via)); + err.span_label(new_loan_span, + format!("second mutable borrow occurs here{}", opt_via)); + err.span_label(old_load_end_span, "first borrow ends here"); + } + err } - fn cannot_uniquely_borrow_by_two_closures(&self, span: Span, desc: &str, o: Origin) + fn cannot_uniquely_borrow_by_two_closures(&self, + new_loan_span: Span, + desc: &str, + old_loan_span: Span, + old_load_end_span: Span, + o: Origin) -> DiagnosticBuilder { - struct_span_err!(self, span, E0524, + let mut err = struct_span_err!(self, new_loan_span, E0524, "two closures require unique access to `{}` at the same time{OGN}", - desc, OGN=o) + desc, OGN=o); + err.span_label( + old_loan_span, + "first closure is constructed here"); + err.span_label( + new_loan_span, + "second closure is constructed here"); + err.span_label( + old_load_end_span, + "borrow from first closure ends here"); + err } fn cannot_uniquely_borrow_by_one_closure(&self, - span: Span, + new_loan_span: Span, desc_new: &str, + opt_via: &str, + old_loan_span: Span, noun_old: &str, - msg_old: &str, + old_opt_via: &str, + previous_end_span: Span, o: Origin) -> DiagnosticBuilder { - struct_span_err!(self, span, E0500, + let mut err = struct_span_err!(self, new_loan_span, E0500, "closure requires unique access to `{}` but {} is already borrowed{}{OGN}", - desc_new, noun_old, msg_old, OGN=o) + desc_new, noun_old, old_opt_via, OGN=o); + err.span_label(new_loan_span, + format!("closure construction occurs here{}", opt_via)); + err.span_label(old_loan_span, + format!("borrow occurs here{}", old_opt_via)); + err.span_label(previous_end_span, "borrow ends here"); + err } fn cannot_reborrow_already_uniquely_borrowed(&self, - span: Span, + new_loan_span: Span, desc_new: &str, - msg_new: &str, + 
opt_via: &str, kind_new: &str, + old_loan_span: Span, + old_opt_via: &str, + previous_end_span: Span, o: Origin) -> DiagnosticBuilder { - struct_span_err!(self, span, E0501, + let mut err = struct_span_err!(self, new_loan_span, E0501, "cannot borrow `{}`{} as {} because previous closure \ requires unique access{OGN}", - desc_new, msg_new, kind_new, OGN=o) + desc_new, opt_via, kind_new, OGN=o); + err.span_label(new_loan_span, + format!("borrow occurs here{}", opt_via)); + err.span_label(old_loan_span, + format!("closure construction occurs here{}", old_opt_via)); + err.span_label(previous_end_span, "borrow from closure ends here"); + err } fn cannot_reborrow_already_borrowed(&self, @@ -129,23 +190,34 @@ pub trait BorrowckErrors { desc_new: &str, msg_new: &str, kind_new: &str, + old_span: Span, noun_old: &str, kind_old: &str, msg_old: &str, + old_load_end_span: Span, o: Origin) -> DiagnosticBuilder { - struct_span_err!(self, span, E0502, + let mut err = struct_span_err!(self, span, E0502, "cannot borrow `{}`{} as {} because {} is also borrowed as {}{}{OGN}", - desc_new, msg_new, kind_new, noun_old, kind_old, msg_old, OGN=o) + desc_new, msg_new, kind_new, noun_old, kind_old, msg_old, OGN=o); + err.span_label(span, format!("{} borrow occurs here{}", kind_new, msg_new)); + err.span_label(old_span, format!("{} borrow occurs here{}", kind_old, msg_old)); + err.span_label(old_load_end_span, format!("{} borrow ends here", kind_old)); + err } - fn cannot_assign_to_borrowed(&self, span: Span, desc: &str, o: Origin) + fn cannot_assign_to_borrowed(&self, span: Span, borrow_span: Span, desc: &str, o: Origin) -> DiagnosticBuilder { - struct_span_err!(self, span, E0506, + let mut err = struct_span_err!(self, span, E0506, "cannot assign to `{}` because it is borrowed{OGN}", - desc, OGN=o) + desc, OGN=o); + + err.span_label(borrow_span, format!("borrow of `{}` occurs here", desc)); + err.span_label(span, format!("assignment to borrowed `{}` occurs here", desc)); + + err } fn cannot_move_into_closure(&self, span: Span, desc: &str, o: Origin) @@ -164,11 +236,63 @@ pub trait BorrowckErrors { desc, OGN=o) } + fn cannot_assign(&self, span: Span, desc: &str, o: Origin) -> DiagnosticBuilder + { + struct_span_err!(self, span, E0594, + "cannot assign to {}{OGN}", + desc, OGN=o) + } + fn cannot_assign_static(&self, span: Span, desc: &str, o: Origin) -> DiagnosticBuilder { - self.struct_span_err(span, &format!("cannot assign to immutable static item {}{OGN}", - desc, OGN=o)) + self.cannot_assign(span, &format!("immutable static item `{}`", desc), o) + } + + fn cannot_move_out_of(&self, move_from_span: Span, move_from_desc: &str, o: Origin) + -> DiagnosticBuilder + { + let mut err = struct_span_err!(self, move_from_span, E0507, + "cannot move out of {}{OGN}", + move_from_desc, OGN=o); + err.span_label( + move_from_span, + format!("cannot move out of {}", move_from_desc)); + err + } + + fn cannot_move_out_of_interior_noncopy(&self, + move_from_span: Span, + ty: ty::Ty, + is_index: bool, + o: Origin) + -> DiagnosticBuilder + { + let type_name = match (&ty.sty, is_index) { + (&ty::TyArray(_, _), true) => "array", + (&ty::TySlice(_), _) => "slice", + _ => span_bug!(move_from_span, "this path should not cause illegal move"), + }; + let mut err = struct_span_err!(self, move_from_span, E0508, + "cannot move out of type `{}`, \ + a non-copy {}{OGN}", + ty, type_name, OGN=o); + err.span_label(move_from_span, "cannot move out of here"); + err + } + + fn cannot_move_out_of_interior_of_drop(&self, + move_from_span: Span, + 
container_ty: ty::Ty, + o: Origin) + -> DiagnosticBuilder + { + let mut err = struct_span_err!(self, move_from_span, E0509, + "cannot move out of type `{}`, \ + which implements the `Drop` trait{OGN}", + container_ty, OGN=o); + err.span_label(move_from_span, "cannot move out of here"); + err } } diff --git a/src/librustc_mir/util/def_use.rs b/src/librustc_mir/util/def_use.rs index 8263e149d8..bd9fb4bc3c 100644 --- a/src/librustc_mir/util/def_use.rs +++ b/src/librustc_mir/util/def_use.rs @@ -10,7 +10,7 @@ //! Def-use analysis. -use rustc::mir::{Local, Location, Lvalue, Mir}; +use rustc::mir::{Local, Location, Mir}; use rustc::mir::visit::{LvalueContext, MutVisitor, Visitor}; use rustc_data_structures::indexed_vec::IndexVec; use std::marker::PhantomData; @@ -51,7 +51,7 @@ impl<'tcx> DefUseAnalysis<'tcx> { } fn mutate_defs_and_uses(&self, local: Local, mir: &mut Mir<'tcx>, mut callback: F) - where F: for<'a> FnMut(&'a mut Lvalue<'tcx>, + where F: for<'a> FnMut(&'a mut Local, LvalueContext<'tcx>, Location) { for lvalue_use in &self.info[local].defs_and_uses { @@ -65,8 +65,8 @@ impl<'tcx> DefUseAnalysis<'tcx> { pub fn replace_all_defs_and_uses_with(&self, local: Local, mir: &mut Mir<'tcx>, - new_lvalue: Lvalue<'tcx>) { - self.mutate_defs_and_uses(local, mir, |lvalue, _, _| *lvalue = new_lvalue.clone()) + new_local: Local) { + self.mutate_defs_and_uses(local, mir, |local, _, _| *local = new_local) } } @@ -74,30 +74,15 @@ struct DefUseFinder<'tcx> { info: IndexVec>, } -impl<'tcx> DefUseFinder<'tcx> { - fn lvalue_mut_info(&mut self, lvalue: &Lvalue<'tcx>) -> Option<&mut Info<'tcx>> { - let info = &mut self.info; - - if let Lvalue::Local(local) = *lvalue { - Some(&mut info[local]) - } else { - None - } - } -} - impl<'tcx> Visitor<'tcx> for DefUseFinder<'tcx> { - fn visit_lvalue(&mut self, - lvalue: &Lvalue<'tcx>, - context: LvalueContext<'tcx>, - location: Location) { - if let Some(ref mut info) = self.lvalue_mut_info(lvalue) { - info.defs_and_uses.push(Use { - context, - location, - }) - } - self.super_lvalue(lvalue, context, location) + fn visit_local(&mut self, + &local: &Local, + context: LvalueContext<'tcx>, + location: Location) { + self.info[local].defs_and_uses.push(Use { + context, + location, + }); } } @@ -134,7 +119,7 @@ struct MutateUseVisitor<'tcx, F> { impl<'tcx, F> MutateUseVisitor<'tcx, F> { fn new(query: Local, callback: F, _: &Mir<'tcx>) -> MutateUseVisitor<'tcx, F> - where F: for<'a> FnMut(&'a mut Lvalue<'tcx>, LvalueContext<'tcx>, Location) { + where F: for<'a> FnMut(&'a mut Local, LvalueContext<'tcx>, Location) { MutateUseVisitor { query, callback, @@ -144,16 +129,13 @@ impl<'tcx, F> MutateUseVisitor<'tcx, F> { } impl<'tcx, F> MutVisitor<'tcx> for MutateUseVisitor<'tcx, F> - where F: for<'a> FnMut(&'a mut Lvalue<'tcx>, LvalueContext<'tcx>, Location) { - fn visit_lvalue(&mut self, - lvalue: &mut Lvalue<'tcx>, + where F: for<'a> FnMut(&'a mut Local, LvalueContext<'tcx>, Location) { + fn visit_local(&mut self, + local: &mut Local, context: LvalueContext<'tcx>, location: Location) { - if let Lvalue::Local(local) = *lvalue { - if local == self.query { - (self.callback)(lvalue, context, location) - } + if *local == self.query { + (self.callback)(local, context, location) } - self.super_lvalue(lvalue, context, location) } } diff --git a/src/librustc_mir/util/elaborate_drops.rs b/src/librustc_mir/util/elaborate_drops.rs index d8a061f4b1..3b9772079a 100644 --- a/src/librustc_mir/util/elaborate_drops.rs +++ b/src/librustc_mir/util/elaborate_drops.rs @@ -13,7 +13,7 @@ use rustc::hir; use 
rustc::mir::*; use rustc::middle::const_val::{ConstInt, ConstVal}; use rustc::middle::lang_items; -use rustc::ty::{self, Ty}; +use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::subst::{Kind, Substs}; use rustc::ty::util::IntTypeExt; use rustc_data_structures::indexed_vec::Idx; @@ -84,7 +84,7 @@ pub trait DropElaborator<'a, 'tcx: 'a> : fmt::Debug { fn patch(&mut self) -> &mut MirPatch<'tcx>; fn mir(&self) -> &'a Mir<'tcx>; - fn tcx(&self) -> ty::TyCtxt<'a, 'tcx, 'tcx>; + fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx>; fn param_env(&self) -> ty::ParamEnv<'tcx>; fn drop_style(&self, path: Self::Path, mode: DropFlagMode) -> DropStyle; @@ -133,7 +133,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> lvalue.ty(self.elaborator.mir(), self.tcx()).to_ty(self.tcx()) } - fn tcx(&self) -> ty::TyCtxt<'b, 'tcx, 'tcx> { + fn tcx(&self) -> TyCtxt<'b, 'tcx, 'tcx> { self.elaborator.tcx() } @@ -515,7 +515,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> { debug!("destructor_call_block({:?}, {:?})", self, succ); let tcx = self.tcx(); - let drop_trait = tcx.lang_items.drop_trait().unwrap(); + let drop_trait = tcx.lang_items().drop_trait().unwrap(); let drop_fn = tcx.associated_items(drop_trait).next().unwrap(); let ty = self.lvalue_ty(self.lvalue); let substs = tcx.mk_substs(iter::once(Kind::from(ty))); @@ -565,7 +565,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> /// drop(ptr) fn drop_loop(&mut self, succ: BasicBlock, - cur: &Lvalue<'tcx>, + cur: Local, length_or_end: &Lvalue<'tcx>, ety: Ty<'tcx>, unwind: Unwind, @@ -584,20 +584,20 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> let one = self.constant_usize(1); let (ptr_next, cur_next) = if ptr_based { - (Rvalue::Use(use_(cur)), - Rvalue::BinaryOp(BinOp::Offset, use_(cur), one)) + (Rvalue::Use(use_(&Lvalue::Local(cur))), + Rvalue::BinaryOp(BinOp::Offset, use_(&Lvalue::Local(cur)), one)) } else { (Rvalue::Ref( tcx.types.re_erased, BorrowKind::Mut, - self.lvalue.clone().index(use_(cur))), - Rvalue::BinaryOp(BinOp::Add, use_(cur), one)) + self.lvalue.clone().index(cur)), + Rvalue::BinaryOp(BinOp::Add, use_(&Lvalue::Local(cur)), one)) }; let drop_block = BasicBlockData { statements: vec![ self.assign(ptr, ptr_next), - self.assign(cur, cur_next) + self.assign(&Lvalue::Local(cur), cur_next) ], is_cleanup: unwind.is_cleanup(), terminator: Some(Terminator { @@ -611,7 +611,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> let loop_block = BasicBlockData { statements: vec![ self.assign(can_go, Rvalue::BinaryOp(BinOp::Eq, - use_(cur), + use_(&Lvalue::Local(cur)), use_(length_or_end))) ], is_cleanup: unwind.is_cleanup(), @@ -678,7 +678,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> tcx.types.usize }; - let cur = Lvalue::Local(self.new_temp(iter_ty)); + let cur = self.new_temp(iter_ty); let length = Lvalue::Local(self.new_temp(tcx.types.usize)); let length_or_end = if ptr_based { Lvalue::Local(self.new_temp(iter_ty)) @@ -688,7 +688,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> let unwind = self.unwind.map(|unwind| { self.drop_loop(unwind, - &cur, + cur, &length_or_end, ety, Unwind::InCleanup, @@ -698,12 +698,13 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> let succ = self.succ; // FIXME(#6393) let loop_block = self.drop_loop( succ, - &cur, + cur, &length_or_end, ety, unwind, ptr_based); + let cur = Lvalue::Local(cur); let zero = self.constant_usize(0); let mut drop_block_stmts = vec![]; drop_block_stmts.push(self.assign(&length, Rvalue::Len(self.lvalue.clone()))); @@ -752,7 +753,14 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> fn 
open_drop<'a>(&mut self) -> BasicBlock { let ty = self.lvalue_ty(self.lvalue); match ty.sty { - ty::TyClosure(def_id, substs) => { + ty::TyClosure(def_id, substs) | + // Note that `elaborate_drops` only drops the upvars of a generator, + // and this is ok because `open_drop` here can only be reached + // within that generator's own resume function. + // This should only happen for the self argument on the resume function. + // It effectively only contains upvars until the generator transformation runs. + // See librustc_mir/transform/generator.rs for more details. + ty::TyGenerator(def_id, substs, _) => { let tys : Vec<_> = substs.upvar_tys(def_id, self.tcx()).collect(); self.open_drop_for_tuple(&tys) } @@ -914,7 +922,12 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> Operand::Constant(box Constant { span: self.source_info.span, ty: self.tcx().types.usize, - literal: Literal::Value { value: ConstVal::Integral(self.tcx().const_usize(val)) } + literal: Literal::Value { + value: self.tcx().mk_const(ty::Const { + val: ConstVal::Integral(self.tcx().const_usize(val)), + ty: self.tcx().types.usize + }) + } }) } diff --git a/src/librustc_mir/util/liveness.rs b/src/librustc_mir/util/liveness.rs new file mode 100644 index 0000000000..e6d3a82ff9 --- /dev/null +++ b/src/librustc_mir/util/liveness.rs @@ -0,0 +1,245 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Liveness analysis which computes liveness of MIR local variables at the boundary of basic blocks +//! +//! This analysis considers references as being used only at the point of the +//! borrow. This means that this does not track uses because of references that +//! already exist: +//! +//! ```Rust +//! fn foo() { +//! x = 0; +//! // `x` is live here +//! GLOBAL = &x: *const u32; +//! // but not here, even while it can be accessed through `GLOBAL`. +//! foo(); +//! x = 1; +//! // `x` is live again here, because it is assigned to `OTHER_GLOBAL` +//! OTHER_GLOBAL = &x: *const u32; +//! // ... +//! } +//! ``` +//! +//! This means that users of this analysis still have to check whether +//! pre-existing references can be used to access the value (e.g. at movable +//! generator yield points, all pre-existing references are invalidated, so this +//! doesn't matter). + +use rustc::mir::*; +use rustc::mir::visit::{LvalueContext, Visitor}; +use rustc_data_structures::indexed_vec::{IndexVec, Idx}; +use rustc_data_structures::indexed_set::IdxSetBuf; +use util::pretty::{write_basic_block, dump_enabled, write_mir_intro}; +use rustc::mir::transform::MirSource; +use rustc::ty::item_path; +use std::path::{PathBuf, Path}; +use std::fs; +use rustc::ty::TyCtxt; +use std::io::{self, Write}; + +pub type LocalSet = IdxSetBuf<Local>; + +#[derive(Eq, PartialEq, Clone)] +struct BlockInfo { + defs: LocalSet, + uses: LocalSet, +} + +struct BlockInfoVisitor { + pre_defs: LocalSet, + defs: LocalSet, + uses: LocalSet, +} + +impl<'tcx> Visitor<'tcx> for BlockInfoVisitor { + fn visit_local(&mut self, + &local: &Local, + context: LvalueContext<'tcx>, + _: Location) { + match context { + LvalueContext::Store | + + // We let Call define the result in both the success and unwind cases. + // This may not be right.
+ LvalueContext::Call | + + // Storage live and storage dead aren't proper defines, but we can ignore + // values that come before them. + LvalueContext::StorageLive | + LvalueContext::StorageDead => { + self.defs.add(&local); + } + LvalueContext::Projection(..) | + + // Borrows only consider their local used at the point of the borrow. + // This won't affect the results since we use this analysis for generators + // and we only care about the result at suspension points. Borrows cannot + // cross suspension points so this behavior is unproblematic. + LvalueContext::Borrow { .. } | + + LvalueContext::Inspect | + LvalueContext::Consume | + LvalueContext::Validate | + + // We consider drops to always be uses of locals. + // Drop elaboration should be run before this analysis, otherwise + // the results might be too pessimistic. + LvalueContext::Drop => { + // Ignore uses which are already defined in this block + if !self.pre_defs.contains(&local) { + self.uses.add(&local); + } + } + } + } +} + +fn block<'tcx>(b: &BasicBlockData<'tcx>, locals: usize) -> BlockInfo { + let mut visitor = BlockInfoVisitor { + pre_defs: LocalSet::new_empty(locals), + defs: LocalSet::new_empty(locals), + uses: LocalSet::new_empty(locals), + }; + + let dummy_location = Location { block: BasicBlock::new(0), statement_index: 0 }; + + for statement in &b.statements { + visitor.visit_statement(BasicBlock::new(0), statement, dummy_location); + visitor.pre_defs.union(&visitor.defs); + } + visitor.visit_terminator(BasicBlock::new(0), b.terminator(), dummy_location); + + BlockInfo { + defs: visitor.defs, + uses: visitor.uses, + } +} + +// This gives the result of the liveness analysis at the boundary of basic blocks +pub struct LivenessResult { + pub ins: IndexVec<BasicBlock, LocalSet>, + pub outs: IndexVec<BasicBlock, LocalSet>, +} + +pub fn liveness_of_locals<'tcx>(mir: &Mir<'tcx>) -> LivenessResult { + let locals = mir.local_decls.len(); + let def_use: IndexVec<_, _> = mir.basic_blocks().iter().map(|b| { + block(b, locals) + }).collect(); + + let copy = |from: &IndexVec<BasicBlock, LocalSet>, to: &mut IndexVec<BasicBlock, LocalSet>| { + for (i, set) in to.iter_enumerated_mut() { + set.clone_from(&from[i]); + } + }; + + let mut ins: IndexVec<_, _> = mir.basic_blocks() + .indices() + .map(|_| LocalSet::new_empty(locals)).collect(); + let mut outs = ins.clone(); + + let mut ins_ = ins.clone(); + let mut outs_ = outs.clone(); + + loop { + copy(&ins, &mut ins_); + copy(&outs, &mut outs_); + + for b in mir.basic_blocks().indices().rev() { + // out = ∪ {ins of successors} + outs[b].clear(); + for &successor in mir.basic_blocks()[b].terminator().successors().into_iter() { + outs[b].union(&ins[successor]); + } + + // in = use ∪ (out - def) + ins[b].clone_from(&outs[b]); + ins[b].subtract(&def_use[b].defs); + ins[b].union(&def_use[b].uses); + } + + if ins_ == ins && outs_ == outs { + break; + } + } + + LivenessResult { + ins, + outs, + } } + +pub fn dump_mir<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + pass_name: &str, + source: MirSource, + mir: &Mir<'tcx>, + result: &LivenessResult) { + if !dump_enabled(tcx, pass_name, source) { + return; + } + let node_path = item_path::with_forced_impl_filename_line(|| { // see notes on #41697 below + tcx.item_path_str(tcx.hir.local_def_id(source.item_id())) + }); + dump_matched_mir_node(tcx, pass_name, &node_path, + source, mir, result); +} + +fn dump_matched_mir_node<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + pass_name: &str, + node_path: &str, + source: MirSource, + mir: &Mir<'tcx>, + result: &LivenessResult) { + let mut file_path = PathBuf::new(); + if let Some(ref file_dir) = 
tcx.sess.opts.debugging_opts.dump_mir_dir { + let p = Path::new(file_dir); + file_path.push(p); + }; + let file_name = format!("rustc.node{}{}-liveness.mir", + source.item_id(), pass_name); + file_path.push(&file_name); + let _ = fs::File::create(&file_path).and_then(|mut file| { + writeln!(file, "// MIR local liveness analysis for `{}`", node_path)?; + writeln!(file, "// source = {:?}", source)?; + writeln!(file, "// pass_name = {}", pass_name)?; + writeln!(file, "")?; + write_mir_fn(tcx, source, mir, &mut file, result)?; + Ok(()) + }); +} + +pub fn write_mir_fn<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + src: MirSource, + mir: &Mir<'tcx>, + w: &mut Write, + result: &LivenessResult) + -> io::Result<()> { + write_mir_intro(tcx, src, mir, w)?; + for block in mir.basic_blocks().indices() { + let print = |w: &mut Write, prefix, result: &IndexVec| { + let live: Vec = mir.local_decls.indices() + .filter(|i| result[block].contains(i)) + .map(|i| format!("{:?}", i)) + .collect(); + writeln!(w, "{} {{{}}}", prefix, live.join(", ")) + }; + print(w, " ", &result.ins)?; + write_basic_block(tcx, block, mir, w)?; + print(w, " ", &result.outs)?; + if block.index() + 1 != mir.basic_blocks().len() { + writeln!(w, "")?; + } + } + + writeln!(w, "}}")?; + Ok(()) +} + diff --git a/src/librustc_mir/util/mod.rs b/src/librustc_mir/util/mod.rs index f0d837e136..4b6da96824 100644 --- a/src/librustc_mir/util/mod.rs +++ b/src/librustc_mir/util/mod.rs @@ -15,6 +15,7 @@ pub mod patch; mod graphviz; mod pretty; +pub mod liveness; pub use self::pretty::{dump_enabled, dump_mir, write_mir_pretty}; pub use self::graphviz::{write_mir_graphviz}; diff --git a/src/librustc_mir/util/patch.rs b/src/librustc_mir/util/patch.rs index 1af0b6c67f..66607a9e09 100644 --- a/src/librustc_mir/util/patch.rs +++ b/src/librustc_mir/util/patch.rs @@ -101,6 +101,13 @@ impl<'tcx> MirPatch<'tcx> { Local::new(index as usize) } + pub fn new_internal(&mut self, ty: Ty<'tcx>, span: Span) -> Local { + let index = self.next_local; + self.next_local += 1; + self.new_locals.push(LocalDecl::new_internal(ty, span)); + Local::new(index as usize) + } + pub fn new_block(&mut self, data: BasicBlockData<'tcx>) -> BasicBlock { let block = BasicBlock::new(self.patch_map.len()); debug!("MirPatch: new_block: {:?}: {:?}", block, data); diff --git a/src/librustc_mir/util/pretty.rs b/src/librustc_mir/util/pretty.rs index 22a8c4378d..9e1f05f6d2 100644 --- a/src/librustc_mir/util/pretty.rs +++ b/src/librustc_mir/util/pretty.rs @@ -94,6 +94,7 @@ fn dump_matched_mir_node<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &Mir<'tcx>) { let promotion_id = match source { MirSource::Promoted(_, id) => format!("-{:?}", id), + MirSource::GeneratorDrop(_) => format!("-drop"), _ => String::new() }; @@ -120,6 +121,9 @@ fn dump_matched_mir_node<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, writeln!(file, "// source = {:?}", source)?; writeln!(file, "// pass_name = {}", pass_name)?; writeln!(file, "// disambiguator = {}", disambiguator)?; + if let Some(ref layout) = mir.generator_layout { + writeln!(file, "// generator_layout = {:?}", layout)?; + } writeln!(file, "")?; write_mir_fn(tcx, source, mir, &mut file)?; Ok(()) @@ -176,7 +180,7 @@ pub fn write_mir_fn<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } /// Write out a human-readable textual representation for the given basic block. 
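The `liveness_of_locals` routine added in `liveness.rs` above is a classic backward dataflow fixed point: `out[b]` is the union of the `in` sets of `b`'s successors, and `in[b] = use[b] ∪ (out[b] − def[b])`, iterated until nothing changes. As a rough illustration of the same equations outside of rustc (the `Block` struct, plain `HashSet`s, and the two-block example are invented for this sketch and are not rustc's MIR types):

```rust
use std::collections::HashSet;

// A toy basic block: which locals it defines, which it uses before defining
// them, and the indices of its successor blocks.
struct Block {
    defs: HashSet<usize>,
    uses: HashSet<usize>,
    succs: Vec<usize>,
}

// Backward liveness, iterated to a fixed point:
//   out[b] = ∪ in[s] over successors s
//   in[b]  = uses[b] ∪ (out[b] − defs[b])
fn liveness(blocks: &[Block]) -> (Vec<HashSet<usize>>, Vec<HashSet<usize>>) {
    let mut ins = vec![HashSet::new(); blocks.len()];
    let mut outs = vec![HashSet::new(); blocks.len()];
    loop {
        let mut changed = false;
        for b in (0..blocks.len()).rev() {
            let mut new_out = HashSet::new();
            for &s in &blocks[b].succs {
                new_out.extend(ins[s].iter().cloned());
            }
            let mut new_in: HashSet<usize> =
                new_out.difference(&blocks[b].defs).cloned().collect();
            new_in.extend(blocks[b].uses.iter().cloned());
            if new_out != outs[b] || new_in != ins[b] {
                outs[b] = new_out;
                ins[b] = new_in;
                changed = true;
            }
        }
        if !changed {
            return (ins, outs);
        }
    }
}

fn main() {
    // bb0: _1 = ...; goto -> bb1   (defines local 1)
    // bb1: use(_1); return         (uses local 1)
    let blocks = vec![
        Block {
            defs: [1usize].iter().cloned().collect(),
            uses: HashSet::new(),
            succs: vec![1],
        },
        Block {
            defs: HashSet::new(),
            uses: [1usize].iter().cloned().collect(),
            succs: vec![],
        },
    ];
    let (ins, outs) = liveness(&blocks);
    assert!(outs[0].contains(&1)); // _1 is live on exit from bb0
    assert!(ins[1].contains(&1));  // and live on entry to bb1
    println!("ins = {:?}, outs = {:?}", ins, outs);
}
```

On the two-block example this reports local 1 as live on exit from the first block and on entry to the second, which matches the per-block `ins`/`outs` sets that the new `dump_mir`/`write_mir_fn` helpers print around each basic block.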
-fn write_basic_block(tcx: TyCtxt, +pub fn write_basic_block(tcx: TyCtxt, block: BasicBlock, mir: &Mir, w: &mut Write) @@ -184,7 +188,9 @@ fn write_basic_block(tcx: TyCtxt, let data = &mir[block]; // Basic block label at the top. - writeln!(w, "{}{:?}: {{", INDENT, block)?; + let cleanup_text = if data.is_cleanup { " // cleanup" } else { "" }; + let lbl = format!("{}{:?}: {{", INDENT, block); + writeln!(w, "{0:1$}{2}", lbl, ALIGN, cleanup_text)?; // List of statements in the middle. let mut current_location = Location { block: block, statement_index: 0 }; @@ -274,7 +280,7 @@ fn write_scope_tree(tcx: TyCtxt, /// Write out a human-readable textual representation of the MIR's `fn` type and the types of its /// local variables (both user-defined bindings and compiler temporaries). -fn write_mir_intro<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, +pub fn write_mir_intro<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource, mir: &Mir, w: &mut Write) @@ -322,28 +328,34 @@ fn write_mir_sig(tcx: TyCtxt, src: MirSource, mir: &Mir, w: &mut Write) MirSource::Const(_) => write!(w, "const")?, MirSource::Static(_, hir::MutImmutable) => write!(w, "static")?, MirSource::Static(_, hir::MutMutable) => write!(w, "static mut")?, - MirSource::Promoted(_, i) => write!(w, "{:?} in", i)? + MirSource::Promoted(_, i) => write!(w, "{:?} in", i)?, + MirSource::GeneratorDrop(_) => write!(w, "drop_glue")?, } item_path::with_forced_impl_filename_line(|| { // see notes on #41697 elsewhere write!(w, " {}", tcx.node_path_str(src.item_id())) })?; - if let MirSource::Fn(_) = src { - write!(w, "(")?; - - // fn argument types. - for (i, arg) in mir.args_iter().enumerate() { - if i != 0 { - write!(w, ", ")?; + match src { + MirSource::Fn(_) | MirSource::GeneratorDrop(_) => { + write!(w, "(")?; + + // fn argument types. + for (i, arg) in mir.args_iter().enumerate() { + if i != 0 { + write!(w, ", ")?; + } + write!(w, "{:?}: {}", Lvalue::Local(arg), mir.local_decls[arg].ty)?; } - write!(w, "{:?}: {}", Lvalue::Local(arg), mir.local_decls[arg].ty)?; - } - write!(w, ") -> {}", mir.return_ty) - } else { - assert_eq!(mir.arg_count, 0); - write!(w, ": {} =", mir.return_ty) + write!(w, ") -> {}", mir.return_ty) + } + MirSource::Const(..) | + MirSource::Static(..) | + MirSource::Promoted(..) => { + assert_eq!(mir.arg_count, 0); + write!(w, ": {} =", mir.return_ty) + } } } diff --git a/src/librustc_passes/ast_validation.rs b/src/librustc_passes/ast_validation.rs index ffe444933a..efb5b03180 100644 --- a/src/librustc_passes/ast_validation.rs +++ b/src/librustc_passes/ast_validation.rs @@ -122,14 +122,6 @@ impl<'a> AstValidator<'a> { } impl<'a> Visitor<'a> for AstValidator<'a> { - fn visit_lifetime(&mut self, lt: &'a Lifetime) { - if lt.ident.name == "'_" { - self.err_handler().span_err(lt.span, &format!("invalid lifetime name `{}`", lt.ident)); - } - - visit::walk_lifetime(self, lt) - } - fn visit_expr(&mut self, expr: &'a Expr) { match expr.node { ExprKind::While(.., Some(ident)) | diff --git a/src/librustc_passes/consts.rs b/src/librustc_passes/consts.rs index 763f885b4d..547d63fc3d 100644 --- a/src/librustc_passes/consts.rs +++ b/src/librustc_passes/consts.rs @@ -56,6 +56,7 @@ use std::cmp::Ordering; struct CheckCrateVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, in_fn: bool, + in_static: bool, promotable: bool, mut_rvalue_borrows: NodeSet, param_env: ty::ParamEnv<'tcx>, @@ -87,19 +88,14 @@ impl<'a, 'gcx> CheckCrateVisitor<'a, 'gcx> { } } - // Adds the worst effect out of all the values of one type. 
- fn add_type(&mut self, ty: Ty<'gcx>) { - if !ty.is_freeze(self.tcx, self.param_env, DUMMY_SP) { - self.promotable = false; - } - - if ty.needs_drop(self.tcx, self.param_env) { - self.promotable = false; - } + // Returns true iff all the values of the type are promotable. + fn type_has_only_promotable_values(&mut self, ty: Ty<'gcx>) -> bool { + ty.is_freeze(self.tcx, self.param_env, DUMMY_SP) && + !ty.needs_drop(self.tcx, self.param_env) } fn handle_const_fn_call(&mut self, def_id: DefId, ret_ty: Ty<'gcx>) { - self.add_type(ret_ty); + self.promotable &= self.type_has_only_promotable_values(ret_ty); self.promotable &= if let Some(fn_id) = self.tcx.hir.as_local_node_id(def_id) { FnLikeNode::from_node(self.tcx.hir.get(fn_id)).map_or(false, |fn_like| { @@ -133,10 +129,16 @@ impl<'a, 'tcx> Visitor<'tcx> for CheckCrateVisitor<'a, 'tcx> { let outer_param_env = self.param_env; let outer_identity_substs = self.identity_substs; - self.in_fn = match MirSource::from_node(self.tcx, item_id) { - MirSource::Fn(_) => true, - _ => false + self.in_fn = false; + self.in_static = false; + + match MirSource::from_node(self.tcx, item_id) { + MirSource::Fn(_) => self.in_fn = true, + MirSource::Static(_, _) => self.in_static = true, + _ => {} }; + + self.tables = self.tcx.typeck_tables_of(item_def_id); self.param_env = self.tcx.param_env(item_def_id); self.identity_substs = Substs::identity_for_item(self.tcx, item_def_id); @@ -148,8 +150,8 @@ impl<'a, 'tcx> Visitor<'tcx> for CheckCrateVisitor<'a, 'tcx> { let tcx = self.tcx; let param_env = self.param_env; - let region_maps = self.tcx.region_maps(item_def_id); - euv::ExprUseVisitor::new(self, tcx, param_env, &region_maps, self.tables) + let region_scope_tree = self.tcx.region_scope_tree(item_def_id); + euv::ExprUseVisitor::new(self, tcx, param_env, &region_scope_tree, self.tables) .consume_body(body); self.visit_body(body); @@ -332,21 +334,61 @@ fn check_expr<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>, e: &hir::Expr, node let def = v.tables.qpath_def(qpath, e.hir_id); match def { Def::VariantCtor(..) | Def::StructCtor(..) | - Def::Fn(..) | Def::Method(..) => {} - Def::AssociatedConst(_) => v.add_type(node_ty), - Def::Const(did) => { - v.promotable &= if let Some(node_id) = v.tcx.hir.as_local_node_id(did) { - match v.tcx.hir.expect_item(node_id).node { - hir::ItemConst(_, body) => { + Def::Fn(..) | Def::Method(..) => {} + + // References to a static that are themselves within a static + // are inherently promotable with the exception + // of "#[thread_local]" statics, which may not + // outlive the current function. + Def::Static(did, _) => { + + if v.in_static { + let mut thread_local = false; + + for attr in &v.tcx.get_attrs(did)[..] { + if attr.check_name("thread_local") { + debug!("Reference to Static(id={:?}) is unpromotable \ due to a #[thread_local] attribute", did); + v.promotable = false; + thread_local = true; + break; + } + } + + if !thread_local { + debug!("Allowing promotion of reference to Static(id={:?})", did); + } + } else { + debug!("Reference to Static(id={:?}) is unpromotable as it is not \ referenced from a static", did); + v.promotable = false; + + } + } + + Def::Const(did) | + Def::AssociatedConst(did) => { + let promotable = if v.tcx.trait_of_item(did).is_some() { + // Don't peek inside trait associated constants.
+ false + } else if let Some(node_id) = v.tcx.hir.as_local_node_id(did) { + match v.tcx.hir.maybe_body_owned_by(node_id) { + Some(body) => { v.visit_nested_body(body); v.tcx.rvalue_promotable_to_static.borrow()[&body.node_id] } - _ => false + None => false } } else { v.tcx.const_is_rvalue_promotable_to_static(did) }; + + // Just in case the type is more specific than the definition, + // e.g. impl associated const with type parameters, check it. + // Also, trait associated consts are relaxed by this. + v.promotable &= promotable || v.type_has_only_promotable_values(node_ty); } + _ => { v.promotable = false; } @@ -396,7 +438,7 @@ fn check_expr<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>, e: &hir::Expr, node hir::ExprStruct(..) => { if let ty::TyAdt(adt, ..) = v.tables.expr_ty(e).sty { // unsafe_cell_type doesn't necessarily exist with no_core - if Some(adt.did) == v.tcx.lang_items.unsafe_cell_type() { + if Some(adt.did) == v.tcx.lang_items().unsafe_cell_type() { v.promotable = false; } } @@ -435,6 +477,9 @@ fn check_expr<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>, e: &hir::Expr, node hir::ExprAgain(_) | hir::ExprRet(_) | + // Generator expressions + hir::ExprYield(_) | + // Expressions with side-effects. hir::ExprAssign(..) | hir::ExprAssignOp(..) | @@ -473,6 +518,7 @@ pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { tcx, tables: &ty::TypeckTables::empty(None), in_fn: false, + in_static: false, promotable: false, mut_rvalue_borrows: NodeSet(), param_env: ty::ParamEnv::empty(Reveal::UserFacing), diff --git a/src/librustc_passes/loops.rs b/src/librustc_passes/loops.rs index 1b2a07cd1a..c23f28fe22 100644 --- a/src/librustc_passes/loops.rs +++ b/src/librustc_passes/loops.rs @@ -81,7 +81,7 @@ impl<'a, 'hir> Visitor<'hir> for CheckLoopVisitor<'a, 'hir> { hir::ExprLoop(ref b, _, source) => { self.with_context(Loop(LoopKind::Loop(source)), |v| v.visit_block(&b)); } - hir::ExprClosure(.., b, _) => { + hir::ExprClosure(.., b, _, _) => { self.with_context(Closure, |v| v.visit_nested_body(b)); } hir::ExprBreak(label, ref opt_expr) => { diff --git a/src/librustc_passes/mir_stats.rs b/src/librustc_passes/mir_stats.rs index 9975082c55..1fa4961458 100644 --- a/src/librustc_passes/mir_stats.rs +++ b/src/librustc_passes/mir_stats.rs @@ -13,7 +13,6 @@ // completely accurate (some things might be counted twice, others missed). use rustc_const_math::{ConstUsize}; -use rustc::middle::const_val::{ConstVal}; use rustc::mir::{AggregateKind, AssertMessage, BasicBlock, BasicBlockData}; use rustc::mir::{Constant, Literal, Location, LocalDecl}; use rustc::mir::{Lvalue, LvalueElem, LvalueProjection}; @@ -21,7 +20,7 @@ use rustc::mir::{Mir, Operand, ProjectionElem}; use rustc::mir::{Rvalue, SourceInfo, Statement, StatementKind}; use rustc::mir::{Terminator, TerminatorKind, VisibilityScope, VisibilityScopeData}; use rustc::mir::visit as mir_visit; -use rustc::ty::{ClosureSubsts, TyCtxt}; +use rustc::ty::{self, ClosureSubsts, TyCtxt}; use rustc::util::nodemap::{FxHashMap}; struct NodeData { @@ -120,6 +119,8 @@ impl<'a, 'tcx> mir_visit::Visitor<'tcx> for StatCollector<'a, 'tcx> { TerminatorKind::DropAndReplace { .. } => "TerminatorKind::DropAndReplace", TerminatorKind::Call { .. } => "TerminatorKind::Call", TerminatorKind::Assert { .. } => "TerminatorKind::Assert", + TerminatorKind::GeneratorDrop => "TerminatorKind::GeneratorDrop", + TerminatorKind::Yield { .. 
} => "TerminatorKind::Yield", }, kind); self.super_terminator_kind(block, kind, location); } @@ -131,6 +132,12 @@ impl<'a, 'tcx> mir_visit::Visitor<'tcx> for StatCollector<'a, 'tcx> { self.record(match *msg { AssertMessage::BoundsCheck { .. } => "AssertMessage::BoundsCheck", AssertMessage::Math(..) => "AssertMessage::Math", + AssertMessage::GeneratorResumedAfterReturn => { + "AssertMessage::GeneratorResumedAfterReturn" + } + AssertMessage::GeneratorResumedAfterPanic => { + "AssertMessage::GeneratorResumedAfterPanic" + } }, msg); self.super_assert_message(msg, location); } @@ -158,6 +165,7 @@ impl<'a, 'tcx> mir_visit::Visitor<'tcx> for StatCollector<'a, 'tcx> { AggregateKind::Tuple => "AggregateKind::Tuple", AggregateKind::Adt(..) => "AggregateKind::Adt", AggregateKind::Closure(..) => "AggregateKind::Closure", + AggregateKind::Generator(..) => "AggregateKind::Generator", }, kind); "Rvalue::Aggregate" @@ -227,7 +235,6 @@ impl<'a, 'tcx> mir_visit::Visitor<'tcx> for StatCollector<'a, 'tcx> { location: Location) { self.record("Literal", literal); self.record(match *literal { - Literal::Item { .. } => "Literal::Item", Literal::Value { .. } => "Literal::Value", Literal::Promoted { .. } => "Literal::Promoted", }, literal); @@ -247,11 +254,11 @@ impl<'a, 'tcx> mir_visit::Visitor<'tcx> for StatCollector<'a, 'tcx> { self.super_closure_substs(substs); } - fn visit_const_val(&mut self, - const_val: &ConstVal, - _: Location) { - self.record("ConstVal", const_val); - self.super_const_val(const_val); + fn visit_const(&mut self, + constant: &&'tcx ty::Const<'tcx>, + _: Location) { + self.record("Const", constant); + self.super_const(constant); } fn visit_const_usize(&mut self, diff --git a/src/librustc_platform_intrinsics/powerpc.rs b/src/librustc_platform_intrinsics/powerpc.rs index 1a2e8e9c5d..93ee9fe06d 100644 --- a/src/librustc_platform_intrinsics/powerpc.rs +++ b/src/librustc_platform_intrinsics/powerpc.rs @@ -337,6 +337,116 @@ pub fn find(name: &str) -> Option { output: &::I32x4, definition: Named("llvm.ppc.altivec.vupkhsh") }, + "_vec_madds" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::I16x8, &::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.ppc.altivec.vmhaddshs") + }, + "_vec_msumubm" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U32x4]; &INPUTS }, + output: &::U32x4, + definition: Named("llvm.ppc.altivec.vmsumubm") + }, + "_vec_msumuhm" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::U16x8, &::U16x8, &::U32x4]; &INPUTS }, + output: &::U32x4, + definition: Named("llvm.ppc.altivec.vmsumuhm") + }, + "_vec_msummbm" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::U8x16, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.ppc.altivec.vmsummbm") + }, + "_vec_msumshm" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::I16x8, &::I16x8, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.ppc.altivec.vmsumshm") + }, + "_vec_msumshs" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::I16x8, &::I16x8, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.ppc.altivec.vmsumshs") + }, + "_vec_msumuhs" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::U16x8, &::U16x8, &::U32x4]; &INPUTS }, + output: &::U32x4, + definition: Named("llvm.ppc.altivec.vmsumuhs") + }, + "_vec_sum2s" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: 
&::I32x4, + definition: Named("llvm.ppc.altivec.vsum2sws") + }, + "_vec_sum4sbs" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.ppc.altivec.vsum4sbs") + }, + "_vec_sum4ubs" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U32x4]; &INPUTS }, + output: &::U32x4, + definition: Named("llvm.ppc.altivec.vsum4ubs") + }, + "_vec_sum4shs" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.ppc.altivec.vsum4shs") + }, + "_vec_sums" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.ppc.altivec.vsumsws") + }, + "_vec_madd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.ppc.altivec.vmaddfp") + }, + "_vec_nmsub" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.ppc.altivec.vnmsubfp") + }, + "_vec_expte" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.ppc.altivec.vexptefp") + }, + "_vec_floor" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.ppc.altivec.vrfim") + }, + "_vec_ceil" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.ppc.altivec.vrfip") + }, + "_vec_round" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.ppc.altivec.vrfin") + }, + "_vec_trunc" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.ppc.altivec.vrfiz") + }, + "_vec_loge" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.ppc.altivec.vlogefp") + }, + "_vec_re" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.ppc.altivec.vrefp") + }, + "_vec_rsqrte" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.ppc.altivec.vrsqrtefp") + }, _ => return None, }) } diff --git a/src/librustc_privacy/Cargo.toml b/src/librustc_privacy/Cargo.toml index 439fa661e0..c65312e9a8 100644 --- a/src/librustc_privacy/Cargo.toml +++ b/src/librustc_privacy/Cargo.toml @@ -10,5 +10,6 @@ crate-type = ["dylib"] [dependencies] rustc = { path = "../librustc" } +rustc_typeck = { path = "../librustc_typeck" } syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } diff --git a/src/librustc_privacy/lib.rs b/src/librustc_privacy/lib.rs index 772b16bbec..e7a1dd6b04 100644 --- a/src/librustc_privacy/lib.rs +++ b/src/librustc_privacy/lib.rs @@ -17,6 +17,7 @@ #[macro_use] extern crate rustc; #[macro_use] extern crate syntax; +extern crate rustc_typeck; extern crate syntax_pos; use rustc::hir::{self, PatKind}; @@ -325,8 +326,9 @@ impl<'a, 'tcx> Visitor<'tcx> for EmbargoVisitor<'a, 'tcx> { // This code is here instead of in visit_item so that the // crate module gets processed as well. 
if self.prev_level.is_some() { - if let Some(exports) = self.tcx.export_map.get(&id) { - for export in exports { + let def_id = self.tcx.hir.local_def_id(id); + if let Some(exports) = self.tcx.module_exports(def_id) { + for export in exports.iter() { if let Some(node_id) = self.tcx.hir.as_local_node_id(export.def.def_id()) { self.update(node_id, Some(AccessLevel::Exported)); } @@ -446,6 +448,7 @@ impl<'b, 'a, 'tcx> TypeVisitor<'tcx> for ReachEverythingInTheInterfaceVisitor<'b ty::TyProjection(ref proj) => Some(proj.item_def_id), ty::TyFnDef(def_id, ..) | ty::TyClosure(def_id, ..) | + ty::TyGenerator(def_id, ..) | ty::TyAnon(def_id, _) => Some(def_id), _ => None }; @@ -477,7 +480,7 @@ struct NamePrivacyVisitor<'a, 'tcx: 'a> { impl<'a, 'tcx> NamePrivacyVisitor<'a, 'tcx> { // Checks that a field is accessible. fn check_field(&mut self, span: Span, def: &'tcx ty::AdtDef, field: &'tcx ty::FieldDef) { - let ident = Ident { ctxt: span.ctxt.modern(), ..keywords::Invalid.ident() }; + let ident = Ident { ctxt: span.ctxt().modern(), ..keywords::Invalid.ident() }; let def_id = self.tcx.adjust_ident(ident, def.did, self.current_item).1; if !def.is_enum() && !field.vis.is_accessible_from(def_id, self.tcx) { struct_span_err!(self.tcx.sess, span, E0451, "field `{}` of {} `{}` is private", @@ -628,7 +631,7 @@ impl<'a, 'tcx> TypePrivacyVisitor<'a, 'tcx> { }; ty::Visibility::from_hir(vis, node_id, self.tcx) } - None => self.tcx.sess.cstore.visibility(did), + None => self.tcx.visibility(did), } } @@ -656,65 +659,6 @@ impl<'a, 'tcx> TypePrivacyVisitor<'a, 'tcx> { } false } - - fn check_item(&mut self, item_id: ast::NodeId) -> &mut Self { - self.current_item = self.tcx.hir.local_def_id(item_id); - self.span = self.tcx.hir.span(item_id); - self - } - - // Convenience methods for checking item interfaces - fn ty(&mut self) -> &mut Self { - self.tcx.type_of(self.current_item).visit_with(self); - self - } - - fn generics(&mut self) -> &mut Self { - for def in &self.tcx.generics_of(self.current_item).types { - if def.has_default { - self.tcx.type_of(def.def_id).visit_with(self); - } - } - self - } - - fn predicates(&mut self) -> &mut Self { - let predicates = self.tcx.predicates_of(self.current_item); - for predicate in &predicates.predicates { - predicate.visit_with(self); - match predicate { - &ty::Predicate::Trait(poly_predicate) => { - self.check_trait_ref(poly_predicate.skip_binder().trait_ref); - }, - &ty::Predicate::Projection(poly_predicate) => { - let tcx = self.tcx; - self.check_trait_ref( - poly_predicate.skip_binder().projection_ty.trait_ref(tcx) - ); - }, - _ => (), - }; - } - self - } - - fn impl_trait_ref(&mut self) -> &mut Self { - if let Some(impl_trait_ref) = self.tcx.impl_trait_ref(self.current_item) { - self.check_trait_ref(impl_trait_ref); - } - self.tcx.predicates_of(self.current_item).visit_with(self); - self - } - - fn check_trait_ref(&mut self, trait_ref: ty::TraitRef<'tcx>) -> bool { - if !self.item_is_accessible(trait_ref.def_id) { - let msg = format!("trait `{}` is private", trait_ref); - self.tcx.sess.span_err(self.span, &msg); - return true; - } - - trait_ref.super_visit_with(self) - } } impl<'a, 'tcx> Visitor<'tcx> for TypePrivacyVisitor<'a, 'tcx> { @@ -731,6 +675,35 @@ impl<'a, 'tcx> Visitor<'tcx> for TypePrivacyVisitor<'a, 'tcx> { self.tables = orig_tables; } + fn visit_ty(&mut self, hir_ty: &'tcx hir::Ty) { + self.span = hir_ty.span; + if let Some(ty) = self.tables.node_id_to_type_opt(hir_ty.hir_id) { + // Types in bodies. 
+ if ty.visit_with(self) { + return; + } + } else { + // Types in signatures. + // FIXME: This is very ineffective. Ideally each HIR type should be converted + // into a semantic type only once and the result should be cached somehow. + if rustc_typeck::hir_ty_to_ty(self.tcx, hir_ty).visit_with(self) { + return; + } + } + + intravisit::walk_ty(self, hir_ty); + } + + fn visit_trait_ref(&mut self, trait_ref: &'tcx hir::TraitRef) { + if !self.item_is_accessible(trait_ref.path.def.def_id()) { + let msg = format!("trait `{:?}` is private", trait_ref.path); + self.tcx.sess.span_err(self.span, &msg); + return; + } + + intravisit::walk_trait_ref(self, trait_ref); + } + // Check types of expressions fn visit_expr(&mut self, expr: &'tcx hir::Expr) { if self.check_expr_pat_type(expr.hir_id, expr.span) { @@ -805,63 +778,6 @@ impl<'a, 'tcx> Visitor<'tcx> for TypePrivacyVisitor<'a, 'tcx> { item.id, &mut self.tables, self.empty_tables); - - match item.node { - hir::ItemExternCrate(..) | hir::ItemMod(..) | - hir::ItemUse(..) | hir::ItemGlobalAsm(..) => {} - hir::ItemConst(..) | hir::ItemStatic(..) | - hir::ItemTy(..) | hir::ItemFn(..) => { - self.check_item(item.id).generics().predicates().ty(); - } - hir::ItemTrait(.., ref trait_item_refs) => { - self.check_item(item.id).generics().predicates(); - for trait_item_ref in trait_item_refs { - let check = self.check_item(trait_item_ref.id.node_id); - check.generics().predicates(); - if trait_item_ref.kind != hir::AssociatedItemKind::Type || - trait_item_ref.defaultness.has_value() { - check.ty(); - } - } - } - hir::ItemEnum(ref def, _) => { - self.check_item(item.id).generics().predicates(); - for variant in &def.variants { - for field in variant.node.data.fields() { - self.check_item(field.id).ty(); - } - } - } - hir::ItemForeignMod(ref foreign_mod) => { - for foreign_item in &foreign_mod.items { - self.check_item(foreign_item.id).generics().predicates().ty(); - } - } - hir::ItemStruct(ref struct_def, _) | - hir::ItemUnion(ref struct_def, _) => { - self.check_item(item.id).generics().predicates(); - for field in struct_def.fields() { - self.check_item(field.id).ty(); - } - } - hir::ItemDefaultImpl(..) 
=> { - self.check_item(item.id).impl_trait_ref(); - } - hir::ItemImpl(.., ref trait_ref, _, ref impl_item_refs) => { - { - let check = self.check_item(item.id); - check.ty().generics().predicates(); - if trait_ref.is_some() { - check.impl_trait_ref(); - } - } - for impl_item_ref in impl_item_refs { - let impl_item = self.tcx.hir.impl_item(impl_item_ref.id); - self.check_item(impl_item.id).generics().predicates().ty(); - } - } - } - self.current_item = self.tcx.hir.local_def_id(item.id); intravisit::walk_item(self, item); self.tables = orig_tables; @@ -922,8 +838,13 @@ impl<'a, 'tcx> TypeVisitor<'tcx> for TypePrivacyVisitor<'a, 'tcx> { } } ty::TyProjection(ref proj) => { - let tcx = self.tcx; - if self.check_trait_ref(proj.trait_ref(tcx)) { + let trait_ref = proj.trait_ref(self.tcx); + if !self.item_is_accessible(trait_ref.def_id) { + let msg = format!("trait `{}` is private", trait_ref); + self.tcx.sess.span_err(self.span, &msg); + return true; + } + if trait_ref.super_visit_with(self) { return true; } } diff --git a/src/librustc_resolve/build_reduced_graph.rs b/src/librustc_resolve/build_reduced_graph.rs index 47fa5357ab..a4d1ae1621 100644 --- a/src/librustc_resolve/build_reduced_graph.rs +++ b/src/librustc_resolve/build_reduced_graph.rs @@ -42,6 +42,7 @@ use syntax::ext::hygiene::Mark; use syntax::ext::tt::macro_rules; use syntax::parse::token; use syntax::symbol::keywords; +use syntax::symbol::Symbol; use syntax::visit::{self, Visitor}; use syntax_pos::{Span, DUMMY_SP}; @@ -253,7 +254,7 @@ impl<'a> Resolver<'a> { self.crate_loader.process_item(item, &self.definitions); // n.b. we don't need to look at the path option here, because cstore already did - let crate_id = self.session.cstore.extern_mod_stmt_cnum(item.id).unwrap(); + let crate_id = self.cstore.extern_mod_stmt_cnum_untracked(item.id).unwrap(); let module = self.get_module(DefId { krate: crate_id, index: CRATE_DEF_INDEX }); self.populate_module_if_necessary(module); @@ -449,7 +450,7 @@ impl<'a> Resolver<'a> { let ident = child.ident; let def = child.def; let def_id = def.def_id(); - let vis = self.session.cstore.visibility(def_id); + let vis = self.cstore.visibility_untracked(def_id); let span = child.span; let expansion = Mark::root(); // FIXME(jseyfried) intercrate hygiene match def { @@ -471,7 +472,7 @@ impl<'a> Resolver<'a> { self.define(parent, ident, ValueNS, (def, vis, DUMMY_SP, expansion)); if let Some(struct_def_id) = - self.session.cstore.def_key(def_id).parent + self.cstore.def_key(def_id).parent .map(|index| DefId { krate: def_id.krate, index: index }) { self.struct_constructors.insert(struct_def_id, (def, vis)); } @@ -485,12 +486,12 @@ impl<'a> Resolver<'a> { span); self.define(parent, ident, TypeNS, (module, vis, DUMMY_SP, expansion)); - for child in self.session.cstore.item_children(def_id, self.session) { + for child in self.cstore.item_children_untracked(def_id, self.session) { let ns = if let Def::AssociatedTy(..) = child.def { TypeNS } else { ValueNS }; self.define(module, child.ident, ns, (child.def, ty::Visibility::Public, DUMMY_SP, expansion)); - if self.session.cstore.associated_item_cloned(child.def.def_id()) + if self.cstore.associated_item_cloned_untracked(child.def.def_id()) .method_has_self_argument { self.has_self.insert(child.def.def_id()); } @@ -501,7 +502,7 @@ impl<'a> Resolver<'a> { self.define(parent, ident, TypeNS, (def, vis, DUMMY_SP, expansion)); // Record field names for error reporting. 
- let field_names = self.session.cstore.struct_field_names(def_id); + let field_names = self.cstore.struct_field_names_untracked(def_id); self.insert_field_names(def_id, field_names); } Def::Macro(..) => { @@ -516,20 +517,20 @@ impl<'a> Resolver<'a> { return self.module_map[&def_id] } - let macros_only = self.session.cstore.dep_kind(def_id.krate).macros_only(); + let macros_only = self.cstore.dep_kind_untracked(def_id.krate).macros_only(); if let Some(&module) = self.extern_module_map.get(&(def_id, macros_only)) { return module; } let (name, parent) = if def_id.index == CRATE_DEF_INDEX { - (self.session.cstore.crate_name(def_id.krate), None) + (self.cstore.crate_name_untracked(def_id.krate).as_str(), None) } else { - let def_key = self.session.cstore.def_key(def_id); + let def_key = self.cstore.def_key(def_id); (def_key.disambiguated_data.data.get_opt_name().unwrap(), Some(self.get_module(DefId { index: def_key.parent.unwrap(), ..def_id }))) }; - let kind = ModuleKind::Def(Def::Mod(def_id), name); + let kind = ModuleKind::Def(Def::Mod(def_id), Symbol::intern(&name)); let module = self.arenas.alloc_module(ModuleData::new(parent, kind, def_id, Mark::root(), DUMMY_SP)); self.extern_module_map.insert((def_id, macros_only), module); @@ -558,7 +559,7 @@ impl<'a> Resolver<'a> { return ext.clone(); } - let macro_def = match self.session.cstore.load_macro(def_id, &self.session) { + let macro_def = match self.cstore.load_macro_untracked(def_id, &self.session) { LoadedMacro::MacroDef(macro_def) => macro_def, LoadedMacro::ProcMacro(ext) => return ext, }; @@ -574,7 +575,8 @@ impl<'a> Resolver<'a> { /// is built, building it if it is not. pub fn populate_module_if_necessary(&mut self, module: Module<'a>) { if module.populated.get() { return } - for child in self.session.cstore.item_children(module.def_id().unwrap(), self.session) { + let def_id = module.def_id().unwrap(); + for child in self.cstore.item_children_untracked(def_id, self.session) { self.build_reduced_graph_for_external_crate_def(module, child); } module.populated.set(true) @@ -605,7 +607,8 @@ impl<'a> Resolver<'a> { span_err!(self.session, item.span, E0468, "an `extern crate` loading macros must be at the crate root"); } else if !self.use_extern_macros && !used && - self.session.cstore.dep_kind(module.def_id().unwrap().krate).macros_only() { + self.cstore.dep_kind_untracked(module.def_id().unwrap().krate) + .macros_only() { let msg = "proc macro crates and `#[no_link]` crates have no effect without \ `#[macro_use]`"; self.session.span_warn(item.span, msg); @@ -648,7 +651,7 @@ impl<'a> Resolver<'a> { } } for (name, span) in legacy_imports.reexports { - self.session.cstore.export_macros(module.def_id().unwrap().krate); + self.cstore.export_macros_untracked(module.def_id().unwrap().krate); let ident = Ident::with_empty_ctxt(name); let result = self.resolve_ident_in_module(module, ident, MacroNS, false, false, span); if let Ok(binding) = result { diff --git a/src/librustc_resolve/lib.rs b/src/librustc_resolve/lib.rs index ab3950fb6f..36cd69f91b 100644 --- a/src/librustc_resolve/lib.rs +++ b/src/librustc_resolve/lib.rs @@ -31,7 +31,7 @@ use self::RibKind::*; use rustc::hir::map::{Definitions, DefCollector}; use rustc::hir::{self, PrimTy, TyBool, TyChar, TyFloat, TyInt, TyUint, TyStr}; -use rustc::middle::cstore::CrateLoader; +use rustc::middle::cstore::{CrateStore, CrateLoader}; use rustc::session::Session; use rustc::lint; use rustc::hir::def::*; @@ -605,10 +605,8 @@ impl<'tcx> Visitor<'tcx> for UsePlacementFinder { ItemKind::Use(..) 
=> { // don't suggest placing a use before the prelude // import or other generated ones - if item.span.ctxt.outer().expn_info().is_none() { - let mut span = item.span; - span.hi = span.lo; - self.span = Some(span); + if item.span.ctxt().outer().expn_info().is_none() { + self.span = Some(item.span.with_hi(item.span.lo())); self.found_use = true; return; } @@ -617,19 +615,15 @@ impl<'tcx> Visitor<'tcx> for UsePlacementFinder { ItemKind::ExternCrate(_) => {} // but place them before the first other item _ => if self.span.map_or(true, |span| item.span < span ) { - if item.span.ctxt.outer().expn_info().is_none() { + if item.span.ctxt().outer().expn_info().is_none() { // don't insert between attributes and an item if item.attrs.is_empty() { - let mut span = item.span; - span.hi = span.lo; - self.span = Some(span); + self.span = Some(item.span.with_hi(item.span.lo())); } else { // find the first attribute on the item for attr in &item.attrs { if self.span.map_or(true, |span| attr.span < span) { - let mut span = attr.span; - span.hi = span.lo; - self.span = Some(span); + self.span = Some(attr.span.with_hi(attr.span.lo())); } } } @@ -1193,6 +1187,7 @@ impl PrimitiveTypeTable { /// The main resolver class. pub struct Resolver<'a> { session: &'a Session, + cstore: &'a CrateStore, pub definitions: Definitions, @@ -1360,7 +1355,7 @@ impl<'a, 'b: 'a> ty::DefIdTree for &'a Resolver<'b> { fn parent(self, id: DefId) -> Option { match id.krate { LOCAL_CRATE => self.definitions.def_key(id.index).parent, - _ => self.session.cstore.def_key(id).parent, + _ => self.cstore.def_key(id).parent, }.map(|index| DefId { index: index, ..id }) } } @@ -1400,6 +1395,7 @@ impl<'a> hir::lowering::Resolver for Resolver<'a> { impl<'a> Resolver<'a> { pub fn new(session: &'a Session, + cstore: &'a CrateStore, krate: &Crate, crate_name: &str, make_glob_map: MakeGlobMap, @@ -1431,6 +1427,8 @@ impl<'a> Resolver<'a> { Resolver { session, + cstore, + definitions, // The outermost module has def ID 0; this is not reflected in the @@ -1460,7 +1458,7 @@ impl<'a> Resolver<'a> { def_map: NodeMap(), freevars: NodeMap(), freevars_seen: NodeMap(), - export_map: NodeMap(), + export_map: FxHashMap(), trait_map: NodeMap(), module_map, block_map: NodeMap(), @@ -1745,7 +1743,7 @@ impl<'a> Resolver<'a> { fn resolve_self(&mut self, ctxt: &mut SyntaxContext, module: Module<'a>) -> Module<'a> { let mut module = self.get_module(module.normal_ancestor_id); - while module.span.ctxt.modern() != *ctxt { + while module.span.ctxt().modern() != *ctxt { let parent = module.parent.unwrap_or_else(|| self.macro_def_scope(ctxt.remove_mark())); module = self.get_module(parent.normal_ancestor_id); } @@ -2308,7 +2306,7 @@ impl<'a> Resolver<'a> { // must not add it if it's in the bindings map // because that breaks the assumptions later // passes make about or-patterns.) 
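// Illustrative sketch (editor's addition, not upstream code): the resolver
// hunks above switch from mutating `Span`'s fields to the accessor/builder
// API on `syntax_pos::Span` (`lo()`, `hi()`, `ctxt()`, `with_hi()`). A
// zero-width span anchored at an item's start, used as the `use`-suggestion
// insertion point, is now built like this (assumes `use syntax_pos::Span;`):

fn use_insertion_point(item_span: Span) -> Span {
    // previously: let mut s = item_span; s.hi = s.lo; s
    item_span.with_hi(item_span.lo())
}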
- let mut def = Def::Local(self.definitions.local_def_id(pat_id)); + let mut def = Def::Local(pat_id); match bindings.get(&ident.node).cloned() { Some(id) if id == outer_pat_id => { // `Variant(a, a)`, error @@ -2672,8 +2670,8 @@ impl<'a> Resolver<'a> { sp = sp.next_point(); if let Ok(snippet) = cm.span_to_snippet(sp.to(sp.next_point())) { debug!("snippet {:?}", snippet); - let line_sp = cm.lookup_char_pos(sp.hi).line; - let line_base_sp = cm.lookup_char_pos(base_span.lo).line; + let line_sp = cm.lookup_char_pos(sp.hi()).line; + let line_base_sp = cm.lookup_char_pos(base_span.lo()).line; debug!("{:?} {:?}", line_sp, line_base_sp); if snippet == ":" { err.span_label(base_span, @@ -2963,7 +2961,7 @@ impl<'a> Resolver<'a> { Def::Upvar(..) => { span_bug!(span, "unexpected {:?} in bindings", def) } - Def::Local(def_id) => { + Def::Local(node_id) => { for rib in ribs { match rib.kind { NormalRibKind | ModuleRibKind(..) | MacroDefinition(..) | @@ -2972,20 +2970,19 @@ impl<'a> Resolver<'a> { } ClosureRibKind(function_id) => { let prev_def = def; - let node_id = self.definitions.as_local_node_id(def_id).unwrap(); let seen = self.freevars_seen .entry(function_id) .or_insert_with(|| NodeMap()); if let Some(&index) = seen.get(&node_id) { - def = Def::Upvar(def_id, index, function_id); + def = Def::Upvar(node_id, index, function_id); continue; } let vec = self.freevars .entry(function_id) .or_insert_with(|| vec![]); let depth = vec.len(); - def = Def::Upvar(def_id, depth, function_id); + def = Def::Upvar(node_id, depth, function_id); if record_used { vec.push(Freevar { @@ -3060,7 +3057,7 @@ impl<'a> Resolver<'a> { } // Fields are generally expected in the same contexts as locals. - if filter_fn(Def::Local(DefId::local(CRATE_DEF_INDEX))) { + if filter_fn(Def::Local(ast::DUMMY_NODE_ID)) { if let Some(node_id) = self.current_self_type.as_ref().and_then(extract_node_id) { // Look for a field with the same name in the current self_type. 
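// Illustrative sketch (editor's addition, not upstream code): after the hunks
// above, `Def::Local` and `Def::Upvar` carry the binding's `ast::NodeId`
// directly, so the rib walk no longer converts through
// `Definitions::local_def_id` / `as_local_node_id`:

fn describe(def: Def) -> String {
    match def {
        Def::Local(node_id) =>
            format!("local binding {:?}", node_id),
        Def::Upvar(node_id, depth, closure_id) =>
            format!("upvar {:?} (depth {}) captured by {:?}", node_id, depth, closure_id),
        _ => format!("{:?}", def),
    }
}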
if let Some(resolution) = self.def_map.get(&node_id) { @@ -3373,7 +3370,7 @@ impl<'a> Resolver<'a> { for &(trait_name, binding) in traits.as_ref().unwrap().iter() { let module = binding.module().unwrap(); let mut ident = ident; - if ident.ctxt.glob_adjust(module.expansion, binding.span.ctxt.modern()).is_none() { + if ident.ctxt.glob_adjust(module.expansion, binding.span.ctxt().modern()).is_none() { continue } if self.resolve_ident_in_module_unadjusted(module, ident, ns, false, false, module.span) @@ -3598,7 +3595,7 @@ impl<'a> Resolver<'a> { new_binding: &NameBinding, old_binding: &NameBinding) { // Error on the second of two conflicting names - if old_binding.span.lo > new_binding.span.lo { + if old_binding.span.lo() > new_binding.span.lo() { return self.report_conflict(parent, ident, ns, old_binding, new_binding); } diff --git a/src/librustc_resolve/macros.rs b/src/librustc_resolve/macros.rs index f8dc341653..064032b888 100644 --- a/src/librustc_resolve/macros.rs +++ b/src/librustc_resolve/macros.rs @@ -402,7 +402,8 @@ impl<'a> Resolver<'a> { let ast::Path { ref segments, span } = *path; let path: Vec<_> = segments.iter().map(|seg| respan(seg.span, seg.identifier)).collect(); let invocation = self.invocations[&scope]; - self.current_module = invocation.module.get(); + let module = invocation.module.get(); + self.current_module = if module.is_trait() { module.parent.unwrap() } else { module }; if path.len() > 1 { if !self.use_extern_macros && self.gated_errors.insert(span) { @@ -777,7 +778,7 @@ impl<'a> Resolver<'a> { _ => return, }; - let crate_name = self.session.cstore.crate_name(krate); + let crate_name = self.cstore.crate_name_untracked(krate); self.session.struct_span_err(use_span, warn_msg) .help(&format!("instead, import the procedural macro like any other item: \ diff --git a/src/librustc_resolve/resolve_imports.rs b/src/librustc_resolve/resolve_imports.rs index 5616971e9d..b85bf18ea8 100644 --- a/src/librustc_resolve/resolve_imports.rs +++ b/src/librustc_resolve/resolve_imports.rs @@ -237,7 +237,7 @@ impl<'a> Resolver<'a> { } let module = unwrap_or!(directive.imported_module.get(), return Err(Undetermined)); let (orig_current_module, mut ident) = (self.current_module, ident.modern()); - match ident.ctxt.glob_adjust(module.expansion, directive.span.ctxt.modern()) { + match ident.ctxt.glob_adjust(module.expansion, directive.span.ctxt().modern()) { Some(Some(def)) => self.current_module = self.macro_def_scope(def), Some(None) => {} None => continue, @@ -398,7 +398,7 @@ impl<'a> Resolver<'a> { for directive in module.glob_importers.borrow_mut().iter() { let mut ident = ident.modern(); let scope = match ident.ctxt.reverse_glob_adjust(module.expansion, - directive.span.ctxt.modern()) { + directive.span.ctxt().modern()) { Some(Some(def)) => self.macro_def_scope(def), Some(None) => directive.parent, None => continue, @@ -800,7 +800,7 @@ impl<'a, 'b:'a> ImportResolver<'a, 'b> { }).collect::>(); for ((mut ident, ns), binding) in bindings { let scope = match ident.ctxt.reverse_glob_adjust(module.expansion, - directive.span.ctxt.modern()) { + directive.span.ctxt().modern()) { Some(Some(def)) => self.macro_def_scope(def), Some(None) => self.current_module, None => continue, @@ -844,7 +844,7 @@ impl<'a, 'b:'a> ImportResolver<'a, 'b> { let def = binding.def(); if def != Def::Err { if !def.def_id().is_local() { - self.session.cstore.export_macros(def.def_id().krate); + self.cstore.export_macros_untracked(def.def_id().krate); } if let Def::Macro(..) 
= def { if let Some(&span) = exported_macro_names.get(&ident.modern()) { @@ -892,8 +892,7 @@ impl<'a, 'b:'a> ImportResolver<'a, 'b> { if reexports.len() > 0 { if let Some(def_id) = module.def_id() { - let node_id = self.definitions.as_local_node_id(def_id).unwrap(); - self.export_map.insert(node_id, reexports); + self.export_map.insert(def_id, reexports); } } } diff --git a/src/librustc_save_analysis/dump_visitor.rs b/src/librustc_save_analysis/dump_visitor.rs index 619ebbc542..3e730cf836 100644 --- a/src/librustc_save_analysis/dump_visitor.rs +++ b/src/librustc_save_analysis/dump_visitor.rs @@ -1432,8 +1432,7 @@ impl<'l, 'tcx: 'l, 'll, O: DumpOutput + 'll> Visitor<'l> for DumpVisitor<'l, 'tc // process collected paths for &(id, ref p, immut) in &collector.collected_paths { match self.save_ctxt.get_path_def(id) { - HirDef::Local(def_id) => { - let id = self.tcx.hir.as_local_node_id(def_id).unwrap(); + HirDef::Local(id) => { let mut value = if immut == ast::Mutability::Immutable { self.span.snippet(p.span).to_string() } else { diff --git a/src/librustc_save_analysis/lib.rs b/src/librustc_save_analysis/lib.rs index 9b74df865d..1c6007966a 100644 --- a/src/librustc_save_analysis/lib.rs +++ b/src/librustc_save_analysis/lib.rs @@ -38,7 +38,7 @@ mod sig; use rustc::hir; use rustc::hir::def::Def as HirDef; use rustc::hir::map::{Node, NodeItem}; -use rustc::hir::def_id::DefId; +use rustc::hir::def_id::{LOCAL_CRATE, DefId}; use rustc::session::config::CrateType::CrateTypeExecutable; use rustc::ty::{self, TyCtxt}; use rustc_typeck::hir_ty_to_ty; @@ -91,13 +91,13 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { use rls_span::{Row, Column}; let cm = self.tcx.sess.codemap(); - let start = cm.lookup_char_pos(span.lo); - let end = cm.lookup_char_pos(span.hi); + let start = cm.lookup_char_pos(span.lo()); + let end = cm.lookup_char_pos(span.hi()); SpanData { file_name: start.file.name.clone().into(), - byte_start: span.lo.0, - byte_end: span.hi.0, + byte_start: span.lo().0, + byte_end: span.hi().0, line_start: Row::new_one_indexed(start.line as u32), line_end: Row::new_one_indexed(end.line as u32), column_start: Column::new_one_indexed(start.col.0 as u32 + 1), @@ -109,7 +109,7 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { pub fn get_external_crates(&self) -> Vec { let mut result = Vec::new(); - for n in self.tcx.sess.cstore.crates() { + for &n in self.tcx.crates().iter() { let span = match *self.tcx.extern_crate(n.as_def_id()) { Some(ref c) => c.span, None => { @@ -117,9 +117,9 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { continue; } }; - let lo_loc = self.span_utils.sess.codemap().lookup_char_pos(span.lo); + let lo_loc = self.span_utils.sess.codemap().lookup_char_pos(span.lo()); result.push(ExternalCrateData { - name: self.tcx.sess.cstore.crate_name(n).to_string(), + name: self.tcx.crate_name(n).to_string(), num: n.as_u32(), file_name: SpanUtils::make_path_string(&lo_loc.file.name), }); @@ -586,9 +586,9 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { self.tables.qpath_def(qpath, hir_id) } - Node::NodeBinding(&hir::Pat { node: hir::PatKind::Binding(_, def_id, ..), .. }) => { - HirDef::Local(def_id) - } + Node::NodeBinding(&hir::Pat { + node: hir::PatKind::Binding(_, canonical_id, ..), .. + }) => HirDef::Local(canonical_id), Node::NodeTy(ty) => { if let hir::Ty { node: hir::TyPath(ref qpath), .. 
} = *ty { @@ -616,8 +616,15 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { let sub_span = self.span_utils.span_for_last_ident(path.span); filter!(self.span_utils, sub_span, path.span, None); match def { - HirDef::Upvar(..) | - HirDef::Local(..) | + HirDef::Upvar(id, ..) | + HirDef::Local(id) => { + let span = self.span_from_span(sub_span.unwrap()); + Some(Ref { + kind: RefKind::Variable, + span, + ref_id: id_from_node_id(id, self), + }) + } HirDef::Static(..) | HirDef::Const(..) | HirDef::AssociatedConst(..) | @@ -999,7 +1006,7 @@ fn escape(s: String) -> String { // Helper function to determine if a span came from a // macro expansion or syntax extension. fn generated_code(span: Span) -> bool { - span.ctxt != NO_EXPANSION || span == DUMMY_SP + span.ctxt() != NO_EXPANSION || span == DUMMY_SP } // DefId::index is a newtype and so the JSON serialisation is ugly. Therefore @@ -1013,7 +1020,15 @@ fn id_from_def_id(id: DefId) -> rls_data::Id { fn id_from_node_id(id: NodeId, scx: &SaveContext) -> rls_data::Id { let def_id = scx.tcx.hir.opt_local_def_id(id); - def_id.map(|id| id_from_def_id(id)).unwrap_or_else(null_id) + def_id.map(|id| id_from_def_id(id)).unwrap_or_else(|| { + // Create a *fake* `DefId` out of a `NodeId` by subtracting the `NodeId` + // out of the maximum u32 value. This will work unless you have *billions* + // of definitions in a single crate (very unlikely to actually happen). + rls_data::Id { + krate: LOCAL_CRATE.as_u32(), + index: !id.as_u32(), + } + }) } fn null_id() -> rls_data::Id { diff --git a/src/librustc_save_analysis/span_utils.rs b/src/librustc_save_analysis/span_utils.rs index 36e4d09c96..b9d82b8e25 100644 --- a/src/librustc_save_analysis/span_utils.rs +++ b/src/librustc_save_analysis/span_utils.rs @@ -192,7 +192,7 @@ impl<'a> SpanUtils<'a> { prev = next; } if angle_count != 0 || bracket_count != 0 { - let loc = self.sess.codemap().lookup_char_pos(span.lo); + let loc = self.sess.codemap().lookup_char_pos(span.lo()); span_bug!(span, "Mis-counted brackets when breaking path? Parsing '{}' \ in {}, line {}", @@ -319,7 +319,7 @@ impl<'a> SpanUtils<'a> { }; //If the span comes from a fake filemap, filter it. - if !self.sess.codemap().lookup_char_pos(parent.lo).file.is_real_file() { + if !self.sess.codemap().lookup_char_pos(parent.lo()).file.is_real_file() { return true; } diff --git a/src/librustc_trans/Cargo.toml b/src/librustc_trans/Cargo.toml index 4a92a57986..482350d04b 100644 --- a/src/librustc_trans/Cargo.toml +++ b/src/librustc_trans/Cargo.toml @@ -10,6 +10,7 @@ crate-type = ["dylib"] test = false [dependencies] +bitflags = "1.0" num_cpus = "1.0" flate2 = "0.2" jobserver = "0.1.5" @@ -19,7 +20,6 @@ rustc-demangle = "0.1.4" rustc = { path = "../librustc" } rustc_allocator = { path = "../librustc_allocator" } rustc_back = { path = "../librustc_back" } -rustc_bitflags = { path = "../librustc_bitflags" } rustc_const_math = { path = "../librustc_const_math" } rustc_data_structures = { path = "../librustc_data_structures" } rustc_errors = { path = "../librustc_errors" } @@ -32,4 +32,4 @@ syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } [target."cfg(windows)".dependencies] -gcc = "0.3.50" +cc = "1.0" diff --git a/src/librustc_trans/README.md b/src/librustc_trans/README.md index cd43cbd705..b69d632a6a 100644 --- a/src/librustc_trans/README.md +++ b/src/librustc_trans/README.md @@ -1 +1,7 @@ -See [librustc/README.md](../librustc/README.md). +NB: This crate is part of the Rust compiler. 
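// Editor's note (not upstream code): the fallback in `id_from_node_id` above
// builds its fake index as `!id.as_u32()`, i.e. bitwise NOT, which equals
// `u32::MAX - id.as_u32()`. Fake ids therefore count down from the top of the
// u32 range and cannot collide with real `DefIndex` values unless a crate has
// billions of definitions, as the comment says. Equivalent sketch:

fn fake_index(node_id: u32) -> u32 {
    debug_assert_eq!(!node_id, u32::MAX - node_id);
    !node_id
}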
For an overview of the +compiler as a whole, see +[the README.md file found in `librustc`](../librustc/README.md). + +The `trans` crate contains the code to convert from MIR into LLVM IR, +and then from LLVM IR into machine code. In general it contains code +that runs towards the end of the compilation process. diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 5e50696b56..c3b6ede24b 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -11,7 +11,7 @@ use llvm::{self, ValueRef, AttributePlace}; use base; use builder::Builder; -use common::{instance_ty, ty_fn_sig, type_is_fat_ptr, C_uint}; +use common::{instance_ty, ty_fn_sig, type_is_fat_ptr, C_usize}; use context::CrateContext; use cabi_x86; use cabi_x86_64; @@ -37,6 +37,7 @@ use type_of; use rustc::hir; use rustc::ty::{self, Ty}; use rustc::ty::layout::{self, Layout, LayoutTyper, TyLayout, Size}; +use rustc_back::PanicStrategy; use libc::c_uint; use std::cmp; @@ -65,17 +66,17 @@ pub use self::attr_impl::ArgAttribute; mod attr_impl { // The subset of llvm::Attribute needed for arguments, packed into a bitfield. bitflags! { - #[derive(Default, Debug)] - flags ArgAttribute : u16 { - const ByVal = 1 << 0, - const NoAlias = 1 << 1, - const NoCapture = 1 << 2, - const NonNull = 1 << 3, - const ReadOnly = 1 << 4, - const SExt = 1 << 5, - const StructRet = 1 << 6, - const ZExt = 1 << 7, - const InReg = 1 << 8, + #[derive(Default)] + pub struct ArgAttribute: u16 { + const ByVal = 1 << 0; + const NoAlias = 1 << 1; + const NoCapture = 1 << 2; + const NonNull = 1 << 3; + const ReadOnly = 1 << 4; + const SExt = 1 << 5; + const StructRet = 1 << 6; + const ZExt = 1 << 7; + const InReg = 1 << 8; } } } @@ -527,7 +528,7 @@ impl<'a, 'tcx> ArgType<'tcx> { } let ccx = bcx.ccx; if self.is_indirect() { - let llsz = C_uint(ccx, self.layout.size(ccx).bytes()); + let llsz = C_usize(ccx, self.layout.size(ccx).bytes()); let llalign = self.layout.align(ccx).abi(); base::call_memcpy(bcx, dst, val, llsz, llalign as u32); } else if let Some(ty) = self.cast { @@ -564,7 +565,7 @@ impl<'a, 'tcx> ArgType<'tcx> { base::call_memcpy(bcx, bcx.pointercast(dst, Type::i8p(ccx)), bcx.pointercast(llscratch, Type::i8p(ccx)), - C_uint(ccx, self.layout.size(ccx).bytes()), + C_usize(ccx, self.layout.size(ccx).bytes()), cmp::min(self.layout.align(ccx).abi() as u32, llalign_of_min(ccx, ty))); @@ -612,7 +613,7 @@ pub struct FnType<'tcx> { impl<'a, 'tcx> FnType<'tcx> { pub fn of_instance(ccx: &CrateContext<'a, 'tcx>, instance: &ty::Instance<'tcx>) -> Self { - let fn_ty = instance_ty(ccx.shared(), &instance); + let fn_ty = instance_ty(ccx.tcx(), &instance); let sig = ty_fn_sig(ccx, fn_ty); let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig); Self::new(ccx, sig, &[]) @@ -750,9 +751,7 @@ impl<'a, 'tcx> FnType<'tcx> { Some(ty.boxed_ty()) } - ty::TyRef(b, mt) => { - use rustc::ty::{BrAnon, ReLateBound}; - + ty::TyRef(_, mt) => { // `&mut` pointer parameters never alias other parameters, or mutable global data // // `&T` where `T` contains no `UnsafeCell` is immutable, and can be marked as @@ -760,7 +759,17 @@ impl<'a, 'tcx> FnType<'tcx> { // on memory dependencies rather than pointer equality let is_freeze = ccx.shared().type_is_freeze(mt.ty); - if mt.mutbl != hir::MutMutable && is_freeze { + let no_alias_is_safe = + if ccx.shared().tcx().sess.opts.debugging_opts.mutable_noalias || + ccx.shared().tcx().sess.panic_strategy() == PanicStrategy::Abort { + // Mutable refrences or immutable shared references + mt.mutbl == hir::MutMutable || 
is_freeze + } else { + // Only immutable shared references + mt.mutbl != hir::MutMutable && is_freeze + }; + + if no_alias_is_safe { arg.attrs.set(ArgAttribute::NoAlias); } @@ -768,13 +777,6 @@ impl<'a, 'tcx> FnType<'tcx> { arg.attrs.set(ArgAttribute::ReadOnly); } - // When a reference in an argument has no named lifetime, it's - // impossible for that reference to escape this function - // (returned or stored beyond the call by a closure). - if let ReLateBound(_, BrAnon(_)) = *b { - arg.attrs.set(ArgAttribute::NoCapture); - } - Some(mt.ty) } _ => None diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index d1c1dd7436..23a45a7962 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -77,6 +77,12 @@ pub fn compute_fields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, if variant_index > 0 { bug!("{} is a closure, which only has one variant", t);} substs.upvar_tys(def_id, cx.tcx()).collect() }, + ty::TyGenerator(def_id, substs, _) => { + if variant_index > 0 { bug!("{} is a generator, which only has one variant", t);} + substs.field_tys(def_id, cx.tcx()).map(|t| { + cx.tcx().normalize_associated_type(&t) + }).collect() + }, _ => bug!("{} is not a type that can have fields.", t) } } @@ -391,11 +397,11 @@ pub fn trans_set_discr<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, val: Valu match *l { layout::CEnum{ discr, min, max, .. } => { assert_discr_in_range(min, max, to); - bcx.store(C_integral(Type::from_integer(bcx.ccx, discr), to, true), + bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64), val, None); } layout::General{ discr, .. } => { - bcx.store(C_integral(Type::from_integer(bcx.ccx, discr), to, true), + bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64), bcx.struct_gep(val, 0), None); } layout::Univariant { .. } @@ -417,7 +423,7 @@ pub fn trans_set_discr<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, val: Valu // than storing null to single target field. let llptr = bcx.pointercast(val, Type::i8(bcx.ccx).ptr_to()); let fill_byte = C_u8(bcx.ccx, 0); - let size = C_uint(bcx.ccx, nonnull.stride().bytes()); + let size = C_usize(bcx.ccx, nonnull.stride().bytes()); let align = C_i32(bcx.ccx, nonnull.align.abi() as i32); base::call_memset(bcx, llptr, fill_byte, size, align, false); } else { diff --git a/src/librustc_trans/assert_module_sources.rs b/src/librustc_trans/assert_module_sources.rs index 6e661a5a8c..c891bd8aaf 100644 --- a/src/librustc_trans/assert_module_sources.rs +++ b/src/librustc_trans/assert_module_sources.rs @@ -27,47 +27,32 @@ //! the HIR doesn't change as a result of the annotations, which might //! perturb the reuse results. 
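// Illustrative sketch (editor's addition, not upstream code): the
// `no_alias_is_safe` hunk in librustc_trans/abi.rs above reduces to this
// decision table. Under `-Z mutable-noalias` or `-C panic=abort`, `&mut T`
// may also be marked `noalias`; otherwise only freeze (no-`UnsafeCell`)
// shared references are:

fn no_alias_is_safe(is_mut: bool, is_freeze: bool,
                    mutable_noalias: bool, panic_abort: bool) -> bool {
    if mutable_noalias || panic_abort {
        is_mut || is_freeze     // mutable references or immutable shared references
    } else {
        !is_mut && is_freeze    // only immutable shared references
    }
}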
+use rustc::dep_graph::{DepNode, DepConstructor}; use rustc::ty::TyCtxt; use syntax::ast; - -use {ModuleSource, ModuleTranslation}; - use rustc::ich::{ATTR_PARTITION_REUSED, ATTR_PARTITION_TRANSLATED}; const MODULE: &'static str = "module"; const CFG: &'static str = "cfg"; #[derive(Debug, PartialEq, Clone, Copy)] -pub enum Disposition { Reused, Translated } - -impl ModuleTranslation { - pub fn disposition(&self) -> (String, Disposition) { - let disposition = match self.source { - ModuleSource::Preexisting(_) => Disposition::Reused, - ModuleSource::Translated(_) => Disposition::Translated, - }; +enum Disposition { Reused, Translated } - (self.name.clone(), disposition) - } -} - -pub(crate) fn assert_module_sources<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - modules: &[(String, Disposition)]) { +pub(crate) fn assert_module_sources<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { let _ignore = tcx.dep_graph.in_ignore(); if tcx.sess.opts.incremental.is_none() { return; } - let ams = AssertModuleSource { tcx: tcx, modules: modules }; + let ams = AssertModuleSource { tcx }; for attr in &tcx.hir.krate().attrs { ams.check_attr(attr); } } struct AssertModuleSource<'a, 'tcx: 'a> { - tcx: TyCtxt<'a, 'tcx, 'tcx>, - modules: &'a [(String, Disposition)], + tcx: TyCtxt<'a, 'tcx, 'tcx> } impl<'a, 'tcx> AssertModuleSource<'a, 'tcx> { @@ -86,32 +71,31 @@ impl<'a, 'tcx> AssertModuleSource<'a, 'tcx> { } let mname = self.field(attr, MODULE); - let mtrans = self.modules.iter().find(|&&(ref name, _)| name == mname.as_str()); - let mtrans = match mtrans { - Some(m) => m, - None => { - debug!("module name `{}` not found amongst:", mname); - for &(ref name, ref disposition) in self.modules { - debug!("module named `{}` with disposition {:?}", - name, - disposition); - } - self.tcx.sess.span_err( - attr.span, - &format!("no module named `{}`", mname)); - return; - } - }; + let dep_node = DepNode::new(self.tcx, + DepConstructor::CompileCodegenUnit(mname.as_str())); - let mtrans_disposition = mtrans.1; - if disposition != mtrans_disposition { - self.tcx.sess.span_err( - attr.span, - &format!("expected module named `{}` to be {:?} but is {:?}", - mname, - disposition, - mtrans_disposition)); + if let Some(loaded_from_cache) = self.tcx.dep_graph.was_loaded_from_cache(&dep_node) { + match (disposition, loaded_from_cache) { + (Disposition::Reused, false) => { + self.tcx.sess.span_err( + attr.span, + &format!("expected module named `{}` to be Reused but is Translated", + mname)); + } + (Disposition::Translated, true) => { + self.tcx.sess.span_err( + attr.span, + &format!("expected module named `{}` to be Translated but is Reused", + mname)); + } + (Disposition::Reused, true) | + (Disposition::Translated, false) => { + // These are what we would expect. 
+ } + } + } else { + self.tcx.sess.span_err(attr.span, &format!("no module named `{}`", mname)); } } diff --git a/src/librustc_trans/back/archive.rs b/src/librustc_trans/back/archive.rs index 0d39db9e10..179ef20b19 100644 --- a/src/librustc_trans/back/archive.rs +++ b/src/librustc_trans/back/archive.rs @@ -17,6 +17,7 @@ use std::path::{Path, PathBuf}; use std::ptr; use std::str; +use back::bytecode::RLIB_BYTECODE_EXTENSION; use libc; use llvm::archive_ro::{ArchiveRO, Child}; use llvm::{self, ArchiveKind}; @@ -154,12 +155,9 @@ impl<'a> ArchiveBuilder<'a> { // might be also an extra name suffix let obj_start = format!("{}", name); - // Ignoring all bytecode files, no matter of - // name - let bc_ext = ".bytecode.deflate"; - self.add_archive(rlib, move |fname: &str| { - if fname.ends_with(bc_ext) || fname == METADATA_FILENAME { + // Ignore bytecode/metadata files, no matter the name. + if fname.ends_with(RLIB_BYTECODE_EXTENSION) || fname == METADATA_FILENAME { return true } diff --git a/src/librustc_trans/back/bytecode.rs b/src/librustc_trans/back/bytecode.rs new file mode 100644 index 0000000000..55c96322a9 --- /dev/null +++ b/src/librustc_trans/back/bytecode.rs @@ -0,0 +1,160 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Management of the encoding of LLVM bytecode into rlibs +//! +//! This module contains the management of encoding LLVM bytecode into rlibs, +//! primarily for the usage in LTO situations. Currently the compiler will +//! unconditionally encode LLVM-IR into rlibs regardless of what's happening +//! elsewhere, so we currently compress the bytecode via deflate to avoid taking +//! up too much space on disk. +//! +//! After compressing the bytecode we then have the rest of the format to +//! basically deal with various bugs in various archive implementations. The +//! format currently is: +//! +//! RLIB LLVM-BYTECODE OBJECT LAYOUT +//! Version 2 +//! Bytes Data +//! 0..10 "RUST_OBJECT" encoded in ASCII +//! 11..14 format version as little-endian u32 +//! 15..19 the length of the module identifier string +//! 20..n the module identifier string +//! n..n+8 size in bytes of deflate compressed LLVM bitcode as +//! little-endian u64 +//! n+9.. compressed LLVM bitcode +//! ? maybe a byte to make this whole thing even length + +use std::io::{Read, Write}; +use std::ptr; +use std::str; + +use flate2::Compression; +use flate2::read::DeflateDecoder; +use flate2::write::DeflateEncoder; + +// This is the "magic number" expected at the beginning of a LLVM bytecode +// object in an rlib. 
+pub const RLIB_BYTECODE_OBJECT_MAGIC: &'static [u8] = b"RUST_OBJECT"; + +// The version number this compiler will write to bytecode objects in rlibs +pub const RLIB_BYTECODE_OBJECT_VERSION: u8 = 2; + +pub const RLIB_BYTECODE_EXTENSION: &str = "bytecode.encoded"; + +pub fn encode(identifier: &str, bytecode: &[u8]) -> Vec { + let mut encoded = Vec::new(); + + // Start off with the magic string + encoded.extend_from_slice(RLIB_BYTECODE_OBJECT_MAGIC); + + // Next up is the version + encoded.extend_from_slice(&[RLIB_BYTECODE_OBJECT_VERSION, 0, 0, 0]); + + // Next is the LLVM module identifier length + contents + let identifier_len = identifier.len(); + encoded.extend_from_slice(&[ + (identifier_len >> 0) as u8, + (identifier_len >> 8) as u8, + (identifier_len >> 16) as u8, + (identifier_len >> 24) as u8, + ]); + encoded.extend_from_slice(identifier.as_bytes()); + + // Next is the LLVM module deflate compressed, prefixed with its length. We + // don't know its length yet, so fill in 0s + let deflated_size_pos = encoded.len(); + encoded.extend_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]); + + let before = encoded.len(); + DeflateEncoder::new(&mut encoded, Compression::Fast) + .write_all(bytecode) + .unwrap(); + let after = encoded.len(); + + // Fill in the length we reserved space for before + let bytecode_len = (after - before) as u64; + encoded[deflated_size_pos + 0] = (bytecode_len >> 0) as u8; + encoded[deflated_size_pos + 1] = (bytecode_len >> 8) as u8; + encoded[deflated_size_pos + 2] = (bytecode_len >> 16) as u8; + encoded[deflated_size_pos + 3] = (bytecode_len >> 24) as u8; + encoded[deflated_size_pos + 4] = (bytecode_len >> 32) as u8; + encoded[deflated_size_pos + 5] = (bytecode_len >> 40) as u8; + encoded[deflated_size_pos + 6] = (bytecode_len >> 48) as u8; + encoded[deflated_size_pos + 7] = (bytecode_len >> 56) as u8; + + // If the number of bytes written to the object so far is odd, add a + // padding byte to make it even. 
This works around a crash bug in LLDB + // (see issue #15950) + if encoded.len() % 2 == 1 { + encoded.push(0); + } + + return encoded +} + +pub struct DecodedBytecode<'a> { + identifier: &'a str, + encoded_bytecode: &'a [u8], +} + +impl<'a> DecodedBytecode<'a> { + pub fn new(data: &'a [u8]) -> Result, String> { + if !data.starts_with(RLIB_BYTECODE_OBJECT_MAGIC) { + return Err(format!("magic bytecode prefix not found")) + } + let data = &data[RLIB_BYTECODE_OBJECT_MAGIC.len()..]; + if !data.starts_with(&[RLIB_BYTECODE_OBJECT_VERSION, 0, 0, 0]) { + return Err(format!("wrong version prefix found in bytecode")) + } + let data = &data[4..]; + if data.len() < 4 { + return Err(format!("bytecode corrupted")) + } + let identifier_len = unsafe { + u32::from_le(ptr::read_unaligned(data.as_ptr() as *const u32)) as usize + }; + let data = &data[4..]; + if data.len() < identifier_len { + return Err(format!("bytecode corrupted")) + } + let identifier = match str::from_utf8(&data[..identifier_len]) { + Ok(s) => s, + Err(_) => return Err(format!("bytecode corrupted")) + }; + let data = &data[identifier_len..]; + if data.len() < 8 { + return Err(format!("bytecode corrupted")) + } + let bytecode_len = unsafe { + u64::from_le(ptr::read_unaligned(data.as_ptr() as *const u64)) as usize + }; + let data = &data[8..]; + if data.len() < bytecode_len { + return Err(format!("bytecode corrupted")) + } + let encoded_bytecode = &data[..bytecode_len]; + + Ok(DecodedBytecode { + identifier, + encoded_bytecode, + }) + } + + pub fn bytecode(&self) -> Vec { + let mut data = Vec::new(); + DeflateDecoder::new(self.encoded_bytecode).read_to_end(&mut data).unwrap(); + return data + } + + pub fn identifier(&self) -> &'a str { + self.identifier + } +} diff --git a/src/librustc_trans/back/command.rs b/src/librustc_trans/back/command.rs new file mode 100644 index 0000000000..ea68e3b28b --- /dev/null +++ b/src/librustc_trans/back/command.rs @@ -0,0 +1,114 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A thin wrapper around `Command` in the standard library which allows us to +//! read the arguments that are built up. 
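// Editor's sketch (not upstream code): byte offsets of the "Version 2" rlib
// bytecode object produced by `back::bytecode::encode` above, written as
// half-open ranges with m = identifier length and n = deflated bitcode
// length, all integers little-endian:
//
//   [0, 11)          b"RUST_OBJECT"
//   [11, 15)         u32 format version (currently 2)
//   [15, 19)         u32 identifier length m
//   [19, 19+m)       module identifier bytes
//   [19+m, 27+m)     u64 deflate-compressed bitcode length n
//   [27+m, 27+m+n)   deflate-compressed LLVM bitcode
//   optionally one 0x00 pad byte so the total length is even
//
// Illustrative round trip through the pair of APIs defined above:

fn roundtrip(llmod_id: &str, bitcode: &[u8]) {
    let encoded = bytecode::encode(llmod_id, bitcode);
    let decoded = bytecode::DecodedBytecode::new(&encoded).unwrap();
    assert_eq!(decoded.identifier(), llmod_id);
    assert_eq!(decoded.bytecode(), bitcode);
}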
+ +use std::ffi::{OsStr, OsString}; +use std::fmt; +use std::io; +use std::process::{self, Output, Child}; + +pub struct Command { + program: OsString, + args: Vec, + env: Vec<(OsString, OsString)>, +} + +impl Command { + pub fn new>(program: P) -> Command { + Command::_new(program.as_ref()) + } + + fn _new(program: &OsStr) -> Command { + Command { + program: program.to_owned(), + args: Vec::new(), + env: Vec::new(), + } + } + + pub fn arg>(&mut self, arg: P) -> &mut Command { + self._arg(arg.as_ref()); + self + } + + pub fn args(&mut self, args: I) -> &mut Command + where I: IntoIterator, + I::Item: AsRef, + { + for arg in args { + self._arg(arg.as_ref()); + } + self + } + + fn _arg(&mut self, arg: &OsStr) { + self.args.push(arg.to_owned()); + } + + pub fn env(&mut self, key: K, value: V) -> &mut Command + where K: AsRef, + V: AsRef + { + self._env(key.as_ref(), value.as_ref()); + self + } + + pub fn envs(&mut self, envs: I) -> &mut Command + where I: IntoIterator, + K: AsRef, + V: AsRef + { + for (key, value) in envs { + self._env(key.as_ref(), value.as_ref()); + } + self + } + + fn _env(&mut self, key: &OsStr, value: &OsStr) { + self.env.push((key.to_owned(), value.to_owned())); + } + + pub fn output(&mut self) -> io::Result { + self.command().output() + } + + pub fn spawn(&mut self) -> io::Result { + self.command().spawn() + } + + pub fn command(&self) -> process::Command { + let mut ret = process::Command::new(&self.program); + ret.args(&self.args); + ret.envs(self.env.clone()); + return ret + } + + // extensions + + pub fn get_program(&self) -> &OsStr { + &self.program + } + + pub fn get_args(&self) -> &[OsString] { + &self.args + } + + pub fn get_env(&self) -> &[(OsString, OsString)] { + &self.env + } +} + +impl fmt::Debug for Command { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.command().fmt(f) + } +} diff --git a/src/librustc_trans/back/link.rs b/src/librustc_trans/back/link.rs index 5de48fbce9..3f25c182fa 100644 --- a/src/librustc_trans/back/link.rs +++ b/src/librustc_trans/back/link.rs @@ -8,29 +8,26 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
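// Illustrative sketch (editor's addition, not upstream code): typical use of
// the `back::command::Command` wrapper defined above. Arguments and
// environment are recorded so the linker invocation can be inspected (and,
// as in `exec_linker` later in this patch, rewritten into an `@`-file)
// before a real `std::process::Command` is built:

fn sketch_linker_invocation() -> std::io::Result<std::process::Output> {
    let mut cmd = Command::new("cc");
    cmd.arg("-o").arg("out");
    cmd.args(&["a.o", "b.o"]);
    cmd.env("PATH", "/usr/bin");
    assert_eq!(cmd.get_args().len(), 4);
    cmd.output()   // only here is the underlying process::Command created and run
}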
-extern crate rustc_trans_utils; - use super::archive::{ArchiveBuilder, ArchiveConfig}; +use super::bytecode::{self, RLIB_BYTECODE_EXTENSION}; use super::linker::Linker; +use super::command::Command; use super::rpath::RPathConfig; use super::rpath; use metadata::METADATA_FILENAME; -use rustc::session::config::{self, NoDebugInfo, OutputFilenames, OutputType}; +use rustc::session::config::{self, NoDebugInfo, OutputFilenames, OutputType, PrintRequest}; +use rustc::session::config::RUST_CGU_EXT; use rustc::session::filesearch; use rustc::session::search_paths::PathKind; use rustc::session::Session; -use rustc::middle::cstore::{LinkMeta, NativeLibrary, LibSource, LinkagePreference, - NativeLibraryKind}; +use rustc::middle::cstore::{NativeLibrary, LibSource, NativeLibraryKind}; use rustc::middle::dependency_format::Linkage; -use CrateTranslation; +use {CrateTranslation, CrateInfo}; use rustc::util::common::time; use rustc::util::fs::fix_windows_verbatim_for_gcc; -use rustc::dep_graph::{DepKind, DepNode}; use rustc::hir::def_id::CrateNum; -use rustc::hir::svh::Svh; use rustc_back::tempdir::TempDir; use rustc_back::{PanicStrategy, RelroLevel}; -use rustc_incremental::IncrementalHashesMap; use context::get_reloc_model; use llvm; @@ -38,67 +35,24 @@ use std::ascii; use std::char; use std::env; use std::ffi::OsString; -use std::fs; -use std::io::{self, Read, Write}; -use std::mem; +use std::fmt; +use std::fs::{self, File}; +use std::io::{self, Read, Write, BufWriter}; use std::path::{Path, PathBuf}; -use std::process::Command; +use std::process::{Output, Stdio}; use std::str; -use flate2::Compression; -use flate2::write::DeflateEncoder; use syntax::attr; /// The LLVM module name containing crate-metadata. This includes a `.` on /// purpose, so it cannot clash with the name of a user-defined module. pub const METADATA_MODULE_NAME: &'static str = "crate.metadata"; -/// The name of the crate-metadata object file the compiler generates. Must -/// match up with `METADATA_MODULE_NAME`. -pub const METADATA_OBJ_NAME: &'static str = "crate.metadata.o"; // same as for metadata above, but for allocator shim pub const ALLOCATOR_MODULE_NAME: &'static str = "crate.allocator"; -pub const ALLOCATOR_OBJ_NAME: &'static str = "crate.allocator.o"; - -// RLIB LLVM-BYTECODE OBJECT LAYOUT -// Version 1 -// Bytes Data -// 0..10 "RUST_OBJECT" encoded in ASCII -// 11..14 format version as little-endian u32 -// 15..22 size in bytes of deflate compressed LLVM bitcode as -// little-endian u64 -// 23.. compressed LLVM bitcode - -// This is the "magic number" expected at the beginning of a LLVM bytecode -// object in an rlib. 
-pub const RLIB_BYTECODE_OBJECT_MAGIC: &'static [u8] = b"RUST_OBJECT"; - -// The version number this compiler will write to bytecode objects in rlibs -pub const RLIB_BYTECODE_OBJECT_VERSION: u32 = 1; - -// The offset in bytes the bytecode object format version number can be found at -pub const RLIB_BYTECODE_OBJECT_VERSION_OFFSET: usize = 11; - -// The offset in bytes the size of the compressed bytecode can be found at in -// format version 1 -pub const RLIB_BYTECODE_OBJECT_V1_DATASIZE_OFFSET: usize = - RLIB_BYTECODE_OBJECT_VERSION_OFFSET + 4; - -// The offset in bytes the compressed LLVM bytecode can be found at in format -// version 1 -pub const RLIB_BYTECODE_OBJECT_V1_DATA_OFFSET: usize = - RLIB_BYTECODE_OBJECT_V1_DATASIZE_OFFSET + 8; - -pub use self::rustc_trans_utils::link::{find_crate_name, filename_for_input, - default_output_for_target, invalid_output_for_target}; - -pub fn build_link_meta(incremental_hashes_map: &IncrementalHashesMap) -> LinkMeta { - let krate_dep_node = &DepNode::new_no_params(DepKind::Krate); - let r = LinkMeta { - crate_hash: Svh::new(incremental_hashes_map[krate_dep_node].to_smaller_hash()), - }; - info!("{:?}", r); - return r; -} + +pub use rustc_trans_utils::link::{find_crate_name, filename_for_input, default_output_for_target, + invalid_output_for_target, build_link_meta, out_filename, + check_file_is_writeable}; // The third parameter is for env vars, used on windows to set up the // path for MSVC to find its DLLs, and gcc to find its bundled @@ -137,14 +91,19 @@ pub fn get_linker(sess: &Session) -> (String, Command, Vec<(OsString, OsString)> #[cfg(windows)] pub fn msvc_link_exe_cmd(sess: &Session) -> (Command, Vec<(OsString, OsString)>) { - use gcc::windows_registry; + use cc::windows_registry; let target = &sess.opts.target_triple; let tool = windows_registry::find_tool(target, "link.exe"); if let Some(tool) = tool { + let mut cmd = Command::new(tool.path()); + cmd.args(tool.args()); + for &(ref k, ref v) in tool.env() { + cmd.env(k, v); + } let envs = tool.env().to_vec(); - (tool.to_command(), envs) + (cmd, envs) } else { debug!("Failed to locate linker."); (Command::new("link.exe"), vec![]) @@ -167,6 +126,14 @@ fn command_path(sess: &Session) -> OsString { env::join_paths(new_path).unwrap() } +fn metadata_obj(outputs: &OutputFilenames) -> PathBuf { + outputs.temp_path(OutputType::Object, Some(METADATA_MODULE_NAME)) +} + +fn allocator_obj(outputs: &OutputFilenames) -> PathBuf { + outputs.temp_path(OutputType::Object, Some(ALLOCATOR_MODULE_NAME)) +} + pub fn remove(sess: &Session, path: &Path) { match fs::remove_file(path) { Ok(..) 
=> {} @@ -197,33 +164,30 @@ pub fn link_binary(sess: &Session, bug!("invalid output type `{:?}` for target os `{}`", crate_type, sess.opts.target_triple); } - let mut out_files = link_binary_output(sess, trans, crate_type, outputs, crate_name); + let mut out_files = link_binary_output(sess, + trans, + crate_type, + outputs, + crate_name); out_filenames.append(&mut out_files); } // Remove the temporary object file and metadata if we aren't saving temps if !sess.opts.cg.save_temps { if sess.opts.output_types.should_trans() { - for obj in object_filenames(trans, outputs) { - remove(sess, &obj); + for obj in trans.modules.iter() { + remove(sess, &obj.object); } } - remove(sess, &outputs.with_extension(METADATA_OBJ_NAME)); + remove(sess, &metadata_obj(outputs)); if trans.allocator_module.is_some() { - remove(sess, &outputs.with_extension(ALLOCATOR_OBJ_NAME)); + remove(sess, &allocator_obj(outputs)); } } out_filenames } -fn is_writeable(p: &Path) -> bool { - match p.metadata() { - Err(..) => true, - Ok(m) => !m.permissions().readonly() - } -} - fn filename_for_metadata(sess: &Session, crate_name: &str, outputs: &OutputFilenames) -> PathBuf { let out_filename = outputs.single_output_file.clone() .unwrap_or(outputs @@ -234,8 +198,9 @@ fn filename_for_metadata(sess: &Session, crate_name: &str, outputs: &OutputFilen } pub fn each_linked_rlib(sess: &Session, + info: &CrateInfo, f: &mut FnMut(CrateNum, &Path)) -> Result<(), String> { - let crates = sess.cstore.used_crates(LinkagePreference::RequireStatic).into_iter(); + let crates = info.used_crates_static.iter(); let fmts = sess.dependency_formats.borrow(); let fmts = fmts.get(&config::CrateTypeExecutable) .or_else(|| fmts.get(&config::CrateTypeStaticlib)) @@ -245,16 +210,16 @@ pub fn each_linked_rlib(sess: &Session, Some(f) => f, None => return Err(format!("could not find formats for rlibs")) }; - for (cnum, path) in crates { + for &(cnum, ref path) in crates { match fmts.get(cnum.as_usize() - 1) { Some(&Linkage::NotLinked) | Some(&Linkage::IncludedFromDylib) => continue, Some(_) => {} None => return Err(format!("could not find formats for rlibs")) } - let name = sess.cstore.crate_name(cnum).clone(); - let path = match path { - LibSource::Some(p) => p, + let name = &info.crate_name[&cnum]; + let path = match *path { + LibSource::Some(ref p) => p, LibSource::MetadataOnly => { return Err(format!("could not find rlib for: `{}`, found rmeta (metadata) file", name)) @@ -278,38 +243,12 @@ pub fn each_linked_rlib(sess: &Session, /// It's unusual for a crate to not participate in LTO. Typically only /// compiler-specific and unstable crates have a reason to not participate in /// LTO. -pub fn ignored_for_lto(sess: &Session, cnum: CrateNum) -> bool { +pub fn ignored_for_lto(info: &CrateInfo, cnum: CrateNum) -> bool { // `#![no_builtins]` crates don't participate in LTO because the state // of builtins gets messed up (our crate isn't tagged with no builtins). // Similarly `#![compiler_builtins]` doesn't participate because we want // those builtins! 
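// Illustrative sketch (editor's addition, not upstream code): with the new
// signature above, `each_linked_rlib` is driven by the prebuilt `CrateInfo`
// from the translation result instead of querying `sess.cstore`, so callers
// look roughly like this:

fn list_linked_rlibs(sess: &Session, trans: &CrateTranslation) {
    let res = each_linked_rlib(sess, &trans.crate_info, &mut |cnum, path| {
        info!("static rlib for crate {:?}: {}", cnum, path.display());
    });
    if let Err(e) = res {
        sess.fatal(&e);
    }
}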
- sess.cstore.is_no_builtins(cnum) || sess.cstore.is_compiler_builtins(cnum) -} - -fn out_filename(sess: &Session, - crate_type: config::CrateType, - outputs: &OutputFilenames, - crate_name: &str) - -> PathBuf { - let default_filename = filename_for_input(sess, crate_type, crate_name, outputs); - let out_filename = outputs.outputs.get(&OutputType::Exe) - .and_then(|s| s.to_owned()) - .or_else(|| outputs.single_output_file.clone()) - .unwrap_or(default_filename); - - check_file_is_writeable(&out_filename, sess); - - out_filename -} - -// Make sure files are writeable. Mac, FreeBSD, and Windows system linkers -// check this already -- however, the Linux linker will happily overwrite a -// read-only file. We should be consistent. -fn check_file_is_writeable(file: &Path, sess: &Session) { - if !is_writeable(file) { - sess.fatal(&format!("output file {} is not writeable -- check its \ - permissions", file.display())); - } + info.is_no_builtins.contains(&cnum) || info.compiler_builtins == Some(cnum) } fn link_binary_output(sess: &Session, @@ -317,10 +256,8 @@ fn link_binary_output(sess: &Session, crate_type: config::CrateType, outputs: &OutputFilenames, crate_name: &str) -> Vec { - let objects = object_filenames(trans, outputs); - - for file in &objects { - check_file_is_writeable(file, sess); + for module in trans.modules.iter() { + check_file_is_writeable(&module.object, sess); } let tmpdir = match TempDir::new("rustc") { @@ -343,7 +280,6 @@ fn link_binary_output(sess: &Session, link_rlib(sess, trans, RlibFlavor::Normal, - &objects, outputs, &out_filename, tmpdir.path()).build(); @@ -352,13 +288,12 @@ fn link_binary_output(sess: &Session, link_staticlib(sess, trans, outputs, - &objects, &out_filename, tmpdir.path()); } _ => { - link_natively(sess, crate_type, &objects, &out_filename, trans, - outputs, tmpdir.path()); + link_natively(sess, crate_type, &out_filename, + trans, outputs, tmpdir.path()); } } out_filenames.push(out_filename); @@ -371,14 +306,6 @@ fn link_binary_output(sess: &Session, out_filenames } -fn object_filenames(trans: &CrateTranslation, - outputs: &OutputFilenames) - -> Vec { - trans.modules.iter().map(|module| { - outputs.temp_path(OutputType::Object, Some(&module.name)) - }).collect() -} - fn archive_search_paths(sess: &Session) -> Vec { let mut search = Vec::new(); sess.target_filesearch(PathKind::Native).for_each_lib_search_path(|path, _| { @@ -422,15 +349,14 @@ enum RlibFlavor { fn link_rlib<'a>(sess: &'a Session, trans: &CrateTranslation, flavor: RlibFlavor, - objects: &[PathBuf], outputs: &OutputFilenames, out_filename: &Path, tmpdir: &Path) -> ArchiveBuilder<'a> { - info!("preparing rlib from {:?} to {:?}", objects, out_filename); + info!("preparing rlib to {:?}", out_filename); let mut ab = ArchiveBuilder::new(archive_config(sess, out_filename, None)); - for obj in objects { - ab.add_file(obj); + for module in trans.modules.iter() { + ab.add_file(&module.object); } // Note that in this loop we are ignoring the value of `lib.cfg`. That is, @@ -449,7 +375,7 @@ fn link_rlib<'a>(sess: &'a Session, // feature then we'll need to figure out how to record what objects were // loaded from the libraries found here and then encode that into the // metadata of the rlib we're generating somehow. 
- for lib in sess.cstore.used_libraries() { + for lib in trans.crate_info.used_libraries.iter() { match lib.kind { NativeLibraryKind::NativeStatic => {} NativeLibraryKind::NativeStaticNobundle | @@ -497,15 +423,15 @@ fn link_rlib<'a>(sess: &'a Session, // For LTO purposes, the bytecode of this library is also inserted // into the archive. If codegen_units > 1, we insert each of the // bitcode files. - for obj in objects { + for module in trans.modules.iter() { // Note that we make sure that the bytecode filename in the // archive is never exactly 16 bytes long by adding a 16 byte // extension to it. This is to work around a bug in LLDB that // would cause it to crash if the name of a file in an archive // was exactly 16 bytes. - let bc_filename = obj.with_extension("bc"); - let bc_deflated_filename = tmpdir.join({ - obj.with_extension("bytecode.deflate").file_name().unwrap() + let bc_filename = module.object.with_extension("bc"); + let bc_encoded_filename = tmpdir.join({ + module.object.with_extension(RLIB_BYTECODE_EXTENSION).file_name().unwrap() }); let mut bc_data = Vec::new(); @@ -517,11 +443,9 @@ fn link_rlib<'a>(sess: &'a Session, e)) } - let mut bc_data_deflated = Vec::new(); - DeflateEncoder::new(&mut bc_data_deflated, Compression::Fast) - .write_all(&bc_data).unwrap(); + let encoded = bytecode::encode(&module.llmod_id, &bc_data); - let mut bc_file_deflated = match fs::File::create(&bc_deflated_filename) { + let mut bc_file_deflated = match fs::File::create(&bc_encoded_filename) { Ok(file) => file, Err(e) => { sess.fatal(&format!("failed to create compressed \ @@ -529,8 +453,7 @@ fn link_rlib<'a>(sess: &'a Session, } }; - match write_rlib_bytecode_object_v1(&mut bc_file_deflated, - &bc_data_deflated) { + match bc_file_deflated.write_all(&encoded) { Ok(()) => {} Err(e) => { sess.fatal(&format!("failed to write compressed \ @@ -538,13 +461,13 @@ fn link_rlib<'a>(sess: &'a Session, } }; - ab.add_file(&bc_deflated_filename); + ab.add_file(&bc_encoded_filename); // See the bottom of back::write::run_passes for an explanation // of when we do and don't keep .#module-name#.bc files around. 
let user_wants_numbered_bitcode = sess.opts.output_types.contains_key(&OutputType::Bitcode) && - sess.opts.cg.codegen_units > 1; + sess.opts.codegen_units > 1; if !sess.opts.cg.save_temps && !user_wants_numbered_bitcode { remove(sess, &bc_filename); } @@ -560,7 +483,7 @@ fn link_rlib<'a>(sess: &'a Session, RlibFlavor::StaticlibBase => { if trans.allocator_module.is_some() { - ab.add_file(&outputs.with_extension(ALLOCATOR_OBJ_NAME)); + ab.add_file(&allocator_obj(outputs)); } } } @@ -568,40 +491,6 @@ fn link_rlib<'a>(sess: &'a Session, ab } -fn write_rlib_bytecode_object_v1(writer: &mut Write, - bc_data_deflated: &[u8]) -> io::Result<()> { - let bc_data_deflated_size: u64 = bc_data_deflated.len() as u64; - - writer.write_all(RLIB_BYTECODE_OBJECT_MAGIC)?; - writer.write_all(&[1, 0, 0, 0])?; - writer.write_all(&[ - (bc_data_deflated_size >> 0) as u8, - (bc_data_deflated_size >> 8) as u8, - (bc_data_deflated_size >> 16) as u8, - (bc_data_deflated_size >> 24) as u8, - (bc_data_deflated_size >> 32) as u8, - (bc_data_deflated_size >> 40) as u8, - (bc_data_deflated_size >> 48) as u8, - (bc_data_deflated_size >> 56) as u8, - ])?; - writer.write_all(&bc_data_deflated)?; - - let number_of_bytes_written_so_far = - RLIB_BYTECODE_OBJECT_MAGIC.len() + // magic id - mem::size_of_val(&RLIB_BYTECODE_OBJECT_VERSION) + // version - mem::size_of_val(&bc_data_deflated_size) + // data size field - bc_data_deflated_size as usize; // actual data - - // If the number of bytes written to the object so far is odd, add a - // padding byte to make it even. This works around a crash bug in LLDB - // (see issue #15950) - if number_of_bytes_written_so_far % 2 == 1 { - writer.write_all(&[0])?; - } - - return Ok(()); -} - // Create a static archive // // This is essentially the same thing as an rlib, but it also involves adding @@ -617,21 +506,19 @@ fn write_rlib_bytecode_object_v1(writer: &mut Write, fn link_staticlib(sess: &Session, trans: &CrateTranslation, outputs: &OutputFilenames, - objects: &[PathBuf], out_filename: &Path, tempdir: &Path) { let mut ab = link_rlib(sess, trans, RlibFlavor::StaticlibBase, - objects, outputs, out_filename, tempdir); let mut all_native_libs = vec![]; - let res = each_linked_rlib(sess, &mut |cnum, path| { - let name = sess.cstore.crate_name(cnum); - let native_libs = sess.cstore.native_libraries(cnum); + let res = each_linked_rlib(sess, &trans.crate_info, &mut |cnum, path| { + let name = &trans.crate_info.crate_name[&cnum]; + let native_libs = &trans.crate_info.native_libraries[&cnum]; // Here when we include the rlib into our staticlib we need to make a // decision whether to include the extra object files along the way. 
@@ -652,10 +539,10 @@ fn link_staticlib(sess: &Session, }); ab.add_rlib(path, &name.as_str(), - sess.lto() && !ignored_for_lto(sess, cnum), + sess.lto() && !ignored_for_lto(&trans.crate_info, cnum), skip_object_files).unwrap(); - all_native_libs.extend(sess.cstore.native_libraries(cnum)); + all_native_libs.extend(trans.crate_info.native_libraries[&cnum].iter().cloned()); }); if let Err(e) = res { sess.fatal(&e); @@ -665,11 +552,20 @@ fn link_staticlib(sess: &Session, ab.build(); if !all_native_libs.is_empty() { - sess.note_without_error("link against the following native artifacts when linking against \ - this static library"); - sess.note_without_error("the order and any duplication can be significant on some \ - platforms, and so may need to be preserved"); + if sess.opts.prints.contains(&PrintRequest::NativeStaticLibs) { + print_native_static_libs(sess, &all_native_libs); + } else { + // Fallback for backwards compatibility only + print_native_static_libs_legacy(sess, &all_native_libs); + } } +} + +fn print_native_static_libs_legacy(sess: &Session, all_native_libs: &[NativeLibrary]) { + sess.note_without_error("link against the following native artifacts when linking against \ + this static library"); + sess.note_without_error("This list will not be printed by default. \ + Please add --print=native-static-libs if you need this information"); for lib in all_native_libs.iter().filter(|l| relevant_lib(sess, l)) { let name = match lib.kind { @@ -683,18 +579,46 @@ fn link_staticlib(sess: &Session, } } +fn print_native_static_libs(sess: &Session, all_native_libs: &[NativeLibrary]) { + let lib_args: Vec<_> = all_native_libs.iter() + .filter(|l| relevant_lib(sess, l)) + .filter_map(|lib| match lib.kind { + NativeLibraryKind::NativeStaticNobundle | + NativeLibraryKind::NativeUnknown => { + if sess.target.target.options.is_like_msvc { + Some(format!("{}.lib", lib.name)) + } else { + Some(format!("-l{}", lib.name)) + } + }, + NativeLibraryKind::NativeFramework => { + // ld-only syntax, since there are no frameworks in MSVC + Some(format!("-framework {}", lib.name)) + }, + // These are included, no need to print them + NativeLibraryKind::NativeStatic => None, + }) + .collect(); + if !lib_args.is_empty() { + sess.note_without_error("Link against the following native artifacts when linking \ + against this static library. The order and any duplication \ + can be significant on some platforms."); + // Prefix for greppability + sess.note_without_error(&format!("native-static-libs: {}", &lib_args.join(" "))); + } +} + // Create a dynamic library or executable // // This will invoke the system linker/cc to create the resulting file. This // links to all upstream files as well. 
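// Editor's note (not upstream code): with `--print native-static-libs`,
// `print_native_static_libs` above emits a single greppable note, e.g.
//
//     note: native-static-libs: -lfoo -framework Bar baz.lib
//
// where the spelling per library kind is `-l<name>` (non-MSVC) or
// `<name>.lib` (MSVC) for unknown / static-nobundle libraries,
// `-framework <name>` for frameworks, and bundled static libraries are
// omitted entirely.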
fn link_natively(sess: &Session, crate_type: config::CrateType, - objects: &[PathBuf], out_filename: &Path, trans: &CrateTranslation, outputs: &OutputFilenames, tmpdir: &Path) { - info!("preparing {:?} from {:?} to {:?}", crate_type, objects, out_filename); + info!("preparing {:?} to {:?}", crate_type, out_filename); let flavor = sess.linker_flavor(); // The invocations of cc share some flags across platforms @@ -732,7 +656,7 @@ fn link_natively(sess: &Session, { let mut linker = trans.linker_info.to_linker(cmd, &sess); link_args(&mut *linker, sess, crate_type, tmpdir, - objects, out_filename, outputs, trans); + out_filename, outputs, trans); cmd = linker.finalize(); } if let Some(args) = sess.target.target.options.late_link_args.get(&flavor) { @@ -777,7 +701,9 @@ fn link_natively(sess: &Session, let mut i = 0; loop { i += 1; - prog = time(sess.time_passes(), "running linker", || cmd.output()); + prog = time(sess.time_passes(), "running linker", || { + exec_linker(sess, &mut cmd, tmpdir) + }); if !retry_on_segfault || i > 3 { break } @@ -855,11 +781,102 @@ fn link_natively(sess: &Session, } } +fn exec_linker(sess: &Session, cmd: &mut Command, tmpdir: &Path) + -> io::Result +{ + // When attempting to spawn the linker we run a risk of blowing out the + // size limits for spawning a new process with respect to the arguments + // we pass on the command line. + // + // Here we attempt to handle errors from the OS saying "your list of + // arguments is too big" by reinvoking the linker again with an `@`-file + // that contains all the arguments. The theory is that this is then + // accepted on all linkers and the linker will read all its options out of + // there instead of looking at the command line. + match cmd.command().stdout(Stdio::piped()).stderr(Stdio::piped()).spawn() { + Ok(child) => return child.wait_with_output(), + Err(ref e) if command_line_too_big(e) => {} + Err(e) => return Err(e) + } + + let file = tmpdir.join("linker-arguments"); + let mut cmd2 = Command::new(cmd.get_program()); + cmd2.arg(format!("@{}", file.display())); + for &(ref k, ref v) in cmd.get_env() { + cmd2.env(k, v); + } + let mut f = BufWriter::new(File::create(&file)?); + for arg in cmd.get_args() { + writeln!(f, "{}", Escape { + arg: arg.to_str().unwrap(), + is_like_msvc: sess.target.target.options.is_like_msvc, + })?; + } + f.into_inner()?; + return cmd2.output(); + + #[cfg(unix)] + fn command_line_too_big(err: &io::Error) -> bool { + err.raw_os_error() == Some(::libc::E2BIG) + } + + #[cfg(windows)] + fn command_line_too_big(err: &io::Error) -> bool { + const ERROR_FILENAME_EXCED_RANGE: i32 = 206; + err.raw_os_error() == Some(ERROR_FILENAME_EXCED_RANGE) + } + + struct Escape<'a> { + arg: &'a str, + is_like_msvc: bool, + } + + impl<'a> fmt::Display for Escape<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if self.is_like_msvc { + // This is "documented" at + // https://msdn.microsoft.com/en-us/library/4xdcbak7.aspx + // + // Unfortunately there's not a great specification of the + // syntax I could find online (at least) but some local + // testing showed that this seemed sufficient-ish to catch + // at least a few edge cases. + write!(f, "\"")?; + for c in self.arg.chars() { + match c { + '"' => write!(f, "\\{}", c)?, + c => write!(f, "{}", c)?, + } + } + write!(f, "\"")?; + } else { + // This is documented at https://linux.die.net/man/1/ld, namely: + // + // > Options in file are separated by whitespace. 
A whitespace + // > character may be included in an option by surrounding the + // > entire option in either single or double quotes. Any + // > character (including a backslash) may be included by + // > prefixing the character to be included with a backslash. + // + // We put an argument on each line, so all we need to do is + // ensure the line is interpreted as one whole argument. + for c in self.arg.chars() { + match c { + '\\' | + ' ' => write!(f, "\\{}", c)?, + c => write!(f, "{}", c)?, + } + } + } + Ok(()) + } + } +} + fn link_args(cmd: &mut Linker, sess: &Session, crate_type: config::CrateType, tmpdir: &Path, - objects: &[PathBuf], out_filename: &Path, outputs: &OutputFilenames, trans: &CrateTranslation) { @@ -872,8 +889,8 @@ fn link_args(cmd: &mut Linker, let t = &sess.target.target; cmd.include_path(&fix_windows_verbatim_for_gcc(&lib_path)); - for obj in objects { - cmd.add_object(obj); + for module in trans.modules.iter() { + cmd.add_object(&module.object); } cmd.output_filename(out_filename); @@ -896,11 +913,11 @@ fn link_args(cmd: &mut Linker, // object file, so we link that in here. if crate_type == config::CrateTypeDylib || crate_type == config::CrateTypeProcMacro { - cmd.add_object(&outputs.with_extension(METADATA_OBJ_NAME)); + cmd.add_object(&metadata_obj(outputs)); } if trans.allocator_module.is_some() { - cmd.add_object(&outputs.with_extension(ALLOCATOR_OBJ_NAME)); + cmd.add_object(&allocator_obj(outputs)); } // Try to strip as much out of the generated object by removing unused @@ -910,7 +927,7 @@ fn link_args(cmd: &mut Linker, cmd.gc_sections(keep_metadata); } - let used_link_args = sess.cstore.used_link_args(); + let used_link_args = &trans.crate_info.link_args; if crate_type == config::CrateTypeExecutable && t.options.position_independent_executables { @@ -980,9 +997,9 @@ fn link_args(cmd: &mut Linker, // link line. And finally upstream native libraries can't depend on anything // in this DAG so far because they're only dylibs and dylibs can only depend // on other dylibs (e.g. other native deps). - add_local_native_libraries(cmd, sess); - add_upstream_rust_crates(cmd, sess, crate_type, tmpdir); - add_upstream_native_libraries(cmd, sess, crate_type); + add_local_native_libraries(cmd, sess, trans); + add_upstream_rust_crates(cmd, sess, trans, crate_type, tmpdir); + add_upstream_native_libraries(cmd, sess, trans, crate_type); // Tell the linker what we're doing. if crate_type != config::CrateTypeExecutable { @@ -1007,7 +1024,7 @@ fn link_args(cmd: &mut Linker, path }; let mut rpath_config = RPathConfig { - used_crates: sess.cstore.used_crates(LinkagePreference::RequireDynamic), + used_crates: &trans.crate_info.used_crates_dynamic, out_filename: out_filename.to_path_buf(), has_rpath: sess.target.target.options.has_rpath, is_like_osx: sess.target.target.options.is_like_osx, @@ -1037,7 +1054,9 @@ fn link_args(cmd: &mut Linker, // Also note that the native libraries linked here are only the ones located // in the current crate. Upstream crates with native library dependencies // may have their native library pulled in above. 
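// Editor's note (not upstream code): for an argument such as
// `C:\my dir\foo.o`, the `Escape` writer above emits
//
//     gcc/ld response-file line:   C:\\my\ dir\\foo.o
//     MSVC response-file line:     "C:\my dir\foo.o"
//
// so that each line of the `@`-file is read back as exactly one linker
// argument.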
-fn add_local_native_libraries(cmd: &mut Linker, sess: &Session) { +fn add_local_native_libraries(cmd: &mut Linker, + sess: &Session, + trans: &CrateTranslation) { sess.target_filesearch(PathKind::All).for_each_lib_search_path(|path, k| { match k { PathKind::Framework => { cmd.framework_path(path); } @@ -1045,7 +1064,7 @@ fn add_local_native_libraries(cmd: &mut Linker, sess: &Session) { } }); - let relevant_libs = sess.cstore.used_libraries().into_iter().filter(|l| { + let relevant_libs = trans.crate_info.used_libraries.iter().filter(|l| { relevant_lib(sess, l) }); @@ -1068,6 +1087,7 @@ fn add_local_native_libraries(cmd: &mut Linker, sess: &Session) { // the intermediate rlib version) fn add_upstream_rust_crates(cmd: &mut Linker, sess: &Session, + trans: &CrateTranslation, crate_type: config::CrateType, tmpdir: &Path) { // All of the heavy lifting has previously been accomplished by the @@ -1083,35 +1103,35 @@ fn add_upstream_rust_crates(cmd: &mut Linker, // Invoke get_used_crates to ensure that we get a topological sorting of // crates. - let deps = sess.cstore.used_crates(LinkagePreference::RequireDynamic); + let deps = &trans.crate_info.used_crates_dynamic; let mut compiler_builtins = None; - for &(cnum, _) in &deps { + for &(cnum, _) in deps.iter() { // We may not pass all crates through to the linker. Some crates may // appear statically in an existing dylib, meaning we'll pick up all the // symbols from the dylib. - let src = sess.cstore.used_crate_source(cnum); + let src = &trans.crate_info.used_crate_source[&cnum]; match data[cnum.as_usize() - 1] { - _ if sess.cstore.is_profiler_runtime(cnum) => { - add_static_crate(cmd, sess, tmpdir, crate_type, cnum); + _ if trans.crate_info.profiler_runtime == Some(cnum) => { + add_static_crate(cmd, sess, trans, tmpdir, crate_type, cnum); } - _ if sess.cstore.is_sanitizer_runtime(cnum) => { - link_sanitizer_runtime(cmd, sess, tmpdir, cnum); + _ if trans.crate_info.sanitizer_runtime == Some(cnum) => { + link_sanitizer_runtime(cmd, sess, trans, tmpdir, cnum); } // compiler-builtins are always placed last to ensure that they're // linked correctly. - _ if sess.cstore.is_compiler_builtins(cnum) => { + _ if trans.crate_info.compiler_builtins == Some(cnum) => { assert!(compiler_builtins.is_none()); compiler_builtins = Some(cnum); } Linkage::NotLinked | Linkage::IncludedFromDylib => {} Linkage::Static => { - add_static_crate(cmd, sess, tmpdir, crate_type, cnum); + add_static_crate(cmd, sess, trans, tmpdir, crate_type, cnum); } Linkage::Dynamic => { - add_dynamic_crate(cmd, sess, &src.dylib.unwrap().0) + add_dynamic_crate(cmd, sess, &src.dylib.as_ref().unwrap().0) } } } @@ -1122,7 +1142,7 @@ fn add_upstream_rust_crates(cmd: &mut Linker, // was already "included" in a dylib (e.g. `libstd` when `-C prefer-dynamic` // is used) if let Some(cnum) = compiler_builtins { - add_static_crate(cmd, sess, tmpdir, crate_type, cnum); + add_static_crate(cmd, sess, trans, tmpdir, crate_type, cnum); } // Converts a library file-stem into a cc -l argument @@ -1140,10 +1160,11 @@ fn add_upstream_rust_crates(cmd: &mut Linker, // linking it. 
fn link_sanitizer_runtime(cmd: &mut Linker, sess: &Session, + trans: &CrateTranslation, tmpdir: &Path, cnum: CrateNum) { - let src = sess.cstore.used_crate_source(cnum); - let cratepath = &src.rlib.unwrap().0; + let src = &trans.crate_info.used_crate_source[&cnum]; + let cratepath = &src.rlib.as_ref().unwrap().0; if sess.target.target.options.is_like_osx { // On Apple platforms, the sanitizer is always built as a dylib, and @@ -1163,7 +1184,7 @@ fn add_upstream_rust_crates(cmd: &mut Linker, archive.update_symbols(); for f in archive.src_files() { - if f.ends_with("bytecode.deflate") || f == METADATA_FILENAME { + if f.ends_with(RLIB_BYTECODE_EXTENSION) || f == METADATA_FILENAME { archive.remove_file(&f); continue } @@ -1208,21 +1229,22 @@ fn add_upstream_rust_crates(cmd: &mut Linker, // we're at the end of the dependency chain. fn add_static_crate(cmd: &mut Linker, sess: &Session, + trans: &CrateTranslation, tmpdir: &Path, crate_type: config::CrateType, cnum: CrateNum) { - let src = sess.cstore.used_crate_source(cnum); - let cratepath = &src.rlib.unwrap().0; + let src = &trans.crate_info.used_crate_source[&cnum]; + let cratepath = &src.rlib.as_ref().unwrap().0; // See the comment above in `link_staticlib` and `link_rlib` for why if // there's a static library that's not relevant we skip all object // files. - let native_libs = sess.cstore.native_libraries(cnum); + let native_libs = &trans.crate_info.native_libraries[&cnum]; let skip_native = native_libs.iter().any(|lib| { lib.kind == NativeLibraryKind::NativeStatic && !relevant_lib(sess, lib) }); - if (!sess.lto() || ignored_for_lto(sess, cnum)) && + if (!sess.lto() || ignored_for_lto(&trans.crate_info, cnum)) && crate_type != config::CrateTypeDylib && !skip_native { cmd.link_rlib(&fix_windows_verbatim_for_gcc(cratepath)); @@ -1240,7 +1262,7 @@ fn add_upstream_rust_crates(cmd: &mut Linker, let mut any_objects = false; for f in archive.src_files() { - if f.ends_with("bytecode.deflate") || f == METADATA_FILENAME { + if f.ends_with(RLIB_BYTECODE_EXTENSION) || f == METADATA_FILENAME { archive.remove_file(&f); continue } @@ -1248,11 +1270,23 @@ fn add_upstream_rust_crates(cmd: &mut Linker, let canonical = f.replace("-", "_"); let canonical_name = name.replace("-", "_"); + // Look for `.rust-cgu.o` at the end of the filename to conclude + // that this is a Rust-related object file. + fn looks_like_rust(s: &str) -> bool { + let path = Path::new(s); + let ext = path.extension().and_then(|s| s.to_str()); + if ext != Some(OutputType::Object.extension()) { + return false + } + let ext2 = path.file_stem() + .and_then(|s| Path::new(s).extension()) + .and_then(|s| s.to_str()); + ext2 == Some(RUST_CGU_EXT) + } + let is_rust_object = - canonical.starts_with(&canonical_name) && { - let num = &f[name.len()..f.len() - 2]; - num.len() > 0 && num[1..].parse::().is_ok() - }; + canonical.starts_with(&canonical_name) && + looks_like_rust(&f); // If we've been requested to skip all native object files // (those not generated by the rust compiler) then we can skip @@ -1264,7 +1298,7 @@ fn add_upstream_rust_crates(cmd: &mut Linker, // LTO module. Note that `#![no_builtins]` is excluded from LTO, // though, so we let that object file slide. let skip_because_lto = sess.lto() && is_rust_object && - !sess.cstore.is_no_builtins(cnum); + !trans.crate_info.is_no_builtins.contains(&cnum); if skip_because_cfg_say_so || skip_because_lto { archive.remove_file(&f); @@ -1286,7 +1320,7 @@ fn add_upstream_rust_crates(cmd: &mut Linker, // compiler-builtins crate (e.g. 
compiler-rt) because it'll get // repeatedly linked anyway. if crate_type == config::CrateTypeDylib && - !sess.cstore.is_compiler_builtins(cnum) { + trans.crate_info.compiler_builtins != Some(cnum) { cmd.link_whole_rlib(&fix_windows_verbatim_for_gcc(&dst)); } else { cmd.link_rlib(&fix_windows_verbatim_for_gcc(&dst)); @@ -1330,7 +1364,10 @@ fn add_upstream_rust_crates(cmd: &mut Linker, // generic function calls a native function, then the generic function must // be instantiated in the target crate, meaning that the native symbol must // also be resolved in the target crate. -fn add_upstream_native_libraries(cmd: &mut Linker, sess: &Session, crate_type: config::CrateType) { +fn add_upstream_native_libraries(cmd: &mut Linker, + sess: &Session, + trans: &CrateTranslation, + crate_type: config::CrateType) { // Be sure to use a topological sorting of crates because there may be // interdependencies between native libraries. When passing -nodefaultlibs, // for example, almost all native libraries depend on libc, so we have to @@ -1343,9 +1380,9 @@ fn add_upstream_native_libraries(cmd: &mut Linker, sess: &Session, crate_type: c let formats = sess.dependency_formats.borrow(); let data = formats.get(&crate_type).unwrap(); - let crates = sess.cstore.used_crates(LinkagePreference::RequireStatic); - for (cnum, _) in crates { - for lib in sess.cstore.native_libraries(cnum) { + let crates = &trans.crate_info.used_crates_static; + for &(cnum, _) in crates { + for lib in trans.crate_info.native_libraries[&cnum].iter() { if !relevant_lib(sess, &lib) { continue } diff --git a/src/librustc_trans/back/linker.rs b/src/librustc_trans/back/linker.rs index 9b0a5e3f4a..99422bf8c9 100644 --- a/src/librustc_trans/back/linker.rs +++ b/src/librustc_trans/back/linker.rs @@ -14,17 +14,16 @@ use std::fs::{self, File}; use std::io::prelude::*; use std::io::{self, BufWriter}; use std::path::{Path, PathBuf}; -use std::process::Command; - -use context::SharedCrateContext; use back::archive; -use back::symbol_export::ExportedSymbols; -use rustc::middle::dependency_format::Linkage; +use back::command::Command; +use back::symbol_export; use rustc::hir::def_id::{LOCAL_CRATE, CrateNum}; -use rustc_back::LinkerFlavor; +use rustc::middle::dependency_format::Linkage; use rustc::session::Session; use rustc::session::config::{self, CrateType, OptLevel, DebugInfoLevel}; +use rustc::ty::TyCtxt; +use rustc_back::LinkerFlavor; use serialize::{json, Encoder}; /// For all the linkers we support, and information they might @@ -33,19 +32,18 @@ pub struct LinkerInfo { exports: HashMap>, } -impl<'a, 'tcx> LinkerInfo { - pub fn new(scx: &SharedCrateContext<'a, 'tcx>, - exports: &ExportedSymbols) -> LinkerInfo { +impl LinkerInfo { + pub fn new(tcx: TyCtxt) -> LinkerInfo { LinkerInfo { - exports: scx.sess().crate_types.borrow().iter().map(|&c| { - (c, exported_symbols(scx, exports, c)) + exports: tcx.sess.crate_types.borrow().iter().map(|&c| { + (c, exported_symbols(tcx, c)) }).collect(), } } - pub fn to_linker(&'a self, - cmd: Command, - sess: &'a Session) -> Box { + pub fn to_linker<'a>(&'a self, + cmd: Command, + sess: &'a Session) -> Box { match sess.linker_flavor() { LinkerFlavor::Msvc => { Box::new(MsvcLinker { @@ -498,6 +496,18 @@ impl<'a> Linker for MsvcLinker<'a> { let sysroot = self.sess.sysroot(); let natvis_dir_path = sysroot.join("lib\\rustlib\\etc"); if let Ok(natvis_dir) = fs::read_dir(&natvis_dir_path) { + // LLVM 5.0.0's lld-link frontend doesn't yet recognize, and chokes + // on, the /NATVIS:... flags. 
LLVM 6 (or earlier) should at worst ignore + // them, eventually mooting this workaround, per this landed patch: + // https://github.com/llvm-mirror/lld/commit/27b9c4285364d8d76bb43839daa100 + if let Some(ref linker_path) = self.sess.opts.cg.linker { + if let Some(linker_name) = Path::new(&linker_path).file_stem() { + if linker_name.to_str().unwrap().to_lowercase() == "lld-link" { + self.sess.warn("not embedding natvis: lld-link may not support the flag"); + return; + } + } + } for entry in natvis_dir { match entry { Ok(entry) => { @@ -734,16 +744,17 @@ impl<'a> Linker for EmLinker<'a> { } } -fn exported_symbols(scx: &SharedCrateContext, - exported_symbols: &ExportedSymbols, - crate_type: CrateType) - -> Vec { +fn exported_symbols(tcx: TyCtxt, crate_type: CrateType) -> Vec { let mut symbols = Vec::new(); - exported_symbols.for_each_exported_symbol(LOCAL_CRATE, |name, _, _| { - symbols.push(name.to_owned()); - }); - let formats = scx.sess().dependency_formats.borrow(); + let export_threshold = symbol_export::threshold(tcx); + for &(ref name, _, level) in tcx.exported_symbols(LOCAL_CRATE).iter() { + if level.is_below_threshold(export_threshold) { + symbols.push(name.clone()); + } + } + + let formats = tcx.sess.dependency_formats.borrow(); let deps = formats[&crate_type].iter(); for (index, dep_format) in deps.enumerate() { @@ -751,9 +762,11 @@ fn exported_symbols(scx: &SharedCrateContext, // For each dependency that we are linking to statically ... if *dep_format == Linkage::Static { // ... we add its symbol list to our export list. - exported_symbols.for_each_exported_symbol(cnum, |name, _, _| { - symbols.push(name.to_owned()); - }) + for &(ref name, _, level) in tcx.exported_symbols(cnum).iter() { + if level.is_below_threshold(export_threshold) { + symbols.push(name.clone()); + } + } } } diff --git a/src/librustc_trans/back/lto.rs b/src/librustc_trans/back/lto.rs index 3e2d9f5c32..8f75b891a3 100644 --- a/src/librustc_trans/back/lto.rs +++ b/src/librustc_trans/back/lto.rs @@ -8,26 +8,26 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
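The `exported_symbols` rewrite above filters symbols with `SymbolExportLevel::is_below_threshold`. Below is a minimal re-statement of that rule, mirroring the free-standing `is_below_threshold` helper this patch removes from `symbol_export.rs`; the enum and `main` here are local stand-ins for illustration, not rustc's actual types.

    // Local re-statement of the export-threshold rule, for illustration only.
    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum SymbolExportLevel {
        C,    // exported from every crate type, including cdylibs
        Rust, // exported only from Rust dylibs/rlibs
    }

    impl SymbolExportLevel {
        fn is_below_threshold(self, threshold: SymbolExportLevel) -> bool {
            if threshold == SymbolExportLevel::Rust {
                // A Rust-dylib threshold exports everything.
                true
            } else {
                // Otherwise only `C`-level symbols make the cut.
                self == SymbolExportLevel::C
            }
        }
    }

    fn main() {
        let symbols = vec![
            ("rust_metadata_foo", SymbolExportLevel::Rust),
            ("my_c_api_entry", SymbolExportLevel::C),
        ];
        let threshold = SymbolExportLevel::C; // e.g. when building a cdylib
        let exported: Vec<_> = symbols
            .iter()
            .filter(|&&(_, level)| level.is_below_threshold(threshold))
            .map(|&(name, _)| name)
            .collect();
        println!("{:?}", exported); // ["my_c_api_entry"]
    }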
-use back::link; -use back::write; +use back::bytecode::{DecodedBytecode, RLIB_BYTECODE_EXTENSION}; use back::symbol_export; -use rustc::session::config; +use back::write::{ModuleConfig, with_llvm_pmb, CodegenContext}; +use back::write; use errors::{FatalError, Handler}; -use llvm; use llvm::archive_ro::ArchiveRO; use llvm::{ModuleRef, TargetMachineRef, True, False}; -use rustc::util::common::time; -use rustc::util::common::path2cstr; +use llvm; use rustc::hir::def_id::LOCAL_CRATE; -use back::write::{ModuleConfig, with_llvm_pmb, CodegenContext}; +use rustc::middle::exported_symbols::SymbolExportLevel; +use rustc::session::config; +use rustc::util::common::time; +use time_graph::Timeline; +use {ModuleTranslation, ModuleLlvm, ModuleKind, ModuleSource}; use libc; -use flate2::read::DeflateDecoder; -use std::io::Read; use std::ffi::CString; -use std::path::Path; -use std::ptr::read_unaligned; +use std::slice; +use std::sync::Arc; pub fn crate_type_allows_lto(crate_type: config::CrateType) -> bool { match crate_type { @@ -41,34 +41,82 @@ pub fn crate_type_allows_lto(crate_type: config::CrateType) -> bool { } } -pub fn run(cgcx: &CodegenContext, - diag_handler: &Handler, - llmod: ModuleRef, - tm: TargetMachineRef, - config: &ModuleConfig, - temp_no_opt_bc_filename: &Path) -> Result<(), FatalError> { - if cgcx.opts.cg.prefer_dynamic { - diag_handler.struct_err("cannot prefer dynamic linking when performing LTO") - .note("only 'staticlib', 'bin', and 'cdylib' outputs are \ - supported with LTO") - .emit(); - return Err(FatalError) +pub enum LtoModuleTranslation { + Fat { + module: Option, + _serialized_bitcode: Vec, + }, + + Thin(ThinModule), +} + +impl LtoModuleTranslation { + pub fn name(&self) -> &str { + match *self { + LtoModuleTranslation::Fat { .. } => "everything", + LtoModuleTranslation::Thin(ref m) => m.name(), + } } - // Make sure we actually can run LTO - for crate_type in cgcx.crate_types.iter() { - if !crate_type_allows_lto(*crate_type) { - let e = diag_handler.fatal("lto can only be run for executables, cdylibs and \ - static library outputs"); - return Err(e) + /// Optimize this module within the given codegen context. + /// + /// This function is unsafe as it'll return a `ModuleTranslation` still + /// points to LLVM data structures owned by this `LtoModuleTranslation`. + /// It's intended that the module returned is immediately code generated and + /// dropped, and then this LTO module is dropped. + pub unsafe fn optimize(&mut self, + cgcx: &CodegenContext, + timeline: &mut Timeline) + -> Result + { + match *self { + LtoModuleTranslation::Fat { ref mut module, .. } => { + let trans = module.take().unwrap(); + let config = cgcx.config(trans.kind); + let llmod = trans.llvm().unwrap().llmod; + let tm = trans.llvm().unwrap().tm; + run_pass_manager(cgcx, tm, llmod, config, false); + timeline.record("fat-done"); + Ok(trans) + } + LtoModuleTranslation::Thin(ref mut thin) => thin.optimize(cgcx, timeline), } } - let export_threshold = - symbol_export::crates_export_threshold(&cgcx.crate_types); + /// A "guage" of how costly it is to optimize this module, used to sort + /// biggest modules first. + pub fn cost(&self) -> u64 { + match *self { + // Only one module with fat LTO, so the cost doesn't matter. + LtoModuleTranslation::Fat { .. 
} => 0, + LtoModuleTranslation::Thin(ref m) => m.cost(), + } + } +} + +pub enum LTOMode { + WholeCrateGraph, + JustThisCrate, +} - let symbol_filter = &|&(ref name, _, level): &(String, _, _)| { - if symbol_export::is_below_threshold(level, export_threshold) { +pub fn run(cgcx: &CodegenContext, + modules: Vec, + mode: LTOMode, + timeline: &mut Timeline) + -> Result, FatalError> +{ + let diag_handler = cgcx.create_diag_handler(); + let export_threshold = match mode { + LTOMode::WholeCrateGraph => { + symbol_export::crates_export_threshold(&cgcx.crate_types) + } + LTOMode::JustThisCrate => { + SymbolExportLevel::Rust + } + }; + + let symbol_filter = &|&(ref name, _, level): &(String, _, SymbolExportLevel)| { + if level.is_below_threshold(export_threshold) { let mut bytes = Vec::with_capacity(name.len() + 1); bytes.extend(name.bytes()); Some(CString::new(bytes).unwrap()) @@ -77,112 +125,305 @@ pub fn run(cgcx: &CodegenContext, } }; - let mut symbol_white_list: Vec = cgcx.exported_symbols - .exported_symbols(LOCAL_CRATE) + let mut symbol_white_list = cgcx.exported_symbols[&LOCAL_CRATE] .iter() .filter_map(symbol_filter) - .collect(); - - // For each of our upstream dependencies, find the corresponding rlib and - // load the bitcode from the archive. Then merge it into the current LLVM - // module that we've got. - for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() { - symbol_white_list.extend( - cgcx.exported_symbols.exported_symbols(cnum) - .iter() - .filter_map(symbol_filter)); - - let archive = ArchiveRO::open(&path).expect("wanted an rlib"); - let bytecodes = archive.iter().filter_map(|child| { - child.ok().and_then(|c| c.name().map(|name| (name, c))) - }).filter(|&(name, _)| name.ends_with("bytecode.deflate")); - for (name, data) in bytecodes { - let bc_encoded = data.data(); - - let bc_decoded = if is_versioned_bytecode_format(bc_encoded) { - time(cgcx.time_passes, &format!("decode {}", name), || { - // Read the version - let version = extract_bytecode_format_version(bc_encoded); - - if version == 1 { - // The only version existing so far - let data_size = extract_compressed_bytecode_size_v1(bc_encoded); - let compressed_data = &bc_encoded[ - link::RLIB_BYTECODE_OBJECT_V1_DATA_OFFSET.. - (link::RLIB_BYTECODE_OBJECT_V1_DATA_OFFSET + data_size as usize)]; - - let mut inflated = Vec::new(); - let res = DeflateDecoder::new(compressed_data) - .read_to_end(&mut inflated); - if res.is_err() { - let msg = format!("failed to decompress bc of `{}`", - name); - Err(diag_handler.fatal(&msg)) - } else { - Ok(inflated) - } - } else { - Err(diag_handler.fatal(&format!("Unsupported bytecode format version {}", - version))) - } - })? - } else { - time(cgcx.time_passes, &format!("decode {}", name), || { - // the object must be in the old, pre-versioning format, so - // simply inflate everything and let LLVM decide if it can - // make sense of it - let mut inflated = Vec::new(); - let res = DeflateDecoder::new(bc_encoded) - .read_to_end(&mut inflated); - if res.is_err() { - let msg = format!("failed to decompress bc of `{}`", - name); - Err(diag_handler.fatal(&msg)) - } else { - Ok(inflated) + .collect::>(); + timeline.record("whitelist"); + + // If we're performing LTO for the entire crate graph, then for each of our + // upstream dependencies, find the corresponding rlib and load the bitcode + // from the archive. 
+ // + // We save off all the bytecode and LLVM module ids for later processing + // with either fat or thin LTO + let mut upstream_modules = Vec::new(); + if let LTOMode::WholeCrateGraph = mode { + if cgcx.opts.cg.prefer_dynamic { + diag_handler.struct_err("cannot prefer dynamic linking when performing LTO") + .note("only 'staticlib', 'bin', and 'cdylib' outputs are \ + supported with LTO") + .emit(); + return Err(FatalError) + } + + // Make sure we actually can run LTO + for crate_type in cgcx.crate_types.iter() { + if !crate_type_allows_lto(*crate_type) { + let e = diag_handler.fatal("lto can only be run for executables, cdylibs and \ + static library outputs"); + return Err(e) + } + } + + for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() { + symbol_white_list.extend( + cgcx.exported_symbols[&cnum] + .iter() + .filter_map(symbol_filter)); + + let archive = ArchiveRO::open(&path).expect("wanted an rlib"); + let bytecodes = archive.iter().filter_map(|child| { + child.ok().and_then(|c| c.name().map(|name| (name, c))) + }).filter(|&(name, _)| name.ends_with(RLIB_BYTECODE_EXTENSION)); + for (name, data) in bytecodes { + info!("adding bytecode {}", name); + let bc_encoded = data.data(); + + let (bc, id) = time(cgcx.time_passes, &format!("decode {}", name), || { + match DecodedBytecode::new(bc_encoded) { + Ok(b) => Ok((b.bytecode(), b.identifier().to_string())), + Err(e) => Err(diag_handler.fatal(&e)), } - })? - }; + })?; + let bc = SerializedModule::FromRlib(bc); + upstream_modules.push((bc, CString::new(id).unwrap())); + } + timeline.record(&format!("load: {}", path.display())); + } + } - let ptr = bc_decoded.as_ptr(); - debug!("linking {}", name); - time(cgcx.time_passes, &format!("ll link {}", name), || unsafe { - if llvm::LLVMRustLinkInExternalBitcode(llmod, - ptr as *const libc::c_char, - bc_decoded.len() as libc::size_t) { - Ok(()) - } else { - let msg = format!("failed to load bc of `{}`", name); - Err(write::llvm_err(&diag_handler, msg)) - } - })?; + let arr = symbol_white_list.iter().map(|c| c.as_ptr()).collect::>(); + match mode { + LTOMode::WholeCrateGraph if !cgcx.thinlto => { + fat_lto(cgcx, &diag_handler, modules, upstream_modules, &arr, timeline) + } + _ => { + thin_lto(&diag_handler, modules, upstream_modules, &arr, timeline) } } +} + +fn fat_lto(cgcx: &CodegenContext, + diag_handler: &Handler, + mut modules: Vec, + mut serialized_modules: Vec<(SerializedModule, CString)>, + symbol_white_list: &[*const libc::c_char], + timeline: &mut Timeline) + -> Result, FatalError> +{ + info!("going for a fat lto"); + + // Find the "costliest" module and merge everything into that codegen unit. + // All the other modules will be serialized and reparsed into the new + // context, so this hopefully avoids serializing and parsing the largest + // codegen unit. + // + // Additionally use a regular module as the base here to ensure that various + // file copy operations in the backend work correctly. The only other kind + // of module here should be an allocator one, and if your crate is smaller + // than the allocator module then the size doesn't really matter anyway. 
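As a standalone sketch of the "merge everything into the costliest regular module" selection described in the comment above (the real code follows in the hunk below), assuming a toy `Module` type with a precomputed `cost` in place of `LLVMRustModuleCost`:

    // Pick the costliest regular module as the fat-LTO link target; the
    // allocator module is never chosen even if it looks bigger.
    #[derive(Debug, PartialEq)]
    enum ModuleKind { Regular, Allocator }

    #[derive(Debug)]
    struct Module { name: &'static str, kind: ModuleKind, cost: u64 }

    fn main() {
        let mut modules = vec![
            Module { name: "cgu.0", kind: ModuleKind::Regular,   cost: 10 },
            Module { name: "cgu.1", kind: ModuleKind::Regular,   cost: 42 },
            Module { name: "alloc", kind: ModuleKind::Allocator, cost: 99 },
        ];

        let (_, costliest) = modules
            .iter()
            .enumerate()
            .filter(|&(_, m)| m.kind == ModuleKind::Regular)
            .map(|(i, m)| (m.cost, i))
            .max()
            .expect("at least one regular module");
        let base = modules.remove(costliest);
        // Everything else gets serialized and linked into `base`.
        println!("using {} as the base module", base.name); // cgu.1
    }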
+ let (_, costliest_module) = modules.iter() + .enumerate() + .filter(|&(_, module)| module.kind == ModuleKind::Regular) + .map(|(i, module)| { + let cost = unsafe { + llvm::LLVMRustModuleCost(module.llvm().unwrap().llmod) + }; + (cost, i) + }) + .max() + .expect("must be trans'ing at least one module"); + let module = modules.remove(costliest_module); + let llmod = module.llvm().expect("can't lto pre-translated modules").llmod; + info!("using {:?} as a base module", module.llmod_id); + + // For all other modules we translated we'll need to link them into our own + // bitcode. All modules were translated in their own LLVM context, however, + // and we want to move everything to the same LLVM context. Currently the + // way we know of to do that is to serialize them to a string and them parse + // them later. Not great but hey, that's why it's "fat" LTO, right? + for module in modules { + let llvm = module.llvm().expect("can't lto pre-translated modules"); + let buffer = ModuleBuffer::new(llvm.llmod); + let llmod_id = CString::new(&module.llmod_id[..]).unwrap(); + serialized_modules.push((SerializedModule::Local(buffer), llmod_id)); + } + + // For all serialized bitcode files we parse them and link them in as we did + // above, this is all mostly handled in C++. Like above, though, we don't + // know much about the memory management here so we err on the side of being + // save and persist everything with the original module. + let mut serialized_bitcode = Vec::new(); + for (bc_decoded, name) in serialized_modules { + info!("linking {:?}", name); + time(cgcx.time_passes, &format!("ll link {:?}", name), || unsafe { + let data = bc_decoded.data(); + if llvm::LLVMRustLinkInExternalBitcode(llmod, + data.as_ptr() as *const libc::c_char, + data.len() as libc::size_t) { + Ok(()) + } else { + let msg = format!("failed to load bc of {:?}", name); + Err(write::llvm_err(&diag_handler, msg)) + } + })?; + timeline.record(&format!("link {:?}", name)); + serialized_bitcode.push(bc_decoded); + } + cgcx.save_temp_bitcode(&module, "lto.input"); - // Internalize everything but the exported symbols of the current module - let arr: Vec<*const libc::c_char> = symbol_white_list.iter() - .map(|c| c.as_ptr()) - .collect(); - let ptr = arr.as_ptr(); + // Internalize everything that *isn't* in our whitelist to help strip out + // more modules and such unsafe { + let ptr = symbol_white_list.as_ptr(); llvm::LLVMRustRunRestrictionPass(llmod, ptr as *const *const libc::c_char, - arr.len() as libc::size_t); + symbol_white_list.len() as libc::size_t); + cgcx.save_temp_bitcode(&module, "lto.after-restriction"); } if cgcx.no_landing_pads { unsafe { llvm::LLVMRustMarkAllFunctionsNounwind(llmod); } + cgcx.save_temp_bitcode(&module, "lto.after-nounwind"); } + timeline.record("passes"); - if cgcx.opts.cg.save_temps { - let cstr = path2cstr(temp_no_opt_bc_filename); - unsafe { - llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr()); + Ok(vec![LtoModuleTranslation::Fat { + module: Some(module), + _serialized_bitcode: serialized_bitcode, + }]) +} + +/// Prepare "thin" LTO to get run on these modules. +/// +/// The general structure of ThinLTO is quite different from the structure of +/// "fat" LTO above. With "fat" LTO all LLVM modules in question are merged into +/// one giant LLVM module, and then we run more optimization passes over this +/// big module after internalizing most symbols. Thin LTO, on the other hand, +/// avoid this large bottleneck through more targeted optimization. 
+/// +/// At a high level Thin LTO looks like: +/// +/// 1. Prepare a "summary" of each LLVM module in question which describes +/// the values inside, cost of the values, etc. +/// 2. Merge the summaries of all modules in question into one "index" +/// 3. Perform some global analysis on this index +/// 4. For each module, use the index and analysis calculated previously to +/// perform local transformations on the module, for example inlining +/// small functions from other modules. +/// 5. Run thin-specific optimization passes over each module, and then code +/// generate everything at the end. +/// +/// The summary for each module is intended to be quite cheap, and the global +/// index is relatively quite cheap to create as well. As a result, the goal of +/// ThinLTO is to reduce the bottleneck on LTO and enable LTO to be used in more +/// situations. For example one cheap optimization is that we can parallelize +/// all codegen modules, easily making use of all the cores on a machine. +/// +/// With all that in mind, the function here is designed at specifically just +/// calculating the *index* for ThinLTO. This index will then be shared amongst +/// all of the `LtoModuleTranslation` units returned below and destroyed once +/// they all go out of scope. +fn thin_lto(diag_handler: &Handler, + modules: Vec, + serialized_modules: Vec<(SerializedModule, CString)>, + symbol_white_list: &[*const libc::c_char], + timeline: &mut Timeline) + -> Result, FatalError> +{ + unsafe { + info!("going for that thin, thin LTO"); + + let mut thin_buffers = Vec::new(); + let mut module_names = Vec::new(); + let mut thin_modules = Vec::new(); + + // FIXME: right now, like with fat LTO, we serialize all in-memory + // modules before working with them and ThinLTO. We really + // shouldn't do this, however, and instead figure out how to + // extract a summary from an in-memory module and then merge that + // into the global index. It turns out that this loop is by far + // the most expensive portion of this small bit of global + // analysis! + for (i, module) in modules.iter().enumerate() { + info!("local module: {} - {}", i, module.llmod_id); + let llvm = module.llvm().expect("can't lto pretranslated module"); + let name = CString::new(module.llmod_id.clone()).unwrap(); + let buffer = llvm::LLVMRustThinLTOBufferCreate(llvm.llmod); + let buffer = ThinBuffer(buffer); + thin_modules.push(llvm::ThinLTOModule { + identifier: name.as_ptr(), + data: buffer.data().as_ptr(), + len: buffer.data().len(), + }); + thin_buffers.push(buffer); + module_names.push(name); + timeline.record(&module.llmod_id); } + + // FIXME: All upstream crates are deserialized internally in the + // function below to extract their summary and modules. Note that + // unlike the loop above we *must* decode and/or read something + // here as these are all just serialized files on disk. An + // improvement, however, to make here would be to store the + // module summary separately from the actual module itself. Right + // now this is store in one large bitcode file, and the entire + // file is deflate-compressed. We could try to bypass some of the + // decompression by storing the index uncompressed and only + // lazily decompressing the bytecode if necessary. + // + // Note that truly taking advantage of this optimization will + // likely be further down the road. 
We'd have to implement + // incremental ThinLTO first where we could actually avoid + // looking at upstream modules entirely sometimes (the contents, + // we must always unconditionally look at the index). + let mut serialized = Vec::new(); + for (module, name) in serialized_modules { + info!("foreign module {:?}", name); + thin_modules.push(llvm::ThinLTOModule { + identifier: name.as_ptr(), + data: module.data().as_ptr(), + len: module.data().len(), + }); + serialized.push(module); + module_names.push(name); + } + + // Delegate to the C++ bindings to create some data here. Once this is a + // tried-and-true interface we may wish to try to upstream some of this + // to LLVM itself, right now we reimplement a lot of what they do + // upstream... + let data = llvm::LLVMRustCreateThinLTOData( + thin_modules.as_ptr(), + thin_modules.len() as u32, + symbol_white_list.as_ptr(), + symbol_white_list.len() as u32, + ); + if data.is_null() { + let msg = format!("failed to prepare thin LTO context"); + return Err(write::llvm_err(&diag_handler, msg)) + } + let data = ThinData(data); + info!("thin LTO data created"); + timeline.record("data"); + + // Throw our data in an `Arc` as we'll be sharing it across threads. We + // also put all memory referenced by the C++ data (buffers, ids, etc) + // into the arc as well. After this we'll create a thin module + // translation per module in this data. + let shared = Arc::new(ThinShared { + data, + thin_buffers, + serialized_modules: serialized, + module_names, + }); + Ok((0..shared.module_names.len()).map(|i| { + LtoModuleTranslation::Thin(ThinModule { + shared: shared.clone(), + idx: i, + }) + }).collect()) } +} +fn run_pass_manager(cgcx: &CodegenContext, + tm: TargetMachineRef, + llmod: ModuleRef, + config: &ModuleConfig, + thin: bool) { // Now we have one massive module inside of llmod. Time to run the // LTO-specific optimization passes that LLVM provides. 
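The `ThinShared`/`Arc` arrangement above boils down to one immutable blob of analysis state shared by every per-module work item. A minimal sketch of that ownership pattern follows, with illustrative type names and plain strings standing in for the real index, buffers, and module names.

    use std::sync::Arc;
    use std::thread;

    // `Shared` stands in for ThinShared: read-only data produced once and
    // cloned cheaply (via Arc) into one work item per codegen unit.
    struct Shared {
        module_names: Vec<String>,
    }

    struct ThinJob {
        shared: Arc<Shared>,
        idx: usize,
    }

    impl ThinJob {
        fn name(&self) -> &str {
            &self.shared.module_names[self.idx]
        }
    }

    fn main() {
        let shared = Arc::new(Shared {
            module_names: vec!["cgu.0".into(), "cgu.1".into(), "cgu.2".into()],
        });

        let jobs: Vec<ThinJob> = (0..shared.module_names.len())
            .map(|i| ThinJob { shared: shared.clone(), idx: i })
            .collect();

        // Each job can be optimized on its own thread; the shared index is
        // only dropped once the last job goes away.
        let handles: Vec<_> = jobs.into_iter()
            .map(|job| thread::spawn(move || println!("optimizing {}", job.name())))
            .collect();
        for h in handles {
            h.join().unwrap();
        }
    }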
// @@ -197,9 +438,15 @@ pub fn run(cgcx: &CodegenContext, llvm::LLVMRustAddPass(pm, pass); with_llvm_pmb(llmod, config, &mut |b| { - llvm::LLVMPassManagerBuilderPopulateLTOPassManager(b, pm, - /* Internalize = */ False, - /* RunInliner = */ True); + if thin { + if !llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm) { + panic!("this version of LLVM does not support ThinLTO"); + } + } else { + llvm::LLVMPassManagerBuilderPopulateLTOPassManager(b, pm, + /* Internalize = */ False, + /* RunInliner = */ True); + } }); let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr() as *const _); @@ -212,25 +459,200 @@ pub fn run(cgcx: &CodegenContext, llvm::LLVMDisposePassManager(pm); } debug!("lto done"); - Ok(()) } -fn is_versioned_bytecode_format(bc: &[u8]) -> bool { - let magic_id_byte_count = link::RLIB_BYTECODE_OBJECT_MAGIC.len(); - return bc.len() > magic_id_byte_count && - &bc[..magic_id_byte_count] == link::RLIB_BYTECODE_OBJECT_MAGIC; +pub enum SerializedModule { + Local(ModuleBuffer), + FromRlib(Vec), +} + +impl SerializedModule { + fn data(&self) -> &[u8] { + match *self { + SerializedModule::Local(ref m) => m.data(), + SerializedModule::FromRlib(ref m) => m, + } + } +} + +pub struct ModuleBuffer(*mut llvm::ModuleBuffer); + +unsafe impl Send for ModuleBuffer {} +unsafe impl Sync for ModuleBuffer {} + +impl ModuleBuffer { + fn new(m: ModuleRef) -> ModuleBuffer { + ModuleBuffer(unsafe { + llvm::LLVMRustModuleBufferCreate(m) + }) + } + + fn data(&self) -> &[u8] { + unsafe { + let ptr = llvm::LLVMRustModuleBufferPtr(self.0); + let len = llvm::LLVMRustModuleBufferLen(self.0); + slice::from_raw_parts(ptr, len) + } + } +} + +impl Drop for ModuleBuffer { + fn drop(&mut self) { + unsafe { llvm::LLVMRustModuleBufferFree(self.0); } + } +} + +pub struct ThinModule { + shared: Arc, + idx: usize, +} + +struct ThinShared { + data: ThinData, + thin_buffers: Vec, + serialized_modules: Vec, + module_names: Vec, +} + +struct ThinData(*mut llvm::ThinLTOData); + +unsafe impl Send for ThinData {} +unsafe impl Sync for ThinData {} + +impl Drop for ThinData { + fn drop(&mut self) { + unsafe { + llvm::LLVMRustFreeThinLTOData(self.0); + } + } +} + +struct ThinBuffer(*mut llvm::ThinLTOBuffer); + +unsafe impl Send for ThinBuffer {} +unsafe impl Sync for ThinBuffer {} + +impl ThinBuffer { + fn data(&self) -> &[u8] { + unsafe { + let ptr = llvm::LLVMRustThinLTOBufferPtr(self.0) as *const _; + let len = llvm::LLVMRustThinLTOBufferLen(self.0); + slice::from_raw_parts(ptr, len) + } + } } -fn extract_bytecode_format_version(bc: &[u8]) -> u32 { - let pos = link::RLIB_BYTECODE_OBJECT_VERSION_OFFSET; - let byte_data = &bc[pos..pos + 4]; - let data = unsafe { read_unaligned(byte_data.as_ptr() as *const u32) }; - u32::from_le(data) +impl Drop for ThinBuffer { + fn drop(&mut self) { + unsafe { + llvm::LLVMRustThinLTOBufferFree(self.0); + } + } } -fn extract_compressed_bytecode_size_v1(bc: &[u8]) -> u64 { - let pos = link::RLIB_BYTECODE_OBJECT_V1_DATASIZE_OFFSET; - let byte_data = &bc[pos..pos + 8]; - let data = unsafe { read_unaligned(byte_data.as_ptr() as *const u64) }; - u64::from_le(data) +impl ThinModule { + fn name(&self) -> &str { + self.shared.module_names[self.idx].to_str().unwrap() + } + + fn cost(&self) -> u64 { + // Yes, that's correct, we're using the size of the bytecode as an + // indicator for how costly this codegen unit is. 
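`ModuleBuffer`, `ThinBuffer`, and `ThinData` above all follow the same RAII shape: own a raw pointer handed back by the `LLVMRust*` bindings, expose its contents as bytes, and free it in `Drop`. Here is a self-contained sketch of that shape, backed by an ordinary heap allocation rather than the real FFI.

    // RAII wrapper sketch: `Buffer` owns a raw pointer and releases it in
    // Drop, like ModuleBuffer/ThinBuffer do for their LLVM handles. The
    // backing storage is an ordinary boxed Vec instead of an FFI allocation.
    struct Buffer(*mut Vec<u8>);

    // The real wrappers assert Send/Sync because the underlying LLVM buffers
    // are plain bytes; the same reasoning applies to this sketch.
    unsafe impl Send for Buffer {}
    unsafe impl Sync for Buffer {}

    impl Buffer {
        fn new(data: Vec<u8>) -> Buffer {
            Buffer(Box::into_raw(Box::new(data)))
        }

        fn data(&self) -> &[u8] {
            unsafe { &(*self.0)[..] }
        }
    }

    impl Drop for Buffer {
        fn drop(&mut self) {
            // Reconstitute the Box so the allocation is freed exactly once.
            unsafe { drop(Box::from_raw(self.0)); }
        }
    }

    fn main() {
        let buf = Buffer::new(b"BC\xc0\xde".to_vec());
        println!("{} bytes of bitcode", buf.data().len());
    } // `buf` dropped here; the allocation is released.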
+ self.data().len() as u64 + } + + fn data(&self) -> &[u8] { + let a = self.shared.thin_buffers.get(self.idx).map(|b| b.data()); + a.unwrap_or_else(|| { + let len = self.shared.thin_buffers.len(); + self.shared.serialized_modules[self.idx - len].data() + }) + } + + unsafe fn optimize(&mut self, cgcx: &CodegenContext, timeline: &mut Timeline) + -> Result + { + let diag_handler = cgcx.create_diag_handler(); + let tm = (cgcx.tm_factory)().map_err(|e| { + write::llvm_err(&diag_handler, e) + })?; + + // Right now the implementation we've got only works over serialized + // modules, so we create a fresh new LLVM context and parse the module + // into that context. One day, however, we may do this for upstream + // crates but for locally translated modules we may be able to reuse + // that LLVM Context and Module. + let llcx = llvm::LLVMContextCreate(); + let llmod = llvm::LLVMRustParseBitcodeForThinLTO( + llcx, + self.data().as_ptr(), + self.data().len(), + self.shared.module_names[self.idx].as_ptr(), + ); + assert!(!llmod.is_null()); + let mtrans = ModuleTranslation { + source: ModuleSource::Translated(ModuleLlvm { + llmod, + llcx, + tm, + }), + llmod_id: self.name().to_string(), + name: self.name().to_string(), + kind: ModuleKind::Regular, + }; + cgcx.save_temp_bitcode(&mtrans, "thin-lto-input"); + + // Like with "fat" LTO, get some better optimizations if landing pads + // are disabled by removing all landing pads. + if cgcx.no_landing_pads { + llvm::LLVMRustMarkAllFunctionsNounwind(llmod); + cgcx.save_temp_bitcode(&mtrans, "thin-lto-after-nounwind"); + timeline.record("nounwind"); + } + + // Up next comes the per-module local analyses that we do for Thin LTO. + // Each of these functions is basically copied from the LLVM + // implementation and then tailored to suit this implementation. Ideally + // each of these would be supported by upstream LLVM but that's perhaps + // a patch for another day! + // + // You can find some more comments about these functions in the LLVM + // bindings we've got (currently `PassWrapper.cpp`) + if !llvm::LLVMRustPrepareThinLTORename(self.shared.data.0, llmod) { + let msg = format!("failed to prepare thin LTO module"); + return Err(write::llvm_err(&diag_handler, msg)) + } + cgcx.save_temp_bitcode(&mtrans, "thin-lto-after-rename"); + timeline.record("rename"); + if !llvm::LLVMRustPrepareThinLTOResolveWeak(self.shared.data.0, llmod) { + let msg = format!("failed to prepare thin LTO module"); + return Err(write::llvm_err(&diag_handler, msg)) + } + cgcx.save_temp_bitcode(&mtrans, "thin-lto-after-resolve"); + timeline.record("resolve"); + if !llvm::LLVMRustPrepareThinLTOInternalize(self.shared.data.0, llmod) { + let msg = format!("failed to prepare thin LTO module"); + return Err(write::llvm_err(&diag_handler, msg)) + } + cgcx.save_temp_bitcode(&mtrans, "thin-lto-after-internalize"); + timeline.record("internalize"); + if !llvm::LLVMRustPrepareThinLTOImport(self.shared.data.0, llmod) { + let msg = format!("failed to prepare thin LTO module"); + return Err(write::llvm_err(&diag_handler, msg)) + } + cgcx.save_temp_bitcode(&mtrans, "thin-lto-after-import"); + timeline.record("import"); + + // Alright now that we've done everything related to the ThinLTO + // analysis it's time to run some optimizations! Here we use the same + // `run_pass_manager` as the "fat" LTO above except that we tell it to + // populate a thin-specific pass manager, which presumably LLVM treats a + // little differently. 
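The per-module preparation in `ThinModule::optimize` above is a fixed sequence of fallible passes, each followed by a temp-bitcode checkpoint and a timeline record. A rough sketch of that control flow, with placeholder step functions rather than the real `LLVMRustPrepareThinLTO*` bindings:

    // Drive a fixed list of fallible preparation steps, bailing out on the
    // first failure and taking a checkpoint after each success.
    fn run_steps() -> Result<(), String> {
        let steps: [(&str, fn() -> bool); 4] = [
            ("rename", || true),
            ("resolve", || true),
            ("internalize", || true),
            ("import", || true),
        ];
        for (name, step) in steps.iter() {
            if !step() {
                return Err(format!("failed to prepare thin LTO module ({})", name));
            }
            // In the real code: cgcx.save_temp_bitcode(..) + timeline.record(..)
            println!("thin-lto-after-{}", name);
        }
        Ok(())
    }

    fn main() {
        run_steps().expect("thin LTO preparation failed");
    }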
+ info!("running thin lto passes over {}", mtrans.name); + let config = cgcx.config(mtrans.kind); + run_pass_manager(cgcx, tm, llmod, config, true); + cgcx.save_temp_bitcode(&mtrans, "thin-lto-after-pm"); + timeline.record("thin-done"); + Ok(mtrans) + } } diff --git a/src/librustc_trans/back/rpath.rs b/src/librustc_trans/back/rpath.rs index 104e7bc6a5..8e5e7d3764 100644 --- a/src/librustc_trans/back/rpath.rs +++ b/src/librustc_trans/back/rpath.rs @@ -17,7 +17,7 @@ use rustc::hir::def_id::CrateNum; use rustc::middle::cstore::LibSource; pub struct RPathConfig<'a> { - pub used_crates: Vec<(CrateNum, LibSource)>, + pub used_crates: &'a [(CrateNum, LibSource)], pub out_filename: PathBuf, pub is_like_osx: bool, pub has_rpath: bool, @@ -36,7 +36,7 @@ pub fn get_rpath_flags(config: &mut RPathConfig) -> Vec { debug!("preparing the RPATH!"); let libs = config.used_crates.clone(); - let libs = libs.into_iter().filter_map(|(_, l)| l.option()).collect::>(); + let libs = libs.iter().filter_map(|&(_, ref l)| l.option()).collect::>(); let rpaths = get_rpaths(config, &libs); flags.extend_from_slice(&rpaths_to_flags(&rpaths)); diff --git a/src/librustc_trans/back/symbol_export.rs b/src/librustc_trans/back/symbol_export.rs index 971483e91b..4996972a64 100644 --- a/src/librustc_trans/back/symbol_export.rs +++ b/src/librustc_trans/back/symbol_export.rs @@ -8,46 +8,81 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::rc::Rc; +use std::sync::Arc; + +use base; use monomorphize::Instance; -use rustc::util::nodemap::{FxHashMap, NodeSet}; -use rustc::hir::def_id::{DefId, CrateNum, LOCAL_CRATE, INVALID_CRATE, CRATE_DEF_INDEX}; +use rustc::hir::def_id::CrateNum; +use rustc::hir::def_id::{DefId, LOCAL_CRATE}; +use rustc::middle::exported_symbols::SymbolExportLevel; use rustc::session::config; use rustc::ty::TyCtxt; +use rustc::ty::maps::Providers; +use rustc::util::nodemap::FxHashMap; use rustc_allocator::ALLOCATOR_METHODS; -use syntax::attr; - -/// The SymbolExportLevel of a symbols specifies from which kinds of crates -/// the symbol will be exported. `C` symbols will be exported from any -/// kind of crate, including cdylibs which export very few things. -/// `Rust` will only be exported if the crate produced is a Rust -/// dylib. -#[derive(Eq, PartialEq, Debug, Copy, Clone)] -pub enum SymbolExportLevel { - C, - Rust, + +pub type ExportedSymbols = FxHashMap< + CrateNum, + Arc, SymbolExportLevel)>>, +>; + +pub fn threshold(tcx: TyCtxt) -> SymbolExportLevel { + crates_export_threshold(&tcx.sess.crate_types.borrow()) } -/// The set of symbols exported from each crate in the crate graph. 
-#[derive(Debug)] -pub struct ExportedSymbols { - pub export_threshold: SymbolExportLevel, - exports: FxHashMap>, - local_exports: NodeSet, +pub fn metadata_symbol_name(tcx: TyCtxt) -> String { + format!("rust_metadata_{}_{}", + tcx.crate_name(LOCAL_CRATE), + tcx.crate_disambiguator(LOCAL_CRATE)) } -impl ExportedSymbols { - pub fn empty() -> ExportedSymbols { - ExportedSymbols { - export_threshold: SymbolExportLevel::C, - exports: FxHashMap(), - local_exports: NodeSet(), - } +fn crate_export_threshold(crate_type: config::CrateType) -> SymbolExportLevel { + match crate_type { + config::CrateTypeExecutable | + config::CrateTypeStaticlib | + config::CrateTypeProcMacro | + config::CrateTypeCdylib => SymbolExportLevel::C, + config::CrateTypeRlib | + config::CrateTypeDylib => SymbolExportLevel::Rust, } +} - pub fn compute<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - local_exported_symbols: &NodeSet) - -> ExportedSymbols { - let export_threshold = crates_export_threshold(&tcx.sess.crate_types.borrow()); +pub fn crates_export_threshold(crate_types: &[config::CrateType]) + -> SymbolExportLevel { + if crate_types.iter().any(|&crate_type| { + crate_export_threshold(crate_type) == SymbolExportLevel::Rust + }) { + SymbolExportLevel::Rust + } else { + SymbolExportLevel::C + } +} + +pub fn provide_local(providers: &mut Providers) { + providers.exported_symbol_ids = |tcx, cnum| { + let export_threshold = threshold(tcx); + Rc::new(tcx.exported_symbols(cnum) + .iter() + .filter_map(|&(_, id, level)| { + id.and_then(|id| { + if level.is_below_threshold(export_threshold) { + Some(id) + } else { + None + } + }) + }) + .collect()) + }; + + providers.is_exported_symbol = |tcx, id| { + tcx.exported_symbol_ids(id.krate).contains(&id) + }; + + providers.exported_symbols = |tcx, cnum| { + assert_eq!(cnum, LOCAL_CRATE); + let local_exported_symbols = base::find_exported_symbols(tcx); let mut local_crate: Vec<_> = local_exported_symbols .iter() @@ -58,36 +93,20 @@ impl ExportedSymbols { let name = tcx.symbol_name(Instance::mono(tcx, def_id)); let export_level = export_level(tcx, def_id); debug!("EXPORTED SYMBOL (local): {} ({:?})", name, export_level); - (str::to_owned(&name), def_id, export_level) + (str::to_owned(&name), Some(def_id), export_level) }) .collect(); - let mut local_exports = local_crate - .iter() - .filter_map(|&(_, def_id, level)| { - if is_below_threshold(level, export_threshold) { - tcx.hir.as_local_node_id(def_id) - } else { - None - } - }) - .collect::(); - - const INVALID_DEF_ID: DefId = DefId { - krate: INVALID_CRATE, - index: CRATE_DEF_INDEX, - }; - if let Some(_) = *tcx.sess.entry_fn.borrow() { local_crate.push(("main".to_string(), - INVALID_DEF_ID, + None, SymbolExportLevel::C)); } if tcx.sess.allocator_kind.get().is_some() { for method in ALLOCATOR_METHODS { local_crate.push((format!("__rust_{}", method.name), - INVALID_DEF_ID, + None, SymbolExportLevel::Rust)); } } @@ -97,153 +116,73 @@ impl ExportedSymbols { let idx = def_id.index; let disambiguator = tcx.sess.local_crate_disambiguator(); let registrar = tcx.sess.generate_derive_registrar_symbol(disambiguator, idx); - local_crate.push((registrar, def_id, SymbolExportLevel::C)); - local_exports.insert(id); + local_crate.push((registrar, Some(def_id), SymbolExportLevel::C)); } if tcx.sess.crate_types.borrow().contains(&config::CrateTypeDylib) { local_crate.push((metadata_symbol_name(tcx), - INVALID_DEF_ID, + None, SymbolExportLevel::Rust)); } + Arc::new(local_crate) + }; +} - let mut exports = FxHashMap(); - exports.insert(LOCAL_CRATE, 
local_crate); - - for cnum in tcx.sess.cstore.crates() { - debug_assert!(cnum != LOCAL_CRATE); - - // If this crate is a plugin and/or a custom derive crate, then - // we're not even going to link those in so we skip those crates. - if tcx.sess.cstore.plugin_registrar_fn(cnum).is_some() || - tcx.sess.cstore.derive_registrar_fn(cnum).is_some() { - continue; - } - - // Check to see if this crate is a "special runtime crate". These - // crates, implementation details of the standard library, typically - // have a bunch of `pub extern` and `#[no_mangle]` functions as the - // ABI between them. We don't want their symbols to have a `C` - // export level, however, as they're just implementation details. - // Down below we'll hardwire all of the symbols to the `Rust` export - // level instead. - let special_runtime_crate = - tcx.is_panic_runtime(cnum.as_def_id()) || - tcx.sess.cstore.is_compiler_builtins(cnum); - - let crate_exports = tcx - .sess - .cstore - .exported_symbols(cnum) - .iter() - .map(|&def_id| { - let name = tcx.symbol_name(Instance::mono(tcx, def_id)); - let export_level = if special_runtime_crate { - // We can probably do better here by just ensuring that - // it has hidden visibility rather than public - // visibility, as this is primarily here to ensure it's - // not stripped during LTO. - // - // In general though we won't link right if these - // symbols are stripped, and LTO currently strips them. - if &*name == "rust_eh_personality" || - &*name == "rust_eh_register_frames" || - &*name == "rust_eh_unregister_frames" { - SymbolExportLevel::C - } else { - SymbolExportLevel::Rust - } - } else { - export_level(tcx, def_id) - }; - debug!("EXPORTED SYMBOL (re-export): {} ({:?})", name, export_level); - (str::to_owned(&name), def_id, export_level) - }) - .collect(); - - exports.insert(cnum, crate_exports); - } - - return ExportedSymbols { - export_threshold, - exports, - local_exports, - }; - - fn export_level(tcx: TyCtxt, - sym_def_id: DefId) - -> SymbolExportLevel { - let attrs = tcx.get_attrs(sym_def_id); - if attr::contains_extern_indicator(tcx.sess.diagnostic(), &attrs) { - SymbolExportLevel::C - } else { - SymbolExportLevel::Rust - } - } - } - - pub fn local_exports(&self) -> &NodeSet { - &self.local_exports - } - - pub fn exported_symbols(&self, - cnum: CrateNum) - -> &[(String, DefId, SymbolExportLevel)] { - match self.exports.get(&cnum) { - Some(exports) => exports, - None => &[] +pub fn provide_extern(providers: &mut Providers) { + providers.exported_symbols = |tcx, cnum| { + // If this crate is a plugin and/or a custom derive crate, then + // we're not even going to link those in so we skip those crates. + if tcx.plugin_registrar_fn(cnum).is_some() || + tcx.derive_registrar_fn(cnum).is_some() { + return Arc::new(Vec::new()) } - } - pub fn for_each_exported_symbol(&self, - cnum: CrateNum, - mut f: F) - where F: FnMut(&str, DefId, SymbolExportLevel) - { - for &(ref name, def_id, export_level) in self.exported_symbols(cnum) { - if is_below_threshold(export_level, self.export_threshold) { - f(&name, def_id, export_level) - } - } - } -} - -pub fn metadata_symbol_name(tcx: TyCtxt) -> String { - format!("rust_metadata_{}_{}", - tcx.crate_name(LOCAL_CRATE), - tcx.crate_disambiguator(LOCAL_CRATE)) -} + // Check to see if this crate is a "special runtime crate". These + // crates, implementation details of the standard library, typically + // have a bunch of `pub extern` and `#[no_mangle]` functions as the + // ABI between them. 
We don't want their symbols to have a `C` + // export level, however, as they're just implementation details. + // Down below we'll hardwire all of the symbols to the `Rust` export + // level instead. + let special_runtime_crate = + tcx.is_panic_runtime(cnum) || tcx.is_compiler_builtins(cnum); + + let crate_exports = tcx + .exported_symbol_ids(cnum) + .iter() + .map(|&def_id| { + let name = tcx.symbol_name(Instance::mono(tcx, def_id)); + let export_level = if special_runtime_crate { + // We can probably do better here by just ensuring that + // it has hidden visibility rather than public + // visibility, as this is primarily here to ensure it's + // not stripped during LTO. + // + // In general though we won't link right if these + // symbols are stripped, and LTO currently strips them. + if &*name == "rust_eh_personality" || + &*name == "rust_eh_register_frames" || + &*name == "rust_eh_unregister_frames" { + SymbolExportLevel::C + } else { + SymbolExportLevel::Rust + } + } else { + export_level(tcx, def_id) + }; + debug!("EXPORTED SYMBOL (re-export): {} ({:?})", name, export_level); + (str::to_owned(&name), Some(def_id), export_level) + }) + .collect(); -pub fn crate_export_threshold(crate_type: config::CrateType) - -> SymbolExportLevel { - match crate_type { - config::CrateTypeExecutable | - config::CrateTypeStaticlib | - config::CrateTypeProcMacro | - config::CrateTypeCdylib => SymbolExportLevel::C, - config::CrateTypeRlib | - config::CrateTypeDylib => SymbolExportLevel::Rust, - } + Arc::new(crate_exports) + }; } -pub fn crates_export_threshold(crate_types: &[config::CrateType]) - -> SymbolExportLevel { - if crate_types.iter().any(|&crate_type| { - crate_export_threshold(crate_type) == SymbolExportLevel::Rust - }) { - SymbolExportLevel::Rust - } else { +fn export_level(tcx: TyCtxt, sym_def_id: DefId) -> SymbolExportLevel { + if tcx.contains_extern_indicator(sym_def_id) { SymbolExportLevel::C - } -} - -pub fn is_below_threshold(level: SymbolExportLevel, - threshold: SymbolExportLevel) - -> bool { - if threshold == SymbolExportLevel::Rust { - // We export everything from Rust dylibs - true } else { - level == SymbolExportLevel::C + SymbolExportLevel::Rust } } diff --git a/src/librustc_trans/back/symbol_names.rs b/src/librustc_trans/back/symbol_names.rs index 10b66fb199..66a27f1c4a 100644 --- a/src/librustc_trans/back/symbol_names.rs +++ b/src/librustc_trans/back/symbol_names.rs @@ -98,8 +98,10 @@ //! DefPaths which are much more robust in the face of changes to the code base. 
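The new `export_level` above defers to the `contains_extern_indicator` query (whose provider appears in the `symbol_names.rs` hunk that follows): `#[no_mangle]` or a valid `#[export_name]` makes a symbol C-level, everything else stays Rust-level. A small sketch of that decision with a stand-in attribute struct:

    // `Attrs` stands in for the real attribute lookup on a DefId.
    #[derive(Debug, PartialEq)]
    enum SymbolExportLevel { C, Rust }

    struct Attrs {
        no_mangle: bool,
        export_name: Option<&'static str>,
    }

    fn contains_extern_indicator(attrs: &Attrs) -> bool {
        attrs.no_mangle || attrs.export_name.is_some()
    }

    fn export_level(attrs: &Attrs) -> SymbolExportLevel {
        if contains_extern_indicator(attrs) {
            SymbolExportLevel::C
        } else {
            SymbolExportLevel::Rust
        }
    }

    fn main() {
        let plain = Attrs { no_mangle: false, export_name: None };
        let c_api = Attrs { no_mangle: true, export_name: None };
        assert_eq!(export_level(&plain), SymbolExportLevel::Rust);
        assert_eq!(export_level(&c_api), SymbolExportLevel::C);
        println!("ok");
    }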
use monomorphize::Instance; +use trans_item::{TransItemExt, InstantiationMode}; use rustc::middle::weak_lang_items; +use rustc::middle::trans::TransItem; use rustc::hir::def_id::DefId; use rustc::hir::map as hir_map; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; @@ -119,6 +121,30 @@ pub fn provide(providers: &mut Providers) { *providers = Providers { def_symbol_name, symbol_name, + + export_name: |tcx, id| { + tcx.get_attrs(id).iter().fold(None, |ia, attr| { + if attr.check_name("export_name") { + if let s @ Some(_) = attr.value_str() { + s + } else { + struct_span_err!(tcx.sess, attr.span, E0558, + "export_name attribute has invalid format") + .span_label(attr.span, "did you mean #[export_name=\"*\"]?") + .emit(); + None + } + } else { + ia + } + }) + }, + + contains_extern_indicator: |tcx, id| { + attr::contains_name(&tcx.get_attrs(id), "no_mangle") || + tcx.export_name(id).is_some() + }, + ..*providers }; } @@ -126,7 +152,10 @@ pub fn provide(providers: &mut Providers) { fn get_symbol_hash<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // the DefId of the item this name is for - def_id: Option, + def_id: DefId, + + // instance this name will be for + instance: Instance<'tcx>, // type of the item, without any generic // parameters substituted; this is @@ -136,7 +165,7 @@ fn get_symbol_hash<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // values for generic type parameters, // if any. - substs: Option<&'tcx Substs<'tcx>>) + substs: &'tcx Substs<'tcx>) -> u64 { debug!("get_symbol_hash(def_id={:?}, parameters={:?})", def_id, substs); @@ -146,7 +175,7 @@ fn get_symbol_hash<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // the main symbol name is not necessarily unique; hash in the // compiler's internal def-path, guaranteeing each symbol has a // truly unique path - hasher.hash(def_id.map(|def_id| tcx.def_path_hash(def_id))); + hasher.hash(tcx.def_path_hash(def_id)); // Include the main item-type. Note that, in this case, the // assertions about `needs_subst` may not hold, but this item-type @@ -162,19 +191,36 @@ fn get_symbol_hash<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } // also include any type parameters (for generic items) - if let Some(substs) = substs { - assert!(!substs.has_erasable_regions()); - assert!(!substs.needs_subst()); - substs.visit_with(&mut hasher); - - // If this is an instance of a generic function, we also hash in - // the ID of the instantiating crate. This avoids symbol conflicts - // in case the same instances is emitted in two crates of the same - // project. - if substs.types().next().is_some() { - hasher.hash(tcx.crate_name.as_str()); - hasher.hash(tcx.sess.local_crate_disambiguator().as_str()); + assert!(!substs.has_erasable_regions()); + assert!(!substs.needs_subst()); + substs.visit_with(&mut hasher); + + let mut avoid_cross_crate_conflicts = false; + + // If this is an instance of a generic function, we also hash in + // the ID of the instantiating crate. This avoids symbol conflicts + // in case the same instances is emitted in two crates of the same + // project. + if substs.types().next().is_some() { + avoid_cross_crate_conflicts = true; + } + + // If we're dealing with an instance of a function that's inlined from + // another crate but we're marking it as globally shared to our + // compliation (aka we're not making an internal copy in each of our + // codegen units) then this symbol may become an exported (but hidden + // visibility) symbol. This means that multiple crates may do the same + // and we want to be sure to avoid any symbol conflicts here. 
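The comment above (in `get_symbol_hash`) describes when the instantiating crate must be mixed into the symbol hash. Below is a rough sketch of that rule, using `DefaultHasher` and plain strings as stand-ins for rustc's stable hashing and real inputs.

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Always hash the def-path and the concrete type parameters; mix in the
    // instantiating crate's name and disambiguator whenever the instance
    // could also be emitted by another crate (generic instantiations or
    // shared monomorphizations that may conflict).
    fn symbol_hash(
        def_path_hash: u64,
        substs: &[&str],
        crate_name: &str,
        crate_disambiguator: &str,
        may_conflict_across_crates: bool,
    ) -> u64 {
        let mut hasher = DefaultHasher::new();
        def_path_hash.hash(&mut hasher);
        substs.hash(&mut hasher);
        if may_conflict_across_crates || !substs.is_empty() {
            crate_name.hash(&mut hasher);
            crate_disambiguator.hash(&mut hasher);
        }
        hasher.finish()
    }

    fn main() {
        // The same generic instantiation gets different hashes in two crates,
        // so their object files can be linked together without clashing.
        let a = symbol_hash(0x1234, &["u32"], "crate_a", "disamb-a", false);
        let b = symbol_hash(0x1234, &["u32"], "crate_b", "disamb-b", false);
        assert_ne!(a, b);
        println!("{:x} vs {:x}", a, b);
    }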
+ match TransItem::Fn(instance).instantiation_mode(tcx) { + InstantiationMode::GloballyShared { may_conflict: true } => { + avoid_cross_crate_conflicts = true; } + _ => {} + } + + if avoid_cross_crate_conflicts { + hasher.hash(tcx.crate_name.as_str()); + hasher.hash(tcx.sess.local_crate_disambiguator().as_str()); } }); @@ -242,17 +288,17 @@ fn compute_symbol_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: Instance return name.to_string(); } // Don't mangle foreign items. - return tcx.item_name(def_id).as_str().to_string(); + return tcx.item_name(def_id).to_string(); } - if let Some(name) = attr::find_export_name_attr(tcx.sess.diagnostic(), &attrs) { + if let Some(name) = tcx.export_name(def_id) { // Use provided name return name.to_string(); } if attr::contains_name(&attrs, "no_mangle") { // Don't mangle - return tcx.item_name(def_id).as_str().to_string(); + return tcx.item_name(def_id).to_string(); } // We want to compute the "type" of this item. Unfortunately, some @@ -285,7 +331,7 @@ fn compute_symbol_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: Instance // and should not matter anyhow. let instance_ty = tcx.erase_regions(&instance_ty); - let hash = get_symbol_hash(tcx, Some(def_id), instance_ty, Some(substs)); + let hash = get_symbol_hash(tcx, def_id, instance, instance_ty, substs); SymbolPathBuffer::from_interned(tcx.def_symbol_name(def_id)).finish(hash) } diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 3f9b28d3d6..22b09a0e26 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -13,20 +13,25 @@ use back::link::{self, get_linker, remove}; use back::linker::LinkerInfo; use back::symbol_export::ExportedSymbols; use rustc_incremental::{save_trans_partition, in_incr_comp_dir}; +use rustc::dep_graph::DepGraph; use rustc::middle::cstore::{LinkMeta, EncodedMetadata}; use rustc::session::config::{self, OutputFilenames, OutputType, OutputTypes, Passes, SomePasses, AllPasses, Sanitizer}; use rustc::session::Session; -use time_graph::{self, TimeGraph}; +use rustc::util::nodemap::FxHashMap; +use time_graph::{self, TimeGraph, Timeline}; use llvm; use llvm::{ModuleRef, TargetMachineRef, PassManagerRef, DiagnosticInfoRef}; -use llvm::SMDiagnosticRef; +use llvm::{SMDiagnosticRef, ContextRef}; use {CrateTranslation, ModuleSource, ModuleTranslation, CompiledModule, ModuleKind}; -use rustc::hir::def_id::CrateNum; +use CrateInfo; +use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; +use rustc::ty::TyCtxt; use rustc::util::common::{time, time_depth, set_time_depth, path2cstr, print_time_passes_entry}; use rustc::util::fs::{link_or_copy, rename_or_copy_remove}; use errors::{self, Handler, Level, DiagnosticBuilder, FatalError}; use errors::emitter::{Emitter}; +use syntax::attr; use syntax::ext::hygiene::Mark; use syntax_pos::MultiSpan; use syntax_pos::symbol::Symbol; @@ -34,11 +39,12 @@ use context::{is_pie_binary, get_reloc_model}; use jobserver::{Client, Acquired}; use rustc_demangle; +use std::any::Any; use std::ffi::CString; -use std::fmt; use std::fs; use std::io; use std::io::Write; +use std::mem; use std::path::{Path, PathBuf}; use std::str; use std::sync::Arc; @@ -137,6 +143,14 @@ fn get_llvm_opt_size(optimize: config::OptLevel) -> llvm::CodeGenOptSize { } pub fn create_target_machine(sess: &Session) -> TargetMachineRef { + target_machine_factory(sess)().unwrap_or_else(|err| { + panic!(llvm_err(sess.diagnostic(), err)) + }) +} + +pub fn target_machine_factory(sess: &Session) + -> Arc Result + Send + Sync> +{ let 
reloc_model = get_reloc_model(sess); let opt_level = get_llvm_opt_level(sess.opts.optimize); @@ -165,40 +179,40 @@ pub fn create_target_machine(sess: &Session) -> TargetMachineRef { let triple = &sess.target.target.llvm_target; - let tm = unsafe { - let triple = CString::new(triple.as_bytes()).unwrap(); - let cpu = match sess.opts.cg.target_cpu { - Some(ref s) => &**s, - None => &*sess.target.target.options.cpu - }; - let cpu = CString::new(cpu.as_bytes()).unwrap(); - let features = CString::new(target_feature(sess).as_bytes()).unwrap(); - llvm::LLVMRustCreateTargetMachine( - triple.as_ptr(), cpu.as_ptr(), features.as_ptr(), - code_model, - reloc_model, - opt_level, - use_softfp, - is_pie_binary(sess), - ffunction_sections, - fdata_sections, - ) + let triple = CString::new(triple.as_bytes()).unwrap(); + let cpu = match sess.opts.cg.target_cpu { + Some(ref s) => &**s, + None => &*sess.target.target.options.cpu }; + let cpu = CString::new(cpu.as_bytes()).unwrap(); + let features = CString::new(target_feature(sess).as_bytes()).unwrap(); + let is_pie_binary = is_pie_binary(sess); + + Arc::new(move || { + let tm = unsafe { + llvm::LLVMRustCreateTargetMachine( + triple.as_ptr(), cpu.as_ptr(), features.as_ptr(), + code_model, + reloc_model, + opt_level, + use_softfp, + is_pie_binary, + ffunction_sections, + fdata_sections, + ) + }; - if tm.is_null() { - let msg = format!("Could not create LLVM TargetMachine for triple: {}", - triple); - panic!(llvm_err(sess.diagnostic(), msg)); - } else { - return tm; - }; + if tm.is_null() { + Err(format!("Could not create LLVM TargetMachine for triple: {}", + triple.to_str().unwrap())) + } else { + Ok(tm) + } + }) } - /// Module-specific configuration for `optimize_and_codegen`. pub struct ModuleConfig { - /// LLVM TargetMachine to use for codegen. - tm: TargetMachineRef, /// Names of additional optimization passes to run. 
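In the hunks above, the per-module target machine (and its `unsafe impl Send`) is replaced by a factory closure: the session-derived settings are computed once, captured by value, and each worker calls the factory to build its own machine, getting `Err(String)` back instead of a panic. The following is a rough sketch of that shape with a stand-in `TargetMachine` type instead of the raw LLVM handle; the names are hypothetical.

```rust
use std::sync::Arc;
use std::thread;

// Stand-in for the raw LLVM target machine handle.
struct TargetMachine {
    triple: String,
    cpu: String,
}

// Settings that would normally be derived from the Session up front.
#[derive(Clone)]
struct Settings {
    triple: String,
    cpu: String,
    valid: bool,
}

fn target_machine_factory(settings: &Settings)
    -> Arc<dyn Fn() -> Result<TargetMachine, String> + Send + Sync>
{
    // Capture everything the closure needs by value so the factory is
    // `Send + Sync` and can be handed to every codegen worker.
    let settings = settings.clone();
    Arc::new(move || {
        if !settings.valid {
            return Err(format!("could not create target machine for triple: {}",
                               settings.triple));
        }
        Ok(TargetMachine {
            triple: settings.triple.clone(),
            cpu: settings.cpu.clone(),
        })
    })
}

fn main() {
    let factory = target_machine_factory(&Settings {
        triple: "x86_64-unknown-linux-gnu".to_string(),
        cpu: "generic".to_string(),
        valid: true,
    });

    // Each worker builds its own machine instead of sharing a raw pointer.
    let handles: Vec<_> = (0..4).map(|_| {
        let factory = factory.clone();
        thread::spawn(move || {
            let tm = factory().expect("failed to create target machine");
            println!("worker got a machine for {} ({})", tm.triple, tm.cpu);
        })
    }).collect();
    for h in handles {
        h.join().unwrap();
    }
}
```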
passes: Vec, /// Some(level) to optimize at a certain level, or None to run @@ -231,12 +245,9 @@ pub struct ModuleConfig { obj_is_bitcode: bool, } -unsafe impl Send for ModuleConfig { } - impl ModuleConfig { - fn new(sess: &Session, passes: Vec) -> ModuleConfig { + fn new(passes: Vec) -> ModuleConfig { ModuleConfig { - tm: create_target_machine(sess), passes, opt_level: None, opt_size: None, @@ -284,40 +295,6 @@ impl ModuleConfig { self.merge_functions = sess.opts.optimize == config::OptLevel::Default || sess.opts.optimize == config::OptLevel::Aggressive; } - - fn clone(&self, sess: &Session) -> ModuleConfig { - ModuleConfig { - tm: create_target_machine(sess), - passes: self.passes.clone(), - opt_level: self.opt_level, - opt_size: self.opt_size, - - emit_no_opt_bc: self.emit_no_opt_bc, - emit_bc: self.emit_bc, - emit_lto_bc: self.emit_lto_bc, - emit_ir: self.emit_ir, - emit_asm: self.emit_asm, - emit_obj: self.emit_obj, - obj_is_bitcode: self.obj_is_bitcode, - - no_verify: self.no_verify, - no_prepopulate_passes: self.no_prepopulate_passes, - no_builtins: self.no_builtins, - time_passes: self.time_passes, - vectorize_loop: self.vectorize_loop, - vectorize_slp: self.vectorize_slp, - merge_functions: self.merge_functions, - inline_threshold: self.inline_threshold, - } - } -} - -impl Drop for ModuleConfig { - fn drop(&mut self) { - unsafe { - llvm::LLVMRustDisposeTargetMachine(self.tm); - } - } } /// Additional resources used by optimize_and_codegen (not module specific) @@ -326,11 +303,21 @@ pub struct CodegenContext { // Resouces needed when running LTO pub time_passes: bool, pub lto: bool, + pub thinlto: bool, pub no_landing_pads: bool, + pub save_temps: bool, pub exported_symbols: Arc, pub opts: Arc, pub crate_types: Vec, pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>, + output_filenames: Arc, + regular_module_config: Arc, + metadata_module_config: Arc, + allocator_module_config: Arc, + pub tm_factory: Arc Result + Send + Sync>, + + // Number of cgus excluding the allocator/metadata modules + pub total_cgus: usize, // Handler to use for diagnostics produced during codegen. pub diag_emitter: SharedEmitter, // LLVM passes added by plugins. @@ -343,21 +330,69 @@ pub struct CodegenContext { // compiling incrementally pub incr_comp_session_dir: Option, // Channel back to the main control thread to send messages to - coordinator_send: Sender, + coordinator_send: Sender>, // A reference to the TimeGraph so we can register timings. None means that // measuring is disabled. 
time_graph: Option, } impl CodegenContext { - fn create_diag_handler(&self) -> Handler { + pub fn create_diag_handler(&self) -> Handler { Handler::with_emitter(true, false, Box::new(self.diag_emitter.clone())) } + + pub fn config(&self, kind: ModuleKind) -> &ModuleConfig { + match kind { + ModuleKind::Regular => &self.regular_module_config, + ModuleKind::Metadata => &self.metadata_module_config, + ModuleKind::Allocator => &self.allocator_module_config, + } + } + + pub fn save_temp_bitcode(&self, trans: &ModuleTranslation, name: &str) { + if !self.save_temps { + return + } + unsafe { + let ext = format!("{}.bc", name); + let cgu = Some(&trans.name[..]); + let path = self.output_filenames.temp_path_ext(&ext, cgu); + let cstr = path2cstr(&path); + let llmod = trans.llvm().unwrap().llmod; + llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr()); + } + } +} + +struct DiagnosticHandlers<'a> { + inner: Box<(&'a CodegenContext, &'a Handler)>, + llcx: ContextRef, +} + +impl<'a> DiagnosticHandlers<'a> { + fn new(cgcx: &'a CodegenContext, + handler: &'a Handler, + llcx: ContextRef) -> DiagnosticHandlers<'a> { + let data = Box::new((cgcx, handler)); + unsafe { + let arg = &*data as &(_, _) as *const _ as *mut _; + llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, arg); + llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, arg); + } + DiagnosticHandlers { + inner: data, + llcx: llcx, + } + } } -struct HandlerFreeVars<'a> { - cgcx: &'a CodegenContext, - diag_handler: &'a Handler, +impl<'a> Drop for DiagnosticHandlers<'a> { + fn drop(&mut self) { + unsafe { + llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, 0 as *mut _); + llvm::LLVMContextSetDiagnosticHandler(self.llcx, diagnostic_handler, 0 as *mut _); + } + } } unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext, @@ -369,7 +404,10 @@ unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext, unsafe extern "C" fn inline_asm_handler(diag: SMDiagnosticRef, user: *const c_void, cookie: c_uint) { - let HandlerFreeVars { cgcx, .. } = *(user as *const HandlerFreeVars); + if user.is_null() { + return + } + let (cgcx, _) = *(user as *const (&CodegenContext, &Handler)); let msg = llvm::build_string(|s| llvm::LLVMRustWriteSMDiagnosticToString(diag, s)) .expect("non-UTF8 SMDiagnostic"); @@ -378,7 +416,10 @@ unsafe extern "C" fn inline_asm_handler(diag: SMDiagnosticRef, } unsafe extern "C" fn diagnostic_handler(info: DiagnosticInfoRef, user: *mut c_void) { - let HandlerFreeVars { cgcx, diag_handler, .. } = *(user as *const HandlerFreeVars); + if user.is_null() { + return + } + let (cgcx, diag_handler) = *(user as *const (&CodegenContext, &Handler)); match llvm::diagnostic::Diagnostic::unpack(info) { llvm::diagnostic::InlineAsm(inline) => { @@ -409,36 +450,27 @@ unsafe extern "C" fn diagnostic_handler(info: DiagnosticInfoRef, user: *mut c_vo } // Unsafe due to LLVM calls. 
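The `DiagnosticHandlers` guard introduced just below registers a raw pointer to a boxed `(context, handler)` pair with LLVM, clears it again in `Drop`, and the callbacks bail out on a null pointer. A self-contained sketch of that RAII pattern follows, with a tiny mock "C side" (a single global slot) standing in for the LLVM handler registry; all names here are invented for the example.

```rust
use std::os::raw::c_void;
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

// Stand-ins for the LLVM context and the rustc diagnostic handler.
struct Context;
struct Handler {
    name: &'static str,
}

// Mock of the C-side handler registry: a single global callback argument.
static HANDLER_DATA: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());

fn set_diagnostic_handler(data: *mut c_void) {
    HANDLER_DATA.store(data, Ordering::SeqCst);
}

// What the "C side" would do when a diagnostic is produced.
fn emit_diagnostic(msg: &str) {
    let user = HANDLER_DATA.load(Ordering::SeqCst);
    // Mirror the null check added in the patch: a cleared handler is ignored.
    if user.is_null() {
        return;
    }
    // Valid while a guard is registered, because the guard owns the box.
    let (_ctx, handler) = unsafe { *(user as *const (&Context, &Handler)) };
    println!("[{}] {}", handler.name, msg);
}

// RAII guard: installs the handler on construction, clears it on Drop.
struct DiagnosticHandlers<'a> {
    // Keep the boxed pair alive for as long as the guard lives.
    _inner: Box<(&'a Context, &'a Handler)>,
}

impl<'a> DiagnosticHandlers<'a> {
    fn new(ctx: &'a Context, handler: &'a Handler) -> DiagnosticHandlers<'a> {
        let data = Box::new((ctx, handler));
        let ptr = &*data as *const (&Context, &Handler) as *mut c_void;
        set_diagnostic_handler(ptr);
        DiagnosticHandlers { _inner: data }
    }
}

impl<'a> Drop for DiagnosticHandlers<'a> {
    fn drop(&mut self) {
        // Unregister first so no dangling pointer is left behind.
        set_diagnostic_handler(ptr::null_mut());
    }
}

fn main() {
    let ctx = Context;
    let handler = Handler { name: "codegen" };
    {
        let _guard = DiagnosticHandlers::new(&ctx, &handler);
        emit_diagnostic("inline asm warning"); // printed
    }
    emit_diagnostic("after drop"); // silently ignored
}
```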
-unsafe fn optimize_and_codegen(cgcx: &CodegenContext, - diag_handler: &Handler, - mtrans: ModuleTranslation, - config: ModuleConfig, - output_names: OutputFilenames) - -> Result +unsafe fn optimize(cgcx: &CodegenContext, + diag_handler: &Handler, + mtrans: &ModuleTranslation, + config: &ModuleConfig, + timeline: &mut Timeline) + -> Result<(), FatalError> { - let (llmod, llcx) = match mtrans.source { - ModuleSource::Translated(ref llvm) => (llvm.llmod, llvm.llcx), + let (llmod, llcx, tm) = match mtrans.source { + ModuleSource::Translated(ref llvm) => (llvm.llmod, llvm.llcx, llvm.tm), ModuleSource::Preexisting(_) => { bug!("optimize_and_codegen: called with ModuleSource::Preexisting") } }; - let tm = config.tm; - - let fv = HandlerFreeVars { - cgcx, - diag_handler, - }; - let fv = &fv as *const HandlerFreeVars as *mut c_void; - - llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, fv); - llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, fv); + let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx); let module_name = mtrans.name.clone(); let module_name = Some(&module_name[..]); if config.emit_no_opt_bc { - let out = output_names.temp_path_ext("no-opt.bc", module_name); + let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name); let out = path2cstr(&out); llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr()); } @@ -501,31 +533,57 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, // Finally, run the actual optimization passes time(config.time_passes, &format!("llvm function passes [{}]", module_name.unwrap()), || llvm::LLVMRustRunFunctionPassManager(fpm, llmod)); + timeline.record("fpm"); time(config.time_passes, &format!("llvm module passes [{}]", module_name.unwrap()), || llvm::LLVMRunPassManager(mpm, llmod)); // Deallocate managers that we're now done with llvm::LLVMDisposePassManager(fpm); llvm::LLVMDisposePassManager(mpm); + } + Ok(()) +} - if cgcx.lto { - time(cgcx.time_passes, "all lto passes", || { - let temp_no_opt_bc_filename = - output_names.temp_path_ext("no-opt.lto.bc", module_name); - lto::run(cgcx, - diag_handler, - llmod, - tm, - &config, - &temp_no_opt_bc_filename) - })?; - if config.emit_lto_bc { - let out = output_names.temp_path_ext("lto.bc", module_name); - let out = path2cstr(&out); - llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr()); - } +fn generate_lto_work(cgcx: &CodegenContext, + modules: Vec) + -> Vec<(WorkItem, u64)> +{ + let mut timeline = cgcx.time_graph.as_ref().map(|tg| { + tg.start(TRANS_WORKER_TIMELINE, + TRANS_WORK_PACKAGE_KIND, + "generate lto") + }).unwrap_or(Timeline::noop()); + let mode = if cgcx.lto { + lto::LTOMode::WholeCrateGraph + } else { + lto::LTOMode::JustThisCrate + }; + let lto_modules = lto::run(cgcx, modules, mode, &mut timeline) + .unwrap_or_else(|e| panic!(e)); + + lto_modules.into_iter().map(|module| { + let cost = module.cost(); + (WorkItem::LTO(module), cost) + }).collect() +} + +unsafe fn codegen(cgcx: &CodegenContext, + diag_handler: &Handler, + mtrans: ModuleTranslation, + config: &ModuleConfig, + timeline: &mut Timeline) + -> Result +{ + timeline.record("codegen"); + let (llmod, llcx, tm) = match mtrans.source { + ModuleSource::Translated(ref llvm) => (llvm.llmod, llvm.llcx, llvm.tm), + ModuleSource::Preexisting(_) => { + bug!("codegen: called with ModuleSource::Preexisting") } - } + }; + let module_name = mtrans.name.clone(); + let module_name = Some(&module_name[..]); + let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx); // A codegen-specific pass manager 
is used to generate object // files for an LLVM module. @@ -556,18 +614,29 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, let write_obj = config.emit_obj && !config.obj_is_bitcode; let copy_bc_to_obj = config.emit_obj && config.obj_is_bitcode; - let bc_out = output_names.temp_path(OutputType::Bitcode, module_name); - let obj_out = output_names.temp_path(OutputType::Object, module_name); + let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name); + let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name); if write_bc { let bc_out_c = path2cstr(&bc_out); - llvm::LLVMWriteBitcodeToFile(llmod, bc_out_c.as_ptr()); + if llvm::LLVMRustThinLTOAvailable() { + with_codegen(tm, llmod, config.no_builtins, |cpm| { + llvm::LLVMRustWriteThinBitcodeToFile( + cpm, + llmod, + bc_out_c.as_ptr(), + ) + }); + } else { + llvm::LLVMWriteBitcodeToFile(llmod, bc_out_c.as_ptr()); + } + timeline.record("bc"); } time(config.time_passes, &format!("codegen passes [{}]", module_name.unwrap()), || -> Result<(), FatalError> { if config.emit_ir { - let out = output_names.temp_path(OutputType::LlvmAssembly, module_name); + let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name); let out = path2cstr(&out); extern "C" fn demangle_callback(input_ptr: *const c_char, @@ -604,11 +673,12 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, with_codegen(tm, llmod, config.no_builtins, |cpm| { llvm::LLVMRustPrintModule(cpm, llmod, out.as_ptr(), demangle_callback); llvm::LLVMDisposePassManager(cpm); - }) + }); + timeline.record("ir"); } if config.emit_asm { - let path = output_names.temp_path(OutputType::Assembly, module_name); + let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name); // We can't use the same module for asm and binary output, because that triggers // various errors like invalid IR or broken binaries, so we might have to clone the @@ -625,6 +695,7 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, if config.emit_obj { llvm::LLVMDisposeModule(llmod); } + timeline.record("asm"); } if write_obj { @@ -632,6 +703,7 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, write_output_file(diag_handler, tm, cpm, llmod, &obj_out, llvm::FileType::ObjectFile) })?; + timeline.record("obj"); } Ok(()) @@ -651,7 +723,10 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, } } - Ok(mtrans.into_compiled_module(config.emit_obj, config.emit_bc)) + drop(handlers); + Ok(mtrans.into_compiled_module(config.emit_obj, + config.emit_bc, + &cgcx.output_filenames)) } pub struct CompiledModules { @@ -665,18 +740,35 @@ fn need_crate_bitcode_for_rlib(sess: &Session) -> bool { sess.opts.output_types.contains_key(&OutputType::Exe) } -pub fn start_async_translation(sess: &Session, - crate_output: &OutputFilenames, +pub fn start_async_translation(tcx: TyCtxt, time_graph: Option, - crate_name: Symbol, link: LinkMeta, metadata: EncodedMetadata, - exported_symbols: Arc, - no_builtins: bool, - windows_subsystem: Option, - linker_info: LinkerInfo, - no_integrated_as: bool) + coordinator_receive: Receiver>, + total_cgus: usize) -> OngoingCrateTranslation { + let sess = tcx.sess; + let crate_output = tcx.output_filenames(LOCAL_CRATE); + let crate_name = tcx.crate_name(LOCAL_CRATE); + let no_builtins = attr::contains_name(&tcx.hir.krate().attrs, "no_builtins"); + let subsystem = attr::first_attr_value_str_by_name(&tcx.hir.krate().attrs, + "windows_subsystem"); + let windows_subsystem = subsystem.map(|subsystem| { + if subsystem != "windows" && 
subsystem != "console" { + tcx.sess.fatal(&format!("invalid windows subsystem `{}`, only \ + `windows` and `console` are allowed", + subsystem)); + } + subsystem.to_string() + }); + + let no_integrated_as = tcx.sess.opts.cg.no_integrated_as || + (tcx.sess.target.target.options.no_integrated_as && + (crate_output.outputs.contains_key(&OutputType::Object) || + crate_output.outputs.contains_key(&OutputType::Exe))); + let linker_info = LinkerInfo::new(tcx); + let crate_info = CrateInfo::new(tcx); + let output_types_override = if no_integrated_as { OutputTypes::new(&[(OutputType::Assembly, None)]) } else { @@ -684,9 +776,9 @@ pub fn start_async_translation(sess: &Session, }; // Figure out what we actually need to build. - let mut modules_config = ModuleConfig::new(sess, sess.opts.cg.passes.clone()); - let mut metadata_config = ModuleConfig::new(sess, vec![]); - let mut allocator_config = ModuleConfig::new(sess, vec![]); + let mut modules_config = ModuleConfig::new(sess.opts.cg.passes.clone()); + let mut metadata_config = ModuleConfig::new(vec![]); + let mut allocator_config = ModuleConfig::new(vec![]); if let Some(ref sanitizer) = sess.opts.debugging_opts.sanitizer { match *sanitizer { @@ -771,16 +863,19 @@ pub fn start_async_translation(sess: &Session, let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); let (trans_worker_send, trans_worker_receive) = channel(); - let (coordinator_send, coordinator_receive) = channel(); - let coordinator_thread = start_executing_work(sess, + let coordinator_thread = start_executing_work(tcx, + &crate_info, shared_emitter, trans_worker_send, - coordinator_send.clone(), coordinator_receive, + total_cgus, client, time_graph.clone(), - exported_symbols.clone()); + Arc::new(modules_config), + Arc::new(metadata_config), + Arc::new(allocator_config)); + OngoingCrateTranslation { crate_name, link, @@ -788,21 +883,19 @@ pub fn start_async_translation(sess: &Session, windows_subsystem, linker_info, no_integrated_as, - - regular_module_config: modules_config, - metadata_module_config: metadata_config, - allocator_module_config: allocator_config, + crate_info, time_graph, - output_filenames: crate_output.clone(), - coordinator_send, + coordinator_send: tcx.tx_to_llvm_workers.clone(), trans_worker_receive, shared_emitter_main, - future: coordinator_thread + future: coordinator_thread, + output_filenames: tcx.output_filenames(LOCAL_CRATE), } } fn copy_module_artifacts_into_incr_comp_cache(sess: &Session, + dep_graph: &DepGraph, compiled_modules: &CompiledModules, crate_output: &OutputFilenames) { if sess.opts.incremental.is_none() { @@ -822,7 +915,10 @@ fn copy_module_artifacts_into_incr_comp_cache(sess: &Session, files.push((OutputType::Bitcode, path)); } - save_trans_partition(sess, &module.name, module.symbol_name_hash, &files); + save_trans_partition(sess, + dep_graph, + &module.name, + &files); } } @@ -941,10 +1037,10 @@ fn produce_final_output_artifacts(sess: &Session, let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe); let keep_numbered_bitcode = needs_crate_bitcode || - (user_wants_bitcode && sess.opts.cg.codegen_units > 1); + (user_wants_bitcode && sess.opts.codegen_units > 1); let keep_numbered_objects = needs_crate_object || - (user_wants_objects && sess.opts.cg.codegen_units > 1); + (user_wants_objects && sess.opts.codegen_units > 1); for module in compiled_modules.modules.iter() { let module_name = Some(&module.name[..]); @@ -992,37 +1088,52 @@ pub fn dump_incremental_data(trans: &CrateTranslation) { 
eprintln!("incremental: re-using {} out of {} modules", reuse, trans.modules.len()); } -struct WorkItem { - mtrans: ModuleTranslation, - config: ModuleConfig, - output_names: OutputFilenames +enum WorkItem { + Optimize(ModuleTranslation), + LTO(lto::LtoModuleTranslation), } -impl fmt::Debug for WorkItem { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "WorkItem({})", self.mtrans.name) +impl WorkItem { + fn kind(&self) -> ModuleKind { + match *self { + WorkItem::Optimize(ref m) => m.kind, + WorkItem::LTO(_) => ModuleKind::Regular, + } } -} -fn build_work_item(mtrans: ModuleTranslation, - config: ModuleConfig, - output_names: OutputFilenames) - -> WorkItem -{ - WorkItem { - mtrans, - config, - output_names, + fn name(&self) -> String { + match *self { + WorkItem::Optimize(ref m) => format!("optimize: {}", m.name), + WorkItem::LTO(ref m) => format!("lto: {}", m.name()), + } } } -fn execute_work_item(cgcx: &CodegenContext, work_item: WorkItem) - -> Result +enum WorkItemResult { + Compiled(CompiledModule), + NeedsLTO(ModuleTranslation), +} + +fn execute_work_item(cgcx: &CodegenContext, + work_item: WorkItem, + timeline: &mut Timeline) + -> Result { let diag_handler = cgcx.create_diag_handler(); - let module_name = work_item.mtrans.name.clone(); + let config = cgcx.config(work_item.kind()); + let mtrans = match work_item { + WorkItem::Optimize(mtrans) => mtrans, + WorkItem::LTO(mut lto) => { + unsafe { + let module = lto.optimize(cgcx, timeline)?; + let module = codegen(cgcx, &diag_handler, module, config, timeline)?; + return Ok(WorkItemResult::Compiled(module)) + } + } + }; + let module_name = mtrans.name.clone(); - let pre_existing = match work_item.mtrans.source { + let pre_existing = match mtrans.source { ModuleSource::Translated(_) => None, ModuleSource::Preexisting(ref wp) => Some(wp.clone()), }; @@ -1031,13 +1142,13 @@ fn execute_work_item(cgcx: &CodegenContext, work_item: WorkItem) let incr_comp_session_dir = cgcx.incr_comp_session_dir .as_ref() .unwrap(); - let name = &work_item.mtrans.name; + let name = &mtrans.name; for (kind, saved_file) in wp.saved_files { - let obj_out = work_item.output_names.temp_path(kind, Some(name)); + let obj_out = cgcx.output_filenames.temp_path(kind, Some(name)); let source_file = in_incr_comp_dir(&incr_comp_session_dir, &saved_file); debug!("copying pre-existing module `{}` from {:?} to {}", - work_item.mtrans.name, + mtrans.name, source_file, obj_out.display()); match link_or_copy(&source_file, &obj_out) { @@ -1050,31 +1161,56 @@ fn execute_work_item(cgcx: &CodegenContext, work_item: WorkItem) } } } + let object = cgcx.output_filenames.temp_path(OutputType::Object, Some(name)); - Ok(CompiledModule { + Ok(WorkItemResult::Compiled(CompiledModule { + object, + llmod_id: mtrans.llmod_id.clone(), name: module_name, kind: ModuleKind::Regular, pre_existing: true, - symbol_name_hash: work_item.mtrans.symbol_name_hash, - emit_bc: work_item.config.emit_bc, - emit_obj: work_item.config.emit_obj, - }) + emit_bc: config.emit_bc, + emit_obj: config.emit_obj, + })) } else { debug!("llvm-optimizing {:?}", module_name); unsafe { - optimize_and_codegen(cgcx, - &diag_handler, - work_item.mtrans, - work_item.config, - work_item.output_names) + optimize(cgcx, &diag_handler, &mtrans, config, timeline)?; + + let lto = cgcx.lto; + + let auto_thin_lto = + cgcx.thinlto && + cgcx.total_cgus > 1 && + mtrans.kind != ModuleKind::Allocator; + + // If we're a metadata module we never participate in LTO. 
+ // + // If LTO was explicitly requested on the command line, we always + // LTO everything else. + // + // If LTO *wasn't* explicitly requested and we're not a metdata + // module, then we may automatically do ThinLTO if we've got + // multiple codegen units. Note, however, that the allocator module + // doesn't participate here automatically because of linker + // shenanigans later on. + if mtrans.kind == ModuleKind::Metadata || (!lto && !auto_thin_lto) { + let module = codegen(cgcx, &diag_handler, mtrans, config, timeline)?; + Ok(WorkItemResult::Compiled(module)) + } else { + Ok(WorkItemResult::NeedsLTO(mtrans)) + } } } } -#[derive(Debug)] enum Message { Token(io::Result), + NeedsLTO { + result: ModuleTranslation, + worker_id: usize, + }, Done { result: Result, worker_id: usize, @@ -1082,8 +1218,8 @@ enum Message { TranslationDone { llvm_work_item: WorkItem, cost: u64, - is_last: bool, }, + TranslationComplete, TranslateItem, } @@ -1100,15 +1236,27 @@ enum MainThreadWorkerState { LLVMing, } -fn start_executing_work(sess: &Session, +fn start_executing_work(tcx: TyCtxt, + crate_info: &CrateInfo, shared_emitter: SharedEmitter, trans_worker_send: Sender, - coordinator_send: Sender, - coordinator_receive: Receiver, + coordinator_receive: Receiver>, + total_cgus: usize, jobserver: Client, time_graph: Option, - exported_symbols: Arc) - -> thread::JoinHandle { + modules_config: Arc, + metadata_config: Arc, + allocator_config: Arc) + -> thread::JoinHandle> { + let coordinator_send = tcx.tx_to_llvm_workers.clone(); + let mut exported_symbols = FxHashMap(); + exported_symbols.insert(LOCAL_CRATE, tcx.exported_symbols(LOCAL_CRATE)); + for &cnum in tcx.crates().iter() { + exported_symbols.insert(cnum, tcx.exported_symbols(cnum)); + } + let exported_symbols = Arc::new(exported_symbols); + let sess = tcx.sess; + // First up, convert our jobserver into a helper thread so we can use normal // mpsc channels to manage our messages and such. Once we've got the helper // thread then request `n-1` tokens because all of our work items are ready @@ -1121,12 +1269,12 @@ fn start_executing_work(sess: &Session, // tokens on `rx` above which will get managed in the main loop below. 
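With `coordinator_send` now living on the `tcx`, the channel carries boxed, type-erased messages and the coordinator downcasts each one back to the trans-local `Message` type when it is received (see the `downcast` in the main loop below). A small sketch of that pattern, using `Box<dyn Any + Send>` and a simplified two-variant message enum:

```rust
use std::any::Any;
use std::sync::mpsc::channel;
use std::sync::mpsc::Sender;
use std::thread;

// The trans-local message type; the channel itself is type-erased.
enum Message {
    TranslationDone { cost: u64 },
    TranslationComplete,
}

fn send(tx: &Sender<Box<dyn Any + Send>>, msg: Message) {
    // A failed send just means the coordinator is gone; ignore it,
    // like the `drop(...send(...))` calls in the patch.
    drop(tx.send(Box::new(msg)));
}

fn main() {
    let (tx, rx) = channel::<Box<dyn Any + Send>>();

    let producer = thread::spawn(move || {
        send(&tx, Message::TranslationDone { cost: 42 });
        send(&tx, Message::TranslationComplete);
    });

    // Coordinator side: downcast each erased box back to `Message`.
    while let Ok(msg) = rx.recv() {
        match *msg.downcast::<Message>().ok().unwrap() {
            Message::TranslationDone { cost } => {
                println!("work item queued, estimated cost {}", cost);
            }
            Message::TranslationComplete => {
                println!("all items translated");
                break;
            }
        }
    }
    producer.join().unwrap();
}
```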
let coordinator_send2 = coordinator_send.clone(); let helper = jobserver.into_helper_thread(move |token| { - drop(coordinator_send2.send(Message::Token(token))); + drop(coordinator_send2.send(Box::new(Message::Token(token)))); }).expect("failed to spawn helper thread"); let mut each_linked_rlib_for_lto = Vec::new(); - drop(link::each_linked_rlib(sess, &mut |cnum, path| { - if link::ignored_for_lto(sess, cnum) { + drop(link::each_linked_rlib(sess, crate_info, &mut |cnum, path| { + if link::ignored_for_lto(crate_info, cnum) { return } each_linked_rlib_for_lto.push((cnum, path.to_path_buf())); @@ -1136,7 +1284,9 @@ fn start_executing_work(sess: &Session, crate_types: sess.crate_types.borrow().clone(), each_linked_rlib_for_lto, lto: sess.lto(), + thinlto: sess.opts.debugging_opts.thinlto, no_landing_pads: sess.no_landing_pads(), + save_temps: sess.opts.cg.save_temps, opts: Arc::new(sess.opts.clone()), time_passes: sess.time_passes(), exported_symbols, @@ -1147,6 +1297,12 @@ fn start_executing_work(sess: &Session, coordinator_send, diag_emitter: shared_emitter.clone(), time_graph, + output_filenames: tcx.output_filenames(LOCAL_CRATE), + regular_module_config: modules_config, + metadata_module_config: metadata_config, + allocator_module_config: allocator_config, + tm_factory: target_machine_factory(tcx.sess), + total_cgus, }; // This is the "main loop" of parallel work happening for parallel codegen. @@ -1269,6 +1425,21 @@ fn start_executing_work(sess: &Session, // and whenever we're done with that work we release the semaphore. In this // manner we can ensure that the maximum number of parallel workers is // capped at any one point in time. + // + // LTO and the coordinator thread + // ------------------------------ + // + // The final job the coordinator thread is responsible for is managing LTO + // and how that works. When LTO is requested what we'll to is collect all + // optimized LLVM modules into a local vector on the coordinator. Once all + // modules have been translated and optimized we hand this to the `lto` + // module for further optimization. The `lto` module will return back a list + // of more modules to work on, which the coordinator will continue to spawn + // work for. + // + // Each LLVM module is automatically sent back to the coordinator for LTO if + // necessary. There's already optimizations in place to avoid sending work + // back to the coordinator if LTO isn't requested. return thread::spawn(move || { // We pretend to be within the top-level LLVM time-passes task here: set_time_depth(1); @@ -1291,12 +1462,14 @@ fn start_executing_work(sess: &Session, let mut compiled_modules = vec![]; let mut compiled_metadata_module = None; let mut compiled_allocator_module = None; + let mut needs_lto = Vec::new(); + let mut started_lto = false; // This flag tracks whether all items have gone through translations let mut translation_done = false; // This is the queue of LLVM work items that still need processing. - let mut work_items = Vec::new(); + let mut work_items = Vec::<(WorkItem, u64)>::new(); // This are the Jobserver Tokens we currently hold. Does not include // the implicit Token the compiler process owns no matter what. 
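The `work_items` vector declared above is kept sorted by estimated processing cost: new items are placed with `binary_search_by_key` and the coordinator pops from the back, so the most expensive work is dispatched first (as the following hunks do for both freshly translated and LTO work). A standalone sketch of that scheduling queue:

```rust
// A work queue kept sorted by estimated cost (ascending), so that
// popping from the back always yields the most expensive remaining item.
struct CostQueue<T> {
    items: Vec<(T, u64)>,
}

impl<T> CostQueue<T> {
    fn new() -> CostQueue<T> {
        CostQueue { items: Vec::new() }
    }

    fn insert(&mut self, item: T, cost: u64) {
        // Find the insertion point that keeps the vector sorted by cost.
        let idx = self.items
            .binary_search_by_key(&cost, |&(_, c)| c)
            .unwrap_or_else(|idx| idx);
        self.items.insert(idx, (item, cost));
    }

    fn pop_most_expensive(&mut self) -> Option<(T, u64)> {
        self.items.pop()
    }
}

fn main() {
    let mut queue = CostQueue::new();
    queue.insert("small cgu", 10);
    queue.insert("huge cgu", 500);
    queue.insert("medium cgu", 120);

    // Expensive items are handed out first to keep workers busy the longest.
    while let Some((name, cost)) = queue.pop_most_expensive() {
        println!("dispatching {} (estimated cost {})", name, cost);
    }
}
```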
@@ -1312,6 +1485,7 @@ fn start_executing_work(sess: &Session, while !translation_done || work_items.len() > 0 || running > 0 || + needs_lto.len() > 0 || main_thread_worker_state != MainThreadWorkerState::Idle { // While there are still CGUs to be translated, the coordinator has @@ -1335,12 +1509,34 @@ fn start_executing_work(sess: &Session, worker: get_worker_id(&mut free_worker_ids), .. cgcx.clone() }; - maybe_start_llvm_timer(&item, &mut llvm_start_time); + maybe_start_llvm_timer(cgcx.config(item.kind()), + &mut llvm_start_time); main_thread_worker_state = MainThreadWorkerState::LLVMing; spawn_work(cgcx, item); } } } else { + // If we've finished everything related to normal translation + // then it must be the case that we've got some LTO work to do. + // Perform the serial work here of figuring out what we're + // going to LTO and then push a bunch of work items onto our + // queue to do LTO + if work_items.len() == 0 && + running == 0 && + main_thread_worker_state == MainThreadWorkerState::Idle { + assert!(!started_lto); + assert!(needs_lto.len() > 0); + started_lto = true; + let modules = mem::replace(&mut needs_lto, Vec::new()); + for (work, cost) in generate_lto_work(&cgcx, modules) { + let insertion_index = work_items + .binary_search_by_key(&cost, |&(_, cost)| cost) + .unwrap_or_else(|e| e); + work_items.insert(insertion_index, (work, cost)); + helper.request_token(); + } + } + // In this branch, we know that everything has been translated, // so it's just a matter of determining whether the implicit // Token is free to use for LLVM work. @@ -1351,7 +1547,8 @@ fn start_executing_work(sess: &Session, worker: get_worker_id(&mut free_worker_ids), .. cgcx.clone() }; - maybe_start_llvm_timer(&item, &mut llvm_start_time); + maybe_start_llvm_timer(cgcx.config(item.kind()), + &mut llvm_start_time); main_thread_worker_state = MainThreadWorkerState::LLVMing; spawn_work(cgcx, item); } else { @@ -1381,7 +1578,8 @@ fn start_executing_work(sess: &Session, while work_items.len() > 0 && running < tokens.len() { let (item, _) = work_items.pop().unwrap(); - maybe_start_llvm_timer(&item, &mut llvm_start_time); + maybe_start_llvm_timer(cgcx.config(item.kind()), + &mut llvm_start_time); let cgcx = CodegenContext { worker: get_worker_id(&mut free_worker_ids), @@ -1395,7 +1593,8 @@ fn start_executing_work(sess: &Session, // Relinquish accidentally acquired extra tokens tokens.truncate(running); - match coordinator_receive.recv().unwrap() { + let msg = coordinator_receive.recv().unwrap(); + match *msg.downcast::().ok().unwrap() { // Save the token locally and the next turn of the loop will use // this to spawn a new unit of work, or it may get dropped // immediately if we have no more work to spawn. @@ -1422,7 +1621,7 @@ fn start_executing_work(sess: &Session, } } - Message::TranslationDone { llvm_work_item, cost, is_last } => { + Message::TranslationDone { llvm_work_item, cost } => { // We keep the queue sorted by estimated processing cost, // so that more expensive items are processed earlier. This // is good for throughput as it gives the main thread more @@ -1438,15 +1637,14 @@ fn start_executing_work(sess: &Session, }; work_items.insert(insertion_index, (llvm_work_item, cost)); - if is_last { - // If this is the last, don't request a token because - // the trans worker thread will be free to handle this - // immediately. 
- translation_done = true; - } else { - helper.request_token(); - } + helper.request_token(); + assert_eq!(main_thread_worker_state, + MainThreadWorkerState::Translating); + main_thread_worker_state = MainThreadWorkerState::Idle; + } + Message::TranslationComplete => { + translation_done = true; assert_eq!(main_thread_worker_state, MainThreadWorkerState::Translating); main_thread_worker_state = MainThreadWorkerState::Idle; @@ -1483,10 +1681,21 @@ fn start_executing_work(sess: &Session, } } } + Message::NeedsLTO { result, worker_id } => { + assert!(!started_lto); + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + main_thread_worker_state = MainThreadWorkerState::Idle; + } else { + running -= 1; + } + + free_worker_ids.push(worker_id); + needs_lto.push(result); + } Message::Done { result: Err(()), worker_id: _ } => { - shared_emitter.fatal("aborting due to worker thread panic"); + shared_emitter.fatal("aborting due to worker thread failure"); // Exit the coordinator thread - panic!("aborting due to worker thread panic") + return Err(()) } Message::TranslateItem => { bug!("the coordinator should not receive translation requests") @@ -1504,14 +1713,19 @@ fn start_executing_work(sess: &Session, total_llvm_time); } + // Regardless of what order these modules completed in, report them to + // the backend in the same order every time to ensure that we're handing + // out deterministic results. + compiled_modules.sort_by(|a, b| a.name.cmp(&b.name)); + let compiled_metadata_module = compiled_metadata_module .expect("Metadata module not compiled?"); - CompiledModules { + Ok(CompiledModules { modules: compiled_modules, metadata_module: compiled_metadata_module, allocator_module: compiled_allocator_module, - } + }) }); // A heuristic that determines if we have enough LLVM WorkItems in the @@ -1524,11 +1738,11 @@ fn start_executing_work(sess: &Session, items_in_queue >= max_workers.saturating_sub(workers_running / 2) } - fn maybe_start_llvm_timer(work_item: &WorkItem, + fn maybe_start_llvm_timer(config: &ModuleConfig, llvm_start_time: &mut Option) { // We keep track of the -Ztime-passes output manually, // since the closure-based interface does not fit well here. - if work_item.config.time_passes { + if config.time_passes { if llvm_start_time.is_none() { *llvm_start_time = Some(Instant::now()); } @@ -1553,21 +1767,23 @@ fn spawn_work(cgcx: CodegenContext, work: WorkItem) { // Set up a destructor which will fire off a message that we're done as // we exit. struct Bomb { - coordinator_send: Sender, - result: Option, + coordinator_send: Sender>, + result: Option, worker_id: usize, } impl Drop for Bomb { fn drop(&mut self) { - let result = match self.result.take() { - Some(compiled_module) => Ok(compiled_module), - None => Err(()) + let worker_id = self.worker_id; + let msg = match self.result.take() { + Some(WorkItemResult::Compiled(m)) => { + Message::Done { result: Ok(m), worker_id } + } + Some(WorkItemResult::NeedsLTO(m)) => { + Message::NeedsLTO { result: m, worker_id } + } + None => Message::Done { result: Err(()), worker_id } }; - - drop(self.coordinator_send.send(Message::Done { - result, - worker_id: self.worker_id, - })); + drop(self.coordinator_send.send(Box::new(msg))); } } @@ -1580,22 +1796,17 @@ fn spawn_work(cgcx: CodegenContext, work: WorkItem) { // Execute the work itself, and if it finishes successfully then flag // ourselves as a success as well. 
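The `Bomb` in `spawn_work` above reports a result back to the coordinator from its `Drop` impl, so a worker that panics still produces a failure message instead of silently disappearing. The sketch below reduces that pattern to a plain channel; the `Outcome` type and message shape are simplified stand-ins for the patch's `Message::Done`/`Message::NeedsLTO`.

```rust
use std::sync::mpsc::{channel, Sender};
use std::thread;

enum Outcome {
    Compiled(String),
    Failed,
}

// Fires a completion message when dropped, even if the worker panicked
// and never got around to storing a result.
struct Bomb {
    coordinator_send: Sender<(usize, Outcome)>,
    result: Option<String>,
    worker_id: usize,
}

impl Drop for Bomb {
    fn drop(&mut self) {
        let outcome = match self.result.take() {
            Some(module) => Outcome::Compiled(module),
            None => Outcome::Failed,
        };
        // If the coordinator is gone there is nobody left to tell; ignore errors.
        drop(self.coordinator_send.send((self.worker_id, outcome)));
    }
}

fn spawn_work(worker_id: usize, tx: Sender<(usize, Outcome)>, should_panic: bool) {
    thread::spawn(move || {
        let mut bomb = Bomb { coordinator_send: tx, result: None, worker_id };
        if should_panic {
            panic!("LLVM blew up");
        }
        // Normal path: record the finished module before the bomb drops.
        bomb.result = Some(format!("module-{}", worker_id));
    });
}

fn main() {
    let (tx, rx) = channel();
    spawn_work(0, tx.clone(), false);
    spawn_work(1, tx.clone(), true);
    drop(tx);

    // The coordinator sees one message per worker, success or not.
    for (id, outcome) in rx {
        match outcome {
            Outcome::Compiled(name) => println!("worker {} compiled {}", id, name),
            Outcome::Failed => println!("worker {} failed (panic or error)", id),
        }
    }
}
```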
// - // Note that we ignore the result coming out of `execute_work_item` - // which will tell us if the worker failed with a `FatalError`. If that - // has happened, however, then a diagnostic was sent off to the main - // thread, along with an `AbortIfErrors` message. In that case the main - // thread is already exiting anyway most likely. - // - // In any case, there's no need for us to take further action here, so - // we just ignore the result and then send off our message saying that - // we're done, which if `execute_work_item` failed is unlikely to be - // seen by the main thread, but hey we might as well try anyway. + // Note that we ignore any `FatalError` coming out of `execute_work_item`, + // as a diagnostic was already sent off to the main thread - just + // surface that there was an error in this worker. bomb.result = { - let _timing_guard = cgcx.time_graph - .as_ref() - .map(|tg| tg.start(time_graph::TimelineId(cgcx.worker), - LLVM_WORK_PACKAGE_KIND)); - Some(execute_work_item(&cgcx, work).unwrap()) + let timeline = cgcx.time_graph.as_ref().map(|tg| { + tg.start(time_graph::TimelineId(cgcx.worker), + LLVM_WORK_PACKAGE_KIND, + &work.name()) + }); + let mut timeline = timeline.unwrap_or(Timeline::noop()); + execute_work_item(&cgcx, work, &mut timeline).ok() }; }); } @@ -1802,24 +2013,24 @@ pub struct OngoingCrateTranslation { windows_subsystem: Option, linker_info: LinkerInfo, no_integrated_as: bool, - - output_filenames: OutputFilenames, - regular_module_config: ModuleConfig, - metadata_module_config: ModuleConfig, - allocator_module_config: ModuleConfig, - + crate_info: CrateInfo, time_graph: Option, - coordinator_send: Sender, + coordinator_send: Sender>, trans_worker_receive: Receiver, shared_emitter_main: SharedEmitterMain, - future: thread::JoinHandle, + future: thread::JoinHandle>, + output_filenames: Arc, } impl OngoingCrateTranslation { - pub fn join(self, sess: &Session) -> CrateTranslation { + pub fn join(self, sess: &Session, dep_graph: &DepGraph) -> CrateTranslation { self.shared_emitter_main.check(sess, true); let compiled_modules = match self.future.join() { - Ok(compiled_modules) => compiled_modules, + Ok(Ok(compiled_modules)) => compiled_modules, + Ok(Err(())) => { + sess.abort_if_errors(); + panic!("expected abort due to worker thread errors") + }, Err(_) => { sess.fatal("Error during translation/LLVM phase."); } @@ -1832,6 +2043,7 @@ impl OngoingCrateTranslation { } copy_module_artifacts_into_incr_comp_cache(sess, + dep_graph, &compiled_modules, &self.output_filenames); produce_final_output_artifacts(sess, @@ -1840,7 +2052,7 @@ impl OngoingCrateTranslation { // FIXME: time_llvm_passes support - does this use a global context or // something? 
- if sess.opts.cg.codegen_units == 1 && sess.time_llvm_passes() { + if sess.opts.codegen_units == 1 && sess.time_llvm_passes() { unsafe { llvm::LLVMRustPrintPassTimings(); } } @@ -1850,6 +2062,7 @@ impl OngoingCrateTranslation { metadata: self.metadata, windows_subsystem: self.windows_subsystem, linker_info: self.linker_info, + crate_info: self.crate_info, modules: compiled_modules.modules, allocator_module: compiled_modules.allocator_module, @@ -1878,38 +2091,21 @@ impl OngoingCrateTranslation { trans } - pub fn submit_translated_module_to_llvm(&self, - sess: &Session, - mtrans: ModuleTranslation, - cost: u64, - is_last: bool) { - let module_config = match mtrans.kind { - ModuleKind::Regular => self.regular_module_config.clone(sess), - ModuleKind::Metadata => self.metadata_module_config.clone(sess), - ModuleKind::Allocator => self.allocator_module_config.clone(sess), - }; - - let llvm_work_item = build_work_item(mtrans, - module_config, - self.output_filenames.clone()); - - drop(self.coordinator_send.send(Message::TranslationDone { - llvm_work_item, - cost, - is_last - })); - } - pub fn submit_pre_translated_module_to_llvm(&self, - sess: &Session, - mtrans: ModuleTranslation, - is_last: bool) { + tcx: TyCtxt, + mtrans: ModuleTranslation) { self.wait_for_signal_to_translate_item(); - self.check_for_errors(sess); + self.check_for_errors(tcx.sess); // These are generally cheap and won't through off scheduling. let cost = 0; - self.submit_translated_module_to_llvm(sess, mtrans, cost, is_last); + submit_translated_module_to_llvm(tcx, mtrans, cost); + } + + pub fn translation_finished(&self, tcx: TyCtxt) { + self.wait_for_signal_to_translate_item(); + self.check_for_errors(tcx.sess); + drop(self.coordinator_send.send(Box::new(Message::TranslationComplete))); } pub fn check_for_errors(&self, sess: &Session) { @@ -1921,9 +2117,7 @@ impl OngoingCrateTranslation { Ok(Message::TranslateItem) => { // Nothing to do } - Ok(message) => { - panic!("unexpected message: {:?}", message) - } + Ok(_) => panic!("unexpected message"), Err(_) => { // One of the LLVM threads must have panicked, fall through so // error handling can be reached. 
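`join` above now distinguishes three outcomes of the coordinator thread: it finished with compiled modules (`Ok(Ok(_))`), it stopped because a worker reported a failure whose diagnostics were already emitted (`Ok(Err(()))`), or the coordinator thread itself panicked (`Err(_)`). A small sketch of that three-way handling, with placeholder types:

```rust
use std::thread;

struct CompiledModules {
    names: Vec<String>,
}

fn run_coordinator(fail_worker: bool) -> thread::JoinHandle<Result<CompiledModules, ()>> {
    thread::spawn(move || {
        if fail_worker {
            // A worker reported an error; diagnostics were already emitted,
            // so just signal failure to the main thread.
            return Err(());
        }
        Ok(CompiledModules { names: vec!["cgu-0".to_string(), "cgu-1".to_string()] })
    })
}

fn join_translation(handle: thread::JoinHandle<Result<CompiledModules, ()>>) -> CompiledModules {
    match handle.join() {
        Ok(Ok(modules)) => modules,
        Ok(Err(())) => {
            // The real compiler calls sess.abort_if_errors() here and only
            // panics if, unexpectedly, no error had been reported.
            panic!("translation workers failed; errors already reported");
        }
        Err(_) => {
            // The coordinator thread itself panicked.
            panic!("error during translation/LLVM phase");
        }
    }
}

fn main() {
    let modules = join_translation(run_coordinator(false));
    println!("compiled {} modules", modules.names.len());
}
```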
@@ -1931,3 +2125,13 @@ impl OngoingCrateTranslation { } } } + +pub fn submit_translated_module_to_llvm(tcx: TyCtxt, + mtrans: ModuleTranslation, + cost: u64) { + let llvm_work_item = WorkItem::Optimize(mtrans); + drop(tcx.tx_to_llvm_workers.send(Box::new(Message::TranslationDone { + llvm_work_item, + cost, + }))); +} diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index e4b090471d..17e00ac134 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -30,65 +30,70 @@ use super::ModuleKind; use assert_module_sources; use back::link; -use back::linker::LinkerInfo; -use back::symbol_export::{self, ExportedSymbols}; -use back::write::{self, OngoingCrateTranslation}; -use llvm::{ContextRef, Linkage, ModuleRef, ValueRef, Vector, get_param}; +use back::symbol_export; +use back::write::{self, OngoingCrateTranslation, create_target_machine}; +use llvm::{ContextRef, ModuleRef, ValueRef, Vector, get_param}; use llvm; use metadata; -use rustc::hir::def_id::LOCAL_CRATE; +use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; use rustc::middle::lang_items::StartFnLangItem; +use rustc::middle::trans::{Linkage, Visibility, Stats}; use rustc::middle::cstore::{EncodedMetadata, EncodedMetadataHashes}; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::dep_graph::AssertDepGraphSafe; -use rustc::middle::cstore::LinkMeta; -use rustc::hir::map as hir_map; +use rustc::ty::maps::Providers; +use rustc::dep_graph::{DepNode, DepKind, DepConstructor}; +use rustc::middle::cstore::{self, LinkMeta, LinkagePreference}; use rustc::util::common::{time, print_time_passes_entry}; -use rustc::session::config::{self, NoDebugInfo, OutputFilenames, OutputType}; +use rustc::session::config::{self, NoDebugInfo}; use rustc::session::Session; -use rustc_incremental::{self, IncrementalHashesMap}; +use rustc_incremental; use abi; use allocator; use mir::lvalue::LvalueRef; use attributes; use builder::Builder; use callee; -use common::{C_bool, C_bytes_in_context, C_i32, C_uint}; +use common::{C_bool, C_bytes_in_context, C_i32, C_usize}; use collector::{self, TransItemCollectionMode}; use common::{C_struct_in_context, C_u64, C_undef, C_array}; use common::CrateContext; use common::{type_is_zero_size, val_ty}; use common; use consts; -use context::{self, LocalCrateContext, SharedCrateContext, Stats}; +use context::{self, LocalCrateContext, SharedCrateContext}; use debuginfo; use declare; use machine; use meth; use mir; use monomorphize::{self, Instance}; -use partitioning::{self, PartitioningStrategy, CodegenUnit}; +use partitioning::{self, PartitioningStrategy, CodegenUnit, CodegenUnitExt}; use symbol_names_test; use time_graph; -use trans_item::{TransItem, DefPathBasedNames}; +use trans_item::{TransItem, TransItemExt, DefPathBasedNames}; use type_::Type; use type_of; use value::Value; -use rustc::util::nodemap::{NodeSet, FxHashMap, FxHashSet}; +use rustc::util::nodemap::{NodeSet, FxHashMap, FxHashSet, DefIdSet}; +use CrateInfo; -use libc::c_uint; +use std::any::Any; use std::ffi::{CStr, CString}; use std::str; use std::sync::Arc; use std::time::{Instant, Duration}; use std::i32; +use std::sync::mpsc; use syntax_pos::Span; +use syntax_pos::symbol::InternedString; use syntax::attr; use rustc::hir; use syntax::ast; use mir::lvalue::Alignment; +pub use rustc_trans_utils::{find_exported_symbols, check_for_rustc_errors_attr}; + pub struct StatRecorder<'a, 'tcx: 'a> { ccx: &'a CrateContext<'a, 'tcx>, name: Option, @@ -97,7 +102,7 @@ pub struct StatRecorder<'a, 'tcx: 'a> { impl<'a, 'tcx> StatRecorder<'a, 'tcx> 
{ pub fn new(ccx: &'a CrateContext<'a, 'tcx>, name: String) -> StatRecorder<'a, 'tcx> { - let istart = ccx.stats().n_llvm_insns.get(); + let istart = ccx.stats().borrow().n_llvm_insns; StatRecorder { ccx, name: Some(name), @@ -109,12 +114,12 @@ impl<'a, 'tcx> StatRecorder<'a, 'tcx> { impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> { fn drop(&mut self) { if self.ccx.sess().trans_stats() { - let iend = self.ccx.stats().n_llvm_insns.get(); - self.ccx.stats().fn_stats.borrow_mut() - .push((self.name.take().unwrap(), iend - self.istart)); - self.ccx.stats().n_fns.set(self.ccx.stats().n_fns.get() + 1); + let mut stats = self.ccx.stats().borrow_mut(); + let iend = stats.n_llvm_insns; + stats.fn_stats.push((self.name.take().unwrap(), iend - self.istart)); + stats.n_fns += 1; // Reset LLVM insn count to avoid compound costs. - self.ccx.stats().n_llvm_insns.set(self.istart); + stats.n_llvm_insns = self.istart; } } } @@ -200,7 +205,9 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>, -> ValueRef { let (source, target) = ccx.tcx().struct_lockstep_tails(source, target); match (&source.sty, &target.sty) { - (&ty::TyArray(_, len), &ty::TySlice(_)) => C_uint(ccx, len), + (&ty::TyArray(_, len), &ty::TySlice(_)) => { + C_usize(ccx, len.val.to_const_int().unwrap().to_u64().unwrap()) + } (&ty::TyDynamic(..), &ty::TyDynamic(..)) => { // For now, upcasts are limited to changes in marker // traits, and hence never actually require an actual @@ -523,7 +530,7 @@ pub fn call_memcpy<'a, 'tcx>(b: &Builder<'a, 'tcx>, let memcpy = ccx.get_intrinsic(&key); let src_ptr = b.pointercast(src, Type::i8p(ccx)); let dst_ptr = b.pointercast(dst, Type::i8p(ccx)); - let size = b.intcast(n_bytes, ccx.int_type(), false); + let size = b.intcast(n_bytes, ccx.isize_ty(), false); let align = C_i32(ccx, align as i32); let volatile = C_bool(ccx, false); b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); @@ -544,7 +551,7 @@ pub fn memcpy_ty<'a, 'tcx>( } let align = align.unwrap_or_else(|| ccx.align_of(t)); - call_memcpy(bcx, dst, src, C_uint(ccx, size), align); + call_memcpy(bcx, dst, src, C_usize(ccx, size), align); } pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>, @@ -575,7 +582,7 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance // release builds. info!("trans_instance({})", instance); - let fn_ty = common::instance_ty(ccx.shared(), &instance); + let fn_ty = common::instance_ty(ccx.tcx(), &instance); let sig = common::ty_fn_sig(ccx, fn_ty); let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig); @@ -584,7 +591,7 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance None => bug!("Instance `{:?}` not already declared", instance) }; - ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1); + ccx.stats().borrow_mut().n_closures += 1; // The `uwtable` attribute according to LLVM is: // @@ -611,7 +618,9 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance mir::trans_mir(ccx, lldecl, &mir, instance, sig); } -pub fn llvm_linkage_by_name(name: &str) -> Option { +pub fn linkage_by_name(name: &str) -> Option { + use rustc::middle::trans::Linkage::*; + // Use the names from src/llvm/docs/LangRef.rst here. 
Most types are only // applicable to variable declarations and may not really make sense for // Rust code in the first place but whitelist them anyway and trust that @@ -621,17 +630,17 @@ pub fn llvm_linkage_by_name(name: &str) -> Option { // ghost, dllimport, dllexport and linkonce_odr_autohide are not supported // and don't have to be, LLVM treats them as no-ops. match name { - "appending" => Some(llvm::Linkage::AppendingLinkage), - "available_externally" => Some(llvm::Linkage::AvailableExternallyLinkage), - "common" => Some(llvm::Linkage::CommonLinkage), - "extern_weak" => Some(llvm::Linkage::ExternalWeakLinkage), - "external" => Some(llvm::Linkage::ExternalLinkage), - "internal" => Some(llvm::Linkage::InternalLinkage), - "linkonce" => Some(llvm::Linkage::LinkOnceAnyLinkage), - "linkonce_odr" => Some(llvm::Linkage::LinkOnceODRLinkage), - "private" => Some(llvm::Linkage::PrivateLinkage), - "weak" => Some(llvm::Linkage::WeakAnyLinkage), - "weak_odr" => Some(llvm::Linkage::WeakODRLinkage), + "appending" => Some(Appending), + "available_externally" => Some(AvailableExternally), + "common" => Some(Common), + "extern_weak" => Some(ExternalWeak), + "external" => Some(External), + "internal" => Some(Internal), + "linkonce" => Some(LinkOnceAny), + "linkonce_odr" => Some(LinkOnceODR), + "private" => Some(Private), + "weak" => Some(WeakAny), + "weak_odr" => Some(WeakODR), _ => None, } } @@ -650,20 +659,6 @@ pub fn set_link_section(ccx: &CrateContext, } } -// check for the #[rustc_error] annotation, which forces an -// error in trans. This is used to write compile-fail tests -// that actually test that compilation succeeds without -// reporting an error. -fn check_for_rustc_errors_attr(tcx: TyCtxt) { - if let Some((id, span)) = *tcx.sess.entry_fn.borrow() { - let main_def_id = tcx.hir.local_def_id(id); - - if tcx.has_attr(main_def_id, "rustc_error") { - tcx.sess.span_fatal(span, "compilation successful"); - } - } -} - /// Create the `main` function which will initialize the rust runtime and call /// users main function. fn maybe_create_entry_wrapper(ccx: &CrateContext) { @@ -695,7 +690,8 @@ fn maybe_create_entry_wrapper(ccx: &CrateContext) { sp: Span, rust_main: ValueRef, use_start_lang_item: bool) { - let llfty = Type::func(&[ccx.int_type(), Type::i8p(ccx).ptr_to()], &ccx.int_type()); + // Signature of native main(), corresponding to C's `int main(int, char **)` + let llfty = Type::func(&[Type::c_int(ccx), Type::i8p(ccx).ptr_to()], &Type::c_int(ccx)); if declare::get_defined_value(ccx, "main").is_some() { // FIXME: We should be smart and show a better diagnostic here. 
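The entry wrapper introduced above, and completed in the following hunk, gives the generated `main` the C signature `int main(int, char **)`, widens `argc` to the pointer-sized integer the Rust start function expects, and truncates the returned value back to `c_int`. The following sketch shows just those conversions in plain Rust rather than emitted LLVM IR; `rust_start` and `c_main` are hypothetical names.

```rust
use std::os::raw::{c_char, c_int};

// Stand-in for the Rust start lang item: takes isize argc and raw argv.
fn rust_start(argc: isize, _argv: *const *const c_char) -> isize {
    println!("rust_start called with argc = {}", argc);
    0
}

// Shape of the wrapper the compiler emits: C ABI in, C ABI out.
fn c_main(argc: c_int, argv: *const *const c_char) -> c_int {
    // argc arrives as a C int; widen it to the pointer-sized isize
    // before handing it to the Rust-side entry point...
    let result = rust_start(argc as isize, argv);
    // ...and truncate the isize result back to C's int on the way out.
    result as c_int
}

fn main() {
    let status = c_main(3, std::ptr::null());
    println!("process would exit with status {}", status);
}
```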
@@ -714,19 +710,27 @@ fn maybe_create_entry_wrapper(ccx: &CrateContext) { debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(ccx, &bld); + // Params from native main() used as args for rust start function + let param_argc = get_param(llfn, 0); + let param_argv = get_param(llfn, 1); + let arg_argc = bld.intcast(param_argc, ccx.isize_ty(), true); + let arg_argv = param_argv; + let (start_fn, args) = if use_start_lang_item { let start_def_id = ccx.tcx().require_lang_item(StartFnLangItem); let start_instance = Instance::mono(ccx.tcx(), start_def_id); let start_fn = callee::get_fn(ccx, start_instance); - (start_fn, vec![bld.pointercast(rust_main, Type::i8p(ccx).ptr_to()), get_param(llfn, 0), - get_param(llfn, 1)]) + (start_fn, vec![bld.pointercast(rust_main, Type::i8p(ccx).ptr_to()), + arg_argc, arg_argv]) } else { debug!("using user-defined start fn"); - (rust_main, vec![get_param(llfn, 0 as c_uint), get_param(llfn, 1 as c_uint)]) + (rust_main, vec![arg_argc, arg_argv]) }; let result = bld.call(start_fn, &args, None); - bld.ret(result); + + // Return rust start function's result from native main() + bld.ret(bld.intcast(result, Type::c_int(ccx), true)); } } @@ -735,6 +739,7 @@ fn contains_null(s: &str) -> bool { } fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, + llmod_id: &str, link_meta: &LinkMeta, exported_symbols: &NodeSet) -> (ContextRef, ModuleRef, @@ -744,7 +749,7 @@ fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, use flate2::write::DeflateEncoder; let (metadata_llcx, metadata_llmod) = unsafe { - context::create_context_and_module(tcx.sess, "metadata") + context::create_context_and_module(tcx.sess, llmod_id) }; #[derive(PartialEq, Eq, PartialOrd, Ord)] @@ -774,16 +779,13 @@ fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, EncodedMetadataHashes::new()); } - let cstore = &tcx.sess.cstore; - let (metadata, hashes) = cstore.encode_metadata(tcx, - &link_meta, - exported_symbols); + let (metadata, hashes) = tcx.encode_metadata(link_meta, exported_symbols); if kind == MetadataKind::Uncompressed { return (metadata_llcx, metadata_llmod, metadata, hashes); } assert!(kind == MetadataKind::Compressed); - let mut compressed = cstore.metadata_encoding_version().to_vec(); + let mut compressed = tcx.metadata_encoding_version(); DeflateEncoder::new(&mut compressed, Compression::Fast) .write_all(&metadata.raw_data).unwrap(); @@ -878,93 +880,42 @@ fn iter_globals(llmod: llvm::ModuleRef) -> ValueIter { } } -/// The context provided lists a set of reachable ids as calculated by -/// middle::reachable, but this contains far more ids and symbols than we're -/// actually exposing from the object file. This function will filter the set in -/// the context to the set of ids which correspond to symbols that are exposed -/// from the object file being generated. -/// -/// This list is later used by linkers to determine the set of symbols needed to -/// be exposed from a dynamic library and it's also encoded into the metadata. -pub fn find_exported_symbols(tcx: TyCtxt, reachable: &NodeSet) -> NodeSet { - reachable.iter().cloned().filter(|&id| { - // Next, we want to ignore some FFI functions that are not exposed from - // this crate. Reachable FFI functions can be lumped into two - // categories: - // - // 1. Those that are included statically via a static library - // 2. Those included otherwise (e.g. 
dynamically or via a framework) - // - // Although our LLVM module is not literally emitting code for the - // statically included symbols, it's an export of our library which - // needs to be passed on to the linker and encoded in the metadata. - // - // As a result, if this id is an FFI item (foreign item) then we only - // let it through if it's included statically. - match tcx.hir.get(id) { - hir_map::NodeForeignItem(..) => { - let def_id = tcx.hir.local_def_id(id); - tcx.sess.cstore.is_statically_included_foreign_item(def_id) - } +pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + rx: mpsc::Receiver>) + -> OngoingCrateTranslation { - // Only consider nodes that actually have exported symbols. - hir_map::NodeItem(&hir::Item { - node: hir::ItemStatic(..), .. }) | - hir_map::NodeItem(&hir::Item { - node: hir::ItemFn(..), .. }) | - hir_map::NodeImplItem(&hir::ImplItem { - node: hir::ImplItemKind::Method(..), .. }) => { - let def_id = tcx.hir.local_def_id(id); - let generics = tcx.generics_of(def_id); - let attributes = tcx.get_attrs(def_id); - (generics.parent_types == 0 && generics.types.is_empty()) && - // Functions marked with #[inline] are only ever translated - // with "internal" linkage and are never exported. - !attr::requests_inline(&attributes) - } + check_for_rustc_errors_attr(tcx); - _ => false + if tcx.sess.opts.debugging_opts.thinlto { + if unsafe { !llvm::LLVMRustThinLTOAvailable() } { + tcx.sess.fatal("this compiler's LLVM does not support ThinLTO"); } - }).collect() -} + } -pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - analysis: ty::CrateAnalysis, - incremental_hashes_map: IncrementalHashesMap, - output_filenames: &OutputFilenames) - -> OngoingCrateTranslation { - check_for_rustc_errors_attr(tcx); + let crate_hash = tcx.dep_graph + .fingerprint_of(&DepNode::new_no_params(DepKind::Krate)); + let link_meta = link::build_link_meta(crate_hash); + let exported_symbol_node_ids = find_exported_symbols(tcx); - // Be careful with this krate: obviously it gives access to the - // entire contents of the krate. So if you push any subtasks of - // `TransCrate`, you need to be careful to register "reads" of the - // particular items that will be processed. - let krate = tcx.hir.krate(); - let ty::CrateAnalysis { reachable, .. } = analysis; - let check_overflow = tcx.sess.overflow_checks(); - let link_meta = link::build_link_meta(&incremental_hashes_map); - let exported_symbol_node_ids = find_exported_symbols(tcx, &reachable); - - let shared_ccx = SharedCrateContext::new(tcx, - check_overflow, - output_filenames); + let shared_ccx = SharedCrateContext::new(tcx); // Translate the metadata. 
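The compressed metadata written by `write_metadata` above is an uncompressed encoding-version header followed by a deflate-compressed payload. The sketch below shows that encoding using the flate2 crate (which the in-tree code also uses, though with an older flate2 API whose constant names differ slightly); the header bytes and function names here are invented for the example.

```rust
// Assumes the `flate2` crate as a dependency.
use flate2::write::DeflateEncoder;
use flate2::Compression;
use std::io::Write;

fn encode_metadata(version_header: &[u8], raw_metadata: &[u8]) -> Vec<u8> {
    // Start the buffer with the (uncompressed) encoding-version bytes so a
    // reader can reject metadata written by an incompatible compiler.
    let mut buf = version_header.to_vec();

    // Compress the actual metadata and append it after the header.
    let mut encoder = DeflateEncoder::new(&mut buf, Compression::fast());
    encoder.write_all(raw_metadata).expect("compressing metadata failed");
    encoder.finish().expect("flushing metadata encoder failed");

    buf
}

fn main() {
    let header = b"rust-metadata-v1";
    let payload = vec![0u8; 4096]; // stand-in for the serialized crate metadata
    let encoded = encode_metadata(header, &payload);
    println!("encoded {} bytes of metadata into {} bytes", payload.len(), encoded.len());
}
```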
+ let llmod_id = "metadata"; let (metadata_llcx, metadata_llmod, metadata, metadata_incr_hashes) = time(tcx.sess.time_passes(), "write metadata", || { - write_metadata(tcx, &link_meta, &exported_symbol_node_ids) + write_metadata(tcx, llmod_id, &link_meta, &exported_symbol_node_ids) }); let metadata_module = ModuleTranslation { name: link::METADATA_MODULE_NAME.to_string(), - symbol_name_hash: 0, // we always rebuild metadata, at least for now + llmod_id: llmod_id.to_string(), source: ModuleSource::Translated(ModuleLlvm { llcx: metadata_llcx, llmod: metadata_llmod, + tm: create_target_machine(tcx.sess), }), kind: ModuleKind::Metadata, }; - let no_builtins = attr::contains_name(&krate.attrs, "no_builtins"); let time_graph = if tcx.sess.opts.debugging_opts.trans_time_graph { Some(time_graph::TimeGraph::new()) } else { @@ -974,25 +925,18 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // Skip crate items and just output metadata in -Z no-trans mode. if tcx.sess.opts.debugging_opts.no_trans || !tcx.sess.opts.output_types.should_trans() { - let empty_exported_symbols = ExportedSymbols::empty(); - let linker_info = LinkerInfo::new(&shared_ccx, &empty_exported_symbols); let ongoing_translation = write::start_async_translation( - tcx.sess, - output_filenames, + tcx, time_graph.clone(), - tcx.crate_name(LOCAL_CRATE), link_meta, metadata, - Arc::new(empty_exported_symbols), - no_builtins, - None, - linker_info, - false); + rx, + 1); - ongoing_translation.submit_pre_translated_module_to_llvm(tcx.sess, metadata_module, true); + ongoing_translation.submit_pre_translated_module_to_llvm(tcx, metadata_module); + ongoing_translation.translation_finished(tcx); assert_and_save_dep_graph(tcx, - incremental_hashes_map, metadata_incr_hashes, link_meta); @@ -1001,65 +945,41 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, return ongoing_translation; } - let exported_symbols = Arc::new(ExportedSymbols::compute(tcx, - &exported_symbol_node_ids)); - // Run the translation item collector and partition the collected items into // codegen units. - let (translation_items, codegen_units) = - collect_and_partition_translation_items(&shared_ccx, &exported_symbols); - - assert!(codegen_units.len() <= 1 || !tcx.sess.lto()); - - let linker_info = LinkerInfo::new(&shared_ccx, &exported_symbols); - let subsystem = attr::first_attr_value_str_by_name(&krate.attrs, - "windows_subsystem"); - let windows_subsystem = subsystem.map(|subsystem| { - if subsystem != "windows" && subsystem != "console" { - tcx.sess.fatal(&format!("invalid windows subsystem `{}`, only \ - `windows` and `console` are allowed", - subsystem)); + let codegen_units = + shared_ccx.tcx().collect_and_partition_translation_items(LOCAL_CRATE).1; + let codegen_units = (*codegen_units).clone(); + + // Force all codegen_unit queries so they are already either red or green + // when compile_codegen_unit accesses them. We are not able to re-execute + // the codegen_unit query from just the DepNode, so an unknown color would + // lead to having to re-execute compile_codegen_unit, possibly + // unnecessarily. 
+ if tcx.dep_graph.is_fully_enabled() { + for cgu in &codegen_units { + tcx.codegen_unit(cgu.name().clone()); } - subsystem.to_string() - }); - - let no_integrated_as = tcx.sess.opts.cg.no_integrated_as || - (tcx.sess.target.target.options.no_integrated_as && - (output_filenames.outputs.contains_key(&OutputType::Object) || - output_filenames.outputs.contains_key(&OutputType::Exe))); + } let ongoing_translation = write::start_async_translation( - tcx.sess, - output_filenames, + tcx, time_graph.clone(), - tcx.crate_name(LOCAL_CRATE), link_meta, metadata, - exported_symbols.clone(), - no_builtins, - windows_subsystem, - linker_info, - no_integrated_as); + rx, + codegen_units.len()); // Translate an allocator shim, if any - // - // If LTO is enabled and we've got some previous LLVM module we translated - // above, then we can just translate directly into that LLVM module. If not, - // however, we need to create a separate module and trans into that. Note - // that the separate translation is critical for the standard library where - // the rlib's object file doesn't have allocator functions but the dylib - // links in an object file that has allocator functions. When we're - // compiling a final LTO artifact, though, there's no need to worry about - // this as we're not working with this dual "rlib/dylib" functionality. - let allocator_module = if tcx.sess.lto() { - None - } else if let Some(kind) = tcx.sess.allocator_kind.get() { + let allocator_module = if let Some(kind) = tcx.sess.allocator_kind.get() { unsafe { + let llmod_id = "allocator"; let (llcx, llmod) = - context::create_context_and_module(tcx.sess, "allocator"); + context::create_context_and_module(tcx.sess, llmod_id); let modules = ModuleLlvm { llmod, llcx, + tm: create_target_machine(tcx.sess), }; time(tcx.sess.time_passes(), "write allocator module", || { allocator::trans(tcx, &modules, kind) @@ -1067,7 +987,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, Some(ModuleTranslation { name: link::ALLOCATOR_MODULE_NAME.to_string(), - symbol_name_hash: 0, // we always rebuild allocator shims + llmod_id: llmod_id.to_string(), source: ModuleSource::Translated(modules), kind: ModuleKind::Allocator, }) @@ -1077,18 +997,10 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, }; if let Some(allocator_module) = allocator_module { - ongoing_translation.submit_pre_translated_module_to_llvm(tcx.sess, allocator_module, false); + ongoing_translation.submit_pre_translated_module_to_llvm(tcx, allocator_module); } - let codegen_unit_count = codegen_units.len(); - ongoing_translation.submit_pre_translated_module_to_llvm(tcx.sess, - metadata_module, - codegen_unit_count == 0); - - let translation_items = Arc::new(translation_items); - - let mut all_stats = Stats::default(); - let mut module_dispositions = tcx.sess.opts.incremental.as_ref().map(|_| Vec::new()); + ongoing_translation.submit_pre_translated_module_to_llvm(tcx, metadata_module); // We sort the codegen units by size. This way we can schedule work for LLVM // a bit more efficiently. 
Note that "size" is defined rather crudely at the @@ -1101,221 +1013,99 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, }; let mut total_trans_time = Duration::new(0, 0); + let mut all_stats = Stats::default(); - for (cgu_index, cgu) in codegen_units.into_iter().enumerate() { + for cgu in codegen_units.into_iter() { ongoing_translation.wait_for_signal_to_translate_item(); ongoing_translation.check_for_errors(tcx.sess); - let start_time = Instant::now(); - - let module = { - let _timing_guard = time_graph - .as_ref() - .map(|time_graph| time_graph.start(write::TRANS_WORKER_TIMELINE, - write::TRANS_WORK_PACKAGE_KIND)); - let dep_node = cgu.work_product_dep_node(); - let ((stats, module), _) = - tcx.dep_graph.with_task(dep_node, - AssertDepGraphSafe(&shared_ccx), - AssertDepGraphSafe((cgu, - translation_items.clone(), - exported_symbols.clone())), - module_translation); - all_stats.extend(stats); - - if let Some(ref mut module_dispositions) = module_dispositions { - module_dispositions.push(module.disposition()); + // First, if incremental compilation is enabled, we try to re-use the + // codegen unit from the cache. + if tcx.dep_graph.is_fully_enabled() { + let cgu_id = cgu.work_product_id(); + + // Check whether there is a previous work-product we can + // re-use. Not only must the file exist, and the inputs not + // be dirty, but the hash of the symbols we will generate must + // be the same. + if let Some(buf) = tcx.dep_graph.previous_work_product(&cgu_id) { + let dep_node = &DepNode::new(tcx, + DepConstructor::CompileCodegenUnit(cgu.name().clone())); + + // We try to mark the DepNode::CompileCodegenUnit green. If we + // succeed it means that none of the dependencies has changed + // and we can safely re-use. + if let Some(dep_node_index) = tcx.dep_graph.try_mark_green(tcx, dep_node) { + // Append ".rs" to LLVM module identifier. + // + // LLVM code generator emits a ".file filename" directive + // for ELF backends. Value of the "filename" is set as the + // LLVM module identifier. Due to a LLVM MC bug[1], LLVM + // crashes if the module identifier is same as other symbols + // such as a function name in the module. + // 1. http://llvm.org/bugs/show_bug.cgi?id=11479 + let llmod_id = format!("{}.rs", cgu.name()); + + let module = ModuleTranslation { + name: cgu.name().to_string(), + source: ModuleSource::Preexisting(buf), + kind: ModuleKind::Regular, + llmod_id, + }; + tcx.dep_graph.mark_loaded_from_cache(dep_node_index, true); + write::submit_translated_module_to_llvm(tcx, module, 0); + // Continue to next cgu, this one is done. + continue + } + } else { + // This can happen if files were deleted from the cache + // directory for some reason. We just re-compile then. } + } - module - }; - - let time_to_translate = Instant::now().duration_since(start_time); - - // We assume that the cost to run LLVM on a CGU is proportional to - // the time we needed for translating it. 
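// Illustrative stand-alone sketch, not part of the upstream patch: the cost
// model described in the comment above approximates a CGU's LLVM cost by the
// wall-clock time spent translating it, flattened to nanoseconds exactly as
// the surrounding diff does.
use std::time::Duration;

fn translation_cost(time_to_translate: Duration) -> u64 {
    time_to_translate.as_secs() * 1_000_000_000 +
        u64::from(time_to_translate.subsec_nanos())
}

fn main() {
    assert_eq!(translation_cost(Duration::new(2, 500)), 2_000_000_500);
}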
- let cost = time_to_translate.as_secs() * 1_000_000_000 + - time_to_translate.subsec_nanos() as u64; - - total_trans_time += time_to_translate; - - let is_last_cgu = (cgu_index + 1) == codegen_unit_count; - - ongoing_translation.submit_translated_module_to_llvm(tcx.sess, - module, - cost, - is_last_cgu); + let _timing_guard = time_graph.as_ref().map(|time_graph| { + time_graph.start(write::TRANS_WORKER_TIMELINE, + write::TRANS_WORK_PACKAGE_KIND, + &format!("codegen {}", cgu.name())) + }); + let start_time = Instant::now(); + all_stats.extend(tcx.compile_codegen_unit(*cgu.name())); + total_trans_time += start_time.elapsed(); ongoing_translation.check_for_errors(tcx.sess); } + ongoing_translation.translation_finished(tcx); + // Since the main thread is sometimes blocked during trans, we keep track // -Ztime-passes output manually. print_time_passes_entry(tcx.sess.time_passes(), "translate to LLVM IR", total_trans_time); - if let Some(module_dispositions) = module_dispositions { - assert_module_sources::assert_module_sources(tcx, &module_dispositions); - } - - fn module_translation<'a, 'tcx>( - scx: AssertDepGraphSafe<&SharedCrateContext<'a, 'tcx>>, - args: AssertDepGraphSafe<(CodegenUnit<'tcx>, - Arc>>, - Arc)>) - -> (Stats, ModuleTranslation) - { - // FIXME(#40304): We ought to be using the id as a key and some queries, I think. - let AssertDepGraphSafe(scx) = scx; - let AssertDepGraphSafe((cgu, crate_trans_items, exported_symbols)) = args; - - let cgu_name = String::from(cgu.name()); - let cgu_id = cgu.work_product_id(); - let symbol_name_hash = cgu.compute_symbol_name_hash(scx); - - // Check whether there is a previous work-product we can - // re-use. Not only must the file exist, and the inputs not - // be dirty, but the hash of the symbols we will generate must - // be the same. - let previous_work_product = - scx.dep_graph().previous_work_product(&cgu_id).and_then(|work_product| { - if work_product.input_hash == symbol_name_hash { - debug!("trans_reuse_previous_work_products: reusing {:?}", work_product); - Some(work_product) - } else { - if scx.sess().opts.debugging_opts.incremental_info { - eprintln!("incremental: CGU `{}` invalidated because of \ - changed partitioning hash.", - cgu.name()); - } - debug!("trans_reuse_previous_work_products: \ - not reusing {:?} because hash changed to {:?}", - work_product, symbol_name_hash); - None - } - }); - - if let Some(buf) = previous_work_product { - // Don't need to translate this module. - let module = ModuleTranslation { - name: cgu_name, - symbol_name_hash, - source: ModuleSource::Preexisting(buf.clone()), - kind: ModuleKind::Regular, - }; - return (Stats::default(), module); - } - - // Instantiate translation items without filling out definitions yet... - let lcx = LocalCrateContext::new(scx, cgu, crate_trans_items, exported_symbols); - let module = { - let ccx = CrateContext::new(scx, &lcx); - let trans_items = ccx.codegen_unit() - .items_in_deterministic_order(ccx.tcx()); - for &(trans_item, (linkage, visibility)) in &trans_items { - trans_item.predefine(&ccx, linkage, visibility); - } - - // ... and now that we have everything pre-defined, fill out those definitions. 
- for &(trans_item, _) in &trans_items { - trans_item.define(&ccx); - } - - // If this codegen unit contains the main function, also create the - // wrapper here - maybe_create_entry_wrapper(&ccx); - - // Run replace-all-uses-with for statics that need it - for &(old_g, new_g) in ccx.statics_to_rauw().borrow().iter() { - unsafe { - let bitcast = llvm::LLVMConstPointerCast(new_g, llvm::LLVMTypeOf(old_g)); - llvm::LLVMReplaceAllUsesWith(old_g, bitcast); - llvm::LLVMDeleteGlobal(old_g); - } - } - - // Create the llvm.used variable - // This variable has type [N x i8*] and is stored in the llvm.metadata section - if !ccx.used_statics().borrow().is_empty() { - let name = CString::new("llvm.used").unwrap(); - let section = CString::new("llvm.metadata").unwrap(); - let array = C_array(Type::i8(&ccx).ptr_to(), &*ccx.used_statics().borrow()); - - unsafe { - let g = llvm::LLVMAddGlobal(ccx.llmod(), - val_ty(array).to_ref(), - name.as_ptr()); - llvm::LLVMSetInitializer(g, array); - llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage); - llvm::LLVMSetSection(g, section.as_ptr()); - } - } - - // Finalize debuginfo - if ccx.sess().opts.debuginfo != NoDebugInfo { - debuginfo::finalize(&ccx); - } - - let llvm_module = ModuleLlvm { - llcx: ccx.llcx(), - llmod: ccx.llmod(), - }; - - // In LTO mode we inject the allocator shim into the existing - // module. - if ccx.sess().lto() { - if let Some(kind) = ccx.sess().allocator_kind.get() { - time(ccx.sess().time_passes(), "write allocator module", || { - unsafe { - allocator::trans(ccx.tcx(), &llvm_module, kind); - } - }); - } - } - - // Adjust exported symbols for MSVC dllimport - if ccx.sess().target.target.options.is_like_msvc && - ccx.sess().crate_types.borrow().iter().any(|ct| *ct == config::CrateTypeRlib) { - create_imps(ccx.sess(), &llvm_module); - } - - ModuleTranslation { - name: cgu_name, - symbol_name_hash, - source: ModuleSource::Translated(llvm_module), - kind: ModuleKind::Regular, - } - }; - - (lcx.into_stats(), module) + if tcx.sess.opts.incremental.is_some() { + assert_module_sources::assert_module_sources(tcx); } symbol_names_test::report_symbol_names(tcx); if shared_ccx.sess().trans_stats() { println!("--- trans stats ---"); - println!("n_glues_created: {}", all_stats.n_glues_created.get()); - println!("n_null_glues: {}", all_stats.n_null_glues.get()); - println!("n_real_glues: {}", all_stats.n_real_glues.get()); + println!("n_glues_created: {}", all_stats.n_glues_created); + println!("n_null_glues: {}", all_stats.n_null_glues); + println!("n_real_glues: {}", all_stats.n_real_glues); - println!("n_fns: {}", all_stats.n_fns.get()); - println!("n_inlines: {}", all_stats.n_inlines.get()); - println!("n_closures: {}", all_stats.n_closures.get()); + println!("n_fns: {}", all_stats.n_fns); + println!("n_inlines: {}", all_stats.n_inlines); + println!("n_closures: {}", all_stats.n_closures); println!("fn stats:"); - all_stats.fn_stats.borrow_mut().sort_by(|&(_, insns_a), &(_, insns_b)| { - insns_b.cmp(&insns_a) - }); - for tuple in all_stats.fn_stats.borrow().iter() { - match *tuple { - (ref name, insns) => { - println!("{} insns, {}", insns, *name); - } - } + all_stats.fn_stats.sort_by_key(|&(_, insns)| insns); + for &(ref name, insns) in all_stats.fn_stats.iter() { + println!("{} insns, {}", insns, *name); } } if shared_ccx.sess().count_llvm_insns() { - for (k, v) in all_stats.llvm_insns.borrow().iter() { + for (k, v) in all_stats.llvm_insns.iter() { println!("{:7} {}", *v, *k); } } @@ -1323,14 +1113,12 @@ pub fn trans_crate<'a, 'tcx>(tcx: 
TyCtxt<'a, 'tcx, 'tcx>, ongoing_translation.check_for_errors(tcx.sess); assert_and_save_dep_graph(tcx, - incremental_hashes_map, metadata_incr_hashes, link_meta); ongoing_translation } fn assert_and_save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - incremental_hashes_map: IncrementalHashesMap, metadata_incr_hashes: EncodedMetadataHashes, link_meta: LinkMeta) { time(tcx.sess.time_passes(), @@ -1340,7 +1128,6 @@ fn assert_and_save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, time(tcx.sess.time_passes(), "serialize dep graph", || rustc_incremental::save_dep_graph(tcx, - incremental_hashes_map, &metadata_incr_hashes, link_meta.crate_hash)); } @@ -1371,7 +1158,7 @@ fn assert_symbols_are_distinct<'a, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trans_i // Deterministically select one of the spans for error reporting let span = match (span1, span2) { (Some(span1), Some(span2)) => { - Some(if span1.lo.0 > span2.lo.0 { + Some(if span1.lo().0 > span2.lo().0 { span1 } else { span2 @@ -1393,13 +1180,15 @@ fn assert_symbols_are_distinct<'a, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trans_i } } -fn collect_and_partition_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, - exported_symbols: &ExportedSymbols) - -> (FxHashSet>, - Vec>) { - let time_passes = scx.sess().time_passes(); +fn collect_and_partition_translation_items<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + cnum: CrateNum, +) -> (Arc, Arc>>>) +{ + assert_eq!(cnum, LOCAL_CRATE); + let time_passes = tcx.sess.time_passes(); - let collection_mode = match scx.sess().opts.debugging_opts.print_trans_items { + let collection_mode = match tcx.sess.opts.debugging_opts.print_trans_items { Some(ref s) => { let mode_string = s.to_lowercase(); let mode_string = mode_string.trim(); @@ -1410,7 +1199,7 @@ fn collect_and_partition_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a let message = format!("Unknown codegen-item collection mode '{}'. 
\ Falling back to 'lazy' mode.", mode_string); - scx.sess().warn(&message); + tcx.sess.warn(&message); } TransItemCollectionMode::Lazy @@ -1421,33 +1210,35 @@ fn collect_and_partition_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a let (items, inlining_map) = time(time_passes, "translation item collection", || { - collector::collect_crate_translation_items(&scx, - exported_symbols, - collection_mode) + collector::collect_crate_translation_items(tcx, collection_mode) }); - assert_symbols_are_distinct(scx.tcx(), items.iter()); + assert_symbols_are_distinct(tcx, items.iter()); - let strategy = if scx.sess().opts.debugging_opts.incremental.is_some() { + let strategy = if tcx.sess.opts.debugging_opts.incremental.is_some() { PartitioningStrategy::PerModule } else { - PartitioningStrategy::FixedUnitCount(scx.sess().opts.cg.codegen_units) + PartitioningStrategy::FixedUnitCount(tcx.sess.opts.codegen_units) }; let codegen_units = time(time_passes, "codegen unit partitioning", || { - partitioning::partition(scx, + partitioning::partition(tcx, items.iter().cloned(), strategy, - &inlining_map, - exported_symbols) + &inlining_map) + .into_iter() + .map(Arc::new) + .collect::>() }); - assert!(scx.tcx().sess.opts.cg.codegen_units == codegen_units.len() || - scx.tcx().sess.opts.debugging_opts.incremental.is_some()); - - let translation_items: FxHashSet> = items.iter().cloned().collect(); + let translation_items: DefIdSet = items.iter().filter_map(|trans_item| { + match *trans_item { + TransItem::Fn(ref instance) => Some(instance.def_id()), + _ => None, + } + }).collect(); - if scx.sess().opts.debugging_opts.print_trans_items.is_some() { + if tcx.sess.opts.debugging_opts.print_trans_items.is_some() { let mut item_to_cgus = FxHashMap(); for cgu in &codegen_units { @@ -1461,7 +1252,7 @@ fn collect_and_partition_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a let mut item_keys: Vec<_> = items .iter() .map(|i| { - let mut output = i.to_string(scx.tcx()); + let mut output = i.to_string(tcx); output.push_str(" @@"); let mut empty = Vec::new(); let cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty); @@ -1472,17 +1263,17 @@ fn collect_and_partition_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a output.push_str(&cgu_name); let linkage_abbrev = match linkage { - llvm::Linkage::ExternalLinkage => "External", - llvm::Linkage::AvailableExternallyLinkage => "Available", - llvm::Linkage::LinkOnceAnyLinkage => "OnceAny", - llvm::Linkage::LinkOnceODRLinkage => "OnceODR", - llvm::Linkage::WeakAnyLinkage => "WeakAny", - llvm::Linkage::WeakODRLinkage => "WeakODR", - llvm::Linkage::AppendingLinkage => "Appending", - llvm::Linkage::InternalLinkage => "Internal", - llvm::Linkage::PrivateLinkage => "Private", - llvm::Linkage::ExternalWeakLinkage => "ExternalWeak", - llvm::Linkage::CommonLinkage => "Common", + Linkage::External => "External", + Linkage::AvailableExternally => "Available", + Linkage::LinkOnceAny => "OnceAny", + Linkage::LinkOnceODR => "OnceODR", + Linkage::WeakAny => "WeakAny", + Linkage::WeakODR => "WeakODR", + Linkage::Appending => "Appending", + Linkage::Internal => "Internal", + Linkage::Private => "Private", + Linkage::ExternalWeak => "ExternalWeak", + Linkage::Common => "Common", }; output.push_str("["); @@ -1500,5 +1291,232 @@ fn collect_and_partition_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a } } - (translation_items, codegen_units) + (Arc::new(translation_items), Arc::new(codegen_units)) +} + +impl CrateInfo { + pub fn new(tcx: TyCtxt) -> CrateInfo { + let mut info = 
CrateInfo { + panic_runtime: None, + compiler_builtins: None, + profiler_runtime: None, + sanitizer_runtime: None, + is_no_builtins: FxHashSet(), + native_libraries: FxHashMap(), + used_libraries: tcx.native_libraries(LOCAL_CRATE), + link_args: tcx.link_args(LOCAL_CRATE), + crate_name: FxHashMap(), + used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic), + used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic), + used_crate_source: FxHashMap(), + }; + + for &cnum in tcx.crates().iter() { + info.native_libraries.insert(cnum, tcx.native_libraries(cnum)); + info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string()); + info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum)); + if tcx.is_panic_runtime(cnum) { + info.panic_runtime = Some(cnum); + } + if tcx.is_compiler_builtins(cnum) { + info.compiler_builtins = Some(cnum); + } + if tcx.is_profiler_runtime(cnum) { + info.profiler_runtime = Some(cnum); + } + if tcx.is_sanitizer_runtime(cnum) { + info.sanitizer_runtime = Some(cnum); + } + if tcx.is_no_builtins(cnum) { + info.is_no_builtins.insert(cnum); + } + } + + + return info + } +} + +fn is_translated_function(tcx: TyCtxt, id: DefId) -> bool { + let (all_trans_items, _) = + tcx.collect_and_partition_translation_items(LOCAL_CRATE); + all_trans_items.contains(&id) +} + +fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + cgu: InternedString) -> Stats { + let cgu = tcx.codegen_unit(cgu); + + let start_time = Instant::now(); + let (stats, module) = module_translation(tcx, cgu); + let time_to_translate = start_time.elapsed(); + + // We assume that the cost to run LLVM on a CGU is proportional to + // the time we needed for translating it. + let cost = time_to_translate.as_secs() * 1_000_000_000 + + time_to_translate.subsec_nanos() as u64; + + write::submit_translated_module_to_llvm(tcx, + module, + cost); + return stats; + + fn module_translation<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + cgu: Arc>) + -> (Stats, ModuleTranslation) + { + let cgu_name = cgu.name().to_string(); + + // Append ".rs" to LLVM module identifier. + // + // LLVM code generator emits a ".file filename" directive + // for ELF backends. Value of the "filename" is set as the + // LLVM module identifier. Due to a LLVM MC bug[1], LLVM + // crashes if the module identifier is same as other symbols + // such as a function name in the module. + // 1. http://llvm.org/bugs/show_bug.cgi?id=11479 + let llmod_id = format!("{}-{}.rs", + cgu.name(), + tcx.crate_disambiguator(LOCAL_CRATE)); + + // Instantiate translation items without filling out definitions yet... + let scx = SharedCrateContext::new(tcx); + let lcx = LocalCrateContext::new(&scx, cgu, &llmod_id); + let module = { + let ccx = CrateContext::new(&scx, &lcx); + let trans_items = ccx.codegen_unit() + .items_in_deterministic_order(ccx.tcx()); + for &(trans_item, (linkage, visibility)) in &trans_items { + trans_item.predefine(&ccx, linkage, visibility); + } + + // ... and now that we have everything pre-defined, fill out those definitions. 
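// Illustrative stand-alone sketch, not part of the upstream patch, of the
// two-pass "predefine, then define" pattern used here: first create an entry
// for every item so that cross-references resolve, then fill in the bodies.
// Items are plain (name, body) pairs; in rustc they are TransItems with LLVM
// declarations.
use std::collections::HashMap;

fn build_definitions(items: &[(&str, &str)]) -> HashMap<String, String> {
    let mut defined = HashMap::new();
    // Pass 1: predefine every symbol with a placeholder definition.
    for &(name, _) in items {
        defined.insert(name.to_string(), String::new());
    }
    // Pass 2: fill out the definitions; anything they refer to already exists.
    for &(name, body) in items {
        *defined.get_mut(name).expect("predefined in pass 1") = body.to_string();
    }
    defined
}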
+ for &(trans_item, _) in &trans_items { + trans_item.define(&ccx); + } + + // If this codegen unit contains the main function, also create the + // wrapper here + maybe_create_entry_wrapper(&ccx); + + // Run replace-all-uses-with for statics that need it + for &(old_g, new_g) in ccx.statics_to_rauw().borrow().iter() { + unsafe { + let bitcast = llvm::LLVMConstPointerCast(new_g, llvm::LLVMTypeOf(old_g)); + llvm::LLVMReplaceAllUsesWith(old_g, bitcast); + llvm::LLVMDeleteGlobal(old_g); + } + } + + // Create the llvm.used variable + // This variable has type [N x i8*] and is stored in the llvm.metadata section + if !ccx.used_statics().borrow().is_empty() { + let name = CString::new("llvm.used").unwrap(); + let section = CString::new("llvm.metadata").unwrap(); + let array = C_array(Type::i8(&ccx).ptr_to(), &*ccx.used_statics().borrow()); + + unsafe { + let g = llvm::LLVMAddGlobal(ccx.llmod(), + val_ty(array).to_ref(), + name.as_ptr()); + llvm::LLVMSetInitializer(g, array); + llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage); + llvm::LLVMSetSection(g, section.as_ptr()); + } + } + + // Finalize debuginfo + if ccx.sess().opts.debuginfo != NoDebugInfo { + debuginfo::finalize(&ccx); + } + + let llvm_module = ModuleLlvm { + llcx: ccx.llcx(), + llmod: ccx.llmod(), + tm: create_target_machine(ccx.sess()), + }; + + // Adjust exported symbols for MSVC dllimport + if ccx.sess().target.target.options.is_like_msvc && + ccx.sess().crate_types.borrow().iter().any(|ct| *ct == config::CrateTypeRlib) { + create_imps(ccx.sess(), &llvm_module); + } + + ModuleTranslation { + name: cgu_name, + source: ModuleSource::Translated(llvm_module), + kind: ModuleKind::Regular, + llmod_id, + } + }; + + (lcx.into_stats(), module) + } +} + +pub fn provide_local(providers: &mut Providers) { + providers.collect_and_partition_translation_items = + collect_and_partition_translation_items; + + providers.is_translated_function = is_translated_function; + + providers.codegen_unit = |tcx, name| { + let (_, all) = tcx.collect_and_partition_translation_items(LOCAL_CRATE); + all.iter() + .find(|cgu| *cgu.name() == name) + .cloned() + .expect(&format!("failed to find cgu with name {:?}", name)) + }; + providers.compile_codegen_unit = compile_codegen_unit; +} + +pub fn provide_extern(providers: &mut Providers) { + providers.is_translated_function = is_translated_function; +} + +pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage { + match linkage { + Linkage::External => llvm::Linkage::ExternalLinkage, + Linkage::AvailableExternally => llvm::Linkage::AvailableExternallyLinkage, + Linkage::LinkOnceAny => llvm::Linkage::LinkOnceAnyLinkage, + Linkage::LinkOnceODR => llvm::Linkage::LinkOnceODRLinkage, + Linkage::WeakAny => llvm::Linkage::WeakAnyLinkage, + Linkage::WeakODR => llvm::Linkage::WeakODRLinkage, + Linkage::Appending => llvm::Linkage::AppendingLinkage, + Linkage::Internal => llvm::Linkage::InternalLinkage, + Linkage::Private => llvm::Linkage::PrivateLinkage, + Linkage::ExternalWeak => llvm::Linkage::ExternalWeakLinkage, + Linkage::Common => llvm::Linkage::CommonLinkage, + } +} + +pub fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility { + match linkage { + Visibility::Default => llvm::Visibility::Default, + Visibility::Hidden => llvm::Visibility::Hidden, + Visibility::Protected => llvm::Visibility::Protected, + } +} + +// FIXME(mw): Anything that is produced via DepGraph::with_task() must implement +// the HashStable trait. 
Normally DepGraph::with_task() calls are
+ // hidden behind queries, but CGU creation is a special case in two
+ // ways: (1) it's not a query and (2) CGU are output nodes, so their
+ // Fingerprints are not actually needed. It remains to be clarified
+ // how exactly this case will be handled in the red/green system but
+ // for now we content ourselves with providing a no-op HashStable
+ // implementation for CGUs.
+mod temp_stable_hash_impls {
+ use rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher,
+ HashStable};
+ use ModuleTranslation;
+
+ impl HashStable for ModuleTranslation {
+ fn hash_stable(&self,
+ _: &mut HCX,
+ _: &mut StableHasher) {
+ // do nothing
+ }
+ }
 }
diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs
index 8a585e72f5..41a238ea8e 100644
--- a/src/librustc_trans/builder.rs
+++ b/src/librustc_trans/builder.rs
@@ -101,11 +101,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
 fn count_insn(&self, category: &str) {
 if self.ccx.sess().trans_stats() {
- self.ccx.stats().n_llvm_insns.set(self.ccx.stats().n_llvm_insns.get() + 1);
+ self.ccx.stats().borrow_mut().n_llvm_insns += 1;
 }
 if self.ccx.sess().count_llvm_insns() {
- let mut h = self.ccx.stats().llvm_insns.borrow_mut();
- *h.entry(category.to_string()).or_insert(0) += 1;
+ *self.ccx.stats()
+ .borrow_mut()
+ .llvm_insns
+ .entry(category.to_string())
+ .or_insert(0) += 1;
 }
 }
diff --git a/src/librustc_trans/cabi_powerpc64.rs b/src/librustc_trans/cabi_powerpc64.rs
index 5c69538723..fb5472eb6a 100644
--- a/src/librustc_trans/cabi_powerpc64.rs
+++ b/src/librustc_trans/cabi_powerpc64.rs
@@ -14,14 +14,26 @@
 use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
 use context::CrateContext;
+use rustc::ty::layout;
-fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>)
+#[derive(Debug, Clone, Copy, PartialEq)]
+enum ABI {
+ ELFv1, // original ABI used for powerpc64 (big-endian)
+ ELFv2, // newer ABI used for powerpc64le
+}
+use self::ABI::*;
+
+fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+ arg: &mut ArgType<'tcx>,
+ abi: ABI)
 -> Option {
 arg.layout.homogeneous_aggregate(ccx).and_then(|unit| {
 let size = arg.layout.size(ccx);
- // Ensure we have at most eight uniquely addressable members.
- if size > unit.size.checked_mul(8, ccx).unwrap() {
+ // ELFv1 only passes one-member aggregates transparently.
+ // ELFv2 passes up to eight uniquely addressable members.
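// Illustrative stand-alone sketch, not part of the upstream patch, of the
// member-count rule stated in the comment above: ELFv1 only passes a
// single-member homogeneous aggregate directly, while ELFv2 allows up to
// eight uniquely addressable members. Sizes are in bytes; the real check
// compares layout sizes (size vs. unit.size and 8 * unit.size) rather than
// an explicit member count.
#[derive(Clone, Copy, PartialEq)]
enum PpcAbi {
    ElfV1,
    ElfV2,
}

fn passes_aggregate_directly(abi: PpcAbi, aggregate_size: u64, unit_size: u64) -> bool {
    assert!(unit_size > 0 && aggregate_size % unit_size == 0);
    let members = aggregate_size / unit_size;
    match abi {
        PpcAbi::ElfV1 => members <= 1,
        PpcAbi::ElfV2 => members <= 8,
    }
}

fn main() {
    // A struct of four f32s (16 bytes in 4-byte units): rejected by ELFv1,
    // accepted by ELFv2.
    assert!(!passes_aggregate_directly(PpcAbi::ElfV1, 16, 4));
    assert!(passes_aggregate_directly(PpcAbi::ElfV2, 16, 4));
}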
+ if (abi == ELFv1 && size > unit.size) + || size > unit.size.checked_mul(8, ccx).unwrap() { return None; } @@ -42,21 +54,23 @@ fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut Ar }) } -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>, abi: ABI) { if !ret.layout.is_aggregate() { ret.extend_integer_width_to(64); return; } - // The PowerPC64 big endian ABI doesn't return aggregates in registers - if ccx.sess().target.target.target_endian == "big" { + // The ELFv1 ABI doesn't return aggregates in registers + if abi == ELFv1 { ret.make_indirect(ccx); + return; } - if let Some(uniform) = is_homogeneous_aggregate(ccx, ret) { + if let Some(uniform) = is_homogeneous_aggregate(ccx, ret, abi) { ret.cast_to(ccx, uniform); return; } + let size = ret.layout.size(ccx); let bits = size.bits(); if bits <= 128 { @@ -80,31 +94,55 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc ret.make_indirect(ccx); } -fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { +fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>, abi: ABI) { if !arg.layout.is_aggregate() { arg.extend_integer_width_to(64); return; } - if let Some(uniform) = is_homogeneous_aggregate(ccx, arg) { + if let Some(uniform) = is_homogeneous_aggregate(ccx, arg, abi) { arg.cast_to(ccx, uniform); return; } - let total = arg.layout.size(ccx); + let size = arg.layout.size(ccx); + let (unit, total) = match abi { + ELFv1 => { + // In ELFv1, aggregates smaller than a doubleword should appear in + // the least-significant bits of the parameter doubleword. The rest + // should be padded at their tail to fill out multiple doublewords. + if size.bits() <= 64 { + (Reg { kind: RegKind::Integer, size }, size) + } else { + let align = layout::Align::from_bits(64, 64).unwrap(); + (Reg::i64(), size.abi_align(align)) + } + }, + ELFv2 => { + // In ELFv2, we can just cast directly. + (Reg::i64(), size) + }, + }; + arg.cast_to(ccx, Uniform { - unit: Reg::i64(), + unit, total }); } pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { + let abi = match ccx.sess().target.target.target_endian.as_str() { + "big" => ELFv1, + "little" => ELFv2, + _ => unimplemented!(), + }; + if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(ccx, &mut fty.ret, abi); } for arg in &mut fty.args { if arg.is_ignore() { continue; } - classify_arg_ty(ccx, arg); + classify_arg_ty(ccx, arg, abi); } } diff --git a/src/librustc_trans/cabi_x86.rs b/src/librustc_trans/cabi_x86.rs index 8b024b8c97..49634d6e78 100644 --- a/src/librustc_trans/cabi_x86.rs +++ b/src/librustc_trans/cabi_x86.rs @@ -11,12 +11,30 @@ use abi::{ArgAttribute, FnType, LayoutExt, Reg, RegKind}; use common::CrateContext; +use rustc::ty::layout::{self, Layout, TyLayout}; + #[derive(PartialEq)] pub enum Flavor { General, Fastcall } +fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + layout: TyLayout<'tcx>) -> bool { + match *layout { + Layout::Scalar { value: layout::F32, .. } | + Layout::Scalar { value: layout::F64, .. } => true, + Layout::Univariant { .. 
} => { + if layout.field_count() == 1 { + is_single_fp_element(ccx, layout.field(ccx, 0)) + } else { + false + } + } + _ => false + } +} + pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>, flavor: Flavor) { @@ -33,12 +51,23 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, if t.options.is_like_osx || t.options.is_like_windows || t.options.is_like_openbsd { let size = fty.ret.layout.size(ccx); - match size.bytes() { - 1 => fty.ret.cast_to(ccx, Reg::i8()), - 2 => fty.ret.cast_to(ccx, Reg::i16()), - 4 => fty.ret.cast_to(ccx, Reg::i32()), - 8 => fty.ret.cast_to(ccx, Reg::i64()), - _ => fty.ret.make_indirect(ccx) + + // According to Clang, everyone but MSVC returns single-element + // float aggregates directly in a floating-point register. + if !t.options.is_like_msvc && is_single_fp_element(ccx, fty.ret.layout) { + match size.bytes() { + 4 => fty.ret.cast_to(ccx, Reg::f32()), + 8 => fty.ret.cast_to(ccx, Reg::f64()), + _ => fty.ret.make_indirect(ccx) + } + } else { + match size.bytes() { + 1 => fty.ret.cast_to(ccx, Reg::i8()), + 2 => fty.ret.cast_to(ccx, Reg::i16()), + 4 => fty.ret.cast_to(ccx, Reg::i32()), + 8 => fty.ret.cast_to(ccx, Reg::i64()), + _ => fty.ret.make_indirect(ccx) + } } } else { fty.ret.make_indirect(ccx); diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index 76f94565ba..18b91b18d8 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -19,11 +19,11 @@ use common::{self, CrateContext}; use consts; use declare; use llvm::{self, ValueRef}; -use monomorphize::{self, Instance}; +use monomorphize::Instance; use rustc::hir::def_id::DefId; -use rustc::ty::TypeFoldable; +use rustc::ty::{self, TypeFoldable}; +use rustc::traits; use rustc::ty::subst::Substs; -use trans_item::TransItem; use type_of; /// Translates a reference to a fn/method item, monomorphizing and @@ -45,7 +45,7 @@ pub fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, assert!(!instance.substs.has_escaping_regions()); assert!(!instance.substs.has_param_types()); - let fn_ty = common::instance_ty(ccx.shared(), &instance); + let fn_ty = common::instance_ty(ccx.tcx(), &instance); if let Some(&llfn) = ccx.instances().borrow().get(&instance) { return llfn; } @@ -53,35 +53,34 @@ pub fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let sym = tcx.symbol_name(instance); debug!("get_fn({:?}: {:?}) => {}", instance, fn_ty, sym); - // This is subtle and surprising, but sometimes we have to bitcast - // the resulting fn pointer. The reason has to do with external - // functions. If you have two crates that both bind the same C - // library, they may not use precisely the same types: for - // example, they will probably each declare their own structs, - // which are distinct types from LLVM's point of view (nominal - // types). - // - // Now, if those two crates are linked into an application, and - // they contain inlined code, you can wind up with a situation - // where both of those functions wind up being loaded into this - // application simultaneously. In that case, the same function - // (from LLVM's point of view) requires two types. But of course - // LLVM won't allow one function to have two types. - // - // What we currently do, therefore, is declare the function with - // one of the two types (whichever happens to come first) and then - // bitcast as needed when the function is referenced to make sure - // it has the type we expect. - // - // This can occur on either a crate-local or crate-external - // reference. 
It also occurs when testing libcore and in some - // other weird situations. Annoying. - // Create a fn pointer with the substituted signature. let fn_ptr_ty = tcx.mk_fn_ptr(common::ty_fn_sig(ccx, fn_ty)); let llptrty = type_of::type_of(ccx, fn_ptr_ty); let llfn = if let Some(llfn) = declare::get_declared_value(ccx, &sym) { + // This is subtle and surprising, but sometimes we have to bitcast + // the resulting fn pointer. The reason has to do with external + // functions. If you have two crates that both bind the same C + // library, they may not use precisely the same types: for + // example, they will probably each declare their own structs, + // which are distinct types from LLVM's point of view (nominal + // types). + // + // Now, if those two crates are linked into an application, and + // they contain inlined code, you can wind up with a situation + // where both of those functions wind up being loaded into this + // application simultaneously. In that case, the same function + // (from LLVM's point of view) requires two types. But of course + // LLVM won't allow one function to have two types. + // + // What we currently do, therefore, is declare the function with + // one of the two types (whichever happens to come first) and then + // bitcast as needed when the function is referenced to make sure + // it has the type we expect. + // + // This can occur on either a crate-local or crate-external + // reference. It also occurs when testing libcore and in some + // other weird situations. Annoying. if common::val_ty(llfn) != llptrty { debug!("get_fn: casting {:?} to {:?}", llfn, llptrty); consts::ptrcast(llfn, llptrty) @@ -110,12 +109,45 @@ pub fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, attributes::unwind(llfn, true); } + // Apply an appropriate linkage/visibility value to our item that we + // just declared. + // + // This is sort of subtle. Inside our codegen unit we started off + // compilation by predefining all our own `TransItem` instances. That + // is, everything we're translating ourselves is already defined. That + // means that anything we're actually translating ourselves will have + // hit the above branch in `get_declared_value`. As a result, we're + // guaranteed here that we're declaring a symbol that won't get defined, + // or in other words we're referencing a foreign value. + // + // So because this is a foreign value we blanket apply an external + // linkage directive because it's coming from a different object file. + // The visibility here is where it gets tricky. This symbol could be + // referencing some foreign crate or foreign library (an `extern` + // block) in which case we want to leave the default visibility. We may + // also, though, have multiple codegen units. + // + // In the situation of multiple codegen units this function may be + // referencing a function from another codegen unit. If we're + // indeed referencing a symbol in another codegen unit then we're in one + // of two cases: + // + // * This is a symbol defined in a foreign crate and we're just + // monomorphizing in another codegen unit. In this case this symbols + // is for sure not exported, so both codegen units will be using + // hidden visibility. Hence, we apply a hidden visibility here. + // + // * This is a symbol defined in our local crate. If the symbol in the + // other codegen unit is also not exported then like with the foreign + // case we apply a hidden visibility. 
If the symbol is exported from + // the foreign object file, however, then we leave this at the + // default visibility as we'll just import it naturally. unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage); - if ccx.crate_trans_items().contains(&TransItem::Fn(instance)) { - if let Some(node_id) = tcx.hir.as_local_node_id(instance_def_id) { - if !ccx.exported_symbols().local_exports().contains(&node_id) { + if ccx.tcx().is_translated_function(instance_def_id) { + if instance_def_id.is_local() { + if !ccx.tcx().is_exported_symbol(instance_def_id) { llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden); } } else { @@ -125,12 +157,13 @@ pub fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, } if ccx.use_dll_storage_attrs() && - ccx.sess().cstore.is_dllimport_foreign_item(instance_def_id) + tcx.is_dllimport_foreign_item(instance_def_id) { unsafe { llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport); } } + llfn }; @@ -144,5 +177,13 @@ pub fn resolve_and_get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, substs: &'tcx Substs<'tcx>) -> ValueRef { - get_fn(ccx, monomorphize::resolve(ccx.shared(), def_id, substs)) + get_fn( + ccx, + ty::Instance::resolve( + ccx.tcx(), + ty::ParamEnv::empty(traits::Reveal::All), + def_id, + substs + ).unwrap() + ) } diff --git a/src/librustc_trans/collector.rs b/src/librustc_trans/collector.rs index c5de3a4ffb..9d1e36fa58 100644 --- a/src/librustc_trans/collector.rs +++ b/src/librustc_trans/collector.rs @@ -193,23 +193,22 @@ use rustc::hir::itemlikevisit::ItemLikeVisitor; use rustc::hir::map as hir_map; use rustc::hir::def_id::DefId; +use rustc::middle::const_val::ConstVal; use rustc::middle::lang_items::{ExchangeMallocFnLangItem}; use rustc::traits; use rustc::ty::subst::Substs; -use rustc::ty::{self, TypeFoldable, TyCtxt}; +use rustc::ty::{self, TypeFoldable, Ty, TyCtxt}; use rustc::ty::adjustment::CustomCoerceUnsized; use rustc::mir::{self, Location}; use rustc::mir::visit::Visitor as MirVisitor; -use context::SharedCrateContext; -use common::{def_ty, instance_ty}; +use common::{def_ty, instance_ty, type_is_sized}; use monomorphize::{self, Instance}; use rustc::util::nodemap::{FxHashSet, FxHashMap, DefIdMap}; -use trans_item::{TransItem, DefPathBasedNames, InstantiationMode}; +use trans_item::{TransItem, TransItemExt, DefPathBasedNames, InstantiationMode}; use rustc_data_structures::bitvec::BitVector; -use back::symbol_export::ExportedSymbols; #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] pub enum TransItemCollectionMode { @@ -293,37 +292,31 @@ impl<'tcx> InliningMap<'tcx> { } } -pub fn collect_crate_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, - exported_symbols: &ExportedSymbols, +pub fn collect_crate_translation_items<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, mode: TransItemCollectionMode) -> (FxHashSet>, InliningMap<'tcx>) { - // We are not tracking dependencies of this pass as it has to be re-executed - // every time no matter what. 
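// Illustrative stand-alone sketch, not part of the upstream patch: the
// collector below walks outward from a set of roots and visits neighbours
// transitively, with a `visited` set ensuring each item is handled once.
// This is an iterative worklist variant of the recursive walk in
// collect_items_rec; items are plain strings here instead of TransItems.
use std::collections::HashSet;

fn collect_reachable<F>(roots: Vec<String>, neighbours_of: F) -> HashSet<String>
where
    F: Fn(&str) -> Vec<String>,
{
    let mut visited = HashSet::new();
    let mut worklist = roots;
    while let Some(item) = worklist.pop() {
        // `insert` returns false if we have been here already.
        if !visited.insert(item.clone()) {
            continue;
        }
        worklist.extend(neighbours_of(&item));
    }
    visited
}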
- scx.tcx().dep_graph.with_ignore(|| { - let roots = collect_roots(scx, exported_symbols, mode); - - debug!("Building translation item graph, beginning at roots"); - let mut visited = FxHashSet(); - let mut recursion_depths = DefIdMap(); - let mut inlining_map = InliningMap::new(); - - for root in roots { - collect_items_rec(scx, - root, - &mut visited, - &mut recursion_depths, - &mut inlining_map); - } + let roots = collect_roots(tcx, mode); + + debug!("Building translation item graph, beginning at roots"); + let mut visited = FxHashSet(); + let mut recursion_depths = DefIdMap(); + let mut inlining_map = InliningMap::new(); + + for root in roots { + collect_items_rec(tcx, + root, + &mut visited, + &mut recursion_depths, + &mut inlining_map); + } - (visited, inlining_map) - }) + (visited, inlining_map) } // Find all non-generic items by walking the HIR. These items serve as roots to // start monomorphizing from. -fn collect_roots<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, - exported_symbols: &ExportedSymbols, +fn collect_roots<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, mode: TransItemCollectionMode) -> Vec> { debug!("Collecting roots"); @@ -331,25 +324,24 @@ fn collect_roots<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, { let mut visitor = RootCollector { - scx, + tcx, mode, - exported_symbols, output: &mut roots, }; - scx.tcx().hir.krate().visit_all_item_likes(&mut visitor); + tcx.hir.krate().visit_all_item_likes(&mut visitor); } // We can only translate items that are instantiable - items all of // whose predicates hold. Luckily, items that aren't instantiable // can't actually be used, so we can just skip translating them. - roots.retain(|root| root.is_instantiable(scx.tcx())); + roots.retain(|root| root.is_instantiable(tcx)); roots } // Collect all monomorphized translation items reachable from `starting_point` -fn collect_items_rec<'a, 'tcx: 'a>(scx: &SharedCrateContext<'a, 'tcx>, +fn collect_items_rec<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, starting_point: TransItem<'tcx>, visited: &mut FxHashSet>, recursion_depths: &mut DefIdMap, @@ -358,60 +350,60 @@ fn collect_items_rec<'a, 'tcx: 'a>(scx: &SharedCrateContext<'a, 'tcx>, // We've been here already, no need to search again. 
return; } - debug!("BEGIN collect_items_rec({})", starting_point.to_string(scx.tcx())); + debug!("BEGIN collect_items_rec({})", starting_point.to_string(tcx)); let mut neighbors = Vec::new(); let recursion_depth_reset; match starting_point { TransItem::Static(node_id) => { - let def_id = scx.tcx().hir.local_def_id(node_id); - let instance = Instance::mono(scx.tcx(), def_id); + let def_id = tcx.hir.local_def_id(node_id); + let instance = Instance::mono(tcx, def_id); // Sanity check whether this ended up being collected accidentally - debug_assert!(should_trans_locally(scx.tcx(), &instance)); + debug_assert!(should_trans_locally(tcx, &instance)); - let ty = instance_ty(scx, &instance); - visit_drop_use(scx, ty, true, &mut neighbors); + let ty = instance_ty(tcx, &instance); + visit_drop_use(tcx, ty, true, &mut neighbors); recursion_depth_reset = None; - collect_neighbours(scx, instance, true, &mut neighbors); + collect_neighbours(tcx, instance, true, &mut neighbors); } TransItem::Fn(instance) => { // Sanity check whether this ended up being collected accidentally - debug_assert!(should_trans_locally(scx.tcx(), &instance)); + debug_assert!(should_trans_locally(tcx, &instance)); // Keep track of the monomorphization recursion depth - recursion_depth_reset = Some(check_recursion_limit(scx.tcx(), + recursion_depth_reset = Some(check_recursion_limit(tcx, instance, recursion_depths)); - check_type_length_limit(scx.tcx(), instance); + check_type_length_limit(tcx, instance); - collect_neighbours(scx, instance, false, &mut neighbors); + collect_neighbours(tcx, instance, false, &mut neighbors); } TransItem::GlobalAsm(..) => { recursion_depth_reset = None; } } - record_accesses(scx.tcx(), starting_point, &neighbors[..], inlining_map); + record_accesses(tcx, starting_point, &neighbors[..], inlining_map); for neighbour in neighbors { - collect_items_rec(scx, neighbour, visited, recursion_depths, inlining_map); + collect_items_rec(tcx, neighbour, visited, recursion_depths, inlining_map); } if let Some((def_id, depth)) = recursion_depth_reset { recursion_depths.insert(def_id, depth); } - debug!("END collect_items_rec({})", starting_point.to_string(scx.tcx())); + debug!("END collect_items_rec({})", starting_point.to_string(tcx)); } fn record_accesses<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - caller: TransItem<'tcx>, - callees: &[TransItem<'tcx>], - inlining_map: &mut InliningMap<'tcx>) { + caller: TransItem<'tcx>, + callees: &[TransItem<'tcx>], + inlining_map: &mut InliningMap<'tcx>) { let is_inlining_candidate = |trans_item: &TransItem<'tcx>| { trans_item.instantiation_mode(tcx) == InstantiationMode::LocalCopy }; @@ -432,7 +424,7 @@ fn check_recursion_limit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let recursion_depth = recursion_depths.get(&def_id).cloned().unwrap_or(0); debug!(" => recursion depth={}", recursion_depth); - let recursion_depth = if Some(def_id) == tcx.lang_items.drop_in_place_fn() { + let recursion_depth = if Some(def_id) == tcx.lang_items().drop_in_place_fn() { // HACK: drop_in_place creates tight monomorphization loops. Give // it more margin. 
recursion_depth / 4 @@ -493,7 +485,7 @@ fn check_type_length_limit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } struct MirNeighborCollector<'a, 'tcx: 'a> { - scx: &'a SharedCrateContext<'a, 'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &'a mir::Mir<'tcx>, output: &'a mut Vec>, param_substs: &'tcx Substs<'tcx>, @@ -510,49 +502,49 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { // have to instantiate all methods of the trait being cast to, so we // can build the appropriate vtable. mir::Rvalue::Cast(mir::CastKind::Unsize, ref operand, target_ty) => { - let target_ty = self.scx.tcx().trans_apply_param_substs(self.param_substs, - &target_ty); - let source_ty = operand.ty(self.mir, self.scx.tcx()); - let source_ty = self.scx.tcx().trans_apply_param_substs(self.param_substs, - &source_ty); - let (source_ty, target_ty) = find_vtable_types_for_unsizing(self.scx, + let target_ty = self.tcx.trans_apply_param_substs(self.param_substs, + &target_ty); + let source_ty = operand.ty(self.mir, self.tcx); + let source_ty = self.tcx.trans_apply_param_substs(self.param_substs, + &source_ty); + let (source_ty, target_ty) = find_vtable_types_for_unsizing(self.tcx, source_ty, target_ty); // This could also be a different Unsize instruction, like // from a fixed sized array to a slice. But we are only // interested in things that produce a vtable. if target_ty.is_trait() && !source_ty.is_trait() { - create_trans_items_for_vtable_methods(self.scx, + create_trans_items_for_vtable_methods(self.tcx, target_ty, source_ty, self.output); } } mir::Rvalue::Cast(mir::CastKind::ReifyFnPointer, ref operand, _) => { - let fn_ty = operand.ty(self.mir, self.scx.tcx()); - let fn_ty = self.scx.tcx().trans_apply_param_substs(self.param_substs, - &fn_ty); - visit_fn_use(self.scx, fn_ty, false, &mut self.output); + let fn_ty = operand.ty(self.mir, self.tcx); + let fn_ty = self.tcx.trans_apply_param_substs(self.param_substs, + &fn_ty); + visit_fn_use(self.tcx, fn_ty, false, &mut self.output); } mir::Rvalue::Cast(mir::CastKind::ClosureFnPointer, ref operand, _) => { - let source_ty = operand.ty(self.mir, self.scx.tcx()); - let source_ty = self.scx.tcx().trans_apply_param_substs(self.param_substs, - &source_ty); + let source_ty = operand.ty(self.mir, self.tcx); + let source_ty = self.tcx.trans_apply_param_substs(self.param_substs, + &source_ty); match source_ty.sty { ty::TyClosure(def_id, substs) => { let instance = monomorphize::resolve_closure( - self.scx, def_id, substs, ty::ClosureKind::FnOnce); + self.tcx, def_id, substs, ty::ClosureKind::FnOnce); self.output.push(create_fn_trans_item(instance)); } _ => bug!(), } } mir::Rvalue::NullaryOp(mir::NullOp::Box, _) => { - let tcx = self.scx.tcx(); + let tcx = self.tcx; let exchange_malloc_fn_def_id = tcx - .lang_items + .lang_items() .require(ExchangeMallocFnLangItem) - .unwrap_or_else(|e| self.scx.sess().fatal(&e)); + .unwrap_or_else(|e| tcx.sess.fatal(&e)); let instance = Instance::mono(tcx, exchange_malloc_fn_def_id); if should_trans_locally(tcx, &instance) { self.output.push(create_fn_trans_item(instance)); @@ -564,24 +556,20 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { self.super_rvalue(rvalue, location); } - fn visit_constant(&mut self, constant: &mir::Constant<'tcx>, location: Location) { - debug!("visiting constant {:?} @ {:?}", *constant, location); - - if let ty::TyFnDef(..) = constant.ty.sty { - // function definitions are zero-sized, and only generate - // IR when they are called/reified. 
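// Illustrative stand-alone demonstration, not part of the upstream patch, of
// the fact stated in the removed comment above: a function definition has a
// zero-sized type, and only the reified function pointer occupies space.
fn answer() -> u32 {
    42
}

fn main() {
    // The unique fn-item type of `answer` is zero-sized...
    assert_eq!(std::mem::size_of_val(&answer), 0);
    // ...while reifying it to a `fn` pointer yields a pointer-sized value.
    let reified: fn() -> u32 = answer;
    assert_eq!(std::mem::size_of_val(&reified), std::mem::size_of::<usize>());
    assert_eq!(reified(), 42);
}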
- self.super_constant(constant, location); - return + fn visit_const(&mut self, constant: &&'tcx ty::Const<'tcx>, location: Location) { + debug!("visiting const {:?} @ {:?}", *constant, location); + + if let ConstVal::Unevaluated(def_id, substs) = constant.val { + let substs = self.tcx.trans_apply_param_substs(self.param_substs, + &substs); + let instance = ty::Instance::resolve(self.tcx, + ty::ParamEnv::empty(traits::Reveal::All), + def_id, + substs).unwrap(); + collect_neighbours(self.tcx, instance, true, self.output); } - if let mir::Literal::Item { def_id, substs } = constant.literal { - let substs = self.scx.tcx().trans_apply_param_substs(self.param_substs, - &substs); - let instance = monomorphize::resolve(self.scx, def_id, substs); - collect_neighbours(self.scx, instance, true, self.output); - } - - self.super_constant(constant, location); + self.super_const(constant); } fn visit_terminator_kind(&mut self, @@ -590,15 +578,19 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { location: Location) { debug!("visiting terminator {:?} @ {:?}", kind, location); - let tcx = self.scx.tcx(); + let tcx = self.tcx; match *kind { mir::TerminatorKind::Call { ref func, .. } => { let callee_ty = func.ty(self.mir, tcx); let callee_ty = tcx.trans_apply_param_substs(self.param_substs, &callee_ty); let constness = match (self.const_context, &callee_ty.sty) { - (true, &ty::TyFnDef(def_id, substs)) if self.scx.tcx().is_const_fn(def_id) => { - let instance = monomorphize::resolve(self.scx, def_id, substs); + (true, &ty::TyFnDef(def_id, substs)) if self.tcx.is_const_fn(def_id) => { + let instance = + ty::Instance::resolve(self.tcx, + ty::ParamEnv::empty(traits::Reveal::All), + def_id, + substs).unwrap(); Some(instance) } _ => None @@ -608,20 +600,20 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { // If this is a const fn, called from a const context, we // have to visit its body in order to find any fn reifications // it might contain. - collect_neighbours(self.scx, + collect_neighbours(self.tcx, const_fn_instance, true, self.output); } else { - visit_fn_use(self.scx, callee_ty, true, &mut self.output); + visit_fn_use(self.tcx, callee_ty, true, &mut self.output); } } mir::TerminatorKind::Drop { ref location, .. } | mir::TerminatorKind::DropAndReplace { ref location, .. } => { - let ty = location.ty(self.mir, self.scx.tcx()) - .to_ty(self.scx.tcx()); + let ty = location.ty(self.mir, self.tcx) + .to_ty(self.tcx); let ty = tcx.trans_apply_param_substs(self.param_substs, &ty); - visit_drop_use(self.scx, ty, true, self.output); + visit_drop_use(self.tcx, ty, true, self.output); } mir::TerminatorKind::Goto { .. } | mir::TerminatorKind::SwitchInt { .. } | @@ -629,6 +621,8 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { mir::TerminatorKind::Return | mir::TerminatorKind::Unreachable | mir::TerminatorKind::Assert { .. } => {} + mir::TerminatorKind::GeneratorDrop | + mir::TerminatorKind::Yield { .. 
} => bug!(), } self.super_terminator_kind(block, kind, location); @@ -640,7 +634,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { location: Location) { debug!("visiting static {:?} @ {:?}", static_.def_id, location); - let tcx = self.scx.tcx(); + let tcx = self.tcx; let instance = Instance::mono(tcx, static_.def_id); if should_trans_locally(tcx, &instance) { let node_id = tcx.hir.as_local_node_id(static_.def_id).unwrap(); @@ -651,33 +645,36 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { } } -fn visit_drop_use<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, - ty: ty::Ty<'tcx>, +fn visit_drop_use<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + ty: Ty<'tcx>, is_direct_call: bool, output: &mut Vec>) { - let instance = monomorphize::resolve_drop_in_place(scx, ty); - visit_instance_use(scx, instance, is_direct_call, output); + let instance = monomorphize::resolve_drop_in_place(tcx, ty); + visit_instance_use(tcx, instance, is_direct_call, output); } -fn visit_fn_use<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, - ty: ty::Ty<'tcx>, +fn visit_fn_use<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + ty: Ty<'tcx>, is_direct_call: bool, output: &mut Vec>) { if let ty::TyFnDef(def_id, substs) = ty.sty { - let instance = monomorphize::resolve(scx, def_id, substs); - visit_instance_use(scx, instance, is_direct_call, output); + let instance = ty::Instance::resolve(tcx, + ty::ParamEnv::empty(traits::Reveal::All), + def_id, + substs).unwrap(); + visit_instance_use(tcx, instance, is_direct_call, output); } } -fn visit_instance_use<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, +fn visit_instance_use<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: ty::Instance<'tcx>, is_direct_call: bool, output: &mut Vec>) { debug!("visit_item_use({:?}, is_direct_call={:?})", instance, is_direct_call); - if !should_trans_locally(scx.tcx(), &instance) { + if !should_trans_locally(tcx, &instance) { return } @@ -779,15 +776,15 @@ fn should_trans_locally<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: &Instan /// /// Finally, there is also the case of custom unsizing coercions, e.g. for /// smart pointers such as `Rc` and `Arc`. 
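// Illustrative stand-alone example, not part of the upstream patch, of the
// unsizing coercion analysed below: a concrete pointee coerced to a trait
// object is what forces a vtable (and therefore all of the trait's methods)
// to be instantiated. `Draw` and `Button` are made-up names.
trait Draw {
    fn draw(&self) -> String;
}

struct Button;

impl Draw for Button {
    fn draw(&self) -> String {
        "button".to_string()
    }
}

fn main() {
    // &Button -> &dyn Draw is the Unsize cast the collector looks for; the
    // vtable it creates must contain Button's implementation of Draw::draw.
    let concrete = Button;
    let object: &dyn Draw = &concrete;
    assert_eq!(object.draw(), "button");
}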
-fn find_vtable_types_for_unsizing<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, - source_ty: ty::Ty<'tcx>, - target_ty: ty::Ty<'tcx>) - -> (ty::Ty<'tcx>, ty::Ty<'tcx>) { - let ptr_vtable = |inner_source: ty::Ty<'tcx>, inner_target: ty::Ty<'tcx>| { - if !scx.type_is_sized(inner_source) { +fn find_vtable_types_for_unsizing<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + source_ty: Ty<'tcx>, + target_ty: Ty<'tcx>) + -> (Ty<'tcx>, Ty<'tcx>) { + let ptr_vtable = |inner_source: Ty<'tcx>, inner_target: Ty<'tcx>| { + if !type_is_sized(tcx, inner_source) { (inner_source, inner_target) } else { - scx.tcx().struct_lockstep_tails(inner_source, inner_target) + tcx.struct_lockstep_tails(inner_source, inner_target) } }; match (&source_ty.sty, &target_ty.sty) { @@ -808,7 +805,7 @@ fn find_vtable_types_for_unsizing<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, assert_eq!(source_adt_def, target_adt_def); let kind = - monomorphize::custom_coerce_unsize_info(scx, source_ty, target_ty); + monomorphize::custom_coerce_unsize_info(tcx, source_ty, target_ty); let coerce_index = match kind { CustomCoerceUnsized::Struct(i) => i @@ -820,10 +817,10 @@ fn find_vtable_types_for_unsizing<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, assert!(coerce_index < source_fields.len() && source_fields.len() == target_fields.len()); - find_vtable_types_for_unsizing(scx, - source_fields[coerce_index].ty(scx.tcx(), + find_vtable_types_for_unsizing(tcx, + source_fields[coerce_index].ty(tcx, source_substs), - target_fields[coerce_index].ty(scx.tcx(), + target_fields[coerce_index].ty(tcx, target_substs)) } _ => bug!("find_vtable_types_for_unsizing: invalid coercion {:?} -> {:?}", @@ -839,28 +836,32 @@ fn create_fn_trans_item<'a, 'tcx>(instance: Instance<'tcx>) -> TransItem<'tcx> { /// Creates a `TransItem` for each method that is referenced by the vtable for /// the given trait/impl pair. -fn create_trans_items_for_vtable_methods<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, - trait_ty: ty::Ty<'tcx>, - impl_ty: ty::Ty<'tcx>, +fn create_trans_items_for_vtable_methods<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + trait_ty: Ty<'tcx>, + impl_ty: Ty<'tcx>, output: &mut Vec>) { assert!(!trait_ty.needs_subst() && !trait_ty.has_escaping_regions() && !impl_ty.needs_subst() && !impl_ty.has_escaping_regions()); if let ty::TyDynamic(ref trait_ty, ..) 
= trait_ty.sty { if let Some(principal) = trait_ty.principal() { - let poly_trait_ref = principal.with_self_ty(scx.tcx(), impl_ty); + let poly_trait_ref = principal.with_self_ty(tcx, impl_ty); assert!(!poly_trait_ref.has_escaping_regions()); // Walk all methods of the trait, including those of its supertraits - let methods = traits::get_vtable_methods(scx.tcx(), poly_trait_ref); + let methods = traits::get_vtable_methods(tcx, poly_trait_ref); let methods = methods.filter_map(|method| method) - .map(|(def_id, substs)| monomorphize::resolve(scx, def_id, substs)) - .filter(|&instance| should_trans_locally(scx.tcx(), &instance)) + .map(|(def_id, substs)| ty::Instance::resolve( + tcx, + ty::ParamEnv::empty(traits::Reveal::All), + def_id, + substs).unwrap()) + .filter(|&instance| should_trans_locally(tcx, &instance)) .map(|instance| create_fn_trans_item(instance)); output.extend(methods); } // Also add the destructor - visit_drop_use(scx, impl_ty, false, output); + visit_drop_use(tcx, impl_ty, false, output); } } @@ -869,8 +870,7 @@ fn create_trans_items_for_vtable_methods<'a, 'tcx>(scx: &SharedCrateContext<'a, //=----------------------------------------------------------------------------- struct RootCollector<'b, 'a: 'b, 'tcx: 'a + 'b> { - scx: &'b SharedCrateContext<'a, 'tcx>, - exported_symbols: &'b ExportedSymbols, + tcx: TyCtxt<'a, 'tcx, 'tcx>, mode: TransItemCollectionMode, output: &'b mut Vec>, } @@ -890,7 +890,7 @@ impl<'b, 'a, 'v> ItemLikeVisitor<'v> for RootCollector<'b, 'a, 'v> { hir::ItemImpl(..) => { if self.mode == TransItemCollectionMode::Eager { - create_trans_items_for_default_impls(self.scx, + create_trans_items_for_default_impls(self.tcx, item, self.output); } @@ -901,25 +901,25 @@ impl<'b, 'a, 'v> ItemLikeVisitor<'v> for RootCollector<'b, 'a, 'v> { hir::ItemUnion(_, ref generics) => { if !generics.is_parameterized() { if self.mode == TransItemCollectionMode::Eager { - let def_id = self.scx.tcx().hir.local_def_id(item.id); + let def_id = self.tcx.hir.local_def_id(item.id); debug!("RootCollector: ADT drop-glue for {}", - def_id_to_string(self.scx.tcx(), def_id)); + def_id_to_string(self.tcx, def_id)); - let ty = def_ty(self.scx, def_id, Substs::empty()); - visit_drop_use(self.scx, ty, true, self.output); + let ty = def_ty(self.tcx, def_id, Substs::empty()); + visit_drop_use(self.tcx, ty, true, self.output); } } } hir::ItemGlobalAsm(..) => { debug!("RootCollector: ItemGlobalAsm({})", - def_id_to_string(self.scx.tcx(), - self.scx.tcx().hir.local_def_id(item.id))); + def_id_to_string(self.tcx, + self.tcx.hir.local_def_id(item.id))); self.output.push(TransItem::GlobalAsm(item.id)); } hir::ItemStatic(..) => { debug!("RootCollector: ItemStatic({})", - def_id_to_string(self.scx.tcx(), - self.scx.tcx().hir.local_def_id(item.id))); + def_id_to_string(self.tcx, + self.tcx.hir.local_def_id(item.id))); self.output.push(TransItem::Static(item.id)); } hir::ItemConst(..) => { @@ -927,12 +927,11 @@ impl<'b, 'a, 'v> ItemLikeVisitor<'v> for RootCollector<'b, 'a, 'v> { // actually used somewhere. Just declaring them is insufficient. } hir::ItemFn(..) 
=> { - let tcx = self.scx.tcx(); + let tcx = self.tcx; let def_id = tcx.hir.local_def_id(item.id); if (self.mode == TransItemCollectionMode::Eager || - !tcx.is_const_fn(def_id) || - self.exported_symbols.local_exports().contains(&item.id)) && + !tcx.is_const_fn(def_id) || tcx.is_exported_symbol(def_id)) && !item_has_type_parameters(tcx, def_id) { debug!("RootCollector: ItemFn({})", @@ -953,12 +952,12 @@ impl<'b, 'a, 'v> ItemLikeVisitor<'v> for RootCollector<'b, 'a, 'v> { fn visit_impl_item(&mut self, ii: &'v hir::ImplItem) { match ii.node { hir::ImplItemKind::Method(hir::MethodSig { .. }, _) => { - let tcx = self.scx.tcx(); + let tcx = self.tcx; let def_id = tcx.hir.local_def_id(ii.id); if (self.mode == TransItemCollectionMode::Eager || !tcx.is_const_fn(def_id) || - self.exported_symbols.local_exports().contains(&ii.id)) && + tcx.is_exported_symbol(def_id)) && !item_has_type_parameters(tcx, def_id) { debug!("RootCollector: MethodImplItem({})", def_id_to_string(tcx, def_id)); @@ -977,10 +976,9 @@ fn item_has_type_parameters<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId generics.parent_types as usize + generics.types.len() > 0 } -fn create_trans_items_for_default_impls<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, +fn create_trans_items_for_default_impls<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item: &'tcx hir::Item, output: &mut Vec>) { - let tcx = scx.tcx(); match item.node { hir::ItemImpl(_, _, @@ -1012,8 +1010,10 @@ fn create_trans_items_for_default_impls<'a, 'tcx>(scx: &SharedCrateContext<'a, ' continue; } - let instance = - monomorphize::resolve(scx, method.def_id, callee_substs); + let instance = ty::Instance::resolve(tcx, + ty::ParamEnv::empty(traits::Reveal::All), + method.def_id, + callee_substs).unwrap(); let trans_item = create_fn_trans_item(instance); if trans_item.is_instantiable(tcx) && should_trans_locally(tcx, &instance) { @@ -1029,15 +1029,15 @@ fn create_trans_items_for_default_impls<'a, 'tcx>(scx: &SharedCrateContext<'a, ' } /// Scan the MIR in order to find function calls, closures, and drop-glue -fn collect_neighbours<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, +fn collect_neighbours<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: Instance<'tcx>, const_context: bool, output: &mut Vec>) { - let mir = scx.tcx().instance_mir(instance.def); + let mir = tcx.instance_mir(instance.def); let mut visitor = MirNeighborCollector { - scx, + tcx, mir: &mir, output, param_substs: instance.substs, diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 09aa3d2335..52607904f7 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -26,17 +26,19 @@ use machine; use monomorphize; use type_::Type; use value::Value; +use rustc::traits; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::{Layout, LayoutTyper}; -use rustc::ty::subst::{Subst, Substs}; +use rustc::ty::subst::{Kind, Subst, Substs}; use rustc::hir; use libc::{c_uint, c_char}; use std::iter; +use syntax::abi::Abi; use syntax::attr; use syntax::symbol::InternedString; -use syntax_pos::Span; +use syntax_pos::{Span, DUMMY_SP}; pub use context::{CrateContext, SharedCrateContext}; @@ -91,6 +93,16 @@ pub fn type_pair_fields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) } })) } + ty::TyGenerator(def_id, substs, _) => { + let mut tys = substs.field_tys(def_id, ccx.tcx()); + tys.next().and_then(|first_ty| tys.next().and_then(|second_ty| { + if tys.next().is_some() { + None + } else { + Some([first_ty, second_ty]) + } + })) + } ty::TyTuple(tys, _) => { if tys.len() != 2 { 
return None; @@ -129,6 +141,18 @@ pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) - !layout.is_unsized() && layout.size(ccx).bytes() == 0 } +pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.needs_drop(tcx, ty::ParamEnv::empty(traits::Reveal::All)) +} + +pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.is_sized(tcx, ty::ParamEnv::empty(traits::Reveal::All), DUMMY_SP) +} + +pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.is_freeze(tcx, ty::ParamEnv::empty(traits::Reveal::All), DUMMY_SP) +} + /* * A note on nomenclature of linking: "extern", "foreign", and "upcall". * @@ -210,9 +234,15 @@ pub fn C_undef(t: Type) -> ValueRef { } } -pub fn C_integral(t: Type, u: u64, sign_extend: bool) -> ValueRef { +pub fn C_int(t: Type, i: i64) -> ValueRef { unsafe { - llvm::LLVMConstInt(t.to_ref(), u, sign_extend as Bool) + llvm::LLVMConstInt(t.to_ref(), i as u64, True) + } +} + +pub fn C_uint(t: Type, i: u64) -> ValueRef { + unsafe { + llvm::LLVMConstInt(t.to_ref(), i, False) } } @@ -228,49 +258,34 @@ pub fn C_nil(ccx: &CrateContext) -> ValueRef { } pub fn C_bool(ccx: &CrateContext, val: bool) -> ValueRef { - C_integral(Type::i1(ccx), val as u64, false) + C_uint(Type::i1(ccx), val as u64) } pub fn C_i32(ccx: &CrateContext, i: i32) -> ValueRef { - C_integral(Type::i32(ccx), i as u64, true) + C_int(Type::i32(ccx), i as i64) } pub fn C_u32(ccx: &CrateContext, i: u32) -> ValueRef { - C_integral(Type::i32(ccx), i as u64, false) + C_uint(Type::i32(ccx), i as u64) } pub fn C_u64(ccx: &CrateContext, i: u64) -> ValueRef { - C_integral(Type::i64(ccx), i, false) + C_uint(Type::i64(ccx), i) } -pub fn C_uint(ccx: &CrateContext, i: I) -> ValueRef { - let v = i.as_u64(); - - let bit_size = machine::llbitsize_of_real(ccx, ccx.int_type()); +pub fn C_usize(ccx: &CrateContext, i: u64) -> ValueRef { + let bit_size = machine::llbitsize_of_real(ccx, ccx.isize_ty()); if bit_size < 64 { // make sure it doesn't overflow - assert!(v < (1< i64; } -pub trait AsU64 { fn as_u64(self) -> u64; } - -// FIXME: remove the intptr conversions, because they -// are host-architecture-dependent -impl AsI64 for i64 { fn as_i64(self) -> i64 { self as i64 }} -impl AsI64 for i32 { fn as_i64(self) -> i64 { self as i64 }} -impl AsI64 for isize { fn as_i64(self) -> i64 { self as i64 }} - -impl AsU64 for u64 { fn as_u64(self) -> u64 { self as u64 }} -impl AsU64 for u32 { fn as_u64(self) -> u64 { self as u64 }} -impl AsU64 for usize { fn as_u64(self) -> u64 { self as u64 }} - pub fn C_u8(ccx: &CrateContext, i: u8) -> ValueRef { - C_integral(Type::i8(ccx), i as u64, false) + C_uint(Type::i8(ccx), i as u64) } @@ -304,7 +319,7 @@ pub fn C_cstr(cx: &CrateContext, s: InternedString, null_terminated: bool) -> Va pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef { let len = s.len(); let cs = consts::ptrcast(C_cstr(cx, s, false), Type::i8p(cx)); - C_named_struct(cx.str_slice_type(), &[cs, C_uint(cx, len)]) + C_named_struct(cx.str_slice_type(), &[cs, C_usize(cx, len as u64)]) } pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef { @@ -412,7 +427,7 @@ pub fn langcall(tcx: TyCtxt, msg: &str, li: LangItem) -> DefId { - match tcx.lang_items.require(li) { + match tcx.lang_items().require(li) { Ok(id) => id, Err(s) => { let msg = format!("{} {}", msg, s); @@ -471,9 +486,9 @@ pub fn shift_mask_val<'a, 'tcx>( // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc. 
let val = llty.int_width() - 1; if invert { - C_integral(mask_llty, !val, true) + C_int(mask_llty, !val as i64) } else { - C_integral(mask_llty, val, false) + C_uint(mask_llty, val) } }, TypeKind::Vector => { @@ -511,6 +526,28 @@ pub fn ty_fn_sig<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, sig.abi )) } + ty::TyGenerator(def_id, substs, _) => { + let tcx = ccx.tcx(); + let sig = tcx.generator_sig(def_id).unwrap().subst(tcx, substs.substs); + + let env_region = ty::ReLateBound(ty::DebruijnIndex::new(1), ty::BrEnv); + let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty); + + sig.map_bound(|sig| { + let state_did = tcx.lang_items().gen_state().unwrap(); + let state_adt_ref = tcx.adt_def(state_did); + let state_substs = tcx.mk_substs([Kind::from(sig.yield_ty), + Kind::from(sig.return_ty)].iter()); + let ret_ty = tcx.mk_adt(state_adt_ref, state_substs); + + tcx.mk_fn_sig(iter::once(env_ty), + ret_ty, + false, + hir::Unsafety::Normal, + Abi::Rust + ) + }) + } _ => bug!("unexpected type {:?} to ty_fn_sig", ty) } } @@ -549,20 +586,20 @@ pub fn is_inline_instance<'a, 'tcx>( } /// Given a DefId and some Substs, produces the monomorphic item type. -pub fn def_ty<'a, 'tcx>(shared: &SharedCrateContext<'a, 'tcx>, +pub fn def_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { - let ty = shared.tcx().type_of(def_id); - shared.tcx().trans_apply_param_substs(substs, &ty) + let ty = tcx.type_of(def_id); + tcx.trans_apply_param_substs(substs, &ty) } /// Return the substituted type of an instance. -pub fn instance_ty<'a, 'tcx>(shared: &SharedCrateContext<'a, 'tcx>, +pub fn instance_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: &ty::Instance<'tcx>) -> Ty<'tcx> { - let ty = instance.def.def_ty(shared.tcx()); - shared.tcx().trans_apply_param_substs(instance.substs, &ty) + let ty = instance.def.def_ty(tcx); + tcx.trans_apply_param_substs(instance.substs, &ty) } diff --git a/src/librustc_trans/consts.rs b/src/librustc_trans/consts.rs index 310cd6fe95..eaf7392aab 100644 --- a/src/librustc_trans/consts.rs +++ b/src/librustc_trans/consts.rs @@ -16,7 +16,7 @@ use rustc::hir::map as hir_map; use rustc::middle::const_val::ConstEvalErr; use {debuginfo, machine}; use base; -use trans_item::TransItem; +use trans_item::{TransItem, TransItemExt}; use common::{self, CrateContext, val_ty}; use declare; use monomorphize::Instance; @@ -26,6 +26,7 @@ use rustc::ty; use rustc::hir; +use std::cmp; use std::ffi::{CStr, CString}; use syntax::ast; use syntax::attr; @@ -42,6 +43,25 @@ pub fn bitcast(val: ValueRef, ty: Type) -> ValueRef { } } +fn set_global_alignment(ccx: &CrateContext, + gv: ValueRef, + mut align: machine::llalign) { + // The target may require greater alignment for globals than the type does. + // Note: GCC and Clang also allow `__attribute__((aligned))` on variables, + // which can force it to be smaller. Rust doesn't support this yet. 
+ if let Some(min) = ccx.sess().target.target.options.min_global_align { + match ty::layout::Align::from_bits(min, min) { + Ok(min) => align = cmp::max(align, min.abi() as machine::llalign), + Err(err) => { + ccx.sess().err(&format!("invalid minimum global alignment: {}", err)); + } + } + } + unsafe { + llvm::LLVMSetAlignment(gv, align); + } +} + pub fn addr_of_mut(ccx: &CrateContext, cv: ValueRef, align: machine::llalign, @@ -53,7 +73,7 @@ pub fn addr_of_mut(ccx: &CrateContext, bug!("symbol `{}` is already defined", name); }); llvm::LLVMSetInitializer(gv, cv); - llvm::LLVMSetAlignment(gv, align); + set_global_alignment(ccx, gv, align); llvm::LLVMRustSetLinkage(gv, llvm::Linkage::InternalLinkage); SetUnnamedAddr(gv, true); gv @@ -89,7 +109,7 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef { return g; } - let ty = common::instance_ty(ccx.shared(), &instance); + let ty = common::instance_ty(ccx.tcx(), &instance); let g = if let Some(id) = ccx.tcx().hir.as_local_node_id(def_id) { let llty = type_of::type_of(ccx, ty); @@ -110,7 +130,7 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef { let g = declare::define_global(ccx, &sym[..], llty).unwrap(); - if !ccx.exported_symbols().local_exports().contains(&id) { + if !ccx.tcx().is_exported_symbol(def_id) { unsafe { llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden); } @@ -130,7 +150,7 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef { // extern "C" fn() from being non-null, so we can't just declare a // static and call it a day. Some linkages (like weak) will make it such // that the static actually has a null value. - let linkage = match base::llvm_linkage_by_name(&name.as_str()) { + let linkage = match base::linkage_by_name(&name.as_str()) { Some(linkage) => linkage, None => { ccx.sess().span_fatal(span, "invalid linkage specified"); @@ -145,7 +165,7 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef { unsafe { // Declare a symbol `foo` with the desired linkage. let g1 = declare::declare_global(ccx, &sym, llty2); - llvm::LLVMRustSetLinkage(g1, linkage); + llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage)); // Declare an internal global `extern_with_linkage_foo` which // is initialized with the address of `foo`. If `foo` is @@ -211,12 +231,13 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef { g }; - if ccx.use_dll_storage_attrs() && ccx.sess().cstore.is_dllimport_foreign_item(def_id) { + if ccx.use_dll_storage_attrs() && ccx.tcx().is_dllimport_foreign_item(def_id) { // For foreign (native) libs we know the exact storage type to use. 
unsafe { llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); } } + ccx.instances().borrow_mut().insert(instance, g); ccx.statics().borrow_mut().insert(g, def_id); g @@ -244,7 +265,7 @@ pub fn trans_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, }; let instance = Instance::mono(ccx.tcx(), def_id); - let ty = common::instance_ty(ccx.shared(), &instance); + let ty = common::instance_ty(ccx.tcx(), &instance); let llty = type_of::type_of(ccx, ty); let g = if val_llty == llty { g @@ -271,7 +292,7 @@ pub fn trans_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ccx.statics_to_rauw().borrow_mut().push((g, new_g)); new_g }; - llvm::LLVMSetAlignment(g, ccx.align_of(ty)); + set_global_alignment(ccx, g, ccx.align_of(ty)); llvm::LLVMSetInitializer(g, v); // As an optimization, all shared statics which do not have interior diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index f2b07cf6a5..1722d008a5 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -8,28 +8,30 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use common; use llvm; use llvm::{ContextRef, ModuleRef, ValueRef}; use rustc::dep_graph::{DepGraph, DepGraphSafe}; use rustc::hir; use rustc::hir::def_id::DefId; +use rustc::ich::StableHashingContext; use rustc::traits; use debuginfo; use callee; -use back::symbol_export::ExportedSymbols; use base; use declare; use monomorphize::Instance; use partitioning::CodegenUnit; -use trans_item::TransItem; use type_::Type; use rustc_data_structures::base_n; -use rustc::session::config::{self, NoDebugInfo, OutputFilenames}; +use rustc::middle::trans::Stats; +use rustc_data_structures::stable_hasher::StableHashingContextProvider; +use rustc::session::config::{self, NoDebugInfo}; use rustc::session::Session; -use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::{LayoutCx, LayoutError, LayoutTyper, TyLayout}; -use rustc::util::nodemap::{FxHashMap, FxHashSet}; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::util::nodemap::FxHashMap; use std::ffi::{CStr, CString}; use std::cell::{Cell, RefCell}; @@ -39,39 +41,8 @@ use std::str; use std::sync::Arc; use std::marker::PhantomData; use syntax::symbol::InternedString; -use syntax_pos::DUMMY_SP; use abi::Abi; -#[derive(Clone, Default)] -pub struct Stats { - pub n_glues_created: Cell, - pub n_null_glues: Cell, - pub n_real_glues: Cell, - pub n_fns: Cell, - pub n_inlines: Cell, - pub n_closures: Cell, - pub n_llvm_insns: Cell, - pub llvm_insns: RefCell>, - // (ident, llvm-instructions) - pub fn_stats: RefCell >, -} - -impl Stats { - pub fn extend(&mut self, stats: Stats) { - self.n_glues_created.set(self.n_glues_created.get() + stats.n_glues_created.get()); - self.n_null_glues.set(self.n_null_glues.get() + stats.n_null_glues.get()); - self.n_real_glues.set(self.n_real_glues.get() + stats.n_real_glues.get()); - self.n_fns.set(self.n_fns.get() + stats.n_fns.get()); - self.n_inlines.set(self.n_inlines.get() + stats.n_inlines.get()); - self.n_closures.set(self.n_closures.get() + stats.n_closures.get()); - self.n_llvm_insns.set(self.n_llvm_insns.get() + stats.n_llvm_insns.get()); - self.llvm_insns.borrow_mut().extend( - stats.llvm_insns.borrow().iter() - .map(|(key, value)| (key.clone(), value.clone()))); - self.fn_stats.borrow_mut().append(&mut *stats.fn_stats.borrow_mut()); - } -} - /// The shared portion of a `CrateContext`. There is one `SharedCrateContext` /// per crate. 
The data here is shared between all compilation units of the /// crate, so it must not contain references to any LLVM data structures @@ -79,10 +50,7 @@ impl Stats { pub struct SharedCrateContext<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, check_overflow: bool, - use_dll_storage_attrs: bool, - - output_filenames: &'a OutputFilenames, } /// The local portion of a `CrateContext`. There is one `LocalCrateContext` @@ -92,19 +60,13 @@ pub struct SharedCrateContext<'a, 'tcx: 'a> { pub struct LocalCrateContext<'a, 'tcx: 'a> { llmod: ModuleRef, llcx: ContextRef, - stats: Stats, - codegen_unit: CodegenUnit<'tcx>, - - /// The translation items of the whole crate. - crate_trans_items: Arc>>, - - /// Information about which symbols are exported from the crate. - exported_symbols: Arc, + stats: RefCell, + codegen_unit: Arc>, /// Cache instances of monomorphic and polymorphic items instances: RefCell, ValueRef>>, /// Cache generated vtables - vtables: RefCell, + vtables: RefCell, Option>), ValueRef>>, /// Cache of constant strings, const_cstr_cache: RefCell>, @@ -136,7 +98,7 @@ pub struct LocalCrateContext<'a, 'tcx: 'a> { used_statics: RefCell>, lltypes: RefCell, Type>>, - int_type: Type, + isize_ty: Type, opaque_vec_type: Type, str_slice_type: Type, @@ -174,6 +136,17 @@ impl<'a, 'tcx> CrateContext<'a, 'tcx> { impl<'a, 'tcx> DepGraphSafe for CrateContext<'a, 'tcx> { } +impl<'a, 'tcx> DepGraphSafe for SharedCrateContext<'a, 'tcx> { +} + +impl<'a, 'tcx> StableHashingContextProvider for SharedCrateContext<'a, 'tcx> { + type ContextType = StableHashingContext<'tcx>; + + fn create_stable_hashing_context(&self) -> Self::ContextType { + self.tcx.create_stable_hashing_context() + } +} + pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode { let reloc_model_arg = match sess.opts.cg.relocation_model { Some(ref s) => &s[..], @@ -261,10 +234,7 @@ pub unsafe fn create_context_and_module(sess: &Session, mod_name: &str) -> (Cont } impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> { - pub fn new(tcx: TyCtxt<'b, 'tcx, 'tcx>, - check_overflow: bool, - output_filenames: &'b OutputFilenames) - -> SharedCrateContext<'b, 'tcx> { + pub fn new(tcx: TyCtxt<'b, 'tcx, 'tcx>) -> SharedCrateContext<'b, 'tcx> { // An interesting part of Windows which MSVC forces our hand on (and // apparently MinGW didn't) is the usage of `dllimport` and `dllexport` // attributes in LLVM IR as well as native dependencies (in C these @@ -310,27 +280,28 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> { // start) and then strongly recommending static linkage on MSVC! 
let use_dll_storage_attrs = tcx.sess.target.target.options.is_like_msvc; + let check_overflow = tcx.sess.overflow_checks(); + SharedCrateContext { tcx, check_overflow, use_dll_storage_attrs, - output_filenames, } } pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { - ty.needs_drop(self.tcx, ty::ParamEnv::empty(traits::Reveal::All)) + common::type_needs_drop(self.tcx, ty) } pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { - ty.is_sized(self.tcx, ty::ParamEnv::empty(traits::Reveal::All), DUMMY_SP) + common::type_is_sized(self.tcx, ty) } pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool { - ty.is_freeze(self.tcx, ty::ParamEnv::empty(traits::Reveal::All), DUMMY_SP) + common::type_is_freeze(self.tcx, ty) } - pub fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> { + pub fn tcx(&self) -> TyCtxt<'b, 'tcx, 'tcx> { self.tcx } @@ -345,29 +316,14 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> { pub fn use_dll_storage_attrs(&self) -> bool { self.use_dll_storage_attrs } - - pub fn output_filenames(&self) -> &OutputFilenames { - self.output_filenames - } } impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> { pub fn new(shared: &SharedCrateContext<'a, 'tcx>, - codegen_unit: CodegenUnit<'tcx>, - crate_trans_items: Arc>>, - exported_symbols: Arc,) + codegen_unit: Arc>, + llmod_id: &str) -> LocalCrateContext<'a, 'tcx> { unsafe { - // Append ".rs" to LLVM module identifier. - // - // LLVM code generator emits a ".file filename" directive - // for ELF backends. Value of the "filename" is set as the - // LLVM module identifier. Due to a LLVM MC bug[1], LLVM - // crashes if the module identifier is same as other symbols - // such as a function name in the module. - // 1. http://llvm.org/bugs/show_bug.cgi?id=11479 - let llmod_id = format!("{}.rs", codegen_unit.name()); - let (llcx, llmod) = create_context_and_module(&shared.tcx.sess, &llmod_id[..]); @@ -385,10 +341,8 @@ impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> { let local_ccx = LocalCrateContext { llmod, llcx, - stats: Stats::default(), + stats: RefCell::new(Stats::default()), codegen_unit, - crate_trans_items, - exported_symbols, instances: RefCell::new(FxHashMap()), vtables: RefCell::new(FxHashMap()), const_cstr_cache: RefCell::new(FxHashMap()), @@ -398,7 +352,7 @@ impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> { statics_to_rauw: RefCell::new(Vec::new()), used_statics: RefCell::new(Vec::new()), lltypes: RefCell::new(FxHashMap()), - int_type: Type::from_ref(ptr::null_mut()), + isize_ty: Type::from_ref(ptr::null_mut()), opaque_vec_type: Type::from_ref(ptr::null_mut()), str_slice_type: Type::from_ref(ptr::null_mut()), dbg_cx, @@ -410,23 +364,23 @@ impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> { placeholder: PhantomData, }; - let (int_type, opaque_vec_type, str_slice_ty, mut local_ccx) = { + let (isize_ty, opaque_vec_type, str_slice_ty, mut local_ccx) = { // Do a little dance to create a dummy CrateContext, so we can // create some things in the LLVM module of this codegen unit let mut local_ccxs = vec![local_ccx]; - let (int_type, opaque_vec_type, str_slice_ty) = { + let (isize_ty, opaque_vec_type, str_slice_ty) = { let dummy_ccx = LocalCrateContext::dummy_ccx(shared, local_ccxs.as_mut_slice()); let mut str_slice_ty = Type::named_struct(&dummy_ccx, "str_slice"); str_slice_ty.set_struct_body(&[Type::i8p(&dummy_ccx), - Type::int(&dummy_ccx)], + Type::isize(&dummy_ccx)], false); - (Type::int(&dummy_ccx), Type::opaque_vec(&dummy_ccx), str_slice_ty) + (Type::isize(&dummy_ccx), Type::opaque_vec(&dummy_ccx), str_slice_ty) }; - (int_type, opaque_vec_type, str_slice_ty, 
local_ccxs.pop().unwrap()) + (isize_ty, opaque_vec_type, str_slice_ty, local_ccxs.pop().unwrap()) }; - local_ccx.int_type = int_type; + local_ccx.isize_ty = isize_ty; local_ccx.opaque_vec_type = opaque_vec_type; local_ccx.str_slice_type = str_slice_ty; @@ -452,7 +406,7 @@ impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> { } pub fn into_stats(self) -> Stats { - self.stats + self.stats.into_inner() } } @@ -465,7 +419,7 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { self.local_ccx } - pub fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> { + pub fn tcx(&self) -> TyCtxt<'b, 'tcx, 'tcx> { self.shared.tcx } @@ -495,14 +449,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { &self.local().codegen_unit } - pub fn crate_trans_items(&self) -> &FxHashSet> { - &self.local().crate_trans_items - } - - pub fn exported_symbols(&self) -> &ExportedSymbols { - &self.local().exported_symbols - } - pub fn td(&self) -> llvm::TargetDataRef { unsafe { llvm::LLVMRustGetModuleDataLayout(self.llmod()) } } @@ -512,7 +458,7 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { } pub fn vtables<'a>(&'a self) - -> &'a RefCell, + -> &'a RefCell, Option>), ValueRef>> { &self.local().vtables } @@ -545,12 +491,12 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { &self.local().lltypes } - pub fn stats<'a>(&'a self) -> &'a Stats { + pub fn stats<'a>(&'a self) -> &'a RefCell { &self.local().stats } - pub fn int_type(&self) -> Type { - self.local().int_type + pub fn isize_ty(&self) -> Type { + self.local().isize_ty } pub fn str_slice_type(&self) -> Type { @@ -616,7 +562,7 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { return llpersonality } let tcx = self.tcx(); - let llfn = match tcx.lang_items.eh_personality() { + let llfn = match tcx.lang_items().eh_personality() { Some(def_id) if !base::wants_msvc_seh(self.sess()) => { callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])) } @@ -645,7 +591,7 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { let tcx = self.tcx(); assert!(self.sess().target.target.options.custom_unwind_resume); - if let Some(def_id) = tcx.lang_items.eh_unwind_resume() { + if let Some(def_id) = tcx.lang_items().eh_unwind_resume() { let llfn = callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])); unwresume.set(Some(llfn)); return llfn; diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 9aba075a20..8a89bfee4a 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -366,7 +366,7 @@ fn vec_slice_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, -> bool { member_llvm_types.len() == 2 && member_llvm_types[0] == type_of::type_of(cx, element_type).ptr_to() && - member_llvm_types[1] == cx.int_type() + member_llvm_types[1] == cx.isize_ty() } } @@ -530,7 +530,8 @@ pub fn type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, MetadataCreationResult::new(basic_type_metadata(cx, t), false) } ty::TyArray(typ, len) => { - fixed_vec_metadata(cx, unique_type_id, typ, Some(len as u64), usage_site_span) + let len = len.val.to_const_int().unwrap().to_u64().unwrap(); + fixed_vec_metadata(cx, unique_type_id, typ, Some(len), usage_site_span) } ty::TySlice(typ) => { fixed_vec_metadata(cx, unique_type_id, typ, None, usage_site_span) @@ -580,6 +581,16 @@ pub fn type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, unique_type_id, usage_site_span).finalize(cx) } + ty::TyGenerator(def_id, substs, _) => { + let upvar_tys : Vec<_> = substs.field_tys(def_id, cx.tcx()).map(|t| { + cx.tcx().normalize_associated_type(&t) + }).collect(); + prepare_tuple_metadata(cx, + t, + 
&upvar_tys, + unique_type_id, + usage_site_span).finalize(cx) + } ty::TyAdt(def, ..) => match def.adt_kind() { AdtKind::Struct => { prepare_struct_metadata(cx, @@ -811,9 +822,9 @@ pub fn compile_unit_metadata(scc: &SharedCrateContext, let gcov_cu_info = [ path_to_mdstring(debug_context.llcontext, - &scc.output_filenames().with_extension("gcno")), + &scc.tcx().output_filenames(LOCAL_CRATE).with_extension("gcno")), path_to_mdstring(debug_context.llcontext, - &scc.output_filenames().with_extension("gcda")), + &scc.tcx().output_filenames(LOCAL_CRATE).with_extension("gcda")), cu_desc_metadata, ]; let gcov_metadata = llvm::LLVMMDNodeInContext(debug_context.llcontext, @@ -1602,7 +1613,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fn get_enum_discriminant_name(cx: &CrateContext, def_id: DefId) -> InternedString { - cx.tcx().item_name(def_id).as_str() + cx.tcx().item_name(def_id) } } @@ -1792,7 +1803,7 @@ pub fn create_global_var_metadata(cx: &CrateContext, }; let is_local_to_unit = is_node_local_to_unit(cx, node_id); - let variable_type = common::def_ty(cx.shared(), node_def_id, Substs::empty()); + let variable_type = common::def_ty(cx.tcx(), node_def_id, Substs::empty()); let type_metadata = type_metadata(cx, variable_type, span); let var_name = tcx.item_name(node_def_id).to_string(); let linkage_name = mangled_name_of_item(cx, node_def_id, ""); diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index 83366c1345..7e2ac95cd8 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -428,7 +428,7 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // If the method does *not* belong to a trait, proceed if cx.tcx().trait_id_of_impl(impl_def_id).is_none() { let impl_self_ty = - common::def_ty(cx.shared(), impl_def_id, instance.substs); + common::def_ty(cx.tcx(), impl_def_id, instance.substs); // Only "class" methods are generally understood by LLVM, // so avoid methods on other types (e.g. `<*mut T>::null`). diff --git a/src/librustc_trans/debuginfo/type_names.rs b/src/librustc_trans/debuginfo/type_names.rs index 8cb2c2809f..7bf9d39ea2 100644 --- a/src/librustc_trans/debuginfo/type_names.rs +++ b/src/librustc_trans/debuginfo/type_names.rs @@ -96,7 +96,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty::TyArray(inner_type, len) => { output.push('['); push_debuginfo_type_name(cx, inner_type, true, output); - output.push_str(&format!("; {}", len)); + output.push_str(&format!("; {}", len.val.to_const_int().unwrap().to_u64().unwrap())); output.push(']'); }, ty::TySlice(inner_type) => { @@ -165,6 +165,9 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty::TyClosure(..) => { output.push_str("closure"); } + ty::TyGenerator(..) => { + output.push_str("generator"); + } ty::TyError | ty::TyInfer(_) | ty::TyProjection(..) | @@ -186,7 +189,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, output.push_str(&path_element.data.as_interned_str()); } } else { - output.push_str(&cx.tcx().item_name(def_id).as_str()); + output.push_str(&cx.tcx().item_name(def_id)); } } diff --git a/src/librustc_trans/debuginfo/utils.rs b/src/librustc_trans/debuginfo/utils.rs index 6df509f34a..ad4fdfca72 100644 --- a/src/librustc_trans/debuginfo/utils.rs +++ b/src/librustc_trans/debuginfo/utils.rs @@ -37,7 +37,8 @@ pub fn is_node_local_to_unit(cx: &CrateContext, node_id: ast::NodeId) -> bool // visible). 
It might better to use the `exported_items` set from // `driver::CrateAnalysis` in the future, but (atm) this set is not // available in the translation pass. - !cx.exported_symbols().local_exports().contains(&node_id) + let def_id = cx.tcx().hir.local_def_id(node_id); + !cx.tcx().is_exported_symbol(def_id) } #[allow(non_snake_case)] @@ -49,7 +50,7 @@ pub fn create_DIArray(builder: DIBuilderRef, arr: &[DIDescriptor]) -> DIArray { /// Return syntax_pos::Loc corresponding to the beginning of the span pub fn span_start(cx: &CrateContext, span: Span) -> syntax_pos::Loc { - cx.sess().codemap().lookup_char_pos(span.lo) + cx.sess().codemap().lookup_char_pos(span.lo()) } pub fn size_and_align_of(cx: &CrateContext, llvm_type: Type) -> (u64, u32) { diff --git a/src/librustc_trans/declare.rs b/src/librustc_trans/declare.rs index 8f9146283e..3c8ff45499 100644 --- a/src/librustc_trans/declare.rs +++ b/src/librustc_trans/declare.rs @@ -22,7 +22,7 @@ use llvm::{self, ValueRef}; use llvm::AttributePlace::Function; -use rustc::ty; +use rustc::ty::Ty; use rustc::session::config::Sanitizer; use abi::{Abi, FnType}; use attributes; @@ -119,7 +119,7 @@ pub fn declare_cfn(ccx: &CrateContext, name: &str, fn_type: Type) -> ValueRef { /// If there’s a value with the same name already declared, the function will /// update the declaration and return existing ValueRef instead. pub fn declare_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str, - fn_type: ty::Ty<'tcx>) -> ValueRef { + fn_type: Ty<'tcx>) -> ValueRef { debug!("declare_rust_fn(name={:?}, fn_type={:?})", name, fn_type); let sig = common::ty_fn_sig(ccx, fn_type); let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig); @@ -164,7 +164,7 @@ pub fn define_global(ccx: &CrateContext, name: &str, ty: Type) -> Option(ccx: &CrateContext<'a, 'tcx>, name: &str, - fn_type: ty::Ty<'tcx>) -> ValueRef { + fn_type: Ty<'tcx>) -> ValueRef { if get_defined_value(ccx, name).is_some() { ccx.sess().fatal(&format!("symbol `{}` already defined", name)) } else { @@ -179,7 +179,7 @@ pub fn define_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, /// can happen with #[no_mangle] or #[export_name], for example. pub fn define_internal_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str, - fn_type: ty::Ty<'tcx>) -> ValueRef { + fn_type: Ty<'tcx>) -> ValueRef { let llfn = define_fn(ccx, name, fn_type); unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) }; llfn diff --git a/src/librustc_trans/diagnostics.rs b/src/librustc_trans/diagnostics.rs index df71fd4b19..8485867689 100644 --- a/src/librustc_trans/diagnostics.rs +++ b/src/librustc_trans/diagnostics.rs @@ -46,4 +46,28 @@ extern "platform-intrinsic" { unsafe { simd_add(i32x1(0), i32x1(1)); } // ok! ``` "##, + +E0558: r##" +The `export_name` attribute was malformed. + +Erroneous code example: + +```ignore (error-emitted-at-codegen-which-cannot-be-handled-by-compile_fail) +#[export_name] // error: export_name attribute has invalid format +pub fn something() {} + +fn main() {} +``` + +The `export_name` attribute expects a string in order to determine the name of +the exported symbol. Example: + +``` +#[export_name = "some_function"] // ok! 
+pub fn something() {} + +fn main() {} +``` +"##, + } diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index c2f44c089a..453b98a1d7 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -14,52 +14,15 @@ use std; -use llvm; -use llvm::{ValueRef}; -use rustc::traits; -use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::LayoutTyper; +use builder::Builder; use common::*; +use llvm::{ValueRef}; +use llvm; use meth; use monomorphize; +use rustc::ty::layout::LayoutTyper; +use rustc::ty::{self, Ty}; use value::Value; -use builder::Builder; - -pub fn needs_drop_glue<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'tcx>) -> bool { - assert!(t.is_normalized_for_trans()); - - let t = scx.tcx().erase_regions(&t); - - // FIXME (#22815): note that type_needs_drop conservatively - // approximates in some cases and may say a type expression - // requires drop glue when it actually does not. - // - // (In this case it is not clear whether any harm is done, i.e. - // erroneously returning `true` in some cases where we could have - // returned `false` does not appear unsound. The impact on - // code quality is unknown at this time.) - - if !scx.type_needs_drop(t) { - return false; - } - match t.sty { - ty::TyAdt(def, _) if def.is_box() => { - let typ = t.boxed_ty(); - if !scx.type_needs_drop(typ) && scx.type_is_sized(typ) { - let layout = t.layout(scx.tcx(), ty::ParamEnv::empty(traits::Reveal::All)).unwrap(); - if layout.size(scx).bytes() == 0 { - // `Box` does not allocate. - false - } else { - true - } - } else { - true - } - } - _ => true - } -} pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, info: ValueRef) -> (ValueRef, ValueRef) { @@ -70,8 +33,8 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf let align = bcx.ccx.align_of(t); debug!("size_and_align_of_dst t={} info={:?} size: {} align: {}", t, Value(info), size, align); - let size = C_uint(bcx.ccx, size); - let align = C_uint(bcx.ccx, align); + let size = C_usize(bcx.ccx, size); + let align = C_usize(bcx.ccx, align as u64); return (size, align); } assert!(!info.is_null()); @@ -96,8 +59,8 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf }; debug!("DST {} statically sized prefix size: {} align: {}", t, sized_size, sized_align); - let sized_size = C_uint(ccx, sized_size); - let sized_align = C_uint(ccx, sized_align); + let sized_size = C_usize(ccx, sized_size); + let sized_align = C_usize(ccx, sized_align); // Recurse to get the size of the dynamically sized field (must be // the last field). @@ -128,7 +91,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf (Some(sized_align), Some(unsized_align)) => { // If both alignments are constant, (the sized_align should always be), then // pick the correct alignment statically. 
- C_uint(ccx, std::cmp::max(sized_align, unsized_align) as u64) + C_usize(ccx, std::cmp::max(sized_align, unsized_align) as u64) } _ => bcx.select(bcx.icmp(llvm::IntUGT, sized_align, unsized_align), sized_align, @@ -146,7 +109,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf // // `(size + (align-1)) & -align` - let addend = bcx.sub(align, C_uint(bcx.ccx, 1_u64)); + let addend = bcx.sub(align, C_usize(bcx.ccx, 1)); let size = bcx.and(bcx.add(size, addend), bcx.neg(align)); (size, align) @@ -159,8 +122,8 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf let unit = t.sequence_element_type(bcx.tcx()); // The info in this case is the length of the str, so the size is that // times the unit size. - (bcx.mul(info, C_uint(bcx.ccx, bcx.ccx.size_of(unit))), - C_uint(bcx.ccx, bcx.ccx.align_of(unit))) + (bcx.mul(info, C_usize(bcx.ccx, bcx.ccx.size_of(unit))), + C_usize(bcx.ccx, bcx.ccx.align_of(unit) as u64)) } _ => bug!("Unexpected unsized type, found {}", t) } diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 033ef98857..f78d80a197 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -104,7 +104,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let sig = tcx.erase_late_bound_regions_and_normalize(&sig); let arg_tys = sig.inputs(); let ret_ty = sig.output(); - let name = &*tcx.item_name(def_id).as_str(); + let name = &*tcx.item_name(def_id); let llret_ty = type_of::type_of(ccx, ret_ty); @@ -135,7 +135,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, "size_of" => { let tp_ty = substs.type_at(0); let lltp_ty = type_of::type_of(ccx, tp_ty); - C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty)) + C_usize(ccx, machine::llsize_of_alloc(ccx, lltp_ty)) } "size_of_val" => { let tp_ty = substs.type_at(0); @@ -145,12 +145,12 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, llsize } else { let lltp_ty = type_of::type_of(ccx, tp_ty); - C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty)) + C_usize(ccx, machine::llsize_of_alloc(ccx, lltp_ty)) } } "min_align_of" => { let tp_ty = substs.type_at(0); - C_uint(ccx, ccx.align_of(tp_ty)) + C_usize(ccx, ccx.align_of(tp_ty) as u64) } "min_align_of_val" => { let tp_ty = substs.type_at(0); @@ -159,13 +159,13 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); llalign } else { - C_uint(ccx, ccx.align_of(tp_ty)) + C_usize(ccx, ccx.align_of(tp_ty) as u64) } } "pref_align_of" => { let tp_ty = substs.type_at(0); let lltp_ty = type_of::type_of(ccx, tp_ty); - C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty)) + C_usize(ccx, machine::llalign_of_pref(ccx, lltp_ty) as u64) } "type_name" => { let tp_ty = substs.type_at(0); @@ -182,7 +182,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // If we store a zero constant, LLVM will drown in vreg allocation for large data // structures, and the generated code will be awful. (A telltale sign of this is // large quantities of `mov [byte ptr foo],0` in the generated code.) 
- memset_intrinsic(bcx, false, ty, llresult, C_u8(ccx, 0), C_uint(ccx, 1usize)); + memset_intrinsic(bcx, false, ty, llresult, C_u8(ccx, 0), C_usize(ccx, 1)); } C_nil(ccx) } @@ -383,6 +383,18 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, _ => C_null(llret_ty) } } + + "align_offset" => { + // `ptr as usize` + let ptr_val = bcx.ptrtoint(llargs[0], bcx.ccx.isize_ty()); + // `ptr_val % align` + let offset = bcx.urem(ptr_val, llargs[1]); + let zero = C_null(bcx.ccx.isize_ty()); + // `offset == 0` + let is_zero = bcx.icmp(llvm::IntPredicate::IntEQ, offset, zero); + // `if offset == 0 { 0 } else { offset - align }` + bcx.select(is_zero, zero, bcx.sub(offset, llargs[1])) + } name if name.starts_with("simd_") => { generic_simd_intrinsic(bcx, name, callee_ty, @@ -676,7 +688,7 @@ fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let lltp_ty = type_of::type_of(ccx, tp_ty); let align = C_i32(ccx, ccx.align_of(tp_ty) as i32); let size = machine::llsize_of(ccx, lltp_ty); - let int_size = machine::llbitsize_of_real(ccx, ccx.int_type()); + let int_size = machine::llbitsize_of_real(ccx, ccx.isize_ty()); let operation = if allow_overlap { "memmove" @@ -810,7 +822,7 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, catchswitch.add_handler(cs, catchpad.llbb()); let tcx = ccx.tcx(); - let tydesc = match tcx.lang_items.msvc_try_filter() { + let tydesc = match tcx.lang_items().msvc_try_filter() { Some(did) => ::consts::get_static(ccx, did), None => bug!("msvc_try_filter not defined"), }; diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index 1758e33112..2b1c62c7f1 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -21,7 +21,6 @@ #![feature(box_patterns)] #![feature(box_syntax)] -#![feature(const_fn)] #![feature(custom_attribute)] #![allow(unused_attributes)] #![feature(i128_type)] @@ -31,9 +30,15 @@ #![feature(slice_patterns)] #![feature(conservative_impl_trait)] +#![cfg_attr(stage0, feature(const_fn))] +#![cfg_attr(not(stage0), feature(const_atomic_bool_new))] +#![cfg_attr(not(stage0), feature(const_once_new))] + use rustc::dep_graph::WorkProduct; use syntax_pos::symbol::Symbol; +#[macro_use] +extern crate bitflags; extern crate flate2; extern crate libc; extern crate owning_ref; @@ -45,9 +50,7 @@ extern crate rustc_incremental; extern crate rustc_llvm as llvm; extern crate rustc_platform_intrinsics as intrinsics; extern crate rustc_const_math; -#[macro_use] -#[no_link] -extern crate rustc_bitflags; +extern crate rustc_trans_utils; extern crate rustc_demangle; extern crate jobserver; extern crate num_cpus; @@ -58,16 +61,34 @@ extern crate syntax_pos; extern crate rustc_errors as errors; extern crate serialize; #[cfg(windows)] -extern crate gcc; // Used to locate MSVC, not gcc :) +extern crate cc; // Used to locate MSVC pub use base::trans_crate; -pub use back::symbol_names::provide; pub use metadata::LlvmMetadataLoader; pub use llvm_util::{init, target_features, print_version, print_passes, print, enable_llvm_debug}; +use std::any::Any; +use std::path::PathBuf; +use std::rc::Rc; +use std::sync::mpsc; + +use rustc::dep_graph::DepGraph; +use rustc::hir::def_id::CrateNum; +use rustc::middle::cstore::MetadataLoader; +use rustc::middle::cstore::{NativeLibrary, CrateSource, LibSource}; +use rustc::session::Session; +use rustc::session::config::{OutputFilenames, OutputType}; +use rustc::ty::maps::Providers; +use rustc::ty::{self, TyCtxt}; +use rustc::util::nodemap::{FxHashSet, FxHashMap}; + +mod diagnostics; + pub mod back { mod archive; + mod 
bytecode; + mod command; pub(crate) mod linker; pub mod link; mod lto; @@ -77,8 +98,6 @@ pub mod back { mod rpath; } -mod diagnostics; - mod abi; mod adt; mod allocator; @@ -128,6 +147,55 @@ mod type_; mod type_of; mod value; +pub struct LlvmTransCrate(()); + +impl LlvmTransCrate { + pub fn new() -> Self { + LlvmTransCrate(()) + } +} + +impl rustc_trans_utils::trans_crate::TransCrate for LlvmTransCrate { + type MetadataLoader = metadata::LlvmMetadataLoader; + type OngoingCrateTranslation = back::write::OngoingCrateTranslation; + type TranslatedCrate = CrateTranslation; + + fn metadata_loader() -> Box { + box metadata::LlvmMetadataLoader + } + + fn provide_local(providers: &mut ty::maps::Providers) { + provide_local(providers); + } + + fn provide_extern(providers: &mut ty::maps::Providers) { + provide_extern(providers); + } + + fn trans_crate<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + rx: mpsc::Receiver> + ) -> Self::OngoingCrateTranslation { + base::trans_crate(tcx, rx) + } + + fn join_trans( + trans: Self::OngoingCrateTranslation, + sess: &Session, + dep_graph: &DepGraph + ) -> Self::TranslatedCrate { + trans.join(sess, dep_graph) + } + + fn link_binary(sess: &Session, trans: &Self::TranslatedCrate, outputs: &OutputFilenames) { + back::link::link_binary(sess, trans, outputs, &trans.crate_name.as_str()); + } + + fn dump_incremental_data(trans: &Self::TranslatedCrate) { + back::write::dump_incremental_data(trans); + } +} + pub struct ModuleTranslation { /// The name of the module. When the crate may be saved between /// compilations, incremental compilation requires that name be @@ -135,12 +203,12 @@ pub struct ModuleTranslation { /// something unique to this crate (e.g., a module path) as well /// as the crate name and disambiguator. name: String, - symbol_name_hash: u64, + llmod_id: String, pub source: ModuleSource, pub kind: ModuleKind, } -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, PartialEq)] pub enum ModuleKind { Regular, Metadata, @@ -148,35 +216,31 @@ pub enum ModuleKind { } impl ModuleTranslation { - pub fn into_compiled_module(self, emit_obj: bool, emit_bc: bool) -> CompiledModule { + pub fn llvm(&self) -> Option<&ModuleLlvm> { + match self.source { + ModuleSource::Translated(ref llvm) => Some(llvm), + ModuleSource::Preexisting(_) => None, + } + } + + pub fn into_compiled_module(self, + emit_obj: bool, + emit_bc: bool, + outputs: &OutputFilenames) -> CompiledModule { let pre_existing = match self.source { ModuleSource::Preexisting(_) => true, ModuleSource::Translated(_) => false, }; + let object = outputs.temp_path(OutputType::Object, Some(&self.name)); CompiledModule { + llmod_id: self.llmod_id, name: self.name.clone(), kind: self.kind, - symbol_name_hash: self.symbol_name_hash, pre_existing, emit_obj, emit_bc, - } - } -} - -impl Drop for ModuleTranslation { - fn drop(&mut self) { - match self.source { - ModuleSource::Preexisting(_) => { - // Nothing to dispose. - }, - ModuleSource::Translated(llvm) => { - unsafe { - llvm::LLVMDisposeModule(llvm.llmod); - llvm::LLVMContextDispose(llvm.llcx); - } - }, + object, } } } @@ -184,14 +248,14 @@ impl Drop for ModuleTranslation { #[derive(Debug)] pub struct CompiledModule { pub name: String, + pub llmod_id: String, + pub object: PathBuf, pub kind: ModuleKind, - pub symbol_name_hash: u64, pub pre_existing: bool, pub emit_obj: bool, pub emit_bc: bool, } -#[derive(Clone)] pub enum ModuleSource { /// Copy the `.o` files or whatever from the incr. comp. directory. 
Preexisting(WorkProduct), @@ -200,14 +264,25 @@ pub enum ModuleSource { Translated(ModuleLlvm), } -#[derive(Copy, Clone, Debug)] +#[derive(Debug)] pub struct ModuleLlvm { llcx: llvm::ContextRef, pub llmod: llvm::ModuleRef, + tm: llvm::TargetMachineRef, } -unsafe impl Send for ModuleTranslation { } -unsafe impl Sync for ModuleTranslation { } +unsafe impl Send for ModuleLlvm { } +unsafe impl Sync for ModuleLlvm { } + +impl Drop for ModuleLlvm { + fn drop(&mut self) { + unsafe { + llvm::LLVMDisposeModule(self.llmod); + llvm::LLVMContextDispose(self.llcx); + llvm::LLVMRustDisposeTargetMachine(self.tm); + } + } +} pub struct CrateTranslation { pub crate_name: Symbol, @@ -216,7 +291,36 @@ pub struct CrateTranslation { pub link: rustc::middle::cstore::LinkMeta, pub metadata: rustc::middle::cstore::EncodedMetadata, windows_subsystem: Option, - linker_info: back::linker::LinkerInfo + linker_info: back::linker::LinkerInfo, + crate_info: CrateInfo, +} + +// Misc info we load from metadata to persist beyond the tcx +pub struct CrateInfo { + panic_runtime: Option, + compiler_builtins: Option, + profiler_runtime: Option, + sanitizer_runtime: Option, + is_no_builtins: FxHashSet, + native_libraries: FxHashMap>>, + crate_name: FxHashMap, + used_libraries: Rc>, + link_args: Rc>, + used_crate_source: FxHashMap>, + used_crates_static: Vec<(CrateNum, LibSource)>, + used_crates_dynamic: Vec<(CrateNum, LibSource)>, } __build_diagnostic_array! { librustc_trans, DIAGNOSTICS } + +pub fn provide_local(providers: &mut Providers) { + back::symbol_names::provide(providers); + back::symbol_export::provide_local(providers); + base::provide_local(providers); +} + +pub fn provide_extern(providers: &mut Providers) { + back::symbol_names::provide(providers); + back::symbol_export::provide_extern(providers); + base::provide_extern(providers); +} diff --git a/src/librustc_trans/machine.rs b/src/librustc_trans/machine.rs index cd31f02842..bc383abc7e 100644 --- a/src/librustc_trans/machine.rs +++ b/src/librustc_trans/machine.rs @@ -48,7 +48,7 @@ pub fn llsize_of(cx: &CrateContext, ty: Type) -> ValueRef { // there's no need for that contrivance. The instruction // selection DAG generator would flatten that GEP(1) node into a // constant of the type's alloc size, so let's save it some work. - return C_uint(cx, llsize_of_alloc(cx, ty)); + return C_usize(cx, llsize_of_alloc(cx, ty)); } // Returns the preferred alignment of the given type for the current target. diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index f5f9241785..88407947f0 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -18,7 +18,7 @@ use machine; use monomorphize; use type_::Type; use value::Value; -use rustc::ty; +use rustc::ty::{self, Ty}; #[derive(Copy, Clone, Debug)] pub struct VirtualIndex(usize); @@ -46,7 +46,7 @@ impl<'a, 'tcx> VirtualIndex { // Load the data pointer from the object. debug!("get_int({:?}, {:?})", Value(llvtable), self); - let llvtable = bcx.pointercast(llvtable, Type::int(bcx.ccx).ptr_to()); + let llvtable = bcx.pointercast(llvtable, Type::isize(bcx.ccx).ptr_to()); let ptr = bcx.load(bcx.gepi(llvtable, &[self.0]), None); // Vtable loads are invariant bcx.set_invariant_load(ptr); @@ -63,7 +63,7 @@ impl<'a, 'tcx> VirtualIndex { /// making an object `Foo` from a value of type `Foo`, then /// `trait_ref` would map `T:Trait`. 
pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - ty: ty::Ty<'tcx>, + ty: Ty<'tcx>, trait_ref: Option>) -> ValueRef { @@ -80,9 +80,9 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let nullptr = C_null(Type::nil(ccx).ptr_to()); let mut components: Vec<_> = [ - callee::get_fn(ccx, monomorphize::resolve_drop_in_place(ccx.shared(), ty)), - C_uint(ccx, ccx.size_of(ty)), - C_uint(ccx, ccx.align_of(ty)) + callee::get_fn(ccx, monomorphize::resolve_drop_in_place(ccx.tcx(), ty)), + C_usize(ccx, ccx.size_of(ty)), + C_usize(ccx, ccx.align_of(ty) as u64) ].iter().cloned().collect(); if let Some(trait_ref) = trait_ref { diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index 598af1cda9..1017ec6b3c 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -17,6 +17,7 @@ use rustc::middle::const_val::ConstVal; use rustc::mir::{self, Location, TerminatorKind, Literal}; use rustc::mir::visit::{Visitor, LvalueContext}; use rustc::mir::traversal; +use rustc::ty; use common; use super::MirContext; @@ -110,11 +111,11 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { mir::TerminatorKind::Call { func: mir::Operand::Constant(box mir::Constant { literal: Literal::Value { - value: ConstVal::Function(def_id, _), .. + value: &ty::Const { val: ConstVal::Function(def_id, _), .. }, .. }, .. }), ref args, .. - } if Some(def_id) == self.cx.ccx.tcx().lang_items.box_free_fn() => { + } if Some(def_id) == self.cx.ccx.tcx().lang_items().box_free_fn() => { // box_free(x) shares with `drop x` the property that it // is not guaranteed to be statically dominated by the // definition of x, so x must always be in an alloca. @@ -134,60 +135,61 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { location: Location) { debug!("visit_lvalue(lvalue={:?}, context={:?})", lvalue, context); - // Allow uses of projections of immediate pair fields. if let mir::Lvalue::Projection(ref proj) = *lvalue { - if let mir::Lvalue::Local(_) = proj.base { - let ty = proj.base.ty(self.cx.mir, self.cx.ccx.tcx()); - - let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx())); - if common::type_is_imm_pair(self.cx.ccx, ty) { + // Allow uses of projections of immediate pair fields. + if let LvalueContext::Consume = context { + if let mir::Lvalue::Local(_) = proj.base { if let mir::ProjectionElem::Field(..) = proj.elem { - if let LvalueContext::Consume = context { + let ty = proj.base.ty(self.cx.mir, self.cx.ccx.tcx()); + + let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx())); + if common::type_is_imm_pair(self.cx.ccx, ty) { return; } } } } - } - if let mir::Lvalue::Local(index) = *lvalue { - match context { - LvalueContext::Call => { - self.mark_assigned(index); - } + // A deref projection only reads the pointer, never needs the lvalue. + if let mir::ProjectionElem::Deref = proj.elem { + return self.visit_lvalue(&proj.base, LvalueContext::Consume, location); + } + } - LvalueContext::StorageLive | - LvalueContext::StorageDead | - LvalueContext::Validate | - LvalueContext::Inspect | - LvalueContext::Consume => {} + self.super_lvalue(lvalue, context, location); + } - LvalueContext::Store | - LvalueContext::Borrow { .. } | - LvalueContext::Projection(..) 
=> { - self.mark_as_lvalue(index); - } + fn visit_local(&mut self, + &index: &mir::Local, + context: LvalueContext<'tcx>, + _: Location) { + match context { + LvalueContext::Call => { + self.mark_assigned(index); + } - LvalueContext::Drop => { - let ty = lvalue.ty(self.cx.mir, self.cx.ccx.tcx()); - let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx())); + LvalueContext::StorageLive | + LvalueContext::StorageDead | + LvalueContext::Validate | + LvalueContext::Inspect | + LvalueContext::Consume => {} - // Only need the lvalue if we're actually dropping it. - if self.cx.ccx.shared().type_needs_drop(ty) { - self.mark_as_lvalue(index); - } - } + LvalueContext::Store | + LvalueContext::Borrow { .. } | + LvalueContext::Projection(..) => { + self.mark_as_lvalue(index); } - } - // A deref projection only reads the pointer, never needs the lvalue. - if let mir::Lvalue::Projection(ref proj) = *lvalue { - if let mir::ProjectionElem::Deref = proj.elem { - return self.visit_lvalue(&proj.base, LvalueContext::Consume, location); + LvalueContext::Drop => { + let ty = mir::Lvalue::Local(index).ty(self.cx.mir, self.cx.ccx.tcx()); + let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx())); + + // Only need the lvalue if we're actually dropping it. + if self.cx.ccx.shared().type_needs_drop(ty) { + self.mark_as_lvalue(index); + } } } - - self.super_lvalue(lvalue, context, location); } } @@ -216,8 +218,10 @@ pub fn cleanup_kinds<'a, 'tcx>(mir: &mir::Mir<'tcx>) -> IndexVec { + TerminatorKind::SwitchInt { .. } | + TerminatorKind::Yield { .. } => { /* nothing to do */ } TerminatorKind::Call { cleanup: unwind, .. } | diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 28fe1044e7..591aa97466 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -11,8 +11,9 @@ use llvm::{self, ValueRef, BasicBlockRef}; use rustc::middle::lang_items; use rustc::middle::const_val::{ConstEvalErr, ConstInt, ErrKind}; -use rustc::ty::{self, TypeFoldable}; +use rustc::ty::{self, Ty, TypeFoldable}; use rustc::ty::layout::{self, LayoutTyper}; +use rustc::traits; use rustc::mir; use abi::{Abi, FnType, ArgType}; use adt; @@ -119,7 +120,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { fn_ty: FnType<'tcx>, fn_ptr: ValueRef, llargs: &[ValueRef], - destination: Option<(ReturnDest, ty::Ty<'tcx>, mir::BasicBlock)>, + destination: Option<(ReturnDest, Ty<'tcx>, mir::BasicBlock)>, cleanup: Option | { if let Some(cleanup) = cleanup { @@ -265,7 +266,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::TerminatorKind::Drop { ref location, target, unwind } => { let ty = location.ty(self.mir, bcx.tcx()).to_ty(bcx.tcx()); let ty = self.monomorphize(&ty); - let drop_fn = monomorphize::resolve_drop_in_place(bcx.ccx.shared(), ty); + let drop_fn = monomorphize::resolve_drop_in_place(bcx.ccx.tcx(), ty); if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def { // we don't actually need to drop anything. @@ -330,7 +331,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { self.set_debug_loc(&bcx, terminator.source_info); // Get the location information. 
- let loc = bcx.sess().codemap().lookup_char_pos(span.lo); + let loc = bcx.sess().codemap().lookup_char_pos(span.lo()); let filename = Symbol::intern(&loc.file.name).as_str(); let filename = C_str_slice(bcx.ccx, filename); let line = C_u32(bcx.ccx, loc.line as u32); @@ -374,6 +375,27 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { vec![msg_file_line_col], Some(ErrKind::Math(err.clone()))) } + mir::AssertMessage::GeneratorResumedAfterReturn | + mir::AssertMessage::GeneratorResumedAfterPanic => { + let str = if let mir::AssertMessage::GeneratorResumedAfterReturn = *msg { + "generator resumed after completion" + } else { + "generator resumed after panicking" + }; + let msg_str = Symbol::intern(str).as_str(); + let msg_str = C_str_slice(bcx.ccx, msg_str); + let msg_file_line_col = C_struct(bcx.ccx, + &[msg_str, filename, line, col], + false); + let align = llalign_of_min(bcx.ccx, common::val_ty(msg_file_line_col)); + let msg_file_line_col = consts::addr_of(bcx.ccx, + msg_file_line_col, + align, + "panic_loc"); + (lang_items::PanicFnLangItem, + vec![msg_file_line_col], + None) + } }; // If we know we always panic, and the error message @@ -408,7 +430,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let (instance, mut llfn) = match callee.ty.sty { ty::TyFnDef(def_id, substs) => { - (Some(monomorphize::resolve(bcx.ccx.shared(), def_id, substs)), + (Some(ty::Instance::resolve(bcx.ccx.tcx(), + ty::ParamEnv::empty(traits::Reveal::All), + def_id, + substs).unwrap()), None) } ty::TyFnPtr(_) => { @@ -424,7 +449,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Handle intrinsics old trans wants Expr's for, ourselves. let intrinsic = match def { Some(ty::InstanceDef::Intrinsic(def_id)) - => Some(bcx.tcx().item_name(def_id).as_str()), + => Some(bcx.tcx().item_name(def_id)), _ => None }; let intrinsic = intrinsic.as_ref().map(|s| &s[..]); @@ -525,7 +550,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; let callee_ty = common::instance_ty( - bcx.ccx.shared(), instance.as_ref().unwrap()); + bcx.ccx.tcx(), instance.as_ref().unwrap()); trans_intrinsic_call(&bcx, callee_ty, &fn_ty, &llargs, dest, terminator.source_info.span); @@ -557,6 +582,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { destination.as_ref().map(|&(_, target)| (ret_dest, sig.output(), target)), cleanup); } + mir::TerminatorKind::GeneratorDrop | + mir::TerminatorKind::Yield { .. 
} => bug!("generator ops in trans"), } } diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 293e6462de..1b8e68f691 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -14,6 +14,7 @@ use rustc_const_math::ConstInt::*; use rustc_const_math::{ConstInt, ConstMathErr}; use rustc::hir::def_id::DefId; use rustc::infer::TransNormalize; +use rustc::traits; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; @@ -26,11 +27,10 @@ use abi::{self, Abi}; use callee; use builder::Builder; use common::{self, CrateContext, const_get_elt, val_ty}; -use common::{C_array, C_bool, C_bytes, C_integral, C_big_integral, C_u32, C_u64}; -use common::{C_null, C_struct, C_str_slice, C_undef, C_uint, C_vector, is_undef}; +use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_big_integral, C_u32, C_u64}; +use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector, is_undef}; use common::const_to_opt_u128; use consts; -use monomorphize; use type_of; use type_::Type; use value::Value; @@ -66,35 +66,29 @@ impl<'tcx> Const<'tcx> { -> Const<'tcx> { let tcx = ccx.tcx(); let (llval, ty) = match *ci { - I8(v) => (C_integral(Type::i8(ccx), v as u64, true), tcx.types.i8), - I16(v) => (C_integral(Type::i16(ccx), v as u64, true), tcx.types.i16), - I32(v) => (C_integral(Type::i32(ccx), v as u64, true), tcx.types.i32), - I64(v) => (C_integral(Type::i64(ccx), v as u64, true), tcx.types.i64), + I8(v) => (C_int(Type::i8(ccx), v as i64), tcx.types.i8), + I16(v) => (C_int(Type::i16(ccx), v as i64), tcx.types.i16), + I32(v) => (C_int(Type::i32(ccx), v as i64), tcx.types.i32), + I64(v) => (C_int(Type::i64(ccx), v as i64), tcx.types.i64), I128(v) => (C_big_integral(Type::i128(ccx), v as u128), tcx.types.i128), - Isize(v) => { - let i = v.as_i64(ccx.tcx().sess.target.int_type); - (C_integral(Type::int(ccx), i as u64, true), tcx.types.isize) - }, - U8(v) => (C_integral(Type::i8(ccx), v as u64, false), tcx.types.u8), - U16(v) => (C_integral(Type::i16(ccx), v as u64, false), tcx.types.u16), - U32(v) => (C_integral(Type::i32(ccx), v as u64, false), tcx.types.u32), - U64(v) => (C_integral(Type::i64(ccx), v, false), tcx.types.u64), + Isize(v) => (C_int(Type::isize(ccx), v.as_i64()), tcx.types.isize), + U8(v) => (C_uint(Type::i8(ccx), v as u64), tcx.types.u8), + U16(v) => (C_uint(Type::i16(ccx), v as u64), tcx.types.u16), + U32(v) => (C_uint(Type::i32(ccx), v as u64), tcx.types.u32), + U64(v) => (C_uint(Type::i64(ccx), v), tcx.types.u64), U128(v) => (C_big_integral(Type::i128(ccx), v), tcx.types.u128), - Usize(v) => { - let u = v.as_u64(ccx.tcx().sess.target.uint_type); - (C_integral(Type::int(ccx), u, false), tcx.types.usize) - }, + Usize(v) => (C_uint(Type::isize(ccx), v.as_u64()), tcx.types.usize), }; Const { llval: llval, ty: ty } } /// Translate ConstVal into a LLVM constant value. 
pub fn from_constval<'a>(ccx: &CrateContext<'a, 'tcx>, - cv: ConstVal, + cv: &ConstVal, ty: Ty<'tcx>) -> Const<'tcx> { let llty = type_of::type_of(ccx, ty); - let val = match cv { + let val = match *cv { ConstVal::Float(v) => { let bits = match v.ty { ast::FloatTy::F32 => C_u32(ccx, v.bits as u32), @@ -105,12 +99,12 @@ impl<'tcx> Const<'tcx> { ConstVal::Bool(v) => C_bool(ccx, v), ConstVal::Integral(ref i) => return Const::from_constint(ccx, i), ConstVal::Str(ref v) => C_str_slice(ccx, v.clone()), - ConstVal::ByteStr(ref v) => consts::addr_of(ccx, C_bytes(ccx, v), 1, "byte_str"), - ConstVal::Char(c) => C_integral(Type::char(ccx), c as u64, false), + ConstVal::ByteStr(v) => consts::addr_of(ccx, C_bytes(ccx, v.data), 1, "byte_str"), + ConstVal::Char(c) => C_uint(Type::char(ccx), c as u64), ConstVal::Function(..) => C_null(type_of::type_of(ccx, ty)), ConstVal::Variant(_) | - ConstVal::Struct(_) | ConstVal::Tuple(_) | - ConstVal::Array(..) | ConstVal::Repeat(..) => { + ConstVal::Aggregate(..) | + ConstVal::Unevaluated(..) => { bug!("MIR must not use `{:?}` (aggregates are expanded to MIR rvalues)", cv) } }; @@ -207,7 +201,9 @@ impl<'tcx> ConstLvalue<'tcx> { pub fn len<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { match self.ty.sty { - ty::TyArray(_, n) => C_uint(ccx, n), + ty::TyArray(_, n) => { + C_usize(ccx, n.val.to_const_int().unwrap().to_u64().unwrap()) + } ty::TySlice(_) | ty::TyStr => { assert!(self.llextra != ptr::null_mut()); self.llextra @@ -265,7 +261,10 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { substs: &'tcx Substs<'tcx>, args: IndexVec, ConstEvalErr<'tcx>>>) -> Result, ConstEvalErr<'tcx>> { - let instance = monomorphize::resolve(ccx.shared(), def_id, substs); + let instance = ty::Instance::resolve(ccx.tcx(), + ty::ParamEnv::empty(traits::Reveal::All), + def_id, + substs).unwrap(); let mir = ccx.tcx().instance_mir(instance.def); MirConstContext::new(ccx, &mir, instance.substs, args).trans() } @@ -336,6 +335,9 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { mir::AssertMessage::Math(ref err) => { ErrKind::Math(err.clone()) } + mir::AssertMessage::GeneratorResumedAfterReturn | + mir::AssertMessage::GeneratorResumedAfterPanic => + span_bug!(span, "{:?} should not appear in constants?", msg), }; let err = ConstEvalErr { span: span, kind: err }; @@ -362,15 +364,15 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } if let Some((ref dest, target)) = *destination { let result = if fn_ty.fn_sig(tcx).abi() == Abi::RustIntrinsic { - match &tcx.item_name(def_id).as_str()[..] { + match &tcx.item_name(def_id)[..] 
{ "size_of" => { - let llval = C_uint(self.ccx, + let llval = C_usize(self.ccx, self.ccx.size_of(substs.type_at(0))); Ok(Const::new(llval, tcx.types.usize)) } "min_align_of" => { - let llval = C_uint(self.ccx, - self.ccx.align_of(substs.type_at(0))); + let llval = C_usize(self.ccx, + self.ccx.align_of(substs.type_at(0)) as u64); Ok(Const::new(llval, tcx.types.usize)) } _ => span_bug!(span, "{:?} in constant", terminator.kind) @@ -468,7 +470,8 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { }; (Base::Value(llprojected), llextra) } - mir::ProjectionElem::Index(ref index) => { + mir::ProjectionElem::Index(index) => { + let index = &mir::Operand::Consume(mir::Lvalue::Local(index)); let llindex = self.const_operand(index, span)?.llval; let iv = if let Some(iv) = common::const_to_opt_u128(llindex, false) { @@ -510,16 +513,17 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { mir::Operand::Constant(ref constant) => { let ty = self.monomorphize(&constant.ty); match constant.literal.clone() { - mir::Literal::Item { def_id, substs } => { - let substs = self.monomorphize(&substs); - MirConstContext::trans_def(self.ccx, def_id, substs, IndexVec::new()) - } mir::Literal::Promoted { index } => { let mir = &self.mir.promoted[index]; MirConstContext::new(self.ccx, mir, self.substs, IndexVec::new()).trans() } mir::Literal::Value { value } => { - Ok(Const::from_constval(self.ccx, value, ty)) + if let ConstVal::Unevaluated(def_id, substs) = value.val { + let substs = self.monomorphize(&substs); + MirConstContext::trans_def(self.ccx, def_id, substs, IndexVec::new()) + } else { + Ok(Const::from_constval(self.ccx, &value.val, ty)) + } } } } @@ -553,9 +557,10 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let val = match *rvalue { mir::Rvalue::Use(ref operand) => self.const_operand(operand, span)?, - mir::Rvalue::Repeat(ref elem, ref count) => { + mir::Rvalue::Repeat(ref elem, count) => { let elem = self.const_operand(elem, span)?; - let size = count.as_u64(tcx.sess.target.uint_type); + let size = count.as_u64(); + assert_eq!(size as usize as u64, size); let fields = vec![elem.llval; size as usize]; self.const_array(dest_ty, &fields) } @@ -579,6 +584,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } mir::AggregateKind::Adt(..) | mir::AggregateKind::Closure(..) | + mir::AggregateKind::Generator(..) 
| mir::AggregateKind::Tuple => { Const::new(trans_const(self.ccx, dest_ty, kind, &fields), dest_ty) } @@ -605,7 +611,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { match operand.ty.sty { ty::TyClosure(def_id, substs) => { // Get the def_id for FnOnce::call_once - let fn_once = tcx.lang_items.fn_once_trait().unwrap(); + let fn_once = tcx.lang_items().fn_once_trait().unwrap(); let call_once = tcx .global_tcx().associated_items(fn_once) .find(|it| it.kind == ty::AssociatedKind::Method) @@ -831,7 +837,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { assert!(self.ccx.shared().type_is_sized(ty)); - let llval = C_uint(self.ccx, self.ccx.size_of(ty)); + let llval = C_usize(self.ccx, self.ccx.size_of(ty)); Const::new(llval, tcx.types.usize) } @@ -849,10 +855,10 @@ fn to_const_int(value: ValueRef, t: Ty, tcx: TyCtxt) -> Option { match t.sty { ty::TyInt(int_type) => const_to_opt_u128(value, true) .and_then(|input| ConstInt::new_signed(input as i128, int_type, - tcx.sess.target.int_type)), + tcx.sess.target.isize_ty)), ty::TyUint(uint_type) => const_to_opt_u128(value, false) .and_then(|input| ConstInt::new_unsigned(input, uint_type, - tcx.sess.target.uint_type)), + tcx.sess.target.usize_ty)), _ => None } @@ -958,16 +964,17 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { debug!("trans_constant({:?})", constant); let ty = self.monomorphize(&constant.ty); let result = match constant.literal.clone() { - mir::Literal::Item { def_id, substs } => { - let substs = self.monomorphize(&substs); - MirConstContext::trans_def(bcx.ccx, def_id, substs, IndexVec::new()) - } mir::Literal::Promoted { index } => { let mir = &self.mir.promoted[index]; MirConstContext::new(bcx.ccx, mir, self.param_substs, IndexVec::new()).trans() } mir::Literal::Value { value } => { - Ok(Const::from_constval(bcx.ccx, value, ty)) + if let ConstVal::Unevaluated(def_id, substs) = value.val { + let substs = self.monomorphize(&substs); + MirConstContext::trans_def(bcx.ccx, def_id, substs, IndexVec::new()) + } else { + Ok(Const::from_constval(bcx.ccx, &value.val, ty)) + } } }; @@ -1033,11 +1040,11 @@ fn trans_const<'a, 'tcx>( }; assert_eq!(vals.len(), 0); adt::assert_discr_in_range(min, max, discr); - C_integral(Type::from_integer(ccx, d), discr, true) + C_int(Type::from_integer(ccx, d), discr as i64) } layout::General { discr: d, ref variants, .. 
} => { let variant = &variants[variant_index]; - let lldiscr = C_integral(Type::from_integer(ccx, d), variant_index as u64, true); + let lldiscr = C_int(Type::from_integer(ccx, d), variant_index as i64); let mut vals_with_discr = vec![lldiscr]; vals_with_discr.extend_from_slice(vals); let mut contents = build_const_struct(ccx, &variant, &vals_with_discr[..]); diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 89c76ccdd2..6799e52904 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -16,7 +16,7 @@ use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; use adt; use builder::Builder; -use common::{self, CrateContext, C_uint}; +use common::{self, CrateContext, C_usize}; use consts; use machine; use type_of; @@ -106,7 +106,9 @@ impl<'a, 'tcx> LvalueRef<'tcx> { pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { let ty = self.ty.to_ty(ccx.tcx()); match ty.sty { - ty::TyArray(_, n) => common::C_uint(ccx, n), + ty::TyArray(_, n) => { + common::C_usize(ccx, n.val.to_const_int().unwrap().to_u64().unwrap()) + } ty::TySlice(_) | ty::TyStr => { assert!(self.llextra != ptr::null_mut()); self.llextra @@ -186,7 +188,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let offset = st.offsets[ix].bytes(); - let unaligned_offset = C_uint(bcx.ccx, offset); + let unaligned_offset = C_usize(bcx.ccx, offset); // Get the alignment of the field let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta); @@ -197,7 +199,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // (unaligned offset + (align - 1)) & -align // Calculate offset - let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx, 1u64)); + let align_sub_1 = bcx.sub(align, C_usize(bcx.ccx, 1)); let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1), bcx.neg(align)); @@ -276,7 +278,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // Slices already point to the array element type. bcx.inbounds_gep(self.llval, &[llindex]) } else { - let zero = common::C_uint(bcx.ccx, 0u64); + let zero = common::C_usize(bcx.ccx, 0); bcx.inbounds_gep(self.llval, &[zero, llindex]) } } @@ -333,7 +335,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; (tr_base.trans_field_ptr(bcx, field.index()), llextra) } - mir::ProjectionElem::Index(ref index) => { + mir::ProjectionElem::Index(index) => { + let index = &mir::Operand::Consume(mir::Lvalue::Local(index)); let index = self.trans_operand(bcx, index); let llindex = self.prepare_index(bcx, index.immediate()); ((tr_base.project_index(bcx, llindex), align), ptr::null_mut()) @@ -341,19 +344,19 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => { - let lloffset = C_uint(bcx.ccx, offset); + let lloffset = C_usize(bcx.ccx, offset as u64); ((tr_base.project_index(bcx, lloffset), align), ptr::null_mut()) } mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => { - let lloffset = C_uint(bcx.ccx, offset); + let lloffset = C_usize(bcx.ccx, offset as u64); let lllen = tr_base.len(bcx.ccx); let llindex = bcx.sub(lllen, lloffset); ((tr_base.project_index(bcx, llindex), align), ptr::null_mut()) } mir::ProjectionElem::Subslice { from, to } => { - let llbase = tr_base.project_index(bcx, C_uint(bcx.ccx, from)); + let llbase = tr_base.project_index(bcx, C_usize(bcx.ccx, from as u64)); let base_ty = tr_base.ty.to_ty(bcx.tcx()); match base_ty.sty { @@ -368,7 +371,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { ty::TySlice(..) 
=> { assert!(tr_base.llextra != ptr::null_mut()); let lllen = bcx.sub(tr_base.llextra, - C_uint(bcx.ccx, from+to)); + C_usize(bcx.ccx, (from as u64)+(to as u64))); ((llbase, align), lllen) } _ => bug!("unexpected type {:?} in Subslice", base_ty) @@ -396,11 +399,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { /// nmatsakis: is this still necessary? Not sure. fn prepare_index(&mut self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> ValueRef { let index_size = machine::llbitsize_of_real(bcx.ccx, common::val_ty(llindex)); - let int_size = machine::llbitsize_of_real(bcx.ccx, bcx.ccx.int_type()); + let int_size = machine::llbitsize_of_real(bcx.ccx, bcx.ccx.isize_ty()); if index_size < int_size { - bcx.zext(llindex, bcx.ccx.int_type()) + bcx.zext(llindex, bcx.ccx.isize_ty()) } else if index_size > int_size { - bcx.trunc(llindex, bcx.ccx.int_type()) + bcx.trunc(llindex, bcx.ccx.isize_ty()) } else { llindex } diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index a67fa07032..5206ad74e2 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -129,23 +129,23 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // In order to have a good line stepping behavior in debugger, we overwrite debug // locations of macro expansions with that of the outermost expansion site // (unless the crate is being compiled with `-Z debug-macros`). - if source_info.span.ctxt == NO_EXPANSION || + if source_info.span.ctxt() == NO_EXPANSION || self.ccx.sess().opts.debugging_opts.debug_macros { - let scope = self.scope_metadata_for_loc(source_info.scope, source_info.span.lo); + let scope = self.scope_metadata_for_loc(source_info.scope, source_info.span.lo()); (scope, source_info.span) } else { // Walk up the macro expansion chain until we reach a non-expanded span. // We also stop at the function body level because no line stepping can occur // at the level above that. let mut span = source_info.span; - while span.ctxt != NO_EXPANSION && span.ctxt != self.mir.span.ctxt { - if let Some(info) = span.ctxt.outer().expn_info() { + while span.ctxt() != NO_EXPANSION && span.ctxt() != self.mir.span.ctxt() { + if let Some(info) = span.ctxt().outer().expn_info() { span = info.call_site; } else { break; } } - let scope = self.scope_metadata_for_loc(source_info.scope, span.lo); + let scope = self.scope_metadata_for_loc(source_info.scope, span.lo()); // Use span of the outermost expansion site, while keeping the original lexical scope. (scope, span) } @@ -524,15 +524,15 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, } // Or is it the closure environment? 
- let (closure_ty, env_ref) = if let ty::TyRef(_, mt) = arg_ty.sty { - (mt.ty, true) - } else { - (arg_ty, false) + let (closure_ty, env_ref) = match arg_ty.sty { + ty::TyRef(_, mt) | ty::TyRawPtr(mt) => (mt.ty, true), + _ => (arg_ty, false) }; - let upvar_tys = if let ty::TyClosure(def_id, substs) = closure_ty.sty { - substs.upvar_tys(def_id, tcx) - } else { - bug!("upvar_decls with non-closure arg0 type `{}`", closure_ty); + + let upvar_tys = match closure_ty.sty { + ty::TyClosure(def_id, substs) | + ty::TyGenerator(def_id, substs, _) => substs.upvar_tys(def_id, tcx), + _ => bug!("upvar_decls with non-closure arg0 type `{}`", closure_ty) }; // Store the pointer to closure data in an alloca for debuginfo diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 096f43e44a..822431eba4 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -19,8 +19,7 @@ use rustc::middle::lang_items::ExchangeMallocFnLangItem; use base; use builder::Builder; use callee; -use common::{self, val_ty, C_bool, C_null, C_uint}; -use common::{C_integral, C_i32}; +use common::{self, val_ty, C_bool, C_i32, C_null, C_usize, C_uint}; use adt; use machine; use monomorphize; @@ -92,7 +91,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { bcx } - mir::Rvalue::Repeat(ref elem, ref count) => { + mir::Rvalue::Repeat(ref elem, count) => { let dest_ty = dest.ty.to_ty(bcx.tcx()); // No need to inizialize memory of a zero-sized slice @@ -101,8 +100,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } let tr_elem = self.trans_operand(&bcx, elem); - let size = count.as_u64(bcx.tcx().sess.target.uint_type); - let size = C_uint(bcx.ccx, size); + let size = count.as_u64(); + let size = C_usize(bcx.ccx, size); let base = base::get_dataptr(&bcx, dest.llval); let align = dest.alignment.to_align(); @@ -113,7 +112,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let align = C_i32(bcx.ccx, align as i32); let ty = type_of::type_of(bcx.ccx, dest_ty); let size = machine::llsize_of(bcx.ccx, ty); - let fill = C_integral(Type::i8(bcx.ccx), 0, false); + let fill = C_uint(Type::i8(bcx.ccx), 0); base::call_memset(&bcx, base, fill, size, align, false); return bcx; } @@ -223,7 +222,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { match operand.ty.sty { ty::TyClosure(def_id, substs) => { let instance = monomorphize::resolve_closure( - bcx.ccx.shared(), def_id, substs, ty::ClosureKind::FnOnce); + bcx.ccx.tcx(), def_id, substs, ty::ClosureKind::FnOnce); OperandValue::Immediate(callee::get_fn(bcx.ccx, instance)) } _ => { @@ -301,7 +300,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { base::call_assume(&bcx, bcx.icmp( llvm::IntULE, llval, - C_integral(common::val_ty(llval), max, false) + C_uint(common::val_ty(llval), max) )); } @@ -464,7 +463,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { assert!(bcx.ccx.shared().type_is_sized(ty)); - let val = C_uint(bcx.ccx, bcx.ccx.size_of(ty)); + let val = C_usize(bcx.ccx, bcx.ccx.size_of(ty)); let tcx = bcx.tcx(); (bcx, OperandRef { val: OperandValue::Immediate(val), @@ -477,12 +476,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let llty = type_of::type_of(bcx.ccx, content_ty); let llsize = machine::llsize_of(bcx.ccx, llty); let align = bcx.ccx.align_of(content_ty); - let llalign = C_uint(bcx.ccx, align); + let llalign = C_usize(bcx.ccx, align as u64); let llty_ptr = llty.ptr_to(); let box_ty = bcx.tcx().mk_box(content_ty); // Allocate space: - let def_id = match bcx.tcx().lang_items.require(ExchangeMallocFnLangItem) { + let def_id = match 
bcx.tcx().lang_items().require(ExchangeMallocFnLangItem) { Ok(id) => id, Err(s) => { bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s)); @@ -522,7 +521,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if let LocalRef::Operand(Some(op)) = self.locals[index] { if common::type_is_zero_size(bcx.ccx, op.ty) { if let ty::TyArray(_, n) = op.ty.sty { - return common::C_uint(bcx.ccx, n); + let n = n.val.to_const_int().unwrap().to_u64().unwrap(); + return common::C_usize(bcx.ccx, n); } } } diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index 52dfc8dc4d..bbf661ae9a 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -67,11 +67,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { variant_index as u64); bcx } - mir::StatementKind::StorageLive(ref lvalue) => { - self.trans_storage_liveness(bcx, lvalue, base::Lifetime::Start) + mir::StatementKind::StorageLive(local) => { + self.trans_storage_liveness(bcx, local, base::Lifetime::Start) } - mir::StatementKind::StorageDead(ref lvalue) => { - self.trans_storage_liveness(bcx, lvalue, base::Lifetime::End) + mir::StatementKind::StorageDead(local) => { + self.trans_storage_liveness(bcx, local, base::Lifetime::End) } mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => { let outputs = outputs.iter().map(|output| { @@ -94,13 +94,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { fn trans_storage_liveness(&self, bcx: Builder<'a, 'tcx>, - lvalue: &mir::Lvalue<'tcx>, + index: mir::Local, intrinsic: base::Lifetime) -> Builder<'a, 'tcx> { - if let mir::Lvalue::Local(index) = *lvalue { - if let LocalRef::Lvalue(tr_lval) = self.locals[index] { - intrinsic.call(&bcx, tr_lval.llval); - } + if let LocalRef::Lvalue(tr_lval) = self.locals[index] { + intrinsic.call(&bcx, tr_lval.llval); } bcx } diff --git a/src/librustc_trans/monomorphize.rs b/src/librustc_trans/monomorphize.rs index b0d8be23b0..cd2a881451 100644 --- a/src/librustc_trans/monomorphize.rs +++ b/src/librustc_trans/monomorphize.rs @@ -8,10 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use abi::Abi; -use common::*; -use glue; - use rustc::hir::def_id::DefId; use rustc::middle::lang_items::DropInPlaceFnLangItem; use rustc::traits; @@ -31,7 +27,7 @@ fn fn_once_adapter_instance<'a, 'tcx>( debug!("fn_once_adapter_shim({:?}, {:?})", closure_did, substs); - let fn_once = tcx.lang_items.fn_once_trait().unwrap(); + let fn_once = tcx.lang_items().fn_once_trait().unwrap(); let call_once = tcx.associated_items(fn_once) .find(|it| it.kind == ty::AssociatedKind::Method) .unwrap().def_id; @@ -86,145 +82,43 @@ fn needs_fn_once_adapter_shim(actual_closure_kind: ty::ClosureKind, } pub fn resolve_closure<'a, 'tcx> ( - scx: &SharedCrateContext<'a, 'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, substs: ty::ClosureSubsts<'tcx>, requested_kind: ty::ClosureKind) -> Instance<'tcx> { - let actual_kind = scx.tcx().closure_kind(def_id); + let actual_kind = tcx.closure_kind(def_id); match needs_fn_once_adapter_shim(actual_kind, requested_kind) { - Ok(true) => fn_once_adapter_instance(scx.tcx(), def_id, substs), + Ok(true) => fn_once_adapter_instance(tcx, def_id, substs), _ => Instance::new(def_id, substs.substs) } } -fn resolve_associated_item<'a, 'tcx>( - scx: &SharedCrateContext<'a, 'tcx>, - trait_item: &ty::AssociatedItem, - trait_id: DefId, - rcvr_substs: &'tcx Substs<'tcx> -) -> Instance<'tcx> { - let tcx = scx.tcx(); - let def_id = trait_item.def_id; - debug!("resolve_associated_item(trait_item={:?}, \ - trait_id={:?}, \ - rcvr_substs={:?})", - def_id, trait_id, rcvr_substs); - - let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs); - let vtbl = tcx.trans_fulfill_obligation(DUMMY_SP, ty::Binder(trait_ref)); - - // Now that we know which impl is being used, we can dispatch to - // the actual function: - match vtbl { - traits::VtableImpl(impl_data) => { - let (def_id, substs) = traits::find_associated_item( - tcx, trait_item, rcvr_substs, &impl_data); - let substs = tcx.erase_regions(&substs); - ty::Instance::new(def_id, substs) - } - traits::VtableClosure(closure_data) => { - let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap(); - resolve_closure(scx, closure_data.closure_def_id, closure_data.substs, - trait_closure_kind) - } - traits::VtableFnPointer(ref data) => { - Instance { - def: ty::InstanceDef::FnPtrShim(trait_item.def_id, data.fn_ty), - substs: rcvr_substs - } - } - traits::VtableObject(ref data) => { - let index = tcx.get_vtable_index_of_object_method(data, def_id); - Instance { - def: ty::InstanceDef::Virtual(def_id, index), - substs: rcvr_substs - } - } - traits::VtableBuiltin(..) if Some(trait_id) == tcx.lang_items.clone_trait() => { - Instance { - def: ty::InstanceDef::CloneShim(def_id, trait_ref.self_ty()), - substs: rcvr_substs - } - } - _ => { - bug!("static call to invalid vtable: {:?}", vtbl) - } - } -} - -/// The point where linking happens. Resolve a (def_id, substs) -/// pair to an instance. -pub fn resolve<'a, 'tcx>( - scx: &SharedCrateContext<'a, 'tcx>, - def_id: DefId, - substs: &'tcx Substs<'tcx> -) -> Instance<'tcx> { - debug!("resolve(def_id={:?}, substs={:?})", - def_id, substs); - let result = if let Some(trait_def_id) = scx.tcx().trait_of_item(def_id) { - debug!(" => associated item, attempting to find impl"); - let item = scx.tcx().associated_item(def_id); - resolve_associated_item(scx, &item, trait_def_id, substs) - } else { - let item_type = def_ty(scx, def_id, substs); - let def = match item_type.sty { - ty::TyFnDef(..) 
if { - let f = item_type.fn_sig(scx.tcx()); - f.abi() == Abi::RustIntrinsic || - f.abi() == Abi::PlatformIntrinsic - } => - { - debug!(" => intrinsic"); - ty::InstanceDef::Intrinsic(def_id) - } - _ => { - if Some(def_id) == scx.tcx().lang_items.drop_in_place_fn() { - let ty = substs.type_at(0); - if glue::needs_drop_glue(scx, ty) { - debug!(" => nontrivial drop glue"); - ty::InstanceDef::DropGlue(def_id, Some(ty)) - } else { - debug!(" => trivial drop glue"); - ty::InstanceDef::DropGlue(def_id, None) - } - } else { - debug!(" => free item"); - ty::InstanceDef::Item(def_id) - } - } - }; - Instance { def, substs } - }; - debug!("resolve(def_id={:?}, substs={:?}) = {}", - def_id, substs, result); - result -} - pub fn resolve_drop_in_place<'a, 'tcx>( - scx: &SharedCrateContext<'a, 'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> ty::Instance<'tcx> { - let def_id = scx.tcx().require_lang_item(DropInPlaceFnLangItem); - let substs = scx.tcx().intern_substs(&[Kind::from(ty)]); - resolve(scx, def_id, substs) + let def_id = tcx.require_lang_item(DropInPlaceFnLangItem); + let substs = tcx.intern_substs(&[Kind::from(ty)]); + Instance::resolve(tcx, ty::ParamEnv::empty(traits::Reveal::All), def_id, substs).unwrap() } -pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx>, - source_ty: Ty<'tcx>, - target_ty: Ty<'tcx>) - -> CustomCoerceUnsized { +pub fn custom_coerce_unsize_info<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + source_ty: Ty<'tcx>, + target_ty: Ty<'tcx>) + -> CustomCoerceUnsized { let trait_ref = ty::Binder(ty::TraitRef { - def_id: scx.tcx().lang_items.coerce_unsized_trait().unwrap(), - substs: scx.tcx().mk_substs_trait(source_ty, &[target_ty]) + def_id: tcx.lang_items().coerce_unsized_trait().unwrap(), + substs: tcx.mk_substs_trait(source_ty, &[target_ty]) }); - match scx.tcx().trans_fulfill_obligation(DUMMY_SP, trait_ref) { + match tcx.trans_fulfill_obligation( + DUMMY_SP, ty::ParamEnv::empty(traits::Reveal::All), trait_ref) { traits::VtableImpl(traits::VtableImplData { impl_def_id, .. }) => { - scx.tcx().coerce_unsized_info(impl_def_id).custom_kind.unwrap() + tcx.coerce_unsized_info(impl_def_id).custom_kind.unwrap() } vtable => { bug!("invalid CoerceUnsized vtable: {:?}", vtable); diff --git a/src/librustc_trans/partitioning.rs b/src/librustc_trans/partitioning.rs index 26256fa78d..386806e4c9 100644 --- a/src/librustc_trans/partitioning.rs +++ b/src/librustc_trans/partitioning.rs @@ -102,24 +102,21 @@ //! source-level module, functions from the same module will be available for //! inlining, even when they are not marked #[inline]. -use back::symbol_export::ExportedSymbols; use collector::InliningMap; use common; -use context::SharedCrateContext; -use llvm; use rustc::dep_graph::{DepNode, WorkProductId}; use rustc::hir::def_id::DefId; use rustc::hir::map::DefPathData; -use rustc::session::config::NUMBERED_CODEGEN_UNIT_MARKER; +use rustc::middle::trans::{Linkage, Visibility}; use rustc::ty::{self, TyCtxt, InstanceDef}; use rustc::ty::item_path::characteristic_def_id_of_type; use rustc::util::nodemap::{FxHashMap, FxHashSet}; -use rustc_incremental::IchHasher; use std::collections::hash_map::Entry; -use std::hash::Hash; use syntax::ast::NodeId; use syntax::symbol::{Symbol, InternedString}; -use trans_item::{TransItem, InstantiationMode}; +use trans_item::{TransItem, TransItemExt, InstantiationMode}; + +pub use rustc::middle::trans::CodegenUnit; pub enum PartitioningStrategy { /// Generate one codegen unit per source-level module. 
@@ -129,69 +126,35 @@ pub enum PartitioningStrategy { FixedUnitCount(usize) } -pub struct CodegenUnit<'tcx> { - /// A name for this CGU. Incremental compilation requires that - /// name be unique amongst **all** crates. Therefore, it should - /// contain something unique to this crate (e.g., a module path) - /// as well as the crate name and disambiguator. - name: InternedString, - - items: FxHashMap, (llvm::Linkage, llvm::Visibility)>, -} - -impl<'tcx> CodegenUnit<'tcx> { - pub fn new(name: InternedString, - items: FxHashMap, (llvm::Linkage, llvm::Visibility)>) - -> Self { - CodegenUnit { - name, - items, - } - } - - pub fn empty(name: InternedString) -> Self { - Self::new(name, FxHashMap()) - } +pub trait CodegenUnitExt<'tcx> { + fn as_codegen_unit(&self) -> &CodegenUnit<'tcx>; - pub fn contains_item(&self, item: &TransItem<'tcx>) -> bool { - self.items.contains_key(item) + fn contains_item(&self, item: &TransItem<'tcx>) -> bool { + self.items().contains_key(item) } - pub fn name(&self) -> &str { - &self.name + fn name<'a>(&'a self) -> &'a InternedString + where 'tcx: 'a, + { + &self.as_codegen_unit().name() } - pub fn items(&self) -> &FxHashMap, (llvm::Linkage, llvm::Visibility)> { - &self.items + fn items(&self) -> &FxHashMap, (Linkage, Visibility)> { + &self.as_codegen_unit().items() } - pub fn work_product_id(&self) -> WorkProductId { + fn work_product_id(&self) -> WorkProductId { WorkProductId::from_cgu_name(self.name()) } - pub fn work_product_dep_node(&self) -> DepNode { + fn work_product_dep_node(&self) -> DepNode { self.work_product_id().to_dep_node() } - pub fn compute_symbol_name_hash<'a>(&self, - scx: &SharedCrateContext<'a, 'tcx>) - -> u64 { - let mut state = IchHasher::new(); - let all_items = self.items_in_deterministic_order(scx.tcx()); - for (item, (linkage, visibility)) in all_items { - let symbol_name = item.symbol_name(scx.tcx()); - symbol_name.len().hash(&mut state); - symbol_name.hash(&mut state); - linkage.hash(&mut state); - visibility.hash(&mut state); - } - state.finish().to_smaller_hash() - } - - pub fn items_in_deterministic_order<'a>(&self, - tcx: TyCtxt<'a, 'tcx, 'tcx>) - -> Vec<(TransItem<'tcx>, - (llvm::Linkage, llvm::Visibility))> { + fn items_in_deterministic_order<'a>(&self, + tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> Vec<(TransItem<'tcx>, + (Linkage, Visibility))> { // The codegen tests rely on items being process in the same order as // they appear in the file, so for local items, we sort by node_id first #[derive(PartialEq, Eq, PartialOrd, Ord)] @@ -200,16 +163,33 @@ impl<'tcx> CodegenUnit<'tcx> { fn item_sort_key<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item: TransItem<'tcx>) -> ItemSortKey { ItemSortKey(match item { - TransItem::Fn(instance) => { - tcx.hir.as_local_node_id(instance.def_id()) + TransItem::Fn(ref instance) => { + match instance.def { + // We only want to take NodeIds of user-defined + // instances into account. The others don't matter for + // the codegen tests and can even make item order + // unstable. + InstanceDef::Item(def_id) => { + tcx.hir.as_local_node_id(def_id) + } + InstanceDef::Intrinsic(..) | + InstanceDef::FnPtrShim(..) | + InstanceDef::Virtual(..) | + InstanceDef::ClosureOnceShim { .. } | + InstanceDef::DropGlue(..) | + InstanceDef::CloneShim(..) 
=> { + None + } + } } - TransItem::Static(node_id) | TransItem::GlobalAsm(node_id) => { + TransItem::Static(node_id) | + TransItem::GlobalAsm(node_id) => { Some(node_id) } }, item.symbol_name(tcx)) } - let items: Vec<_> = self.items.iter().map(|(&i, &l)| (i, l)).collect(); + let items: Vec<_> = self.items().iter().map(|(&i, &l)| (i, l)).collect(); let mut items : Vec<_> = items.iter() .map(|il| (il, item_sort_key(tcx, il.0))).collect(); items.sort_by(|&(_, ref key1), &(_, ref key2)| key1.cmp(key2)); @@ -217,25 +197,26 @@ impl<'tcx> CodegenUnit<'tcx> { } } +impl<'tcx> CodegenUnitExt<'tcx> for CodegenUnit<'tcx> { + fn as_codegen_unit(&self) -> &CodegenUnit<'tcx> { + self + } +} // Anything we can't find a proper codegen unit for goes into this. const FALLBACK_CODEGEN_UNIT: &'static str = "__rustc_fallback_codegen_unit"; -pub fn partition<'a, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>, +pub fn partition<'a, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trans_items: I, strategy: PartitioningStrategy, - inlining_map: &InliningMap<'tcx>, - exported_symbols: &ExportedSymbols) + inlining_map: &InliningMap<'tcx>) -> Vec> where I: Iterator> { - let tcx = scx.tcx(); - // In the first step, we place all regular translation items into their // respective 'home' codegen unit. Regular translation items are all // functions and statics defined in the local crate. - let mut initial_partitioning = place_root_translation_items(scx, - exported_symbols, + let mut initial_partitioning = place_root_translation_items(tcx, trans_items); debug_dump(tcx, "INITIAL PARTITIONING:", initial_partitioning.codegen_units.iter()); @@ -269,13 +250,13 @@ pub fn partition<'a, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>, } = post_inlining; result.sort_by(|cgu1, cgu2| { - (&cgu1.name[..]).cmp(&cgu2.name[..]) + cgu1.name().cmp(cgu2.name()) }); - if scx.sess().opts.enable_dep_node_debug_strs() { + if tcx.sess.opts.enable_dep_node_debug_strs() { for cgu in &result { let dep_node = cgu.work_product_dep_node(); - scx.tcx().dep_graph.register_dep_node_debug_str(dep_node, + tcx.dep_graph.register_dep_node_debug_str(dep_node, || cgu.name().to_string()); } } @@ -304,89 +285,85 @@ struct PostInliningPartitioning<'tcx> { internalization_candidates: FxHashSet>, } -fn place_root_translation_items<'a, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>, - exported_symbols: &ExportedSymbols, +fn place_root_translation_items<'a, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trans_items: I) -> PreInliningPartitioning<'tcx> where I: Iterator> { - let tcx = scx.tcx(); - let exported_symbols = exported_symbols.local_exports(); - let mut roots = FxHashSet(); let mut codegen_units = FxHashMap(); let is_incremental_build = tcx.sess.opts.incremental.is_some(); let mut internalization_candidates = FxHashSet(); for trans_item in trans_items { - let is_root = trans_item.instantiation_mode(tcx) == InstantiationMode::GloballyShared; + match trans_item.instantiation_mode(tcx) { + InstantiationMode::GloballyShared { .. 
} => {} + InstantiationMode::LocalCopy => continue, + } - if is_root { - let characteristic_def_id = characteristic_def_id_of_trans_item(scx, trans_item); - let is_volatile = is_incremental_build && - trans_item.is_generic_fn(); + let characteristic_def_id = characteristic_def_id_of_trans_item(tcx, trans_item); + let is_volatile = is_incremental_build && + trans_item.is_generic_fn(); - let codegen_unit_name = match characteristic_def_id { - Some(def_id) => compute_codegen_unit_name(tcx, def_id, is_volatile), - None => Symbol::intern(FALLBACK_CODEGEN_UNIT).as_str(), - }; + let codegen_unit_name = match characteristic_def_id { + Some(def_id) => compute_codegen_unit_name(tcx, def_id, is_volatile), + None => Symbol::intern(FALLBACK_CODEGEN_UNIT).as_str(), + }; - let make_codegen_unit = || { - CodegenUnit::empty(codegen_unit_name.clone()) - }; + let make_codegen_unit = || { + CodegenUnit::new(codegen_unit_name.clone()) + }; - let codegen_unit = codegen_units.entry(codegen_unit_name.clone()) - .or_insert_with(make_codegen_unit); - - let (linkage, visibility) = match trans_item.explicit_linkage(tcx) { - Some(explicit_linkage) => (explicit_linkage, llvm::Visibility::Default), - None => { - match trans_item { - TransItem::Fn(ref instance) => { - let visibility = match instance.def { - InstanceDef::Item(def_id) => { - if let Some(node_id) = tcx.hir.as_local_node_id(def_id) { - if exported_symbols.contains(&node_id) { - llvm::Visibility::Default - } else { - internalization_candidates.insert(trans_item); - llvm::Visibility::Hidden - } + let codegen_unit = codegen_units.entry(codegen_unit_name.clone()) + .or_insert_with(make_codegen_unit); + + let (linkage, visibility) = match trans_item.explicit_linkage(tcx) { + Some(explicit_linkage) => (explicit_linkage, Visibility::Default), + None => { + match trans_item { + TransItem::Fn(ref instance) => { + let visibility = match instance.def { + InstanceDef::Item(def_id) => { + if def_id.is_local() { + if tcx.is_exported_symbol(def_id) { + Visibility::Default } else { - internalization_candidates.insert(trans_item); - llvm::Visibility::Hidden + Visibility::Hidden } + } else { + Visibility::Hidden } - InstanceDef::FnPtrShim(..) | - InstanceDef::Virtual(..) | - InstanceDef::Intrinsic(..) | - InstanceDef::ClosureOnceShim { .. } | - InstanceDef::DropGlue(..) | - InstanceDef::CloneShim(..) => { - bug!("partitioning: Encountered unexpected - root translation item: {:?}", - trans_item) - } - }; - (llvm::ExternalLinkage, visibility) - } - TransItem::Static(node_id) | - TransItem::GlobalAsm(node_id) => { - let visibility = if exported_symbols.contains(&node_id) { - llvm::Visibility::Default - } else { - internalization_candidates.insert(trans_item); - llvm::Visibility::Hidden - }; - (llvm::ExternalLinkage, visibility) - } + } + InstanceDef::FnPtrShim(..) | + InstanceDef::Virtual(..) | + InstanceDef::Intrinsic(..) | + InstanceDef::ClosureOnceShim { .. } | + InstanceDef::DropGlue(..) | + InstanceDef::CloneShim(..) 
=> { + Visibility::Hidden + } + }; + (Linkage::External, visibility) + } + TransItem::Static(node_id) | + TransItem::GlobalAsm(node_id) => { + let def_id = tcx.hir.local_def_id(node_id); + let visibility = if tcx.is_exported_symbol(def_id) { + Visibility::Default + } else { + Visibility::Hidden + }; + (Linkage::External, visibility) } } - }; - - codegen_unit.items.insert(trans_item, (linkage, visibility)); - roots.insert(trans_item); + } + }; + if visibility == Visibility::Hidden { + internalization_candidates.insert(trans_item); } + + codegen_unit.items_mut().insert(trans_item, (linkage, visibility)); + roots.insert(trans_item); } // always ensure we have at least one CGU; otherwise, if we have a @@ -394,7 +371,7 @@ fn place_root_translation_items<'a, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>, if codegen_units.is_empty() { let codegen_unit_name = Symbol::intern(FALLBACK_CODEGEN_UNIT).as_str(); codegen_units.insert(codegen_unit_name.clone(), - CodegenUnit::empty(codegen_unit_name.clone())); + CodegenUnit::new(codegen_unit_name.clone())); } PreInliningPartitioning { @@ -417,26 +394,17 @@ fn merge_codegen_units<'tcx>(initial_partitioning: &mut PreInliningPartitioning< // translation items in a given unit. This could be improved on. while codegen_units.len() > target_cgu_count { // Sort small cgus to the back - codegen_units.sort_by_key(|cgu| -(cgu.items.len() as i64)); - let smallest = codegen_units.pop().unwrap(); + codegen_units.sort_by_key(|cgu| -(cgu.items().len() as i64)); + let mut smallest = codegen_units.pop().unwrap(); let second_smallest = codegen_units.last_mut().unwrap(); - for (k, v) in smallest.items.into_iter() { - second_smallest.items.insert(k, v); + for (k, v) in smallest.items_mut().drain() { + second_smallest.items_mut().insert(k, v); } } for (index, cgu) in codegen_units.iter_mut().enumerate() { - cgu.name = numbered_codegen_unit_name(crate_name, index); - } - - // If the initial partitioning contained less than target_cgu_count to begin - // with, we won't have enough codegen units here, so add a empty units until - // we reach the target count - while codegen_units.len() < target_cgu_count { - let index = codegen_units.len(); - codegen_units.push( - CodegenUnit::empty(numbered_codegen_unit_name(crate_name, index))); + cgu.set_name(numbered_codegen_unit_name(crate_name, index)); } } @@ -457,20 +425,17 @@ fn place_inlined_translation_items<'tcx>(initial_partitioning: PreInliningPartit for old_codegen_unit in initial_cgus { // Collect all items that need to be available in this codegen unit let mut reachable = FxHashSet(); - for root in old_codegen_unit.items.keys() { + for root in old_codegen_unit.items().keys() { follow_inlining(*root, inlining_map, &mut reachable); } - let mut new_codegen_unit = CodegenUnit { - name: old_codegen_unit.name, - items: FxHashMap(), - }; + let mut new_codegen_unit = CodegenUnit::new(old_codegen_unit.name().clone()); // Add all translation items that are not already there for trans_item in reachable { - if let Some(linkage) = old_codegen_unit.items.get(&trans_item) { + if let Some(linkage) = old_codegen_unit.items().get(&trans_item) { // This is a root, just copy it over - new_codegen_unit.items.insert(trans_item, *linkage); + new_codegen_unit.items_mut().insert(trans_item, *linkage); } else { if roots.contains(&trans_item) { bug!("GloballyShared trans-item inlined into other CGU: \ @@ -478,8 +443,10 @@ fn place_inlined_translation_items<'tcx>(initial_partitioning: PreInliningPartit } // This is a cgu-private copy - 
new_codegen_unit.items.insert(trans_item, - (llvm::InternalLinkage, llvm::Visibility::Default)); + new_codegen_unit.items_mut().insert( + trans_item, + (Linkage::Internal, Visibility::Default), + ); } if !single_codegen_unit { @@ -490,7 +457,7 @@ fn place_inlined_translation_items<'tcx>(initial_partitioning: PreInliningPartit let placement = e.into_mut(); debug_assert!(match *placement { TransItemPlacement::SingleCgu { ref cgu_name } => { - *cgu_name != new_codegen_unit.name + *cgu_name != *new_codegen_unit.name() } TransItemPlacement::MultipleCgus => true, }); @@ -498,7 +465,7 @@ fn place_inlined_translation_items<'tcx>(initial_partitioning: PreInliningPartit } Entry::Vacant(e) => { e.insert(TransItemPlacement::SingleCgu { - cgu_name: new_codegen_unit.name.clone() + cgu_name: new_codegen_unit.name().clone() }); } } @@ -536,8 +503,8 @@ fn internalize_symbols<'a, 'tcx>(_tcx: TyCtxt<'a, 'tcx, 'tcx>, // could be accessed from. for cgu in &mut partitioning.codegen_units { for candidate in &partitioning.internalization_candidates { - cgu.items.insert(*candidate, (llvm::InternalLinkage, - llvm::Visibility::Default)); + cgu.items_mut().insert(*candidate, + (Linkage::Internal, Visibility::Default)); } } @@ -561,10 +528,10 @@ fn internalize_symbols<'a, 'tcx>(_tcx: TyCtxt<'a, 'tcx, 'tcx>, // accessed from outside its defining codegen unit. for cgu in &mut partitioning.codegen_units { let home_cgu = TransItemPlacement::SingleCgu { - cgu_name: cgu.name.clone() + cgu_name: cgu.name().clone() }; - for (accessee, linkage_and_visibility) in &mut cgu.items { + for (accessee, linkage_and_visibility) in cgu.items_mut() { if !partitioning.internalization_candidates.contains(accessee) { // This item is no candidate for internalizing, so skip it. continue @@ -587,15 +554,14 @@ fn internalize_symbols<'a, 'tcx>(_tcx: TyCtxt<'a, 'tcx, 'tcx>, // If we got here, we did not find any accesses from other CGUs, // so it's fine to make this translation item internal. 
- *linkage_and_visibility = (llvm::InternalLinkage, llvm::Visibility::Default); + *linkage_and_visibility = (Linkage::Internal, Visibility::Default); } } } -fn characteristic_def_id_of_trans_item<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, +fn characteristic_def_id_of_trans_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trans_item: TransItem<'tcx>) -> Option { - let tcx = scx.tcx(); match trans_item { TransItem::Fn(instance) => { let def_id = match instance.def { @@ -621,7 +587,7 @@ fn characteristic_def_id_of_trans_item<'a, 'tcx>(scx: &SharedCrateContext<'a, 't if let Some(impl_def_id) = tcx.impl_of_method(def_id) { // This is a method within an inherent impl, find out what the // self-type is: - let impl_self_ty = common::def_ty(scx, impl_def_id, instance.substs); + let impl_self_ty = common::def_ty(tcx, impl_def_id, instance.substs); if let Some(def_id) = characteristic_def_id_of_type(impl_self_ty) { return Some(def_id); } @@ -667,7 +633,7 @@ fn compute_codegen_unit_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } fn numbered_codegen_unit_name(crate_name: &str, index: usize) -> InternedString { - Symbol::intern(&format!("{}{}{}", crate_name, NUMBERED_CODEGEN_UNIT_MARKER, index)).as_str() + Symbol::intern(&format!("{}{}", crate_name, index)).as_str() } fn debug_dump<'a, 'b, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -679,9 +645,9 @@ fn debug_dump<'a, 'b, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>, if cfg!(debug_assertions) { debug!("{}", label); for cgu in cgus { - debug!("CodegenUnit {}:", cgu.name); + debug!("CodegenUnit {}:", cgu.name()); - for (trans_item, linkage) in &cgu.items { + for (trans_item, linkage) in cgu.items() { let symbol_name = trans_item.symbol_name(tcx); let symbol_hash_start = symbol_name.rfind('h'); let symbol_hash = symbol_hash_start.map(|i| &symbol_name[i ..]) diff --git a/src/librustc_trans/time_graph.rs b/src/librustc_trans/time_graph.rs index ead6e43256..a8502682a8 100644 --- a/src/librustc_trans/time_graph.rs +++ b/src/librustc_trans/time_graph.rs @@ -9,21 +9,24 @@ // except according to those terms. 
use std::collections::HashMap; +use std::fs::File; +use std::io::prelude::*; use std::marker::PhantomData; +use std::mem; use std::sync::{Arc, Mutex}; use std::time::Instant; -use std::io::prelude::*; -use std::fs::File; const OUTPUT_WIDTH_IN_PX: u64 = 1000; -const TIME_LINE_HEIGHT_IN_PX: u64 = 7; -const TIME_LINE_HEIGHT_STRIDE_IN_PX: usize = 10; +const TIME_LINE_HEIGHT_IN_PX: u64 = 20; +const TIME_LINE_HEIGHT_STRIDE_IN_PX: usize = 30; #[derive(Clone)] struct Timing { start: Instant, end: Instant, work_package_kind: WorkPackageKind, + name: String, + events: Vec<(String, Instant)>, } #[derive(Clone, Copy, Hash, Eq, PartialEq, Debug)] @@ -32,7 +35,7 @@ pub struct TimelineId(pub usize); #[derive(Clone)] struct PerThread { timings: Vec, - open_work_package: Option<(Instant, WorkPackageKind)>, + open_work_package: Option<(Instant, WorkPackageKind, String)>, } #[derive(Clone)] @@ -43,9 +46,14 @@ pub struct TimeGraph { #[derive(Clone, Copy)] pub struct WorkPackageKind(pub &'static [&'static str]); -pub struct RaiiToken { +pub struct Timeline { + token: Option, +} + +struct RaiiToken { graph: TimeGraph, timeline: TimelineId, + events: Vec<(String, Instant)>, // The token must not be Send: _marker: PhantomData<*const ()> } @@ -53,7 +61,7 @@ pub struct RaiiToken { impl Drop for RaiiToken { fn drop(&mut self) { - self.graph.end(self.timeline); + self.graph.end(self.timeline, mem::replace(&mut self.events, Vec::new())); } } @@ -66,7 +74,8 @@ impl TimeGraph { pub fn start(&self, timeline: TimelineId, - work_package_kind: WorkPackageKind) -> RaiiToken { + work_package_kind: WorkPackageKind, + name: &str) -> Timeline { { let mut table = self.data.lock().unwrap(); @@ -76,33 +85,36 @@ impl TimeGraph { }); assert!(data.open_work_package.is_none()); - data.open_work_package = Some((Instant::now(), work_package_kind)); + data.open_work_package = Some((Instant::now(), work_package_kind, name.to_string())); } - RaiiToken { - graph: self.clone(), - timeline, - _marker: PhantomData, + Timeline { + token: Some(RaiiToken { + graph: self.clone(), + timeline, + events: Vec::new(), + _marker: PhantomData, + }), } } - fn end(&self, timeline: TimelineId) { + fn end(&self, timeline: TimelineId, events: Vec<(String, Instant)>) { let end = Instant::now(); let mut table = self.data.lock().unwrap(); let data = table.get_mut(&timeline).unwrap(); - if let Some((start, work_package_kind)) = data.open_work_package { + if let Some((start, work_package_kind, name)) = data.open_work_package.take() { data.timings.push(Timing { start, end, work_package_kind, + name, + events, }); } else { bug!("end timing without start?") } - - data.open_work_package = None; } pub fn dump(&self, output_filename: &str) { @@ -112,13 +124,13 @@ impl TimeGraph { assert!(data.open_work_package.is_none()); } - let mut timelines: Vec = + let mut threads: Vec = table.values().map(|data| data.clone()).collect(); - timelines.sort_by_key(|timeline| timeline.timings[0].start); + threads.sort_by_key(|timeline| timeline.timings[0].start); - let earliest_instant = timelines[0].timings[0].start; - let latest_instant = timelines.iter() + let earliest_instant = threads[0].timings[0].start; + let latest_instant = threads.iter() .map(|timeline| timeline.timings .last() .unwrap() @@ -129,16 +141,46 @@ impl TimeGraph { let mut file = File::create(format!("{}.html", output_filename)).unwrap(); - writeln!(file, "").unwrap(); - writeln!(file, "").unwrap(); - writeln!(file, "").unwrap(); + writeln!(file, " + + + + + +

+ ").unwrap(); + + let mut idx = 0; + for thread in threads.iter() { + for timing in &thread.timings { + let colors = timing.work_package_kind.0; + let height = TIME_LINE_HEIGHT_STRIDE_IN_PX * timing.events.len(); + writeln!(file, "
", + idx, + colors[idx % colors.len()], + height).unwrap(); + idx += 1; + let max = distance(timing.start, timing.end); + for (i, &(ref event, time)) in timing.events.iter().enumerate() { + let i = i as u64; + let time = distance(timing.start, time); + let at = normalize(time, max, OUTPUT_WIDTH_IN_PX); + writeln!(file, "{}", + at, + TIME_LINE_HEIGHT_IN_PX * i, + event).unwrap(); + } + writeln!(file, "
").unwrap(); + } + } + + writeln!(file, " + + + ").unwrap(); + } +} + +impl Timeline { + pub fn noop() -> Timeline { + Timeline { token: None } + } + + /// Record an event which happened at this moment on this timeline. + /// + /// Events are displayed in the eventual HTML output where you can click on + /// a particular timeline and it'll expand to all of the events that + /// happened on that timeline. This can then be used to drill into a + /// particular timeline and see what events are happening and taking the + /// most time. + pub fn record(&mut self, name: &str) { + if let Some(ref mut token) = self.token { + token.events.push((name.to_string(), Instant::now())); + } } } diff --git a/src/librustc_trans/trans_item.rs b/src/librustc_trans/trans_item.rs index 38232ed1d1..e40b1617d0 100644 --- a/src/librustc_trans/trans_item.rs +++ b/src/librustc_trans/trans_item.rs @@ -25,45 +25,56 @@ use llvm; use monomorphize::Instance; use rustc::hir; use rustc::hir::def_id::DefId; +use rustc::middle::trans::{Linkage, Visibility}; +use rustc::session::config::OptLevel; use rustc::traits; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; use rustc::ty::subst::{Subst, Substs}; -use syntax::ast::{self, NodeId}; +use syntax::ast; use syntax::attr; use syntax_pos::Span; use syntax_pos::symbol::Symbol; use type_of; -use std::fmt::Write; +use std::fmt::{self, Write}; use std::iter; -#[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)] -pub enum TransItem<'tcx> { - Fn(Instance<'tcx>), - Static(NodeId), - GlobalAsm(NodeId), -} +pub use rustc::middle::trans::TransItem; /// Describes how a translation item will be instantiated in object files. #[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)] pub enum InstantiationMode { /// There will be exactly one instance of the given TransItem. It will have /// external linkage so that it can be linked to from other codegen units. - GloballyShared, + GloballyShared { + /// In some compilation scenarios we may decide to take functions that + /// are typically `LocalCopy` and instead move them to `GloballyShared` + /// to avoid translating them a bunch of times. In this situation, + /// however, our local copy may conflict with other crates also + /// inlining the same function. + /// + /// This flag indicates that this situation is occuring, and informs + /// symbol name calculation that some extra mangling is needed to + /// avoid conflicts. Note that this may eventually go away entirely if + /// ThinLTO enables us to *always* have a globally shared instance of a + /// function within one crate's compilation. + may_conflict: bool, + }, /// Each codegen unit containing a reference to the given TransItem will /// have its own private copy of the function (with internal linkage). 
LocalCopy, } -impl<'a, 'tcx> TransItem<'tcx> { +pub trait TransItemExt<'a, 'tcx>: fmt::Debug { + fn as_trans_item(&self) -> &TransItem<'tcx>; - pub fn define(&self, ccx: &CrateContext<'a, 'tcx>) { + fn define(&self, ccx: &CrateContext<'a, 'tcx>) { debug!("BEGIN IMPLEMENTING '{} ({})' in cgu {}", - self.to_string(ccx.tcx()), - self.to_raw_string(), - ccx.codegen_unit().name()); + self.to_string(ccx.tcx()), + self.to_raw_string(), + ccx.codegen_unit().name()); - match *self { + match *self.as_trans_item() { TransItem::Static(node_id) => { let tcx = ccx.tcx(); let item = tcx.hir.expect_item(node_id); @@ -97,10 +108,10 @@ impl<'a, 'tcx> TransItem<'tcx> { ccx.codegen_unit().name()); } - pub fn predefine(&self, - ccx: &CrateContext<'a, 'tcx>, - linkage: llvm::Linkage, - visibility: llvm::Visibility) { + fn predefine(&self, + ccx: &CrateContext<'a, 'tcx>, + linkage: Linkage, + visibility: Visibility) { debug!("BEGIN PREDEFINING '{} ({})' in cgu {}", self.to_string(ccx.tcx()), self.to_raw_string(), @@ -110,12 +121,12 @@ impl<'a, 'tcx> TransItem<'tcx> { debug!("symbol {}", &symbol_name); - match *self { + match *self.as_trans_item() { TransItem::Static(node_id) => { - TransItem::predefine_static(ccx, node_id, linkage, visibility, &symbol_name); + predefine_static(ccx, node_id, linkage, visibility, &symbol_name); } TransItem::Fn(instance) => { - TransItem::predefine_fn(ccx, instance, linkage, visibility, &symbol_name); + predefine_fn(ccx, instance, linkage, visibility, &symbol_name); } TransItem::GlobalAsm(..) => {} } @@ -126,75 +137,8 @@ impl<'a, 'tcx> TransItem<'tcx> { ccx.codegen_unit().name()); } - fn predefine_static(ccx: &CrateContext<'a, 'tcx>, - node_id: ast::NodeId, - linkage: llvm::Linkage, - visibility: llvm::Visibility, - symbol_name: &str) { - let def_id = ccx.tcx().hir.local_def_id(node_id); - let instance = Instance::mono(ccx.tcx(), def_id); - let ty = common::instance_ty(ccx.shared(), &instance); - let llty = type_of::type_of(ccx, ty); - - let g = declare::define_global(ccx, symbol_name, llty).unwrap_or_else(|| { - ccx.sess().span_fatal(ccx.tcx().hir.span(node_id), - &format!("symbol `{}` is already defined", symbol_name)) - }); - - unsafe { - llvm::LLVMRustSetLinkage(g, linkage); - llvm::LLVMRustSetVisibility(g, visibility); - } - - ccx.instances().borrow_mut().insert(instance, g); - ccx.statics().borrow_mut().insert(g, def_id); - } - - fn predefine_fn(ccx: &CrateContext<'a, 'tcx>, - instance: Instance<'tcx>, - linkage: llvm::Linkage, - visibility: llvm::Visibility, - symbol_name: &str) { - assert!(!instance.substs.needs_infer() && - !instance.substs.has_param_types()); - - let mono_ty = common::instance_ty(ccx.shared(), &instance); - let attrs = instance.def.attrs(ccx.tcx()); - let lldecl = declare::declare_fn(ccx, symbol_name, mono_ty); - unsafe { llvm::LLVMRustSetLinkage(lldecl, linkage) }; - base::set_link_section(ccx, lldecl, &attrs); - if linkage == llvm::Linkage::LinkOnceODRLinkage || - linkage == llvm::Linkage::WeakODRLinkage { - llvm::SetUniqueComdat(ccx.llmod(), lldecl); - } - - // If we're compiling the compiler-builtins crate, e.g. the equivalent of - // compiler-rt, then we want to implicitly compile everything with hidden - // visibility as we're going to link this object all over the place but - // don't want the symbols to get exported. 
- if linkage != llvm::Linkage::InternalLinkage && - linkage != llvm::Linkage::PrivateLinkage && - attr::contains_name(ccx.tcx().hir.krate_attrs(), "compiler_builtins") { - unsafe { - llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden); - } - } else { - unsafe { - llvm::LLVMRustSetVisibility(lldecl, visibility); - } - } - - debug!("predefine_fn: mono_ty = {:?} instance = {:?}", mono_ty, instance); - if common::is_inline_instance(ccx.tcx(), &instance) { - attributes::inline(lldecl, attributes::InlineAttr::Hint); - } - attributes::from_fn_attrs(ccx, &attrs, lldecl); - - ccx.instances().borrow_mut().insert(instance, lldecl); - } - - pub fn symbol_name(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> ty::SymbolName { - match *self { + fn symbol_name(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> ty::SymbolName { + match *self.as_trans_item() { TransItem::Fn(instance) => tcx.symbol_name(instance), TransItem::Static(node_id) => { let def_id = tcx.hir.local_def_id(node_id); @@ -209,8 +153,8 @@ impl<'a, 'tcx> TransItem<'tcx> { } } - pub fn local_span(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Option { - match *self { + fn local_span(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Option { + match *self.as_trans_item() { TransItem::Fn(Instance { def, .. }) => { tcx.hir.as_local_node_id(def.def_id()) } @@ -221,26 +165,39 @@ impl<'a, 'tcx> TransItem<'tcx> { }.map(|node_id| tcx.hir.span(node_id)) } - pub fn instantiation_mode(&self, - tcx: TyCtxt<'a, 'tcx, 'tcx>) - -> InstantiationMode { - match *self { + fn instantiation_mode(&self, + tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> InstantiationMode { + let inline_in_all_cgus = + tcx.sess.opts.debugging_opts.inline_in_all_cgus.unwrap_or_else(|| { + tcx.sess.opts.optimize != OptLevel::No + }); + + match *self.as_trans_item() { TransItem::Fn(ref instance) => { if self.explicit_linkage(tcx).is_none() && common::requests_inline(tcx, instance) { - InstantiationMode::LocalCopy + if inline_in_all_cgus { + InstantiationMode::LocalCopy + } else { + InstantiationMode::GloballyShared { may_conflict: true } + } } else { - InstantiationMode::GloballyShared + InstantiationMode::GloballyShared { may_conflict: false } } } - TransItem::Static(..) => InstantiationMode::GloballyShared, - TransItem::GlobalAsm(..) => InstantiationMode::GloballyShared, + TransItem::Static(..) => { + InstantiationMode::GloballyShared { may_conflict: false } + } + TransItem::GlobalAsm(..) => { + InstantiationMode::GloballyShared { may_conflict: false } + } } } - pub fn is_generic_fn(&self) -> bool { - match *self { + fn is_generic_fn(&self) -> bool { + match *self.as_trans_item() { TransItem::Fn(ref instance) => { instance.substs.types().next().is_some() } @@ -249,8 +206,8 @@ impl<'a, 'tcx> TransItem<'tcx> { } } - pub fn explicit_linkage(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Option { - let def_id = match *self { + fn explicit_linkage(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Option { + let def_id = match *self.as_trans_item() { TransItem::Fn(ref instance) => instance.def_id(), TransItem::Static(node_id) => tcx.hir.local_def_id(node_id), TransItem::GlobalAsm(..) 
=> return None, @@ -258,7 +215,7 @@ impl<'a, 'tcx> TransItem<'tcx> { let attributes = tcx.get_attrs(def_id); if let Some(name) = attr::first_attr_value_str_by_name(&attributes, "linkage") { - if let Some(linkage) = base::llvm_linkage_by_name(&name.as_str()) { + if let Some(linkage) = base::linkage_by_name(&name.as_str()) { Some(linkage) } else { let span = tcx.hir.span_if_local(def_id); @@ -298,9 +255,9 @@ impl<'a, 'tcx> TransItem<'tcx> { /// Similarly, if a vtable method has such a signature, and therefore can't /// be used, we can just not emit it and have a placeholder (a null pointer, /// which will never be accessed) in its place. - pub fn is_instantiable(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> bool { + fn is_instantiable(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> bool { debug!("is_instantiable({:?})", self); - let (def_id, substs) = match *self { + let (def_id, substs) = match *self.as_trans_item() { TransItem::Fn(ref instance) => (instance.def_id(), instance.substs), TransItem::Static(node_id) => (tcx.hir.local_def_id(node_id), Substs::empty()), // global asm never has predicates @@ -311,10 +268,10 @@ impl<'a, 'tcx> TransItem<'tcx> { traits::normalize_and_test_predicates(tcx, predicates) } - pub fn to_string(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> String { + fn to_string(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> String { let hir_map = &tcx.hir; - return match *self { + return match *self.as_trans_item() { TransItem::Fn(instance) => { to_string_internal(tcx, "fn ", instance) }, @@ -340,8 +297,8 @@ impl<'a, 'tcx> TransItem<'tcx> { } } - pub fn to_raw_string(&self) -> String { - match *self { + fn to_raw_string(&self) -> String { + match *self.as_trans_item() { TransItem::Fn(instance) => { format!("Fn({:?}, {})", instance.def, @@ -357,6 +314,77 @@ impl<'a, 'tcx> TransItem<'tcx> { } } +impl<'a, 'tcx> TransItemExt<'a, 'tcx> for TransItem<'tcx> { + fn as_trans_item(&self) -> &TransItem<'tcx> { + self + } +} + +fn predefine_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + node_id: ast::NodeId, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str) { + let def_id = ccx.tcx().hir.local_def_id(node_id); + let instance = Instance::mono(ccx.tcx(), def_id); + let ty = common::instance_ty(ccx.tcx(), &instance); + let llty = type_of::type_of(ccx, ty); + + let g = declare::define_global(ccx, symbol_name, llty).unwrap_or_else(|| { + ccx.sess().span_fatal(ccx.tcx().hir.span(node_id), + &format!("symbol `{}` is already defined", symbol_name)) + }); + + unsafe { + llvm::LLVMRustSetLinkage(g, base::linkage_to_llvm(linkage)); + llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility)); + } + + ccx.instances().borrow_mut().insert(instance, g); + ccx.statics().borrow_mut().insert(g, def_id); +} + +fn predefine_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + instance: Instance<'tcx>, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str) { + assert!(!instance.substs.needs_infer() && + !instance.substs.has_param_types()); + + let mono_ty = common::instance_ty(ccx.tcx(), &instance); + let attrs = instance.def.attrs(ccx.tcx()); + let lldecl = declare::declare_fn(ccx, symbol_name, mono_ty); + unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) }; + base::set_link_section(ccx, lldecl, &attrs); + if linkage == Linkage::LinkOnceODR || + linkage == Linkage::WeakODR { + llvm::SetUniqueComdat(ccx.llmod(), lldecl); + } + + // If we're compiling the compiler-builtins crate, e.g. 
the equivalent of + // compiler-rt, then we want to implicitly compile everything with hidden + // visibility as we're going to link this object all over the place but + // don't want the symbols to get exported. + if linkage != Linkage::Internal && linkage != Linkage::Private && + attr::contains_name(ccx.tcx().hir.krate_attrs(), "compiler_builtins") { + unsafe { + llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden); + } + } else { + unsafe { + llvm::LLVMRustSetVisibility(lldecl, base::visibility_to_llvm(visibility)); + } + } + + debug!("predefine_fn: mono_ty = {:?} instance = {:?}", mono_ty, instance); + if common::is_inline_instance(ccx.tcx(), &instance) { + attributes::inline(lldecl, attributes::InlineAttr::Hint); + } + attributes::from_fn_attrs(ccx, &attrs, lldecl); + + ccx.instances().borrow_mut().insert(instance, lldecl); +} //=----------------------------------------------------------------------------- // TransItem String Keys @@ -446,7 +474,8 @@ impl<'a, 'tcx> DefPathBasedNames<'a, 'tcx> { ty::TyArray(inner_type, len) => { output.push('['); self.push_type_name(inner_type, output); - write!(output, "; {}", len).unwrap(); + write!(output, "; {}", + len.val.to_const_int().unwrap().to_u64().unwrap()).unwrap(); output.push(']'); }, ty::TySlice(inner_type) => { @@ -504,6 +533,7 @@ impl<'a, 'tcx> DefPathBasedNames<'a, 'tcx> { self.push_type_name(sig.output(), output); } }, + ty::TyGenerator(def_id, ref closure_substs, _) | ty::TyClosure(def_id, ref closure_substs) => { self.push_def_path(def_id, output); let generics = self.tcx.generics_of(self.tcx.closure_base_def_id(def_id)); diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index de4d217c73..da4a4e55a6 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -34,7 +34,7 @@ pub fn slice_for_each<'a, 'tcx, F>( let next_bcx = bcx.build_sibling_block("slice_loop_next"); let start = if zst { - C_uint(bcx.ccx, 0usize) + C_usize(bcx.ccx, 1) } else { data_ptr }; @@ -46,7 +46,7 @@ pub fn slice_for_each<'a, 'tcx, F>( let keep_going = header_bcx.icmp(llvm::IntNE, current, end); header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb()); - let next = add(&body_bcx, current, C_uint(bcx.ccx, 1usize)); + let next = add(&body_bcx, current, C_usize(bcx.ccx, 1)); f(&body_bcx, if zst { data_ptr } else { current }, header_bcx.llbb()); header_bcx.add_incoming_to_phi(current, next, body_bcx.llbb()); next_bcx diff --git a/src/librustc_trans/type_.rs b/src/librustc_trans/type_.rs index b8a8068d36..ffb303688a 100644 --- a/src/librustc_trans/type_.rs +++ b/src/librustc_trans/type_.rs @@ -131,7 +131,7 @@ impl Type { Type::i8_llcx(llcx).ptr_to() } - pub fn int(ccx: &CrateContext) -> Type { + pub fn isize(ccx: &CrateContext) -> Type { match &ccx.tcx().sess.target.target.target_pointer_width[..] { "16" => Type::i16(ccx), "32" => Type::i32(ccx), @@ -140,9 +140,18 @@ impl Type { } } + pub fn c_int(ccx: &CrateContext) -> Type { + match &ccx.tcx().sess.target.target.target_c_int_width[..] 
{ + "16" => Type::i16(ccx), + "32" => Type::i32(ccx), + "64" => Type::i64(ccx), + width => bug!("Unsupported target_c_int_width: {}", width), + } + } + pub fn int_from_ty(ccx: &CrateContext, t: ast::IntTy) -> Type { match t { - ast::IntTy::Is => ccx.int_type(), + ast::IntTy::Is => ccx.isize_ty(), ast::IntTy::I8 => Type::i8(ccx), ast::IntTy::I16 => Type::i16(ccx), ast::IntTy::I32 => Type::i32(ccx), @@ -153,7 +162,7 @@ impl Type { pub fn uint_from_ty(ccx: &CrateContext, t: ast::UintTy) -> Type { match t { - ast::UintTy::Us => ccx.int_type(), + ast::UintTy::Us => ccx.isize_ty(), ast::UintTy::U8 => Type::i8(ccx), ast::UintTy::U16 => Type::i16(ccx), ast::UintTy::U32 => Type::i32(ccx), @@ -207,7 +216,7 @@ impl Type { pub fn vec(ccx: &CrateContext, ty: &Type) -> Type { Type::struct_(ccx, - &[Type::array(ty, 0), Type::int(ccx)], + &[Type::array(ty, 0), Type::isize(ccx)], false) } diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index 9f9126ba83..992c74b902 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -133,6 +133,11 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> // fill it in *after* placing it into the type cache. adt::incomplete_type_of(cx, t, "closure") } + ty::TyGenerator(..) => { + // Only create the named struct, but don't fill it in. We + // fill it in *after* placing it into the type cache. + adt::incomplete_type_of(cx, t, "generator") + } ty::TyRef(_, ty::TypeAndMut{ty, ..}) | ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => { @@ -143,8 +148,8 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> } ty::TyArray(ty, size) => { - let size = size as u64; let llty = in_memory_type_of(cx, ty); + let size = size.val.to_const_int().unwrap().to_u64().unwrap(); Type::array(&llty, size) } @@ -197,7 +202,7 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> // If this was an enum or struct, fill in the type now. match t.sty { - ty::TyAdt(..) | ty::TyClosure(..) if !t.is_simd() && !t.is_box() => { + ty::TyAdt(..) | ty::TyClosure(..) | ty::TyGenerator(..) 
if !t.is_simd() && !t.is_box() => { adt::finish_type_of(cx, t, &mut llty); } _ => () diff --git a/src/librustc_trans_utils/Cargo.toml b/src/librustc_trans_utils/Cargo.toml index f026d4fcbc..bedbea0068 100644 --- a/src/librustc_trans_utils/Cargo.toml +++ b/src/librustc_trans_utils/Cargo.toml @@ -10,6 +10,12 @@ crate-type = ["dylib"] test = false [dependencies] -rustc = { path = "../librustc" } +ar = "0.3.0" +flate2 = "0.2" +owning_ref = "0.3.3" +log = "0.3" + syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } +rustc = { path = "../librustc" } +rustc_back = { path = "../librustc_back" } diff --git a/src/librustc_trans_utils/lib.rs b/src/librustc_trans_utils/lib.rs index 90e1790632..6873befd2b 100644 --- a/src/librustc_trans_utils/lib.rs +++ b/src/librustc_trans_utils/lib.rs @@ -19,7 +19,6 @@ #![feature(box_patterns)] #![feature(box_syntax)] -#![feature(const_fn)] #![feature(custom_attribute)] #![allow(unused_attributes)] #![feature(i128_type)] @@ -28,8 +27,91 @@ #![feature(slice_patterns)] #![feature(conservative_impl_trait)] +#![cfg_attr(stage0, feature(const_fn))] + +extern crate ar; +extern crate flate2; +extern crate owning_ref; +#[macro_use] +extern crate log; + +#[macro_use] extern crate rustc; +extern crate rustc_back; extern crate syntax; extern crate syntax_pos; +use rustc::ty::TyCtxt; +use rustc::hir; +use rustc::hir::def_id::LOCAL_CRATE; +use rustc::hir::map as hir_map; +use rustc::util::nodemap::NodeSet; + +use syntax::attr; + pub mod link; +pub mod trans_crate; + +/// check for the #[rustc_error] annotation, which forces an +/// error in trans. This is used to write compile-fail tests +/// that actually test that compilation succeeds without +/// reporting an error. +pub fn check_for_rustc_errors_attr(tcx: TyCtxt) { + if let Some((id, span)) = *tcx.sess.entry_fn.borrow() { + let main_def_id = tcx.hir.local_def_id(id); + + if tcx.has_attr(main_def_id, "rustc_error") { + tcx.sess.span_fatal(span, "compilation successful"); + } + } +} + +/// The context provided lists a set of reachable ids as calculated by +/// middle::reachable, but this contains far more ids and symbols than we're +/// actually exposing from the object file. This function will filter the set in +/// the context to the set of ids which correspond to symbols that are exposed +/// from the object file being generated. +/// +/// This list is later used by linkers to determine the set of symbols needed to +/// be exposed from a dynamic library and it's also encoded into the metadata. +pub fn find_exported_symbols(tcx: TyCtxt) -> NodeSet { + tcx.reachable_set(LOCAL_CRATE).0.iter().cloned().filter(|&id| { + // Next, we want to ignore some FFI functions that are not exposed from + // this crate. Reachable FFI functions can be lumped into two + // categories: + // + // 1. Those that are included statically via a static library + // 2. Those included otherwise (e.g. dynamically or via a framework) + // + // Although our LLVM module is not literally emitting code for the + // statically included symbols, it's an export of our library which + // needs to be passed on to the linker and encoded in the metadata. + // + // As a result, if this id is an FFI item (foreign item) then we only + // let it through if it's included statically. + match tcx.hir.get(id) { + hir_map::NodeForeignItem(..) => { + let def_id = tcx.hir.local_def_id(id); + tcx.is_statically_included_foreign_item(def_id) + } + + // Only consider nodes that actually have exported symbols. 
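// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the patch] The comments above (and the match
// arms that follow) spell out which reachable nodes `find_exported_symbols`
// keeps. A simplified, hypothetical mirror of those rules; the enum and its
// fields are invented for illustration, the real code inspects rustc's HIR
// map and attributes.
#[allow(dead_code)]
enum ReachableNode {
    // Foreign (FFI) items count only when they are statically included.
    ForeignItem { statically_included: bool },
    // Statics are never generic and never #[inline], so they always count.
    Static,
    // Functions and impl methods count when they are non-generic and do not
    // request #[inline].
    Fn { is_generic: bool, requests_inline: bool },
    Method { is_generic: bool, requests_inline: bool },
    Other,
}

fn is_exported(node: &ReachableNode) -> bool {
    match *node {
        ReachableNode::ForeignItem { statically_included } => statically_included,
        ReachableNode::Static => true,
        ReachableNode::Fn { is_generic, requests_inline } |
        ReachableNode::Method { is_generic, requests_inline } =>
            !is_generic && !requests_inline,
        ReachableNode::Other => false,
    }
}

fn main() {
    assert!(is_exported(&ReachableNode::Static));
    assert!(!is_exported(&ReachableNode::Fn { is_generic: true, requests_inline: false }));
}
// ---------------------------------------------------------------------------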
+ hir_map::NodeItem(&hir::Item { + node: hir::ItemStatic(..), .. }) | + hir_map::NodeItem(&hir::Item { + node: hir::ItemFn(..), .. }) | + hir_map::NodeImplItem(&hir::ImplItem { + node: hir::ImplItemKind::Method(..), .. }) => { + let def_id = tcx.hir.local_def_id(id); + let generics = tcx.generics_of(def_id); + let attributes = tcx.get_attrs(def_id); + (generics.parent_types == 0 && generics.types.is_empty()) && + // Functions marked with #[inline] are only ever translated + // with "internal" linkage and are never exported. + !attr::requests_inline(&attributes) + } + + _ => false + } + }).collect() +} diff --git a/src/librustc_trans_utils/link.rs b/src/librustc_trans_utils/link.rs index aa8637fabe..47484488fb 100644 --- a/src/librustc_trans_utils/link.rs +++ b/src/librustc_trans_utils/link.rs @@ -8,13 +8,56 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use rustc::ich::Fingerprint; use rustc::session::config::{self, OutputFilenames, Input, OutputType}; use rustc::session::Session; -use rustc::middle::cstore; -use std::path::PathBuf; +use rustc::middle::cstore::{self, LinkMeta}; +use rustc::hir::svh::Svh; +use std::path::{Path, PathBuf}; use syntax::ast; use syntax_pos::Span; +pub fn out_filename(sess: &Session, + crate_type: config::CrateType, + outputs: &OutputFilenames, + crate_name: &str) + -> PathBuf { + let default_filename = filename_for_input(sess, crate_type, crate_name, outputs); + let out_filename = outputs.outputs.get(&OutputType::Exe) + .and_then(|s| s.to_owned()) + .or_else(|| outputs.single_output_file.clone()) + .unwrap_or(default_filename); + + check_file_is_writeable(&out_filename, sess); + + out_filename +} + +// Make sure files are writeable. Mac, FreeBSD, and Windows system linkers +// check this already -- however, the Linux linker will happily overwrite a +// read-only file. We should be consistent. +pub fn check_file_is_writeable(file: &Path, sess: &Session) { + if !is_writeable(file) { + sess.fatal(&format!("output file {} is not writeable -- check its \ + permissions", file.display())); + } +} + +fn is_writeable(p: &Path) -> bool { + match p.metadata() { + Err(..) => true, + Ok(m) => !m.permissions().readonly() + } +} + +pub fn build_link_meta(crate_hash: Fingerprint) -> LinkMeta { + let r = LinkMeta { + crate_hash: Svh::new(crate_hash.to_smaller_hash()), + }; + info!("{:?}", r); + return r; +} + pub fn find_crate_name(sess: Option<&Session>, attrs: &[ast::Attribute], input: &Input) -> String { diff --git a/src/librustc_trans_utils/trans_crate.rs b/src/librustc_trans_utils/trans_crate.rs new file mode 100644 index 0000000000..f51a463fcc --- /dev/null +++ b/src/librustc_trans_utils/trans_crate.rs @@ -0,0 +1,249 @@ +// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The Rust compiler. +//! +//! # Note +//! +//! This API is completely unstable and subject to change. 
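// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the patch] The new `trans_crate.rs` added
// below hides the codegen backend behind a `TransCrate` trait with associated
// types, plus a do-nothing `DummyTransCrate` implementation. A minimal,
// self-contained sketch of that shape; `Backend`, `NullBackend` and the
// method names are invented stand-ins, not the real rustc API.
trait Backend {
    type Ongoing;
    type Finished;

    fn start(&self, krate: &str) -> Self::Ongoing;
    fn join(&self, ongoing: Self::Ongoing) -> Self::Finished;
    fn link(&self, finished: &Self::Finished);
}

struct NullBackend;

impl Backend for NullBackend {
    type Ongoing = String;
    type Finished = String;

    fn start(&self, krate: &str) -> String {
        format!("translating {}", krate)
    }
    fn join(&self, ongoing: String) -> String {
        ongoing
    }
    fn link(&self, finished: &String) {
        println!("linking: {}", finished);
    }
}

fn main() {
    let backend = NullBackend;
    let finished = backend.join(backend.start("crate_a"));
    backend.link(&finished);
}
// ---------------------------------------------------------------------------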
+ +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] +#![deny(warnings)] + +#![feature(box_syntax)] + +use std::any::Any; +use std::io::prelude::*; +use std::io::{self, Cursor}; +use std::fs::File; +use std::path::Path; +use std::sync::mpsc; + +use owning_ref::{ErasedBoxRef, OwningRef}; +use ar::{Archive, Builder, Header}; +use flate2::Compression; +use flate2::write::DeflateEncoder; + +use syntax::symbol::Symbol; +use rustc::hir::def_id::LOCAL_CRATE; +use rustc::session::Session; +use rustc::session::config::{CrateType, OutputFilenames}; +use rustc::ty::TyCtxt; +use rustc::ty::maps::Providers; +use rustc::middle::cstore::EncodedMetadata; +use rustc::middle::cstore::MetadataLoader as MetadataLoaderTrait; +use rustc::dep_graph::{DepGraph, DepNode, DepKind}; +use rustc_back::target::Target; +use link::{build_link_meta, out_filename}; + +pub trait TransCrate { + type MetadataLoader: MetadataLoaderTrait; + type OngoingCrateTranslation; + type TranslatedCrate; + + fn metadata_loader() -> Box; + fn provide_local(_providers: &mut Providers); + fn provide_extern(_providers: &mut Providers); + fn trans_crate<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + rx: mpsc::Receiver> + ) -> Self::OngoingCrateTranslation; + fn join_trans( + trans: Self::OngoingCrateTranslation, + sess: &Session, + dep_graph: &DepGraph + ) -> Self::TranslatedCrate; + fn link_binary(sess: &Session, trans: &Self::TranslatedCrate, outputs: &OutputFilenames); + fn dump_incremental_data(trans: &Self::TranslatedCrate); +} + +pub struct DummyTransCrate; + +impl TransCrate for DummyTransCrate { + type MetadataLoader = DummyMetadataLoader; + type OngoingCrateTranslation = (); + type TranslatedCrate = (); + + fn metadata_loader() -> Box { + box DummyMetadataLoader(()) + } + + fn provide_local(_providers: &mut Providers) { + bug!("DummyTransCrate::provide_local"); + } + + fn provide_extern(_providers: &mut Providers) { + bug!("DummyTransCrate::provide_extern"); + } + + fn trans_crate<'a, 'tcx>( + _tcx: TyCtxt<'a, 'tcx, 'tcx>, + _rx: mpsc::Receiver> + ) -> Self::OngoingCrateTranslation { + bug!("DummyTransCrate::trans_crate"); + } + + fn join_trans( + _trans: Self::OngoingCrateTranslation, + _sess: &Session, + _dep_graph: &DepGraph + ) -> Self::TranslatedCrate { + bug!("DummyTransCrate::join_trans"); + } + + fn link_binary(_sess: &Session, _trans: &Self::TranslatedCrate, _outputs: &OutputFilenames) { + bug!("DummyTransCrate::link_binary"); + } + + fn dump_incremental_data(_trans: &Self::TranslatedCrate) { + bug!("DummyTransCrate::dump_incremental_data"); + } +} + +pub struct DummyMetadataLoader(()); + +impl MetadataLoaderTrait for DummyMetadataLoader { + fn get_rlib_metadata( + &self, + _target: &Target, + _filename: &Path + ) -> Result, String> { + bug!("DummyMetadataLoader::get_rlib_metadata"); + } + + fn get_dylib_metadata( + &self, + _target: &Target, + _filename: &Path + ) -> Result, String> { + bug!("DummyMetadataLoader::get_dylib_metadata"); + } +} + +pub struct NoLlvmMetadataLoader; + +impl MetadataLoaderTrait for NoLlvmMetadataLoader { + fn get_rlib_metadata(&self, _: &Target, filename: &Path) -> Result, String> { + let file = File::open(filename) + .map_err(|e| format!("metadata file open err: {:?}", e))?; + let mut archive = Archive::new(file); + + while let Some(entry_result) = archive.next_entry() { + let mut entry = entry_result + .map_err(|e| format!("metadata section read 
err: {:?}", e))?; + if entry.header().identifier() == "rust.metadata.bin" { + let mut buf = Vec::new(); + io::copy(&mut entry, &mut buf).unwrap(); + let buf: OwningRef, [u8]> = OwningRef::new(buf).into(); + return Ok(buf.map_owner_box().erase_owner()); + } + } + + Err("Couldnt find metadata section".to_string()) + } + + fn get_dylib_metadata( + &self, + _target: &Target, + _filename: &Path, + ) -> Result, String> { + // FIXME: Support reading dylibs from llvm enabled rustc + self.get_rlib_metadata(_target, _filename) + } +} + +pub struct MetadataOnlyTransCrate; +pub struct OngoingCrateTranslation { + metadata: EncodedMetadata, + metadata_version: Vec, + crate_name: Symbol, +} +pub struct TranslatedCrate(OngoingCrateTranslation); + +impl MetadataOnlyTransCrate { + #[allow(dead_code)] + pub fn new() -> Self { + MetadataOnlyTransCrate + } +} + +impl TransCrate for MetadataOnlyTransCrate { + type MetadataLoader = NoLlvmMetadataLoader; + type OngoingCrateTranslation = OngoingCrateTranslation; + type TranslatedCrate = TranslatedCrate; + + fn metadata_loader() -> Box { + box NoLlvmMetadataLoader + } + + fn provide_local(_providers: &mut Providers) {} + fn provide_extern(_providers: &mut Providers) {} + + fn trans_crate<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + _rx: mpsc::Receiver> + ) -> Self::OngoingCrateTranslation { + ::check_for_rustc_errors_attr(tcx); + let _ = tcx.link_args(LOCAL_CRATE); + let _ = tcx.native_libraries(LOCAL_CRATE); + tcx.sess.abort_if_errors(); + + let crate_hash = tcx.dep_graph + .fingerprint_of(&DepNode::new_no_params(DepKind::Krate)); + let link_meta = build_link_meta(crate_hash); + let exported_symbols = ::find_exported_symbols(tcx); + let (metadata, _hashes) = tcx.encode_metadata(&link_meta, &exported_symbols); + + OngoingCrateTranslation { + metadata: metadata, + metadata_version: tcx.metadata_encoding_version().to_vec(), + crate_name: tcx.crate_name(LOCAL_CRATE), + } + } + + fn join_trans( + trans: Self::OngoingCrateTranslation, + _sess: &Session, + _dep_graph: &DepGraph, + ) -> Self::TranslatedCrate { + TranslatedCrate(trans) + } + + fn link_binary(sess: &Session, trans: &Self::TranslatedCrate, outputs: &OutputFilenames) { + for &crate_type in sess.opts.crate_types.iter() { + if crate_type != CrateType::CrateTypeRlib && crate_type != CrateType::CrateTypeDylib { + continue; + } + let output_name = + out_filename(sess, crate_type, &outputs, &trans.0.crate_name.as_str()); + let mut compressed = trans.0.metadata_version.clone(); + let metadata = if crate_type == CrateType::CrateTypeDylib { + DeflateEncoder::new(&mut compressed, Compression::Fast) + .write_all(&trans.0.metadata.raw_data) + .unwrap(); + &compressed + } else { + &trans.0.metadata.raw_data + }; + let mut builder = Builder::new(File::create(&output_name).unwrap()); + let header = Header::new("rust.metadata.bin".to_string(), metadata.len() as u64); + builder.append(&header, Cursor::new(metadata)).unwrap(); + } + + if !sess.opts.crate_types.contains(&CrateType::CrateTypeRlib) + && !sess.opts.crate_types.contains(&CrateType::CrateTypeDylib) { + sess.fatal("Executables are not supported by the metadata-only backend."); + } + } + + fn dump_incremental_data(_trans: &Self::TranslatedCrate) {} +} diff --git a/src/librustc_typeck/README.md b/src/librustc_typeck/README.md new file mode 100644 index 0000000000..a38f04e304 --- /dev/null +++ b/src/librustc_typeck/README.md @@ -0,0 +1,48 @@ +NB: This crate is part of the Rust compiler. 
For an overview of the +compiler as a whole, see +[the README.md file found in `librustc`](../librustc/README.md). + +The `rustc_typeck` crate contains the source for "type collection" and +"type checking", as well as a few other bits of related functionality. +(It draws heavily on the [type inferencing][infer] and +[trait solving][traits] code found in librustc.) + +[infer]: ../librustc/infer/README.md +[traits]: ../librustc/traits/README.md + +## Type collection + +Type "collection" is the process of convering the types found in the +HIR (`hir::Ty`), which represent the syntactic things that the user +wrote, into the **internal representation** used by the compiler +(`Ty<'tcx>`) -- we also do similar conversions for where-clauses and +other bits of the function signature. + +To try and get a sense for the difference, consider this function: + +```rust +struct Foo { } +fn foo(x: Foo, y: self::Foo) { .. } +// ^^^ ^^^^^^^^^ +``` + +Those two parameters `x` and `y` each have the same type: but they +will have distinct `hir::Ty` nodes. Those nodes will have different +spans, and of course they encode the path somewhat differently. But +once they are "collected" into `Ty<'tcx>` nodes, they will be +represented by the exact same internal type. + +Collection is defined as a bundle of queries (e.g., `type_of`) for +computing information about the various functions, traits, and other +items in the crate being compiled. Note that each of these queries is +concerned with *interprocedural* things -- for example, for a function +definition, collection will figure out the type and signature of the +function, but it will not visit the *body* of the function in any way, +nor examine type annotations on local variables (that's the job of +type *checking*). + +For more details, see the `collect` module. + +## Type checking + +TODO diff --git a/src/librustc_typeck/astconv.rs b/src/librustc_typeck/astconv.rs index b55d762bf0..af8cc2c806 100644 --- a/src/librustc_typeck/astconv.rs +++ b/src/librustc_typeck/astconv.rs @@ -12,7 +12,7 @@ //! representation. The main routine here is `ast_ty_to_ty()`: each use //! is parameterized by an instance of `AstConv`. -use rustc::middle::const_val::eval_length; +use rustc::middle::const_val::ConstVal; use rustc_data_structures::accumulate_vec::AccumulateVec; use hir; use hir::def::Def; @@ -76,6 +76,8 @@ pub trait AstConv<'gcx, 'tcx> { /// used to help suppress derived errors typeck might otherwise /// report. 
fn set_tainted_by_errors(&self); + + fn record_ty(&self, hir_id: hir::HirId, ty: Ty<'tcx>, span: Span); } struct ConvertedBinding<'tcx> { @@ -96,35 +98,40 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { -> ty::Region<'tcx> { let tcx = self.tcx(); - let r = match tcx.named_region_map.defs.get(&lifetime.id) { - Some(&rl::Region::Static) => { + let lifetime_name = |def_id| { + tcx.hir.name(tcx.hir.as_local_node_id(def_id).unwrap()) + }; + + let hir_id = tcx.hir.node_to_hir_id(lifetime.id); + let r = match tcx.named_region(hir_id) { + Some(rl::Region::Static) => { tcx.types.re_static } - Some(&rl::Region::LateBound(debruijn, id)) => { - let name = tcx.hir.name(id); + Some(rl::Region::LateBound(debruijn, id)) => { + let name = lifetime_name(id); tcx.mk_region(ty::ReLateBound(debruijn, - ty::BrNamed(tcx.hir.local_def_id(id), name))) + ty::BrNamed(id, name))) } - Some(&rl::Region::LateBoundAnon(debruijn, index)) => { + Some(rl::Region::LateBoundAnon(debruijn, index)) => { tcx.mk_region(ty::ReLateBound(debruijn, ty::BrAnon(index))) } - Some(&rl::Region::EarlyBound(index, id)) => { - let name = tcx.hir.name(id); + Some(rl::Region::EarlyBound(index, id)) => { + let name = lifetime_name(id); tcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion { - def_id: tcx.hir.local_def_id(id), + def_id: id, index, name, })) } - Some(&rl::Region::Free(scope, id)) => { - let name = tcx.hir.name(id); + Some(rl::Region::Free(scope, id)) => { + let name = lifetime_name(id); tcx.mk_region(ty::ReFree(ty::FreeRegion { scope, - bound_region: ty::BrNamed(tcx.hir.local_def_id(id), name) + bound_region: ty::BrNamed(id, name) })) // (*) -- not late-bound, won't change @@ -150,11 +157,16 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { item_segment: &hir::PathSegment) -> &'tcx Substs<'tcx> { + let (substs, assoc_bindings) = - self.create_substs_for_ast_path(span, - def_id, - &item_segment.parameters, - None); + item_segment.with_parameters(|parameters| { + self.create_substs_for_ast_path( + span, + def_id, + parameters, + item_segment.infer_types, + None) + }); assoc_bindings.first().map(|b| self.prohibit_projection(b.span)); @@ -170,6 +182,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { span: Span, def_id: DefId, parameters: &hir::PathParameters, + infer_types: bool, self_ty: Option>) -> (&'tcx Substs<'tcx>, Vec>) { @@ -197,7 +210,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { // Check the number of type parameters supplied by the user. let ty_param_defs = &decl_generics.types[self_ty.is_some() as usize..]; - if !parameters.infer_types || num_types_provided > ty_param_defs.len() { + if !infer_types || num_types_provided > ty_param_defs.len() { check_type_argument_count(tcx, span, num_types_provided, ty_param_defs); } @@ -233,7 +246,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { if i < num_types_provided { // A provided type parameter. self.ast_ty_to_ty(¶meters.types[i]) - } else if parameters.infer_types { + } else if infer_types { // No type parameters were provided, we can infer all. 
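// ---------------------------------------------------------------------------
// [Editorial illustration, not part of the patch] The `infer_types` flag
// threaded through `create_substs_for_ast_path` above corresponds to the
// surface-language choice between spelling type parameters out and letting
// the compiler infer them:
fn pair<T>(a: T, b: T) -> (T, T) {
    (a, b)
}

fn main() {
    let inferred = pair(1, 2);       // nothing supplied: every parameter is inferred
    let explicit = pair::<u8>(1, 2); // turbofish: the count is checked against the decl
    assert_eq!(inferred, (1, 2));
    assert_eq!(explicit, (1u8, 2u8));
}
// ---------------------------------------------------------------------------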
let ty_var = if !default_needs_object_self(def) { self.ty_infer_for_def(def, substs, span) @@ -343,9 +356,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { poly_projections.extend(assoc_bindings.iter().filter_map(|binding| { // specify type to assert that error was already reported in Err case: let predicate: Result<_, ErrorReported> = - self.ast_type_binding_to_poly_projection_predicate(trait_ref.ref_id, - poly_trait_ref, - binding); + self.ast_type_binding_to_poly_projection_predicate(poly_trait_ref, binding); predicate.ok() // ok to ignore Err() because ErrorReported (see above) })); @@ -383,7 +394,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { let trait_def = self.tcx().trait_def(trait_def_id); if !self.tcx().sess.features.borrow().unboxed_closures && - trait_segment.parameters.parenthesized != trait_def.paren_sugar { + trait_segment.with_parameters(|p| p.parenthesized) != trait_def.paren_sugar { // For now, require that parenthetical notation be used only with `Fn()` etc. let msg = if trait_def.paren_sugar { "the precise format of `Fn`-family traits' type parameters is subject to change. \ @@ -395,10 +406,13 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { span, GateIssue::Language, msg); } - self.create_substs_for_ast_path(span, - trait_def_id, - &trait_segment.parameters, - Some(self_ty)) + trait_segment.with_parameters(|parameters| { + self.create_substs_for_ast_path(span, + trait_def_id, + parameters, + trait_segment.infer_types, + Some(self_ty)) + }) } fn trait_defines_associated_type_named(&self, @@ -407,13 +421,13 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { -> bool { self.tcx().associated_items(trait_def_id).any(|item| { - item.kind == ty::AssociatedKind::Type && item.name == assoc_name + item.kind == ty::AssociatedKind::Type && + self.tcx().hygienic_eq(assoc_name, item.name, trait_def_id) }) } fn ast_type_binding_to_poly_projection_predicate( &self, - _path_id: ast::NodeId, trait_ref: ty::PolyTraitRef<'tcx>, binding: &ConvertedBinding<'tcx>) -> Result, ErrorReported> @@ -488,7 +502,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { let candidate = self.one_bound_for_assoc_type(candidates, &trait_ref.to_string(), - &binding.item_name.as_str(), + binding.item_name, binding.span)?; Ok(candidate.map_bound(|trait_ref| { @@ -627,7 +641,8 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { self.ast_region_to_region(lifetime, None) } else { self.compute_object_lifetime_bound(span, existential_predicates).unwrap_or_else(|| { - if tcx.named_region_map.defs.contains_key(&lifetime.id) { + let hir_id = tcx.hir.node_to_hir_id(lifetime.id); + if tcx.named_region(hir_id).is_some() { self.ast_region_to_region(lifetime, None) } else { self.re_infer(span, None).unwrap_or_else(|| { @@ -685,7 +700,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { let param_name = tcx.hir.ty_param_name(param_node_id); self.one_bound_for_assoc_type(suitable_bounds, ¶m_name.as_str(), - &assoc_name.as_str(), + assoc_name, span) } @@ -695,7 +710,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { fn one_bound_for_assoc_type(&self, mut bounds: I, ty_param_name: &str, - assoc_name: &str, + assoc_name: ast::Name, span: Span) -> Result, ErrorReported> where I: Iterator> @@ -724,7 +739,8 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { for bound in bounds { let bound_span = self.tcx().associated_items(bound.def_id()).find(|item| { - item.kind == ty::AssociatedKind::Type && item.name == assoc_name + item.kind == ty::AssociatedKind::Type && + 
self.tcx().hygienic_eq(assoc_name, item.name, bound.def_id()) }) .and_then(|item| self.tcx().hir.span_if_local(item.def_id)); @@ -785,10 +801,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { .filter(|r| self.trait_defines_associated_type_named(r.def_id(), assoc_name)); - match self.one_bound_for_assoc_type(candidates, - "Self", - &assoc_name.as_str(), - span) { + match self.one_bound_for_assoc_type(candidates, "Self", assoc_name, span) { Ok(bound) => bound, Err(ErrorReported) => return (tcx.types.err, Def::Err), } @@ -813,14 +826,14 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { }; let trait_did = bound.0.def_id; - let item = tcx.associated_items(trait_did).find(|i| i.name == assoc_name) + let (assoc_ident, def_scope) = tcx.adjust(assoc_name, trait_did, ref_id); + let item = tcx.associated_items(trait_did).find(|i| i.name.to_ident() == assoc_ident) .expect("missing associated type"); let ty = self.projected_ty_from_poly_trait_ref(span, item.def_id, bound); let ty = self.normalize_ty(span, ty); let def = Def::AssociatedTy(item.def_id); - let def_scope = tcx.adjust(assoc_name, item.container.id(), ref_id).1; if !item.vis.is_accessible_from(def_scope, tcx) { let msg = format!("{} `{}` is private", def.kind_name(), assoc_name); tcx.sess.span_err(span, &msg); @@ -868,25 +881,27 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { pub fn prohibit_type_params(&self, segments: &[hir::PathSegment]) { for segment in segments { - for typ in &segment.parameters.types { - struct_span_err!(self.tcx().sess, typ.span, E0109, - "type parameters are not allowed on this type") - .span_label(typ.span, "type parameter not allowed") - .emit(); - break; - } - for lifetime in &segment.parameters.lifetimes { - struct_span_err!(self.tcx().sess, lifetime.span, E0110, - "lifetime parameters are not allowed on this type") - .span_label(lifetime.span, - "lifetime parameter not allowed on this type") - .emit(); - break; - } - for binding in &segment.parameters.bindings { - self.prohibit_projection(binding.span); - break; - } + segment.with_parameters(|parameters| { + for typ in ¶meters.types { + struct_span_err!(self.tcx().sess, typ.span, E0109, + "type parameters are not allowed on this type") + .span_label(typ.span, "type parameter not allowed") + .emit(); + break; + } + for lifetime in ¶meters.lifetimes { + struct_span_err!(self.tcx().sess, lifetime.span, E0110, + "lifetime parameters are not allowed on this type") + .span_label(lifetime.span, + "lifetime parameter not allowed on this type") + .emit(); + break; + } + for binding in ¶meters.bindings { + self.prohibit_projection(binding.span); + break; + } + }) } } @@ -1008,46 +1023,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { } hir::TyBareFn(ref bf) => { require_c_abi_if_variadic(tcx, &bf.decl, bf.abi, ast_ty.span); - let bare_fn_ty = self.ty_of_fn(bf.unsafety, bf.abi, &bf.decl); - - // Find any late-bound regions declared in return type that do - // not appear in the arguments. These are not wellformed. - // - // Example: - // - // for<'a> fn() -> &'a str <-- 'a is bad - // for<'a> fn(&'a String) -> &'a str <-- 'a is ok - // - // Note that we do this check **here** and not in - // `ty_of_bare_fn` because the latter is also used to make - // the types for fn items, and we do not want to issue a - // warning then. (Once we fix #32330, the regions we are - // checking for here would be considered early bound - // anyway.) 
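// ---------------------------------------------------------------------------
// [Editorial illustration, not part of the patch] The check deleted here (and
// re-added inside `ty_of_fn` further down in this file's diff) rejects
// higher-ranked fn-pointer types whose return lifetime is mentioned by no
// argument (error E0581). The well-formed shape from the comment above:
fn as_str(s: &String) -> &str {
    s.as_str()
}

fn main() {
    // OK: `'a` appears in an argument, so the returned borrow can be tied to it.
    let f: for<'a> fn(&'a String) -> &'a str = as_str;
    let owned = String::from("hello");
    assert_eq!(f(&owned), "hello");

    // Rejected with E0581, so it stays a comment on purpose:
    // let g: for<'a> fn() -> &'a str = ...;
}
// ---------------------------------------------------------------------------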
- let inputs = bare_fn_ty.inputs(); - let late_bound_in_args = tcx.collect_constrained_late_bound_regions( - &inputs.map_bound(|i| i.to_owned())); - let output = bare_fn_ty.output(); - let late_bound_in_ret = tcx.collect_referenced_late_bound_regions(&output); - for br in late_bound_in_ret.difference(&late_bound_in_args) { - let br_name = match *br { - ty::BrNamed(_, name) => name, - _ => { - span_bug!( - bf.decl.output.span(), - "anonymous bound region {:?} in return but not args", - br); - } - }; - struct_span_err!(tcx.sess, - ast_ty.span, - E0581, - "return type references lifetime `{}`, \ - which does not appear in the fn input types", - br_name) - .emit(); - } - tcx.mk_fn_ptr(bare_fn_ty) + tcx.mk_fn_ptr(self.ty_of_fn(bf.unsafety, bf.abi, &bf.decl)) } hir::TyTraitObject(ref bounds, ref lifetime) => { self.conv_object_ty_poly_trait_ref(ast_ty.span, bounds, lifetime) @@ -1119,11 +1095,14 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { self.associated_path_def_to_ty(ast_ty.id, ast_ty.span, ty, def, segment).0 } hir::TyArray(ref ty, length) => { - if let Ok(length) = eval_length(tcx, length, "array length") { - tcx.mk_array(self.ast_ty_to_ty(&ty), length) - } else { - self.tcx().types.err - } + let length_def_id = tcx.hir.body_owner_def_id(length); + let substs = Substs::identity_for_item(tcx, length_def_id); + let length = tcx.mk_const(ty::Const { + val: ConstVal::Unevaluated(length_def_id, substs), + ty: tcx.types.usize + }); + let array_ty = tcx.mk_ty(ty::TyArray(self.ast_ty_to_ty(&ty), length)); + self.normalize_ty(ast_ty.span, array_ty) } hir::TyTypeof(ref _e) => { struct_span_err!(tcx.sess, ast_ty.span, E0516, @@ -1145,6 +1124,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { } }; + self.record_ty(ast_ty.hir_id, result_ty, ast_ty.span); result_ty } @@ -1154,8 +1134,10 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { -> Ty<'tcx> { match ty.node { - hir::TyInfer if expected_ty.is_some() => expected_ty.unwrap(), - hir::TyInfer => self.ty_infer(ty.span), + hir::TyInfer if expected_ty.is_some() => { + self.record_ty(ty.hir_id, expected_ty.unwrap(), ty.span); + expected_ty.unwrap() + } _ => self.ast_ty_to_ty(ty), } } @@ -1167,23 +1149,56 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { -> ty::PolyFnSig<'tcx> { debug!("ty_of_fn"); + let tcx = self.tcx(); let input_tys: Vec = decl.inputs.iter().map(|a| self.ty_of_arg(a, None)).collect(); let output_ty = match decl.output { hir::Return(ref output) => self.ast_ty_to_ty(output), - hir::DefaultReturn(..) => self.tcx().mk_nil(), + hir::DefaultReturn(..) => tcx.mk_nil(), }; debug!("ty_of_fn: output_ty={:?}", output_ty); - ty::Binder(self.tcx().mk_fn_sig( + let bare_fn_ty = ty::Binder(tcx.mk_fn_sig( input_tys.into_iter(), output_ty, decl.variadic, unsafety, abi - )) + )); + + // Find any late-bound regions declared in return type that do + // not appear in the arguments. These are not wellformed. 
+ // + // Example: + // for<'a> fn() -> &'a str <-- 'a is bad + // for<'a> fn(&'a String) -> &'a str <-- 'a is ok + let inputs = bare_fn_ty.inputs(); + let late_bound_in_args = tcx.collect_constrained_late_bound_regions( + &inputs.map_bound(|i| i.to_owned())); + let output = bare_fn_ty.output(); + let late_bound_in_ret = tcx.collect_referenced_late_bound_regions(&output); + for br in late_bound_in_ret.difference(&late_bound_in_args) { + let br_name = match *br { + ty::BrNamed(_, name) => name, + _ => { + span_bug!( + decl.output.span(), + "anonymous bound region {:?} in return but not args", + br); + } + }; + struct_span_err!(tcx.sess, + decl.output.span(), + E0581, + "return type references lifetime `{}`, \ + which does not appear in the fn input types", + br_name) + .emit(); + } + + bare_fn_ty } pub fn ty_of_closure(&self, @@ -1211,19 +1226,22 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { let expected_ret_ty = expected_sig.as_ref().map(|e| e.output()); - let is_infer = match decl.output { - hir::Return(ref output) if output.node == hir::TyInfer => true, - hir::DefaultReturn(..) => true, - _ => false - }; - let output_ty = match decl.output { - _ if is_infer && expected_ret_ty.is_some() => - expected_ret_ty.unwrap(), - _ if is_infer => self.ty_infer(decl.output.span()), - hir::Return(ref output) => - self.ast_ty_to_ty(&output), - hir::DefaultReturn(..) => bug!(), + hir::Return(ref output) => { + if let (&hir::TyInfer, Some(expected_ret_ty)) = (&output.node, expected_ret_ty) { + self.record_ty(output.hir_id, expected_ret_ty, output.span); + expected_ret_ty + } else { + self.ast_ty_to_ty(&output) + } + } + hir::DefaultReturn(span) => { + if let Some(expected_ret_ty) = expected_ret_ty { + expected_ret_ty + } else { + self.ty_infer(span) + } + } }; debug!("ty_of_closure: output_ty={:?}", output_ty); @@ -1292,18 +1310,19 @@ fn split_auto_traits<'a, 'b, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, Def::Trait(trait_did) => { // Checks whether `trait_did` refers to one of the builtin // traits, like `Send`, and adds it to `auto_traits` if so. 
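// ---------------------------------------------------------------------------
// [Editorial illustration, not part of the patch] `split_auto_traits` above
// peels auto-trait bounds such as `Send`/`Sync` out of a trait-object type
// and rejects type or lifetime arguments on them. At the surface level that
// is what permits object types like the one below (written with the modern
// `dyn` syntax):
use std::fmt::Debug;

fn take_object(x: Box<dyn Debug + Send + Sync>) {
    println!("{:?}", x);
}

fn main() {
    take_object(Box::new(42u32));
}
// ---------------------------------------------------------------------------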
- if Some(trait_did) == tcx.lang_items.send_trait() || - Some(trait_did) == tcx.lang_items.sync_trait() { + if Some(trait_did) == tcx.lang_items().send_trait() || + Some(trait_did) == tcx.lang_items().sync_trait() { let segments = &bound.trait_ref.path.segments; - let parameters = &segments[segments.len() - 1].parameters; - if !parameters.types.is_empty() { - check_type_argument_count(tcx, bound.trait_ref.path.span, - parameters.types.len(), &[]); - } - if !parameters.lifetimes.is_empty() { - report_lifetime_number_error(tcx, bound.trait_ref.path.span, - parameters.lifetimes.len(), 0); - } + segments[segments.len() - 1].with_parameters(|parameters| { + if !parameters.types.is_empty() { + check_type_argument_count(tcx, bound.trait_ref.path.span, + parameters.types.len(), &[]); + } + if !parameters.lifetimes.is_empty() { + report_lifetime_number_error(tcx, bound.trait_ref.path.span, + parameters.lifetimes.len(), 0); + } + }); true } else { false @@ -1406,7 +1425,7 @@ impl<'a, 'gcx, 'tcx> Bounds<'tcx> { // If it could be sized, and is, add the sized predicate if self.implicitly_sized { - if let Some(sized) = tcx.lang_items.sized_trait() { + if let Some(sized) = tcx.lang_items().sized_trait() { let trait_ref = ty::TraitRef { def_id: sized, substs: tcx.mk_substs_trait(param_ty, &[]) diff --git a/src/librustc_typeck/check/_match.rs b/src/librustc_typeck/check/_match.rs index b49b9377e8..ab8994bcae 100644 --- a/src/librustc_typeck/check/_match.rs +++ b/src/librustc_typeck/check/_match.rs @@ -27,25 +27,111 @@ use syntax::ptr::P; use syntax_pos::Span; impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { - pub fn check_pat(&self, pat: &'gcx hir::Pat, expected: Ty<'tcx>) { - self.check_pat_arg(pat, expected, false); - } - /// The `is_arg` argument indicates whether this pattern is the /// *outermost* pattern in an argument (e.g., in `fn foo(&x: /// &u32)`, it is true for the `&x` pattern but not `x`). This is /// used to tailor error reporting. - pub fn check_pat_arg(&self, pat: &'gcx hir::Pat, expected: Ty<'tcx>, is_arg: bool) { + pub fn check_pat_walk( + &self, + pat: &'gcx hir::Pat, + mut expected: Ty<'tcx>, + mut def_bm: ty::BindingMode, + is_arg: bool) + { let tcx = self.tcx; - debug!("check_pat(pat={:?},expected={:?},is_arg={})", pat, expected, is_arg); + debug!("check_pat_walk(pat={:?},expected={:?},def_bm={:?},is_arg={})", + pat, expected, def_bm, is_arg); + + let is_non_ref_pat = match pat.node { + PatKind::Struct(..) | + PatKind::TupleStruct(..) | + PatKind::Tuple(..) | + PatKind::Box(_) | + PatKind::Range(..) | + PatKind::Slice(..) => true, + PatKind::Lit(ref lt) => { + let ty = self.check_expr(lt); + match ty.sty { + ty::TypeVariants::TyRef(..) => false, + _ => true, + } + } + PatKind::Path(ref qpath) => { + let (def, _, _) = self.resolve_ty_and_def_ufcs(qpath, pat.id, pat.span); + match def { + Def::Const(..) | Def::AssociatedConst(..) => false, + _ => true, + } + } + PatKind::Wild | + PatKind::Binding(..) | + PatKind::Ref(..) => false, + }; + if is_non_ref_pat && tcx.sess.features.borrow().match_default_bindings { + debug!("pattern is non reference pattern"); + let mut exp_ty = self.resolve_type_vars_with_obligations(&expected); + + // Peel off as many `&` or `&mut` from the discriminant as possible. For example, + // for `match &&&mut Some(5)` the loop runs three times, aborting when it reaches + // the `Some(5)` which is not of type TyRef. + // + // For each ampersand peeled off, update the binding mode and push the original + // type into the adjustments vector. 
+ // + // See the examples in `run-pass/match-defbm*.rs`. + let mut pat_adjustments = vec![]; + expected = loop { + debug!("inspecting {:?} with type {:?}", exp_ty, exp_ty.sty); + match exp_ty.sty { + ty::TypeVariants::TyRef(_, ty::TypeAndMut{ + ty: inner_ty, mutbl: inner_mutability, + }) => { + debug!("current discriminant is TyRef, inserting implicit deref"); + // Preserve the reference type. We'll need it later during HAIR lowering. + pat_adjustments.push(exp_ty); + + exp_ty = inner_ty; + def_bm = match def_bm { + // If default binding mode is by value, make it `ref` or `ref mut` + // (depending on whether we observe `&` or `&mut`). + ty::BindByValue(_) => + ty::BindByReference(inner_mutability), + + // Once a `ref`, always a `ref`. This is because a `& &mut` can't mutate + // the underlying value. + ty::BindByReference(hir::Mutability::MutImmutable) => + ty::BindByReference(hir::Mutability::MutImmutable), + + // When `ref mut`, stay a `ref mut` (on `&mut`) or downgrade to `ref` + // (on `&`). + ty::BindByReference(hir::Mutability::MutMutable) => + ty::BindByReference(inner_mutability), + }; + }, + _ => break exp_ty, + } + }; + if pat_adjustments.len() > 0 { + debug!("default binding mode is now {:?}", def_bm); + self.inh.tables.borrow_mut() + .pat_adjustments_mut() + .insert(pat.hir_id, pat_adjustments); + } + } + + // Lose mutability now that we know binding mode and discriminant type. + let def_bm = def_bm; + let expected = expected; let ty = match pat.node { PatKind::Wild => { expected } PatKind::Lit(ref lt) => { - let ty = self.check_expr(<); + // We've already computed the type above (when checking for a non-ref pat), so + // avoid computing it again. + let ty = self.node_ty(lt.hir_id); // Byte string patterns behave the same way as array patterns // They can denote both statically and dynamically sized byte arrays @@ -113,11 +199,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.demand_eqtype(pat.span, expected, rhs_ty); common_type } - PatKind::Binding(ba, def_id, _, ref sub) => { - // Note the binding mode in the typeck tables. For now, what we store is always - // identical to what could be scraped from the HIR, but this will change with - // default binding modes (#42640). 
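// ---------------------------------------------------------------------------
// [Editorial illustration, not part of the patch] The peeling loop above
// implements default binding modes ("match ergonomics", issue #42640, gated
// behind `match_default_bindings` at the time of this patch and stabilized in
// later releases): matching a reference with a non-reference pattern inserts
// implicit derefs and turns the bindings into `ref` bindings.
fn main() {
    let opt = Some(String::from("hi"));

    match &opt {
        // No `&` or `ref` is written, yet `s` is bound as `&String`, because
        // the default binding mode switched to by-reference once the `&` on
        // the discriminant was peeled off.
        Some(s) => println!("got {}", s),
        None => println!("nothing"),
    }

    // The match only borrowed `opt`, so it is still usable here.
    assert!(opt.is_some());
}
// ---------------------------------------------------------------------------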
- let bm = ty::BindingMode::convert(ba); + PatKind::Binding(ba, var_id, _, ref sub) => { + let bm = if ba == hir::BindingAnnotation::Unannotated { + def_bm + } else { + ty::BindingMode::convert(ba) + }; self.inh .tables .borrow_mut() @@ -149,26 +236,25 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // if there are multiple arms, make sure they all agree on // what the type of the binding `x` ought to be - let var_id = tcx.hir.as_local_node_id(def_id).unwrap(); if var_id != pat.id { let vt = self.local_ty(pat.span, var_id); self.demand_eqtype(pat.span, vt, typ); } if let Some(ref p) = *sub { - self.check_pat(&p, expected); + self.check_pat_walk(&p, expected, def_bm, true); } typ } PatKind::TupleStruct(ref qpath, ref subpats, ddpos) => { - self.check_pat_tuple_struct(pat, qpath, &subpats, ddpos, expected) + self.check_pat_tuple_struct(pat, qpath, &subpats, ddpos, expected, def_bm) } PatKind::Path(ref qpath) => { self.check_pat_path(pat, qpath, expected) } PatKind::Struct(ref qpath, ref fields, etc) => { - self.check_pat_struct(pat, qpath, fields, etc, expected) + self.check_pat_struct(pat, qpath, fields, etc, expected, def_bm) } PatKind::Tuple(ref elements, ddpos) => { let mut expected_len = elements.len(); @@ -189,7 +275,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let pat_ty = tcx.mk_ty(ty::TyTuple(element_tys, false)); self.demand_eqtype(pat.span, expected, pat_ty); for (i, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) { - self.check_pat(elem, &element_tys[i]); + self.check_pat_walk(elem, &element_tys[i], def_bm, true); } pat_ty } @@ -202,10 +288,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // think any errors can be introduced by using // `demand::eqtype`. self.demand_eqtype(pat.span, expected, uniq_ty); - self.check_pat(&inner, inner_ty); + self.check_pat_walk(&inner, inner_ty, def_bm, true); uniq_ty } else { - self.check_pat(&inner, tcx.types.err); + self.check_pat_walk(&inner, tcx.types.err, def_bm, true); tcx.types.err } } @@ -220,7 +306,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // can, to avoid creating needless variables. This // also helps with the bad interactions of the given // hack detailed in (*) below. 
- debug!("check_pat_arg: expected={:?}", expected); + debug!("check_pat_walk: expected={:?}", expected); let (rptr_ty, inner_ty) = match expected.sty { ty::TyRef(_, mt) if mt.mutbl == mutbl => { (expected, mt.ty) @@ -231,7 +317,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let mt = ty::TypeAndMut { ty: inner_ty, mutbl: mutbl }; let region = self.next_region_var(infer::PatternRegion(pat.span)); let rptr_ty = tcx.mk_ref(region, mt); - debug!("check_pat_arg: demanding {:?} = {:?}", expected, rptr_ty); + debug!("check_pat_walk: demanding {:?} = {:?}", expected, rptr_ty); let err = self.demand_eqtype_diag(pat.span, expected, rptr_ty); // Look for a case like `fn foo(&foo: u32)` and suggest @@ -254,10 +340,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } }; - self.check_pat(&inner, inner_ty); + self.check_pat_walk(&inner, inner_ty, def_bm, true); rptr_ty } else { - self.check_pat(&inner, tcx.types.err); + self.check_pat_walk(&inner, tcx.types.err, def_bm, true); tcx.types.err } } @@ -265,7 +351,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let expected_ty = self.structurally_resolved_type(pat.span, expected); let (inner_ty, slice_ty) = match expected_ty.sty { ty::TyArray(inner_ty, size) => { - let min_len = before.len() + after.len(); + let size = size.val.to_const_int().unwrap().to_u64().unwrap(); + let min_len = before.len() as u64 + after.len() as u64; if slice.is_none() { if min_len != size { struct_span_err!( @@ -314,13 +401,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }; for elt in before { - self.check_pat(&elt, inner_ty); + self.check_pat_walk(&elt, inner_ty, def_bm, true); } if let Some(ref slice) = *slice { - self.check_pat(&slice, slice_ty); + self.check_pat_walk(&slice, slice_ty, def_bm, true); } for elt in after { - self.check_pat(&elt, inner_ty); + self.check_pat_walk(&elt, inner_ty, def_bm, true); } expected_ty } @@ -329,7 +416,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.write_ty(pat.hir_id, ty); // (*) In most of the cases above (literals and constants being - // the exception), we relate types using strict equality, evewn + // the exception), we relate types using strict equality, even // though subtyping would be sufficient. There are a few reasons // for this, some of which are fairly subtle and which cost me // (nmatsakis) an hour or two debugging to remember, so I thought @@ -339,7 +426,6 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // cause some inconvenience. What we are saying is that the type // of `x` becomes *exactly* what is expected. This can cause unnecessary // errors in some cases, such as this one: - // it will cause errors in a case like this: // // ``` // fn foo<'x>(x: &'x int) { @@ -409,11 +495,6 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // use the *precise* type of the discriminant, *not* some supertype, as // the "discriminant type" (issue #23116). // - // FIXME(tschottdorf): don't call contains_explicit_ref_binding, which - // is problematic as the HIR is being scraped, but ref bindings may be - // implicit after #42640. We need to make sure that pat_adjustments - // (once introduced) is populated by the time we get here. - // // arielb1 [writes here in this comment thread][c] that there // is certainly *some* potential danger, e.g. 
for an example // like: @@ -455,7 +536,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // assert_eq!(foo.0.0, 42); // } // ``` - + // + // FIXME(tschottdorf): don't call contains_explicit_ref_binding, which + // is problematic as the HIR is being scraped, but ref bindings may be + // implicit after #42640. We need to make sure that pat_adjustments + // (once introduced) is populated by the time we get here. + // + // See #44848. let contains_ref_bindings = arms.iter() .filter_map(|a| a.contains_explicit_ref_binding()) .max_by_key(|m| match *m { @@ -495,7 +582,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let mut all_pats_diverge = Diverges::WarnedAlways; for p in &arm.pats { self.diverges.set(Diverges::Maybe); - self.check_pat(&p, discrim_ty); + self.check_pat_walk(&p, discrim_ty, + ty::BindingMode::BindByValue(hir::Mutability::MutImmutable), true); all_pats_diverge &= self.diverges.get(); } @@ -576,14 +664,15 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { qpath: &hir::QPath, fields: &'gcx [Spanned], etc: bool, - expected: Ty<'tcx>) -> Ty<'tcx> + expected: Ty<'tcx>, + def_bm: ty::BindingMode) -> Ty<'tcx> { // Resolve the path and check the definition for errors. let (variant, pat_ty) = if let Some(variant_ty) = self.check_struct_path(qpath, pat.id) { variant_ty } else { for field in fields { - self.check_pat(&field.node.pat, self.tcx.types.err); + self.check_pat_walk(&field.node.pat, self.tcx.types.err, def_bm, true); } return self.tcx.types.err; }; @@ -592,7 +681,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.demand_eqtype(pat.span, expected, pat_ty); // Type check subpatterns. - self.check_struct_pat_fields(pat_ty, pat.id, pat.span, variant, fields, etc); + self.check_struct_pat_fields(pat_ty, pat.id, pat.span, variant, fields, etc, def_bm); pat_ty } @@ -637,12 +726,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { qpath: &hir::QPath, subpats: &'gcx [P], ddpos: Option, - expected: Ty<'tcx>) -> Ty<'tcx> + expected: Ty<'tcx>, + def_bm: ty::BindingMode) -> Ty<'tcx> { let tcx = self.tcx; let on_error = || { for pat in subpats { - self.check_pat(&pat, tcx.types.err); + self.check_pat_walk(&pat, tcx.types.err, def_bm, true); } }; let report_unexpected_def = |def: Def| { @@ -678,6 +768,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Replace constructor type with constructed type for tuple struct patterns. let pat_ty = pat_ty.fn_sig(tcx).output(); let pat_ty = tcx.no_late_bound_regions(&pat_ty).expect("expected fn type"); + self.demand_eqtype(pat.span, expected, pat_ty); // Type check subpatterns. @@ -689,7 +780,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }; for (i, subpat) in subpats.iter().enumerate_and_adjust(variant.fields.len(), ddpos) { let field_ty = self.field_ty(subpat.span, &variant.fields[i], substs); - self.check_pat(&subpat, field_ty); + self.check_pat_walk(&subpat, field_ty, def_bm, true); self.tcx.check_stability(variant.fields[i].did, pat.id, subpat.span); } @@ -715,7 +806,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { span: Span, variant: &'tcx ty::VariantDef, fields: &'gcx [Spanned], - etc: bool) { + etc: bool, + def_bm: ty::BindingMode) { let tcx = self.tcx; let (substs, kind_name) = match adt_ty.sty { @@ -772,7 +864,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } }; - self.check_pat(&field.pat, field_ty); + self.check_pat_walk(&field.pat, field_ty, def_bm, true); } // Report an error if incorrect number of the fields were specified. 
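// ---------------------------------------------------------------------------
// [Editorial illustration, not part of the patch] The hunk that follows adds
// a note to E0027 when a tuple variant is matched with a struct-style
// pattern. The two pattern forms, shown on an invented `Shape` enum:
enum Shape {
    Circle(f64),             // tuple variant: matched with parentheses
    Rect { w: f64, h: f64 }, // struct variant: matched with braces
}

fn area(s: &Shape) -> f64 {
    match *s {
        Shape::Circle(r) => 3.14159 * r * r,
        Shape::Rect { w, h } => w * h,
        // A struct-style pattern such as `Shape::Circle { }` leaves field `0`
        // unmentioned; that is the E0027 case the new note annotates.
    }
}

fn main() {
    assert_eq!(area(&Shape::Rect { w: 2.0, h: 3.0 }), 6.0);
    assert!((area(&Shape::Circle(1.0)) - 3.14159).abs() < 1e-9);
}
// ---------------------------------------------------------------------------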
@@ -787,11 +879,14 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { for field in variant.fields .iter() .filter(|field| !used_fields.contains_key(&field.name)) { - struct_span_err!(tcx.sess, span, E0027, - "pattern does not mention field `{}`", - field.name) - .span_label(span, format!("missing field `{}`", field.name)) - .emit(); + let mut diag = struct_span_err!(tcx.sess, span, E0027, + "pattern does not mention field `{}`", + field.name); + diag.span_label(span, format!("missing field `{}`", field.name)); + if variant.ctor_kind == CtorKind::Fn { + diag.note("trying to match a tuple variant with a struct variant pattern"); + } + diag.emit(); } } } diff --git a/src/librustc_typeck/check/autoderef.rs b/src/librustc_typeck/check/autoderef.rs index e0e946a9c6..a25deb7685 100644 --- a/src/librustc_typeck/check/autoderef.rs +++ b/src/librustc_typeck/check/autoderef.rs @@ -108,7 +108,7 @@ impl<'a, 'gcx, 'tcx> Autoderef<'a, 'gcx, 'tcx> { // let trait_ref = TraitRef { - def_id: match tcx.lang_items.deref_trait() { + def_id: match tcx.lang_items().deref_trait() { Some(f) => f, None => return None, }, diff --git a/src/librustc_typeck/check/callee.rs b/src/librustc_typeck/check/callee.rs index 460e2858b2..7461df8bda 100644 --- a/src/librustc_typeck/check/callee.rs +++ b/src/librustc_typeck/check/callee.rs @@ -28,7 +28,7 @@ use rustc::hir; /// to `trait_id` (this only cares about the trait, not the specific /// method that is called) pub fn check_legal_trait_for_method_call(tcx: TyCtxt, span: Span, trait_id: DefId) { - if tcx.lang_items.drop_trait() == Some(trait_id) { + if tcx.lang_items().drop_trait() == Some(trait_id) { struct_span_err!(tcx.sess, span, E0040, "explicit use of destructor method") .span_label(span, "explicit destructor calls not allowed") .emit(); @@ -38,6 +38,7 @@ pub fn check_legal_trait_for_method_call(tcx: TyCtxt, span: Span, trait_id: DefI enum CallStep<'tcx> { Builtin(Ty<'tcx>), DeferredClosure(ty::FnSig<'tcx>), + /// e.g. enum variant constructors Overloaded(MethodCallee<'tcx>), } @@ -156,9 +157,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { MethodCallee<'tcx>)> { // Try the options that are least restrictive on the caller first. for &(opt_trait_def_id, method_name, borrow) in - &[(self.tcx.lang_items.fn_trait(), Symbol::intern("call"), true), - (self.tcx.lang_items.fn_mut_trait(), Symbol::intern("call_mut"), true), - (self.tcx.lang_items.fn_once_trait(), Symbol::intern("call_once"), false)] { + &[(self.tcx.lang_items().fn_trait(), Symbol::intern("call"), true), + (self.tcx.lang_items().fn_mut_trait(), Symbol::intern("call_mut"), true), + (self.tcx.lang_items().fn_once_trait(), Symbol::intern("call_once"), false)] { let trait_def_id = match opt_trait_def_id { Some(def_id) => def_id, None => continue, @@ -226,10 +227,15 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } else { Def::Err }; - if def != Def::Err { - if let Some(span) = self.tcx.hir.span_if_local(def.def_id()) { - err.span_note(span, "defined here"); + let def_span = match def { + Def::Err => None, + Def::Local(id) | Def::Upvar(id, ..) => { + Some(self.tcx.hir.span(id)) } + _ => self.tcx.hir.span_if_local(def.def_id()) + }; + if let Some(span) = def_span { + err.span_note(span, "defined here"); } } diff --git a/src/librustc_typeck/check/closure.rs b/src/librustc_typeck/check/closure.rs index 61795a7e62..07159770d5 100644 --- a/src/librustc_typeck/check/closure.rs +++ b/src/librustc_typeck/check/closure.rs @@ -70,22 +70,29 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // inference phase (`upvar.rs`). 
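// ---------------------------------------------------------------------------
// [Editorial illustration, not part of the patch] `check_closure` above and
// the expectation-deduction changes that follow recover a closure's signature
// and kind from the expected `Fn*` bound. At the surface level that is what
// lets the closure below omit its parameter and return types:
fn apply<F: Fn(u32) -> u32>(f: F, x: u32) -> u32 {
    f(x)
}

fn main() {
    // `|n| n + 1` takes its `u32 -> u32` signature from the expected
    // `Fn(u32) -> u32` bound rather than from explicit annotations.
    assert_eq!(apply(|n| n + 1, 41), 42);
}
// ---------------------------------------------------------------------------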
let base_substs = Substs::identity_for_item(self.tcx, self.tcx.closure_base_def_id(expr_def_id)); - let closure_type = self.tcx.mk_closure(expr_def_id, - base_substs.extend_to(self.tcx, expr_def_id, + let substs = base_substs.extend_to(self.tcx, expr_def_id, |_, _| span_bug!(expr.span, "closure has region param"), |_, _| self.infcx.next_ty_var(TypeVariableOrigin::TransformedUpvar(expr.span)) - ) ); - debug!("check_closure: expr.id={:?} closure_type={:?}", expr.id, closure_type); - let fn_sig = self.liberate_late_bound_regions(expr_def_id, &sig); let fn_sig = self.inh.normalize_associated_types_in(body.value.span, body.value.id, self.param_env, &fn_sig); - check_fn(self, self.param_env, fn_sig, decl, expr.id, body); + let interior = check_fn(self, self.param_env, fn_sig, decl, expr.id, body, true).1; + + if let Some(interior) = interior { + let closure_substs = ty::ClosureSubsts { + substs: substs, + }; + return self.tcx.mk_generator(expr_def_id, closure_substs, interior); + } + + let closure_type = self.tcx.mk_closure(expr_def_id, substs); + + debug!("check_closure: expr.id={:?} closure_type={:?}", expr.id, closure_type); // Tuple up the arguments and insert the resulting function type into // the `closures` table. @@ -132,7 +139,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }) .next(); let kind = object_type.principal() - .and_then(|p| self.tcx.lang_items.fn_trait_kind(p.def_id())); + .and_then(|p| self.tcx.lang_items().fn_trait_kind(p.def_id())); (sig, kind) } ty::TyInfer(ty::TyVar(vid)) => self.deduce_expectations_from_obligations(vid), @@ -185,6 +192,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ty::Predicate::TypeOutlives(..) => None, ty::Predicate::WellFormed(..) => None, ty::Predicate::ObjectSafe(..) => None, + ty::Predicate::ConstEvaluatable(..) => None, // NB: This predicate is created by breaking down a // `ClosureType: FnFoo()` predicate, where @@ -197,7 +205,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ty::Predicate::ClosureKind(..) => None, }; opt_trait_ref.and_then(|tr| self.self_type_matches_expected_vid(tr, expected_vid)) - .and_then(|tr| self.tcx.lang_items.fn_trait_kind(tr.def_id())) + .and_then(|tr| self.tcx.lang_items().fn_trait_kind(tr.def_id())) }) .fold(None, |best, cur| Some(best.map_or(cur, |best| cmp::min(best, cur)))); @@ -216,7 +224,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let trait_ref = projection.to_poly_trait_ref(tcx); - if tcx.lang_items.fn_trait_kind(trait_ref.def_id()).is_none() { + if tcx.lang_items().fn_trait_kind(trait_ref.def_id()).is_none() { return None; } diff --git a/src/librustc_typeck/check/coercion.rs b/src/librustc_typeck/check/coercion.rs index e406ce845a..94422f93e5 100644 --- a/src/librustc_typeck/check/coercion.rs +++ b/src/librustc_typeck/check/coercion.rs @@ -187,7 +187,11 @@ impl<'f, 'gcx, 'tcx> Coerce<'f, 'gcx, 'tcx> { } // Consider coercing the subtype to a DST - let unsize = self.coerce_unsized(a, b); + // + // NOTE: this is wrapped in a `commit_if_ok` because it creates + // a "spurious" type variable, and we don't want to have that + // type variable in memory if the coercion fails. 
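// A hedged, standalone analogue (editor-added) of the `commit_if_ok` pattern
// the note above relies on: run a fallible step against a snapshot and keep
// its side effects only on success, so that "spurious" entries created by a
// failed attempt do not linger. `Table` and its fields are invented for
// illustration; they are not rustc APIs.
struct Table {
    vars: Vec<String>,
}

impl Table {
    fn commit_if_ok<T, E, F>(&mut self, f: F) -> Result<T, E>
    where
        F: FnOnce(&mut Table) -> Result<T, E>,
    {
        let snapshot = self.vars.len();
        let result = f(&mut *self);
        if result.is_err() {
            // Roll back anything the failed attempt created.
            self.vars.truncate(snapshot);
        }
        result
    }
}

fn main() {
    let mut table = Table { vars: Vec::new() };
    let failed: Result<(), ()> = table.commit_if_ok(|t| {
        t.vars.push(String::from("$tmp0")); // would linger without the rollback
        Err(())
    });
    assert!(failed.is_err());
    assert!(table.vars.is_empty()); // the temporary entry was discarded
}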
+ let unsize = self.commit_if_ok(|_| self.coerce_unsized(a, b)); if unsize.is_ok() { debug!("coerce: unsize successful"); return unsize; @@ -438,8 +442,8 @@ impl<'f, 'gcx, 'tcx> Coerce<'f, 'gcx, 'tcx> { fn coerce_unsized(&self, source: Ty<'tcx>, target: Ty<'tcx>) -> CoerceResult<'tcx> { debug!("coerce_unsized(source={:?}, target={:?})", source, target); - let traits = (self.tcx.lang_items.unsize_trait(), - self.tcx.lang_items.coerce_unsized_trait()); + let traits = (self.tcx.lang_items().unsize_trait(), + self.tcx.lang_items().coerce_unsized_trait()); let (unsize_did, coerce_unsized_did) = if let (Some(u), Some(cu)) = traits { (u, cu) } else { diff --git a/src/librustc_typeck/check/compare_method.rs b/src/librustc_typeck/check/compare_method.rs index bf134f9547..b21d488861 100644 --- a/src/librustc_typeck/check/compare_method.rs +++ b/src/librustc_typeck/check/compare_method.rs @@ -11,7 +11,7 @@ use rustc::hir::{self, ImplItemKind, TraitItemKind}; use rustc::infer::{self, InferOk}; use rustc::middle::free_region::FreeRegionMap; -use rustc::middle::region::RegionMaps; +use rustc::middle::region; use rustc::ty::{self, TyCtxt}; use rustc::traits::{self, ObligationCause, ObligationCauseCode, Reveal}; use rustc::ty::error::{ExpectedFound, TypeError}; @@ -340,10 +340,12 @@ fn compare_predicate_entailment<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // region obligations that get overlooked. The right // thing to do is the code below. But we keep this old // pass around temporarily. - let region_maps = RegionMaps::new(); + let region_scope_tree = region::ScopeTree::default(); let mut free_regions = FreeRegionMap::new(); free_regions.relate_free_regions_from_predicates(¶m_env.caller_bounds); - infcx.resolve_regions_and_report_errors(impl_m.def_id, ®ion_maps, &free_regions); + infcx.resolve_regions_and_report_errors(impl_m.def_id, + ®ion_scope_tree, + &free_regions); } else { let fcx = FnCtxt::new(&inh, param_env, impl_m_node_id); fcx.regionck_item(impl_m_node_id, impl_m_span, &[]); diff --git a/src/librustc_typeck/check/demand.rs b/src/librustc_typeck/check/demand.rs index fc241c023c..7110a1ba81 100644 --- a/src/librustc_typeck/check/demand.rs +++ b/src/librustc_typeck/check/demand.rs @@ -207,7 +207,29 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { expected: Ty<'tcx>) -> Option { match (&expected.sty, &checked_ty.sty) { - (&ty::TyRef(_, _), &ty::TyRef(_, _)) => None, + (&ty::TyRef(_, exp), &ty::TyRef(_, check)) => match (&exp.ty.sty, &check.ty.sty) { + (&ty::TyStr, &ty::TyArray(arr, _)) | + (&ty::TyStr, &ty::TySlice(arr)) if arr == self.tcx.types.u8 => { + if let hir::ExprLit(_) = expr.node { + let sp = self.sess().codemap().call_span_if_macro(expr.span); + if let Ok(src) = self.tcx.sess.codemap().span_to_snippet(sp) { + return Some(format!("try `{}`", &src[1..])); + } + } + None + }, + (&ty::TyArray(arr, _), &ty::TyStr) | + (&ty::TySlice(arr), &ty::TyStr) if arr == self.tcx.types.u8 => { + if let hir::ExprLit(_) = expr.node { + let sp = self.sess().codemap().call_span_if_macro(expr.span); + if let Ok(src) = self.tcx.sess.codemap().span_to_snippet(sp) { + return Some(format!("try `b{}`", src)); + } + } + None + } + _ => None, + }, (&ty::TyRef(_, mutability), _) => { // Check if it can work when put into a ref. For example: // @@ -239,6 +261,39 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } None } + (_, &ty::TyRef(_, checked)) => { + // We have `&T`, check if what was expected was `T`. If so, + // we may want to suggest adding a `*`, or removing + // a `&`. 
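// Editor-added illustration of the user-level mismatch the hunk above targets:
// passing `&T` where `T` was expected. For a `Copy` type the suggestion is to
// dereference (`*r`); when the argument is literally `&expr`, the suggestion is
// to drop the `&`. The exact diagnostic wording is not guaranteed by this sketch.
fn takes_i32(_: i32) {}

fn main() {
    let x = 1;
    let r = &x;
    // `takes_i32(r)` would not compile: expected `i32`, found `&i32`.
    takes_i32(*r); // suggested fix: add `*` (only offered when the type is Copy)
    // `takes_i32(&x)` would not compile either; removing the `&` fixes it.
    takes_i32(x);
}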
+ // + // (But, also check check the `expn_info()` to see if this is + // a macro; if so, it's hard to extract the text and make a good + // suggestion, so don't bother.) + if self.infcx.can_sub(self.param_env, checked.ty, &expected).is_ok() && + expr.span.ctxt().outer().expn_info().is_none() { + match expr.node { + // Maybe remove `&`? + hir::ExprAddrOf(_, ref expr) => { + if let Ok(code) = self.tcx.sess.codemap().span_to_snippet(expr.span) { + return Some(format!("try with `{}`", code)); + } + } + + // Maybe add `*`? Only if `T: Copy`. + _ => { + if !self.infcx.type_moves_by_default(self.param_env, + checked.ty, + expr.span) { + let sp = self.sess().codemap().call_span_if_macro(expr.span); + if let Ok(code) = self.tcx.sess.codemap().span_to_snippet(sp) { + return Some(format!("try with `*{}`", code)); + } + } + }, + } + } + None + } _ => None, } } diff --git a/src/librustc_typeck/check/dropck.rs b/src/librustc_typeck/check/dropck.rs index 72ff9eb6f5..610d07efa3 100644 --- a/src/librustc_typeck/check/dropck.rs +++ b/src/librustc_typeck/check/dropck.rs @@ -13,7 +13,7 @@ use check::regionck::RegionCtxt; use hir::def_id::DefId; use middle::free_region::FreeRegionMap; use rustc::infer::{self, InferOk}; -use rustc::middle::region::{self, RegionMaps}; +use rustc::middle::region; use rustc::ty::subst::{Subst, Substs}; use rustc::ty::{self, Ty, TyCtxt}; use rustc::traits::{self, ObligationCause}; @@ -114,9 +114,9 @@ fn ensure_drop_params_and_item_params_correspond<'a, 'tcx>( return Err(ErrorReported); } - let region_maps = RegionMaps::new(); + let region_scope_tree = region::ScopeTree::default(); let free_regions = FreeRegionMap::new(); - infcx.resolve_regions_and_report_errors(drop_impl_did, ®ion_maps, &free_regions); + infcx.resolve_regions_and_report_errors(drop_impl_did, ®ion_scope_tree, &free_regions); Ok(()) }) } @@ -268,16 +268,16 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>( /// pub fn check_safety_of_destructor_if_necessary<'a, 'gcx, 'tcx>( rcx: &mut RegionCtxt<'a, 'gcx, 'tcx>, - ty: ty::Ty<'tcx>, + ty: Ty<'tcx>, span: Span, - scope: region::CodeExtent) + scope: region::Scope) -> Result<(), ErrorReported> { debug!("check_safety_of_destructor_if_necessary typ: {:?} scope: {:?}", ty, scope); - let parent_scope = match rcx.region_maps.opt_encl_scope(scope) { + let parent_scope = match rcx.region_scope_tree.opt_encl_scope(scope) { Some(parent_scope) => parent_scope, // If no enclosing scope, then it must be the root scope // which cannot be outlived. diff --git a/src/librustc_typeck/check/generator_interior.rs b/src/librustc_typeck/check/generator_interior.rs new file mode 100644 index 0000000000..af1297697c --- /dev/null +++ b/src/librustc_typeck/check/generator_interior.rs @@ -0,0 +1,133 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This calculates the types which has storage which lives across a suspension point in a +//! generator from the perspective of typeck. The actual types used at runtime +//! is calculated in `rustc_mir::transform::generator` and may be a subset of the +//! types computed here. 
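// A hedged, editor-added sketch (nightly-only at the time, behind the unstable
// `generators` feature as it existed around this release) of what "lives across
// a suspension point" means for the interior computed by this module: `xs` is
// still needed after the `yield`, so its type must be recorded in the interior,
// while `len` is only used at the yield itself and need not be.
#![feature(generators)]

fn main() {
    let _gen = || {
        let xs = vec![1, 2, 3]; // live across the yield => part of the interior
        let len = xs.len();     // not needed after the yield
        yield len;
        println!("still using xs: {:?}", xs);
    };
}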
+ +use rustc::hir::def_id::DefId; +use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; +use rustc::hir::{self, Pat, PatKind, Expr}; +use rustc::middle::region; +use rustc::ty::Ty; +use std::rc::Rc; +use super::FnCtxt; +use util::nodemap::FxHashMap; + +struct InteriorVisitor<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, + types: FxHashMap, usize>, + region_scope_tree: Rc, + expr_count: usize, +} + +impl<'a, 'gcx, 'tcx> InteriorVisitor<'a, 'gcx, 'tcx> { + fn record(&mut self, ty: Ty<'tcx>, scope: Option, expr: Option<&'tcx Expr>) { + use syntax_pos::DUMMY_SP; + + let live_across_yield = scope.map_or(Some(DUMMY_SP), |s| { + self.region_scope_tree.yield_in_scope(s).and_then(|(span, expr_count)| { + // If we are recording an expression that is the last yield + // in the scope, or that has a postorder CFG index larger + // than the one of all of the yields, then its value can't + // be storage-live (and therefore live) at any of the yields. + // + // See the mega-comment at `yield_in_scope` for a proof. + if expr_count >= self.expr_count { + Some(span) + } else { + None + } + }) + }); + + if let Some(span) = live_across_yield { + let ty = self.fcx.resolve_type_vars_if_possible(&ty); + + debug!("type in expr = {:?}, scope = {:?}, type = {:?}, span = {:?}", + expr, scope, ty, span); + + // Map the type to the number of types added before it + let entries = self.types.len(); + self.types.entry(&ty).or_insert(entries); + } else { + debug!("no type in expr = {:?}, span = {:?}", expr, expr.map(|e| e.span)); + } + } +} + +pub fn resolve_interior<'a, 'gcx, 'tcx>(fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, + def_id: DefId, + body_id: hir::BodyId, + witness: Ty<'tcx>) { + let body = fcx.tcx.hir.body(body_id); + let mut visitor = InteriorVisitor { + fcx, + types: FxHashMap(), + region_scope_tree: fcx.tcx.region_scope_tree(def_id), + expr_count: 0, + }; + intravisit::walk_body(&mut visitor, body); + + // Check that we visited the same amount of expressions and the RegionResolutionVisitor + let region_expr_count = visitor.region_scope_tree.body_expr_count(body_id).unwrap(); + assert_eq!(region_expr_count, visitor.expr_count); + + let mut types: Vec<_> = visitor.types.drain().collect(); + + // Sort types by insertion order + types.sort_by_key(|t| t.1); + + // Extract type components + let types: Vec<_> = types.into_iter().map(|t| t.0).collect(); + + let tuple = fcx.tcx.intern_tup(&types, false); + + debug!("Types in generator {:?}, span = {:?}", tuple, body.value.span); + + // Unify the tuple with the witness + match fcx.at(&fcx.misc(body.value.span), fcx.param_env).eq(witness, tuple) { + Ok(ok) => fcx.register_infer_ok_obligations(ok), + _ => bug!(), + } +} + +// This visitor has to have the same visit_expr calls as RegionResolutionVisitor in +// librustc/middle/region.rs since `expr_count` is compared against the results +// there. +impl<'a, 'gcx, 'tcx> Visitor<'tcx> for InteriorVisitor<'a, 'gcx, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::None + } + + fn visit_pat(&mut self, pat: &'tcx Pat) { + if let PatKind::Binding(..) 
= pat.node { + let scope = self.region_scope_tree.var_scope(pat.hir_id.local_id); + let ty = self.fcx.tables.borrow().pat_ty(pat); + self.record(ty, Some(scope), None); + } + + self.expr_count += 1; + + intravisit::walk_pat(self, pat); + } + + fn visit_expr(&mut self, expr: &'tcx Expr) { + intravisit::walk_expr(self, expr); + + self.expr_count += 1; + + let scope = self.region_scope_tree.temporary_scope(expr.hir_id.local_id); + + let ty = self.fcx.tables.borrow().expr_ty_adjusted(expr); + self.record(ty, scope, Some(expr)); + } +} diff --git a/src/librustc_typeck/check/intrinsic.rs b/src/librustc_typeck/check/intrinsic.rs index 96643ae72a..3861a358b2 100644 --- a/src/librustc_typeck/check/intrinsic.rs +++ b/src/librustc_typeck/check/intrinsic.rs @@ -313,6 +313,11 @@ pub fn check_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, (0, vec![tcx.mk_fn_ptr(fn_ty), mut_u8, mut_u8], tcx.types.i32) } + "align_offset" => { + let ptr_ty = tcx.mk_imm_ptr(tcx.mk_nil()); + (0, vec![ptr_ty, tcx.types.usize], tcx.types.usize) + }, + ref other => { struct_span_err!(tcx.sess, it.span, E0093, "unrecognized intrinsic function: `{}`", @@ -355,7 +360,7 @@ pub fn check_platform_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, match name["simd_shuffle".len()..].parse() { Ok(n) => { let params = vec![param(0), param(0), - tcx.mk_ty(ty::TyArray(tcx.types.u32, n))]; + tcx.mk_array(tcx.types.u32, n)]; (2, params, param(1)) } Err(_) => { @@ -418,8 +423,8 @@ fn match_intrinsic_type_to_type<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, position: &str, span: Span, - structural_to_nominal: &mut FxHashMap<&'a intrinsics::Type, ty::Ty<'tcx>>, - expected: &'a intrinsics::Type, t: ty::Ty<'tcx>) + structural_to_nominal: &mut FxHashMap<&'a intrinsics::Type, Ty<'tcx>>, + expected: &'a intrinsics::Type, t: Ty<'tcx>) { use intrinsics::Type::*; diff --git a/src/librustc_typeck/check/method/confirm.rs b/src/librustc_typeck/check/method/confirm.rs index db383b6305..17ed0aaa30 100644 --- a/src/librustc_typeck/check/method/confirm.rs +++ b/src/librustc_typeck/check/method/confirm.rs @@ -16,6 +16,7 @@ use hir::def_id::DefId; use rustc::ty::subst::Substs; use rustc::traits; use rustc::ty::{self, LvaluePreference, NoPreference, PreferMutLvalue, Ty}; +use rustc::ty::subst::Subst; use rustc::ty::adjustment::{Adjustment, Adjust, AutoBorrow, OverloadedDeref}; use rustc::ty::fold::TypeFoldable; use rustc::infer::{self, InferOk}; @@ -84,9 +85,6 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { // Adjust the self expression the user provided and obtain the adjusted type. let self_ty = self.adjust_self_ty(unadjusted_self_ty, &pick); - // Make sure nobody calls `drop()` explicitly. - self.enforce_illegal_method_limitations(&pick); - // Create substitutions for the method's type parameters. let rcvr_substs = self.fresh_receiver_substs(self_ty, &pick); let all_substs = self.instantiate_method_substs(&pick, segment, rcvr_substs); @@ -96,6 +94,22 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { // Create the final signature for the method, replacing late-bound regions. let (method_sig, method_predicates) = self.instantiate_method_sig(&pick, all_substs); + // Unify the (adjusted) self type with what the method expects. + // + // SUBTLE: if we want good error messages, because of "guessing" while matching + // traits, no trait system method can be called before this point because they + // could alter our Self-type, except for normalizing the receiver from the + // signature (which is also done during probing). 
+ let method_sig_rcvr = + self.normalize_associated_types_in(self.span, &method_sig.inputs()[0]); + self.unify_receivers(self_ty, method_sig_rcvr); + + let (method_sig, method_predicates) = + self.normalize_associated_types_in(self.span, &(method_sig, method_predicates)); + + // Make sure nobody calls `drop()` explicitly. + self.enforce_illegal_method_limitations(&pick); + // If there is a `Self: Sized` bound and `Self` is a trait object, it is possible that // something which derefs to `Self` actually implements the trait and the caller // wanted to make a static dispatch on it but forgot to import the trait. @@ -106,9 +120,6 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { // appropriate hint suggesting to import the trait. let illegal_sized_bound = self.predicates_require_illegal_sized_bound(&method_predicates); - // Unify the (adjusted) self type with what the method expects. - self.unify_receivers(self_ty, method_sig.inputs()[0]); - // Add any trait/regions obligations specified on the method's type parameters. // We won't add these if we encountered an illegal sized bound, so that we can use // a custom error in that case. @@ -232,24 +243,6 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { }) } - probe::ExtensionImplPick(impl_def_id) => { - // The method being invoked is the method as defined on the trait, - // so return the substitutions from the trait. Consider: - // - // impl Trait for Foo { ... } - // - // If we instantiate A, B, and C with $A, $B, and $C - // respectively, then we want to return the type - // parameters from the trait ([$A,$B]), not those from - // the impl ([$A,$B,$C]) not the receiver type ([$C]). - let impl_polytype = self.impl_self_ty(self.span, impl_def_id); - let impl_trait_ref = - self.instantiate_type_scheme(self.span, - impl_polytype.substs, - &self.tcx.impl_trait_ref(impl_def_id).unwrap()); - impl_trait_ref.substs - } - probe::TraitPick => { let trait_def_id = pick.item.container.id(); @@ -318,7 +311,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { if i < parent_substs.len() { parent_substs.region_at(i) } else if let Some(lifetime) - = provided.lifetimes.get(i - parent_substs.len()) { + = provided.as_ref().and_then(|p| p.lifetimes.get(i - parent_substs.len())) { AstConv::ast_region_to_region(self.fcx, lifetime, Some(def)) } else { self.region_var_for_def(self.span, def) @@ -328,7 +321,10 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { if i < parent_substs.len() { parent_substs.type_at(i) } else if let Some(ast_ty) - = provided.types.get(i - parent_substs.len() - method_generics.regions.len()) { + = provided.as_ref().and_then(|p| { + p.types.get(i - parent_substs.len() - method_generics.regions.len()) + }) + { self.to_ty(ast_ty) } else { self.type_var_for_def(self.span, def, cur_substs) @@ -353,6 +349,9 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { /////////////////////////////////////////////////////////////////////////// // + // NOTE: this returns the *unnormalized* predicates and method sig. Because of + // inference guessing, the predicates and method signature can't be normalized + // until we unify the `Self` type. 
fn instantiate_method_sig(&mut self, pick: &probe::Pick<'tcx>, all_substs: &'tcx Substs<'tcx>) @@ -367,8 +366,6 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { let def_id = pick.item.def_id; let method_predicates = self.tcx.predicates_of(def_id) .instantiate(self.tcx, all_substs); - let method_predicates = self.normalize_associated_types_in(self.span, - &method_predicates); debug!("method_predicates after subst = {:?}", method_predicates); @@ -384,7 +381,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { debug!("late-bound lifetimes from method instantiated, method_sig={:?}", method_sig); - let method_sig = self.instantiate_type_scheme(self.span, all_substs, &method_sig); + let method_sig = method_sig.subst(self.tcx, all_substs); debug!("type scheme substituted, method_sig={:?}", method_sig); (method_sig, method_predicates) @@ -558,7 +555,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { fn predicates_require_illegal_sized_bound(&self, predicates: &ty::InstantiatedPredicates<'tcx>) -> bool { - let sized_def_id = match self.tcx.lang_items.sized_trait() { + let sized_def_id = match self.tcx.lang_items().sized_trait() { Some(def_id) => def_id, None => return false, }; diff --git a/src/librustc_typeck/check/method/mod.rs b/src/librustc_typeck/check/method/mod.rs index 819f48a1b5..3ddeba9d44 100644 --- a/src/librustc_typeck/check/method/mod.rs +++ b/src/librustc_typeck/check/method/mod.rs @@ -15,7 +15,7 @@ use hir::def::Def; use hir::def_id::DefId; use rustc::ty::subst::Substs; use rustc::traits; -use rustc::ty::{self, ToPredicate, ToPolyTraitRef, TraitRef, TypeFoldable}; +use rustc::ty::{self, Ty, ToPredicate, ToPolyTraitRef, TraitRef, TypeFoldable}; use rustc::ty::subst::Subst; use rustc::infer::{self, InferOk}; @@ -52,10 +52,6 @@ pub enum MethodError<'tcx> { // Multiple methods might apply. Ambiguity(Vec), - // Using a `Fn`/`FnMut`/etc method on a raw closure type before we have inferred its kind. - ClosureAmbiguity(// DefId of fn trait - DefId), - // Found an applicable method, but it is not visible. The second argument contains a list of // not-in-scope traits which may work. PrivateMatch(Def, Vec), @@ -63,6 +59,9 @@ pub enum MethodError<'tcx> { // Found a `Self: Sized` bound where `Self` is a trait object, also the caller may have // forgotten to import a trait. IllegalSizedBound(Vec), + + // Found a match, but the return type is wrong + BadReturnType, } // Contains a list of static methods that may apply, a list of unsatisfied trait predicates which @@ -71,6 +70,7 @@ pub struct NoMatchData<'tcx> { pub static_candidates: Vec, pub unsatisfied_predicates: Vec>, pub out_of_scope_traits: Vec, + pub lev_candidate: Option, pub mode: probe::Mode, } @@ -78,12 +78,14 @@ impl<'tcx> NoMatchData<'tcx> { pub fn new(static_candidates: Vec, unsatisfied_predicates: Vec>, out_of_scope_traits: Vec, + lev_candidate: Option, mode: probe::Mode) -> Self { NoMatchData { static_candidates, unsatisfied_predicates, out_of_scope_traits, + lev_candidate, mode, } } @@ -103,7 +105,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pub fn method_exists(&self, span: Span, method_name: ast::Name, - self_ty: ty::Ty<'tcx>, + self_ty: Ty<'tcx>, call_expr_id: ast::NodeId, allow_private: bool) -> bool { @@ -113,9 +115,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { Ok(..) 
=> true, Err(NoMatch(..)) => false, Err(Ambiguity(..)) => true, - Err(ClosureAmbiguity(..)) => true, Err(PrivateMatch(..)) => allow_private, Err(IllegalSizedBound(..)) => true, + Err(BadReturnType) => { + bug!("no return type expectations but got BadReturnType") + } + } } @@ -134,7 +139,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { /// * `supplied_method_types`: the explicit method type parameters, if any (`T1..Tn`) /// * `self_expr`: the self expression (`foo`) pub fn lookup_method(&self, - self_ty: ty::Ty<'tcx>, + self_ty: Ty<'tcx>, segment: &hir::PathSegment, span: Span, call_expr: &'gcx hir::Expr, @@ -204,7 +209,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn lookup_probe(&self, span: Span, method_name: ast::Name, - self_ty: ty::Ty<'tcx>, + self_ty: Ty<'tcx>, call_expr: &'gcx hir::Expr, scope: ProbeScope) -> probe::PickResult<'tcx> { @@ -227,8 +232,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { span: Span, m_name: ast::Name, trait_def_id: DefId, - self_ty: ty::Ty<'tcx>, - opt_input_types: Option<&[ty::Ty<'tcx>]>) + self_ty: Ty<'tcx>, + opt_input_types: Option<&[Ty<'tcx>]>) -> Option>> { debug!("lookup_in_trait_adjusted(self_ty={:?}, \ m_name={}, trait_def_id={:?})", @@ -345,7 +350,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pub fn resolve_ufcs(&self, span: Span, method_name: ast::Name, - self_ty: ty::Ty<'tcx>, + self_ty: Ty<'tcx>, expr_id: ast::NodeId) -> Result> { let mode = probe::Mode::Path; @@ -368,7 +373,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { /// and return it, or `None`, if no such item was defined there. pub fn associated_item(&self, def_id: DefId, item_name: ast::Name) -> Option { - let ident = self.tcx.adjust(item_name, def_id, self.body_id).0; - self.tcx.associated_items(def_id).find(|item| item.name.to_ident() == ident) + self.tcx.associated_items(def_id) + .find(|item| self.tcx.hygienic_eq(item_name, item.name, def_id)) + } } diff --git a/src/librustc_typeck/check/method/probe.rs b/src/librustc_typeck/check/method/probe.rs index 096b778cab..a3b196f99d 100644 --- a/src/librustc_typeck/check/method/probe.rs +++ b/src/librustc_typeck/check/method/probe.rs @@ -18,29 +18,22 @@ use hir::def_id::DefId; use hir::def::Def; use rustc::ty::subst::{Subst, Substs}; use rustc::traits::{self, ObligationCause}; -use rustc::ty::{self, Ty, ToPolyTraitRef, TraitRef, TypeFoldable}; +use rustc::ty::{self, Ty, ToPolyTraitRef, ToPredicate, TraitRef, TypeFoldable}; use rustc::infer::type_variable::TypeVariableOrigin; use rustc::util::nodemap::FxHashSet; use rustc::infer::{self, InferOk}; use syntax::ast; +use syntax::util::lev_distance::{lev_distance, find_best_match_for_name}; use syntax_pos::Span; use rustc::hir; use std::mem; use std::ops::Deref; use std::rc::Rc; +use std::cmp::max; use self::CandidateKind::*; pub use self::PickKind::*; -pub enum LookingFor<'tcx> { - /// looking for methods with the given name; this is the normal case - MethodName(ast::Name), - - /// looking for methods that return a given type; this is used to - /// assemble suggestions - ReturnType(Ty<'tcx>), -} - /// Boolean flag used to indicate if this search is for a suggestion /// or not. If true, we can allow ambiguity and so forth. 
pub struct IsSuggestion(pub bool); @@ -49,9 +42,9 @@ struct ProbeContext<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, span: Span, mode: Mode, - looking_for: LookingFor<'tcx>, + method_name: Option, + return_type: Option>, steps: Rc>>, - opt_simplified_steps: Option>, inherent_candidates: Vec>, extension_candidates: Vec>, impl_dups: FxHashSet, @@ -60,6 +53,10 @@ struct ProbeContext<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { /// used for error reporting static_candidates: Vec, + /// When probing for names, include names that are close to the + /// requested name (by Levensthein distance) + allow_similar_names: bool, + /// Some(candidate) if there is a private candidate private_candidate: Option, @@ -85,6 +82,7 @@ struct CandidateStep<'tcx> { #[derive(Debug)] struct Candidate<'tcx> { xform_self_ty: Ty<'tcx>, + xform_ret_ty: Option>, item: ty::AssociatedItem, kind: CandidateKind<'tcx>, import_id: Option, @@ -95,17 +93,19 @@ enum CandidateKind<'tcx> { InherentImplCandidate(&'tcx Substs<'tcx>, // Normalize obligations Vec>), - ExtensionImplCandidate(// Impl - DefId, - &'tcx Substs<'tcx>, - // Normalize obligations - Vec>), ObjectCandidate, - TraitCandidate, + TraitCandidate(ty::TraitRef<'tcx>), WhereClauseCandidate(// Trait ty::PolyTraitRef<'tcx>), } +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +enum ProbeResult { + NoMatch, + BadReturnType, + Match, +} + #[derive(Debug, PartialEq, Eq, Clone)] pub struct Pick<'tcx> { pub item: ty::AssociatedItem, @@ -133,8 +133,6 @@ pub struct Pick<'tcx> { #[derive(Clone, Debug, PartialEq, Eq)] pub enum PickKind<'tcx> { InherentImplPick, - ExtensionImplPick(// Impl - DefId), ObjectPick, TraitPick, WhereClausePick(// Trait @@ -183,19 +181,19 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { return_type, scope_expr_id); let method_names = - self.probe_op(span, mode, LookingFor::ReturnType(return_type), IsSuggestion(true), + self.probe_op(span, mode, None, Some(return_type), IsSuggestion(true), self_ty, scope_expr_id, ProbeScope::TraitsInScope, |probe_cx| Ok(probe_cx.candidate_method_names())) .unwrap_or(vec![]); - method_names - .iter() - .flat_map(|&method_name| { - match self.probe_for_name(span, mode, method_name, IsSuggestion(true), self_ty, - scope_expr_id, ProbeScope::TraitsInScope) { - Ok(pick) => Some(pick.item), - Err(_) => None, - } - }) + method_names + .iter() + .flat_map(|&method_name| { + self.probe_op( + span, mode, Some(method_name), Some(return_type), + IsSuggestion(true), self_ty, scope_expr_id, + ProbeScope::TraitsInScope, |probe_cx| probe_cx.pick() + ).ok().map(|pick| pick.item) + }) .collect() } @@ -214,7 +212,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { scope_expr_id); self.probe_op(span, mode, - LookingFor::MethodName(item_name), + Some(item_name), + None, is_suggestion, self_ty, scope_expr_id, @@ -225,7 +224,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn probe_op(&'a self, span: Span, mode: Mode, - looking_for: LookingFor<'tcx>, + method_name: Option, + return_type: Option>, is_suggestion: IsSuggestion, self_ty: Ty<'tcx>, scope_expr_id: ast::NodeId, @@ -248,6 +248,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { return Err(MethodError::NoMatch(NoMatchData::new(Vec::new(), Vec::new(), Vec::new(), + None, mode))) } } @@ -259,24 +260,6 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }] }; - // Create a list of simplified self types, if we can. 
- let mut simplified_steps = Vec::new(); - for step in &steps { - match ty::fast_reject::simplify_type(self.tcx, step.self_ty, true) { - None => { - break; - } - Some(simplified_type) => { - simplified_steps.push(simplified_type); - } - } - } - let opt_simplified_steps = if simplified_steps.len() < steps.len() { - None // failed to convert at least one of the steps - } else { - Some(simplified_steps) - }; - debug!("ProbeContext: steps for self_ty={:?} are {:?}", self_ty, steps); @@ -285,8 +268,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // that we create during the probe process are removed later self.probe(|_| { let mut probe_cx = - ProbeContext::new(self, span, mode, looking_for, - steps, opt_simplified_steps); + ProbeContext::new(self, span, mode, method_name, return_type, Rc::new(steps)); probe_cx.assemble_inherent_candidates(); match scope { @@ -356,21 +338,22 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { fn new(fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, span: Span, mode: Mode, - looking_for: LookingFor<'tcx>, - steps: Vec>, - opt_simplified_steps: Option>) + method_name: Option, + return_type: Option>, + steps: Rc>>) -> ProbeContext<'a, 'gcx, 'tcx> { ProbeContext { fcx, span, mode, - looking_for, + method_name, + return_type, inherent_candidates: Vec::new(), extension_candidates: Vec::new(), impl_dups: FxHashSet(), - steps: Rc::new(steps), - opt_simplified_steps, + steps: steps, static_candidates: Vec::new(), + allow_similar_names: false, private_candidate: None, unsatisfied_predicates: Vec::new(), } @@ -387,33 +370,25 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { /////////////////////////////////////////////////////////////////////////// // CANDIDATE ASSEMBLY - fn push_inherent_candidate(&mut self, xform_self_ty: Ty<'tcx>, item: ty::AssociatedItem, - kind: CandidateKind<'tcx>, import_id: Option) { - let is_accessible = if let LookingFor::MethodName(name) = self.looking_for { - let def_scope = self.tcx.adjust(name, item.container.id(), self.body_id).1; - item.vis.is_accessible_from(def_scope, self.tcx) - } else { - true - }; - if is_accessible { - self.inherent_candidates.push(Candidate { xform_self_ty, item, kind, import_id }); - } else if self.private_candidate.is_none() { - self.private_candidate = Some(item.def()); - } - } - - fn push_extension_candidate(&mut self, xform_self_ty: Ty<'tcx>, item: ty::AssociatedItem, - kind: CandidateKind<'tcx>, import_id: Option) { - let is_accessible = if let LookingFor::MethodName(name) = self.looking_for { + fn push_candidate(&mut self, + candidate: Candidate<'tcx>, + is_inherent: bool) + { + let is_accessible = if let Some(name) = self.method_name { + let item = candidate.item; let def_scope = self.tcx.adjust(name, item.container.id(), self.body_id).1; item.vis.is_accessible_from(def_scope, self.tcx) } else { true }; if is_accessible { - self.extension_candidates.push(Candidate { xform_self_ty, item, kind, import_id }); + if is_inherent { + self.inherent_candidates.push(candidate); + } else { + self.extension_candidates.push(candidate); + } } else if self.private_candidate.is_none() { - self.private_candidate = Some(item.def()); + self.private_candidate = Some(candidate.item.def()); } } @@ -426,6 +401,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { fn assemble_probe(&mut self, self_ty: Ty<'tcx>) { debug!("assemble_probe: self_ty={:?}", self_ty); + let lang_items = self.tcx.lang_items(); match self_ty.sty { ty::TyDynamic(ref data, ..) 
=> { @@ -441,79 +417,79 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { self.assemble_inherent_candidates_from_param(self_ty, p); } ty::TyChar => { - let lang_def_id = self.tcx.lang_items.char_impl(); + let lang_def_id = lang_items.char_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TyStr => { - let lang_def_id = self.tcx.lang_items.str_impl(); + let lang_def_id = lang_items.str_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TySlice(_) => { - let lang_def_id = self.tcx.lang_items.slice_impl(); + let lang_def_id = lang_items.slice_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) => { - let lang_def_id = self.tcx.lang_items.const_ptr_impl(); + let lang_def_id = lang_items.const_ptr_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => { - let lang_def_id = self.tcx.lang_items.mut_ptr_impl(); + let lang_def_id = lang_items.mut_ptr_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TyInt(ast::IntTy::I8) => { - let lang_def_id = self.tcx.lang_items.i8_impl(); + let lang_def_id = lang_items.i8_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TyInt(ast::IntTy::I16) => { - let lang_def_id = self.tcx.lang_items.i16_impl(); + let lang_def_id = lang_items.i16_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TyInt(ast::IntTy::I32) => { - let lang_def_id = self.tcx.lang_items.i32_impl(); + let lang_def_id = lang_items.i32_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TyInt(ast::IntTy::I64) => { - let lang_def_id = self.tcx.lang_items.i64_impl(); + let lang_def_id = lang_items.i64_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TyInt(ast::IntTy::I128) => { - let lang_def_id = self.tcx.lang_items.i128_impl(); + let lang_def_id = lang_items.i128_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TyInt(ast::IntTy::Is) => { - let lang_def_id = self.tcx.lang_items.isize_impl(); + let lang_def_id = lang_items.isize_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TyUint(ast::UintTy::U8) => { - let lang_def_id = self.tcx.lang_items.u8_impl(); + let lang_def_id = lang_items.u8_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TyUint(ast::UintTy::U16) => { - let lang_def_id = self.tcx.lang_items.u16_impl(); + let lang_def_id = lang_items.u16_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TyUint(ast::UintTy::U32) => { - let lang_def_id = self.tcx.lang_items.u32_impl(); + let lang_def_id = lang_items.u32_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TyUint(ast::UintTy::U64) => { - let lang_def_id = self.tcx.lang_items.u64_impl(); + let lang_def_id = lang_items.u64_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TyUint(ast::UintTy::U128) => { - let lang_def_id = self.tcx.lang_items.u128_impl(); + let lang_def_id = lang_items.u128_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TyUint(ast::UintTy::Us) => { - let lang_def_id = self.tcx.lang_items.usize_impl(); + let lang_def_id = lang_items.usize_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TyFloat(ast::FloatTy::F32) => { - let lang_def_id = self.tcx.lang_items.f32_impl(); + let lang_def_id = lang_items.f32_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } 
ty::TyFloat(ast::FloatTy::F64) => { - let lang_def_id = self.tcx.lang_items.f64_impl(); + let lang_def_id = lang_items.f64_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } _ => {} @@ -551,19 +527,22 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { let impl_ty = impl_ty.subst(self.tcx, impl_substs); // Determine the receiver type that the method itself expects. - let xform_self_ty = self.xform_self_ty(&item, impl_ty, impl_substs); + let xform_tys = self.xform_self_ty(&item, impl_ty, impl_substs); // We can't use normalize_associated_types_in as it will pollute the // fcx's fulfillment context after this probe is over. let cause = traits::ObligationCause::misc(self.span, self.body_id); let selcx = &mut traits::SelectionContext::new(self.fcx); - let traits::Normalized { value: xform_self_ty, obligations } = - traits::normalize(selcx, self.param_env, cause, &xform_self_ty); - debug!("assemble_inherent_impl_probe: xform_self_ty = {:?}", - xform_self_ty); - - self.push_inherent_candidate(xform_self_ty, item, - InherentImplCandidate(impl_substs, obligations), None); + let traits::Normalized { value: (xform_self_ty, xform_ret_ty), obligations } = + traits::normalize(selcx, self.param_env, cause, &xform_tys); + debug!("assemble_inherent_impl_probe: xform_self_ty = {:?}/{:?}", + xform_self_ty, xform_ret_ty); + + self.push_candidate(Candidate { + xform_self_ty, xform_ret_ty, item, + kind: InherentImplCandidate(impl_substs, obligations), + import_id: None + }, true); } } @@ -584,10 +563,13 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { self.elaborate_bounds(&[trait_ref], |this, new_trait_ref, item| { let new_trait_ref = this.erase_late_bound_regions(&new_trait_ref); - let xform_self_ty = + let (xform_self_ty, xform_ret_ty) = this.xform_self_ty(&item, new_trait_ref.self_ty(), new_trait_ref.substs); - - this.push_inherent_candidate(xform_self_ty, item, ObjectCandidate, None); + this.push_candidate(Candidate { + xform_self_ty, xform_ret_ty, item, + kind: ObjectCandidate, + import_id: None + }, true); }); } @@ -616,7 +598,8 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { ty::Predicate::WellFormed(..) | ty::Predicate::ObjectSafe(..) | ty::Predicate::ClosureKind(..) | - ty::Predicate::TypeOutlives(..) => None, + ty::Predicate::TypeOutlives(..) | + ty::Predicate::ConstEvaluatable(..) => None, } }) .collect(); @@ -624,7 +607,8 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { self.elaborate_bounds(&bounds, |this, poly_trait_ref, item| { let trait_ref = this.erase_late_bound_regions(&poly_trait_ref); - let xform_self_ty = this.xform_self_ty(&item, trait_ref.self_ty(), trait_ref.substs); + let (xform_self_ty, xform_ret_ty) = + this.xform_self_ty(&item, trait_ref.self_ty(), trait_ref.substs); // Because this trait derives from a where-clause, it // should not contain any inference variables or other @@ -633,8 +617,11 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { // `WhereClausePick`. 
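// Editor-added illustration of a where-clause candidate like the ones assembled
// here: inside `print_all`, `to_string` is resolved through the `T: ToString`
// bound itself rather than through any particular impl, which is the situation
// the `WhereClauseCandidate`/`WhereClausePick` path is meant to handle.
fn print_all<T: ToString>(items: &[T]) {
    for item in items {
        println!("{}", item.to_string());
    }
}

fn main() {
    print_all(&[1, 2, 3]);
}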
assert!(!trait_ref.substs.needs_infer()); - this.push_inherent_candidate(xform_self_ty, item, - WhereClauseCandidate(poly_trait_ref), None); + this.push_candidate(Candidate { + xform_self_ty, xform_ret_ty, item, + kind: WhereClauseCandidate(poly_trait_ref), + import_id: None + }, true); }); } @@ -662,10 +649,14 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { fn assemble_extension_candidates_for_traits_in_scope(&mut self, expr_id: ast::NodeId) -> Result<(), MethodError<'tcx>> { + if expr_id == ast::DUMMY_NODE_ID { + return Ok(()) + } let mut duplicates = FxHashSet(); - let opt_applicable_traits = self.tcx.trait_map.get(&expr_id); + let expr_hir_id = self.tcx.hir.node_to_hir_id(expr_id); + let opt_applicable_traits = self.tcx.in_scope_traits(expr_hir_id); if let Some(applicable_traits) = opt_applicable_traits { - for trait_candidate in applicable_traits { + for trait_candidate in applicable_traits.iter() { let trait_did = trait_candidate.def_id; if duplicates.insert(trait_did) { let import_id = trait_candidate.import_id; @@ -687,17 +678,27 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { Ok(()) } - pub fn matches_return_type(&self, method: &ty::AssociatedItem, - expected: ty::Ty<'tcx>) -> bool { + pub fn matches_return_type(&self, + method: &ty::AssociatedItem, + self_ty: Option>, + expected: Ty<'tcx>) -> bool { match method.def() { Def::Method(def_id) => { let fty = self.tcx.fn_sig(def_id); self.probe(|_| { let substs = self.fresh_substs_for_item(self.span, method.def_id); - let output = fty.output().subst(self.tcx, substs); - let (output, _) = self.replace_late_bound_regions_with_fresh_var( - self.span, infer::FnCall, &output); - self.can_sub(self.param_env, output, expected).is_ok() + let fty = fty.subst(self.tcx, substs); + let (fty, _) = self.replace_late_bound_regions_with_fresh_var( + self.span, infer::FnCall, &fty); + + if let Some(self_ty) = self_ty { + if let Err(_) = self.at(&ObligationCause::dummy(), self.param_env) + .sup(fty.inputs()[0], self_ty) + { + return false + } + } + self.can_sub(self.param_env, fty.output(), expected).is_ok() }) } _ => false, @@ -710,6 +711,8 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { -> Result<(), MethodError<'tcx>> { debug!("assemble_extension_candidates_for_trait(trait_def_id={:?})", trait_def_id); + let trait_substs = self.fresh_item_substs(trait_def_id); + let trait_ref = ty::TraitRef::new(trait_def_id, trait_substs); for item in self.impl_or_trait_item(trait_def_id) { // Check whether `trait_def_id` defines a method with suitable name: @@ -719,282 +722,31 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { continue; } - self.assemble_builtin_candidates(import_id, trait_def_id, item.clone()); - - self.assemble_extension_candidates_for_trait_impls(import_id, trait_def_id, - item.clone()); - - self.assemble_closure_candidates(import_id, trait_def_id, item.clone())?; - - self.assemble_projection_candidates(import_id, trait_def_id, item.clone()); - - self.assemble_where_clause_candidates(import_id, trait_def_id, item.clone()); + let (xform_self_ty, xform_ret_ty) = + self.xform_self_ty(&item, trait_ref.self_ty(), trait_substs); + self.push_candidate(Candidate { + xform_self_ty, xform_ret_ty, item, import_id, + kind: TraitCandidate(trait_ref), + }, false); } - Ok(()) } - fn assemble_builtin_candidates(&mut self, - import_id: Option, - trait_def_id: DefId, - item: ty::AssociatedItem) { - if Some(trait_def_id) == self.tcx.lang_items.clone_trait() { - self.assemble_builtin_clone_candidates(import_id, trait_def_id, item); - } - } 
- - fn assemble_builtin_clone_candidates(&mut self, - import_id: Option, - trait_def_id: DefId, - item: ty::AssociatedItem) { - for step in Rc::clone(&self.steps).iter() { - match step.self_ty.sty { - ty::TyInfer(ty::IntVar(_)) | ty::TyInfer(ty::FloatVar(_)) | - ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) | - ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyChar | - ty::TyRawPtr(..) | ty::TyError | ty::TyNever | - ty::TyRef(_, ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) | - ty::TyArray(..) | ty::TyTuple(..) => { - () - } - - _ => continue, - }; - - let substs = Substs::for_item(self.tcx, - trait_def_id, - |def, _| self.region_var_for_def(self.span, def), - |def, substs| { - if def.index == 0 { - step.self_ty - } else { - self.type_var_for_def(self.span, def, substs) - } - }); - - let xform_self_ty = self.xform_self_ty(&item, step.self_ty, substs); - self.push_inherent_candidate(xform_self_ty, item, TraitCandidate, import_id); - } - } - - fn assemble_extension_candidates_for_trait_impls(&mut self, - import_id: Option, - trait_def_id: DefId, - item: ty::AssociatedItem) { - // FIXME(arielb1): can we use for_each_relevant_impl here? - self.tcx.for_each_impl(trait_def_id, |impl_def_id| { - debug!("assemble_extension_candidates_for_trait_impl: trait_def_id={:?} \ - impl_def_id={:?}", - trait_def_id, - impl_def_id); - - if !self.impl_can_possibly_match(impl_def_id) { - return; - } - - let (_, impl_substs) = self.impl_ty_and_substs(impl_def_id); - - debug!("impl_substs={:?}", impl_substs); - - let impl_trait_ref = self.tcx.impl_trait_ref(impl_def_id) - .unwrap() // we know this is a trait impl - .subst(self.tcx, impl_substs); - - debug!("impl_trait_ref={:?}", impl_trait_ref); - - // Determine the receiver type that the method itself expects. - let xform_self_ty = - self.xform_self_ty(&item, impl_trait_ref.self_ty(), impl_trait_ref.substs); - - // Normalize the receiver. We can't use normalize_associated_types_in - // as it will pollute the fcx's fulfillment context after this probe - // is over. - let cause = traits::ObligationCause::misc(self.span, self.body_id); - let selcx = &mut traits::SelectionContext::new(self.fcx); - let traits::Normalized { value: xform_self_ty, obligations } = - traits::normalize(selcx, self.param_env, cause, &xform_self_ty); - - debug!("xform_self_ty={:?}", xform_self_ty); - - self.push_extension_candidate(xform_self_ty, item, - ExtensionImplCandidate(impl_def_id, impl_substs, obligations), import_id); - }); - } - - fn impl_can_possibly_match(&self, impl_def_id: DefId) -> bool { - let simplified_steps = match self.opt_simplified_steps { - Some(ref simplified_steps) => simplified_steps, - None => { - return true; - } - }; - - let impl_type = self.tcx.type_of(impl_def_id); - let impl_simplified_type = - match ty::fast_reject::simplify_type(self.tcx, impl_type, false) { - Some(simplified_type) => simplified_type, - None => { - return true; - } - }; - - simplified_steps.contains(&impl_simplified_type) - } - - fn assemble_closure_candidates(&mut self, - import_id: Option, - trait_def_id: DefId, - item: ty::AssociatedItem) - -> Result<(), MethodError<'tcx>> { - // Check if this is one of the Fn,FnMut,FnOnce traits. 
- let tcx = self.tcx; - let kind = if Some(trait_def_id) == tcx.lang_items.fn_trait() { - ty::ClosureKind::Fn - } else if Some(trait_def_id) == tcx.lang_items.fn_mut_trait() { - ty::ClosureKind::FnMut - } else if Some(trait_def_id) == tcx.lang_items.fn_once_trait() { - ty::ClosureKind::FnOnce - } else { - return Ok(()); - }; - - // Check if there is an unboxed-closure self-type in the list of receivers. - // If so, add "synthetic impls". - let steps = self.steps.clone(); - for step in steps.iter() { - let closure_id = match step.self_ty.sty { - ty::TyClosure(def_id, _) => { - if let Some(id) = self.tcx.hir.as_local_node_id(def_id) { - self.tcx.hir.node_to_hir_id(id) - } else { - continue; - } - } - _ => continue, - }; - - let closure_kind = { - match self.tables.borrow().closure_kinds().get(closure_id) { - Some(&(k, _)) => k, - None => { - return Err(MethodError::ClosureAmbiguity(trait_def_id)); - } - } - }; - - // this closure doesn't implement the right kind of `Fn` trait - if !closure_kind.extends(kind) { - continue; - } - - // create some substitutions for the argument/return type; - // for the purposes of our method lookup, we only take - // receiver type into account, so we can just substitute - // fresh types here to use during substitution and subtyping. - let substs = Substs::for_item(self.tcx, - trait_def_id, - |def, _| self.region_var_for_def(self.span, def), - |def, substs| { - if def.index == 0 { - step.self_ty - } else { - self.type_var_for_def(self.span, def, substs) - } - }); - - let xform_self_ty = self.xform_self_ty(&item, step.self_ty, substs); - self.push_inherent_candidate(xform_self_ty, item, TraitCandidate, import_id); - } - - Ok(()) - } - - fn assemble_projection_candidates(&mut self, - import_id: Option, - trait_def_id: DefId, - item: ty::AssociatedItem) { - debug!("assemble_projection_candidates(\ - trait_def_id={:?}, \ - item={:?})", - trait_def_id, - item); - - for step in Rc::clone(&self.steps).iter() { - debug!("assemble_projection_candidates: step={:?}", step); - - let (def_id, substs) = match step.self_ty.sty { - ty::TyProjection(ref data) => { - let trait_ref = data.trait_ref(self.tcx); - (trait_ref.def_id, trait_ref.substs) - }, - ty::TyAnon(def_id, substs) => (def_id, substs), - _ => continue, - }; - - debug!("assemble_projection_candidates: def_id={:?} substs={:?}", - def_id, - substs); - - let trait_predicates = self.tcx.predicates_of(def_id); - let bounds = trait_predicates.instantiate(self.tcx, substs); - let predicates = bounds.predicates; - debug!("assemble_projection_candidates: predicates={:?}", - predicates); - for poly_bound in traits::elaborate_predicates(self.tcx, predicates) - .filter_map(|p| p.to_opt_poly_trait_ref()) - .filter(|b| b.def_id() == trait_def_id) { - let bound = self.erase_late_bound_regions(&poly_bound); - - debug!("assemble_projection_candidates: def_id={:?} substs={:?} bound={:?}", - def_id, - substs, - bound); - - if self.can_eq(self.param_env, step.self_ty, bound.self_ty()).is_ok() { - let xform_self_ty = self.xform_self_ty(&item, bound.self_ty(), bound.substs); - - debug!("assemble_projection_candidates: bound={:?} xform_self_ty={:?}", - bound, - xform_self_ty); - - self.push_extension_candidate(xform_self_ty, item, TraitCandidate, import_id); - } - } - } - } - - fn assemble_where_clause_candidates(&mut self, - import_id: Option, - trait_def_id: DefId, - item: ty::AssociatedItem) { - debug!("assemble_where_clause_candidates(trait_def_id={:?})", - trait_def_id); - - let caller_predicates = 
self.param_env.caller_bounds.to_vec(); - for poly_bound in traits::elaborate_predicates(self.tcx, caller_predicates) - .filter_map(|p| p.to_opt_poly_trait_ref()) - .filter(|b| b.def_id() == trait_def_id) { - let bound = self.erase_late_bound_regions(&poly_bound); - let xform_self_ty = self.xform_self_ty(&item, bound.self_ty(), bound.substs); - - debug!("assemble_where_clause_candidates: bound={:?} xform_self_ty={:?}", - bound, - xform_self_ty); - - self.push_extension_candidate(xform_self_ty, item, - WhereClauseCandidate(poly_bound), import_id); - } - } - fn candidate_method_names(&self) -> Vec { let mut set = FxHashSet(); - let mut names: Vec<_> = - self.inherent_candidates - .iter() - .chain(&self.extension_candidates) - .map(|candidate| candidate.item.name) - .filter(|&name| set.insert(name)) - .collect(); + let mut names: Vec<_> = self.inherent_candidates + .iter() + .chain(&self.extension_candidates) + .filter(|candidate| { + if let Some(return_ty) = self.return_type { + self.matches_return_type(&candidate.item, None, return_ty) + } else { + true + } + }) + .map(|candidate| candidate.item.name) + .filter(|&name| set.insert(name)) + .collect(); // sort them by the name so we have a stable result names.sort_by_key(|n| n.as_str()); @@ -1005,10 +757,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { // THE ACTUAL SEARCH fn pick(mut self) -> PickResult<'tcx> { - assert!(match self.looking_for { - LookingFor::MethodName(_) => true, - LookingFor::ReturnType(_) => false, - }); + assert!(self.method_name.is_some()); if let Some(r) = self.pick_core() { return r; @@ -1051,20 +800,18 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { assert!(others.is_empty()); vec![] } - Some(Err(MethodError::ClosureAmbiguity(..))) => { - // this error only occurs when assembling candidates - span_bug!(span, "encountered ClosureAmbiguity from pick_core"); - } _ => vec![], }; if let Some(def) = private_candidate { return Err(MethodError::PrivateMatch(def, out_of_scope_traits)); } + let lev_candidate = self.probe_for_lev_candidate()?; Err(MethodError::NoMatch(NoMatchData::new(static_candidates, unsatisfied_predicates, out_of_scope_traits, + lev_candidate, self.mode))) } @@ -1072,21 +819,17 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { let steps = self.steps.clone(); // find the first step that works - steps.iter().filter_map(|step| self.pick_step(step)).next() - } - - fn pick_step(&mut self, step: &CandidateStep<'tcx>) -> Option> { - debug!("pick_step: step={:?}", step); - - if step.self_ty.references_error() { - return None; - } - - if let Some(result) = self.pick_by_value_method(step) { - return Some(result); - } - - self.pick_autorefd_method(step) + steps + .iter() + .filter(|step| { + debug!("pick_core: step={:?}", step); + !step.self_ty.references_error() + }).flat_map(|step| { + self.pick_by_value_method(step).or_else(|| { + self.pick_autorefd_method(step, hir::MutImmutable).or_else(|| { + self.pick_autorefd_method(step, hir::MutMutable) + })})}) + .next() } fn pick_by_value_method(&mut self, step: &CandidateStep<'tcx>) -> Option> { @@ -1117,36 +860,30 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { }) } - fn pick_autorefd_method(&mut self, step: &CandidateStep<'tcx>) -> Option> { + fn pick_autorefd_method(&mut self, step: &CandidateStep<'tcx>, mutbl: hir::Mutability) + -> Option> { let tcx = self.tcx; // In general, during probing we erase regions. See // `impl_self_ty()` for an explanation. 
let region = tcx.types.re_erased; - // Search through mutabilities in order to find one where pick works: - [hir::MutImmutable, hir::MutMutable] - .iter() - .filter_map(|&m| { - let autoref_ty = tcx.mk_ref(region, - ty::TypeAndMut { - ty: step.self_ty, - mutbl: m, - }); - self.pick_method(autoref_ty).map(|r| { - r.map(|mut pick| { - pick.autoderefs = step.autoderefs; - pick.autoref = Some(m); - pick.unsize = if step.unsize { - Some(step.self_ty) - } else { - None - }; - pick - }) - }) + let autoref_ty = tcx.mk_ref(region, + ty::TypeAndMut { + ty: step.self_ty, mutbl + }); + self.pick_method(autoref_ty).map(|r| { + r.map(|mut pick| { + pick.autoderefs = step.autoderefs; + pick.autoref = Some(mutbl); + pick.unsize = if step.unsize { + Some(step.self_ty) + } else { + None + }; + pick }) - .nth(0) + }) } fn pick_method(&mut self, self_ty: Ty<'tcx>) -> Option> { @@ -1177,33 +914,75 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { possibly_unsatisfied_predicates: &mut Vec>) -> Option> { let mut applicable_candidates: Vec<_> = probes.iter() - .filter(|&probe| self.consider_probe(self_ty, probe, possibly_unsatisfied_predicates)) + .map(|probe| { + (probe, self.consider_probe(self_ty, probe, possibly_unsatisfied_predicates)) + }) + .filter(|&(_, status)| status != ProbeResult::NoMatch) .collect(); debug!("applicable_candidates: {:?}", applicable_candidates); if applicable_candidates.len() > 1 { - match self.collapse_candidates_to_trait_pick(&applicable_candidates[..]) { - Some(pick) => { - return Some(Ok(pick)); - } - None => {} + if let Some(pick) = self.collapse_candidates_to_trait_pick(&applicable_candidates[..]) { + return Some(Ok(pick)); } } if applicable_candidates.len() > 1 { - let sources = probes.iter().map(|p| p.to_source()).collect(); + let sources = probes.iter() + .map(|p| self.candidate_source(p, self_ty)) + .collect(); return Some(Err(MethodError::Ambiguity(sources))); } - applicable_candidates.pop().map(|probe| Ok(probe.to_unadjusted_pick())) + applicable_candidates.pop().map(|(probe, status)| { + if status == ProbeResult::Match { + Ok(probe.to_unadjusted_pick()) + } else { + Err(MethodError::BadReturnType) + } + }) + } + + fn select_trait_candidate(&self, trait_ref: ty::TraitRef<'tcx>) + -> traits::SelectionResult<'tcx, traits::Selection<'tcx>> + { + let cause = traits::ObligationCause::misc(self.span, self.body_id); + let predicate = + trait_ref.to_poly_trait_ref().to_poly_trait_predicate(); + let obligation = traits::Obligation::new(cause, self.param_env, predicate); + traits::SelectionContext::new(self).select(&obligation) + } + + fn candidate_source(&self, candidate: &Candidate<'tcx>, self_ty: Ty<'tcx>) + -> CandidateSource + { + match candidate.kind { + InherentImplCandidate(..) => ImplSource(candidate.item.container.id()), + ObjectCandidate | + WhereClauseCandidate(_) => TraitSource(candidate.item.container.id()), + TraitCandidate(trait_ref) => self.probe(|_| { + let _ = self.at(&ObligationCause::dummy(), self.param_env) + .sup(candidate.xform_self_ty, self_ty); + match self.select_trait_candidate(trait_ref) { + Ok(Some(traits::Vtable::VtableImpl(ref impl_data))) => { + // If only a single impl matches, make the error message point + // to that impl. 
+ ImplSource(impl_data.impl_def_id) + } + _ => { + TraitSource(candidate.item.container.id()) + } + } + }) + } } fn consider_probe(&self, self_ty: Ty<'tcx>, probe: &Candidate<'tcx>, possibly_unsatisfied_predicates: &mut Vec>) - -> bool { + -> ProbeResult { debug!("consider_probe: self_ty={:?} probe={:?}", self_ty, probe); self.probe(|_| { @@ -1213,60 +992,102 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { Ok(InferOk { obligations, value: () }) => obligations, Err(_) => { debug!("--> cannot relate self-types"); - return false; + return ProbeResult::NoMatch; } }; + let mut result = ProbeResult::Match; + let selcx = &mut traits::SelectionContext::new(self); + let cause = traits::ObligationCause::misc(self.span, self.body_id); + // If so, impls may carry other conditions (e.g., where // clauses) that must be considered. Make sure that those // match as well (or at least may match, sometimes we // don't have enough information to fully evaluate). - let (impl_def_id, substs, ref_obligations) = match probe.kind { + let candidate_obligations : Vec<_> = match probe.kind { InherentImplCandidate(ref substs, ref ref_obligations) => { - (probe.item.container.id(), substs, ref_obligations) - } - - ExtensionImplCandidate(impl_def_id, ref substs, ref ref_obligations) => { - (impl_def_id, substs, ref_obligations) + // Check whether the impl imposes obligations we have to worry about. + let impl_def_id = probe.item.container.id(); + let impl_bounds = self.tcx.predicates_of(impl_def_id); + let impl_bounds = impl_bounds.instantiate(self.tcx, substs); + let traits::Normalized { value: impl_bounds, obligations: norm_obligations } = + traits::normalize(selcx, self.param_env, cause.clone(), &impl_bounds); + + // Convert the bounds into obligations. + let impl_obligations = traits::predicates_for_generics( + cause.clone(), self.param_env, &impl_bounds); + + debug!("impl_obligations={:?}", impl_obligations); + impl_obligations.into_iter() + .chain(norm_obligations.into_iter()) + .chain(ref_obligations.iter().cloned()) + .collect() } ObjectCandidate | - TraitCandidate | WhereClauseCandidate(..) => { // These have no additional conditions to check. - return true; + vec![] } - }; - - let selcx = &mut traits::SelectionContext::new(self); - let cause = traits::ObligationCause::misc(self.span, self.body_id); - // Check whether the impl imposes obligations we have to worry about. - let impl_bounds = self.tcx.predicates_of(impl_def_id); - let impl_bounds = impl_bounds.instantiate(self.tcx, substs); - let traits::Normalized { value: impl_bounds, obligations: norm_obligations } = - traits::normalize(selcx, self.param_env, cause.clone(), &impl_bounds); + TraitCandidate(trait_ref) => { + let predicate = trait_ref.to_predicate(); + let obligation = + traits::Obligation::new(cause.clone(), self.param_env, predicate); + if !selcx.evaluate_obligation(&obligation) { + if self.probe(|_| self.select_trait_candidate(trait_ref).is_err()) { + // This candidate's primary obligation doesn't even + // select - don't bother registering anything in + // `potentially_unsatisfied_predicates`. + return ProbeResult::NoMatch; + } else { + // Some nested subobligation of this predicate + // failed. + // + // FIXME: try to find the exact nested subobligation + // and point at it rather than reporting the entire + // trait-ref? + result = ProbeResult::NoMatch; + let trait_ref = self.resolve_type_vars_if_possible(&trait_ref); + possibly_unsatisfied_predicates.push(trait_ref); + } + } + vec![] + } + }; - // Convert the bounds into obligations. 
- let obligations = traits::predicates_for_generics(cause.clone(), - self.param_env, - &impl_bounds); - debug!("impl_obligations={:?}", obligations); + debug!("consider_probe - candidate_obligations={:?} sub_obligations={:?}", + candidate_obligations, sub_obligations); // Evaluate those obligations to see if they might possibly hold. - let mut all_true = true; - for o in obligations.iter() - .chain(sub_obligations.iter()) - .chain(norm_obligations.iter()) - .chain(ref_obligations.iter()) { - if !selcx.evaluate_obligation(o) { - all_true = false; + for o in candidate_obligations.into_iter().chain(sub_obligations) { + let o = self.resolve_type_vars_if_possible(&o); + if !selcx.evaluate_obligation(&o) { + result = ProbeResult::NoMatch; if let &ty::Predicate::Trait(ref pred) = &o.predicate { possibly_unsatisfied_predicates.push(pred.0.trait_ref); } } } - all_true + + if let ProbeResult::Match = result { + if let (Some(return_ty), Some(xform_ret_ty)) = + (self.return_type, probe.xform_ret_ty) + { + let xform_ret_ty = self.resolve_type_vars_if_possible(&xform_ret_ty); + debug!("comparing return_ty {:?} with xform ret ty {:?}", + return_ty, + probe.xform_ret_ty); + if self.at(&ObligationCause::dummy(), self.param_env) + .sup(return_ty, xform_ret_ty) + .is_err() + { + return ProbeResult::BadReturnType; + } + } + } + + result }) } @@ -1287,28 +1108,79 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { /// /// Now imagine the receiver is `Vec<_>`. It doesn't really matter at this time which impl we /// use, so it's ok to just commit to "using the method from the trait Foo". - fn collapse_candidates_to_trait_pick(&self, probes: &[&Candidate<'tcx>]) -> Option> { + fn collapse_candidates_to_trait_pick(&self, probes: &[(&Candidate<'tcx>, ProbeResult)]) + -> Option> + { // Do all probes correspond to the same trait? - let container = probes[0].item.container; + let container = probes[0].0.item.container; match container { ty::TraitContainer(_) => {} ty::ImplContainer(_) => return None, } - if probes[1..].iter().any(|p| p.item.container != container) { + if probes[1..].iter().any(|&(p, _)| p.item.container != container) { return None; } + // FIXME: check the return type here somehow. // If so, just use this trait and call it a day. Some(Pick { - item: probes[0].item.clone(), + item: probes[0].0.item.clone(), kind: TraitPick, - import_id: probes[0].import_id, + import_id: probes[0].0.import_id, autoderefs: 0, autoref: None, unsize: None, }) } + /// Similarly to `probe_for_return_type`, this method attempts to find the best matching + /// candidate method where the method name may have been misspelt. Similarly to other + /// Levenshtein based suggestions, we provide at most one such suggestion. 
+ fn probe_for_lev_candidate(&mut self) -> Result, MethodError<'tcx>> { + debug!("Probing for method names similar to {:?}", + self.method_name); + + let steps = self.steps.clone(); + self.probe(|_| { + let mut pcx = ProbeContext::new(self.fcx, self.span, self.mode, self.method_name, + self.return_type, steps); + pcx.allow_similar_names = true; + pcx.assemble_inherent_candidates(); + pcx.assemble_extension_candidates_for_traits_in_scope(ast::DUMMY_NODE_ID)?; + + let method_names = pcx.candidate_method_names(); + pcx.allow_similar_names = false; + let applicable_close_candidates: Vec = method_names + .iter() + .filter_map(|&method_name| { + pcx.reset(); + pcx.method_name = Some(method_name); + pcx.assemble_inherent_candidates(); + pcx.assemble_extension_candidates_for_traits_in_scope(ast::DUMMY_NODE_ID) + .ok().map_or(None, |_| { + pcx.pick_core() + .and_then(|pick| pick.ok()) + .and_then(|pick| Some(pick.item)) + }) + }) + .collect(); + + if applicable_close_candidates.is_empty() { + Ok(None) + } else { + let best_name = { + let names = applicable_close_candidates.iter().map(|cand| &cand.name); + find_best_match_for_name(names, + &self.method_name.unwrap().as_str(), + None) + }.unwrap(); + Ok(applicable_close_candidates + .into_iter() + .find(|method| method.name == best_name)) + } + }) + } + /////////////////////////////////////////////////////////////////////////// // MISCELLANY fn has_applicable_self(&self, item: &ty::AssociatedItem) -> bool { @@ -1340,23 +1212,23 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { item: &ty::AssociatedItem, impl_ty: Ty<'tcx>, substs: &Substs<'tcx>) - -> Ty<'tcx> { + -> (Ty<'tcx>, Option>) { if item.kind == ty::AssociatedKind::Method && self.mode == Mode::MethodCall { - self.xform_method_self_ty(item.def_id, impl_ty, substs) + let sig = self.xform_method_sig(item.def_id, substs); + (sig.inputs()[0], Some(sig.output())) } else { - impl_ty + (impl_ty, None) } } - fn xform_method_self_ty(&self, - method: DefId, - impl_ty: Ty<'tcx>, - substs: &Substs<'tcx>) - -> Ty<'tcx> { - let self_ty = self.tcx.fn_sig(method).input(0); - debug!("xform_self_ty(impl_ty={:?}, self_ty={:?}, substs={:?})", - impl_ty, - self_ty, + fn xform_method_sig(&self, + method: DefId, + substs: &Substs<'tcx>) + -> ty::FnSig<'tcx> + { + let fn_sig = self.tcx.fn_sig(method); + debug!("xform_self_ty(fn_sig={:?}, substs={:?})", + fn_sig, substs); assert!(!substs.has_escaping_regions()); @@ -1372,10 +1244,10 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { // Erase any late-bound regions from the method and substitute // in the values from the substitution. - let xform_self_ty = self.erase_late_bound_regions(&self_ty); + let xform_fn_sig = self.erase_late_bound_regions(&fn_sig); if generics.types.is_empty() && generics.regions.is_empty() { - xform_self_ty.subst(self.tcx, substs) + xform_fn_sig.subst(self.tcx, substs) } else { let substs = Substs::for_item(self.tcx, method, |def, _| { let i = def.index as usize; @@ -1394,22 +1266,22 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { self.type_var_for_def(self.span, def, cur_substs) } }); - xform_self_ty.subst(self.tcx, substs) + xform_fn_sig.subst(self.tcx, substs) } } /// Get the type of an impl and generate substitutions with placeholders. 
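The `probe_for_lev_candidate` routine added above drives the new "did you mean" help: it re-runs candidate assembly with `allow_similar_names` set and keeps only methods whose name is within a small edit distance of the typo (at most one edit per three characters, as in `impl_or_trait_item` below). A rough standalone sketch of that filtering, with a textbook `lev_distance` and a simplified best-match pick instead of rustc's `find_best_match_for_name`:

    use std::cmp::max;

    // Classic dynamic-programming Levenshtein distance.
    fn lev_distance(a: &str, b: &str) -> usize {
        let a: Vec<char> = a.chars().collect();
        let b: Vec<char> = b.chars().collect();
        let mut prev: Vec<usize> = (0..=b.len()).collect();
        for (i, &ca) in a.iter().enumerate() {
            let mut cur = vec![i + 1];
            for (j, &cb) in b.iter().enumerate() {
                let cost = if ca == cb { 0 } else { 1 };
                cur.push((prev[j] + cost).min(prev[j + 1] + 1).min(cur[j] + 1));
            }
            prev = cur;
        }
        prev[b.len()]
    }

    // Same threshold as the patch: roughly one edit per three characters,
    // and never treat an exact match (distance 0) as a "similar" name.
    fn suggest<'a>(typo: &str, candidates: &[&'a str]) -> Option<&'a str> {
        let max_dist = max(typo.len(), 3) / 3;
        candidates
            .iter()
            .map(|&c| (lev_distance(typo, c), c))
            .filter(|&(d, _)| d > 0 && d <= max_dist)
            .min_by_key(|&(d, _)| d)
            .map(|(_, c)| c)
    }

    fn main() {
        let methods = ["count", "collect", "cloned"];
        assert_eq!(suggest("cont", &methods), Some("count"));
        assert_eq!(suggest("frobnicate", &methods), None);
    }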
fn impl_ty_and_substs(&self, impl_def_id: DefId) -> (Ty<'tcx>, &'tcx Substs<'tcx>) { - let impl_ty = self.tcx.type_of(impl_def_id); - - let substs = Substs::for_item(self.tcx, - impl_def_id, - |_, _| self.tcx.types.re_erased, - |_, _| self.next_ty_var( - TypeVariableOrigin::SubstitutionPlaceholder( - self.tcx.def_span(impl_def_id)))); + (self.tcx.type_of(impl_def_id), self.fresh_item_substs(impl_def_id)) + } - (impl_ty, substs) + fn fresh_item_substs(&self, def_id: DefId) -> &'tcx Substs<'tcx> { + Substs::for_item(self.tcx, + def_id, + |_, _| self.tcx.types.re_erased, + |_, _| self.next_ty_var( + TypeVariableOrigin::SubstitutionPlaceholder( + self.tcx.def_span(def_id)))) } /// Replace late-bound-regions bound by `value` with `'static` using @@ -1436,19 +1308,23 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { self.tcx.erase_late_bound_regions(value) } - /// Find the method with the appropriate name (or return type, as the case may be). + /// Find the method with the appropriate name (or return type, as the case may be). If + /// `allow_similar_names` is set, find methods with close-matching names. fn impl_or_trait_item(&self, def_id: DefId) -> Vec { - match self.looking_for { - LookingFor::MethodName(name) => { - self.fcx.associated_item(def_id, name).map_or(Vec::new(), |x| vec![x]) - } - LookingFor::ReturnType(return_ty) => { - self.tcx - .associated_items(def_id) - .map(|did| self.tcx.associated_item(did.def_id)) - .filter(|m| self.matches_return_type(m, return_ty)) + if let Some(name) = self.method_name { + if self.allow_similar_names { + let max_dist = max(name.as_str().len(), 3) / 3; + self.tcx.associated_items(def_id) + .filter(|x| { + let dist = lev_distance(&*name.as_str(), &x.name.as_str()); + dist > 0 && dist <= max_dist + }) .collect() + } else { + self.fcx.associated_item(def_id, name).map_or(Vec::new(), |x| vec![x]) } + } else { + self.tcx.associated_items(def_id).collect() } } } @@ -1459,9 +1335,8 @@ impl<'tcx> Candidate<'tcx> { item: self.item.clone(), kind: match self.kind { InherentImplCandidate(..) => InherentImplPick, - ExtensionImplCandidate(def_id, ..) => ExtensionImplPick(def_id), ObjectCandidate => ObjectPick, - TraitCandidate => TraitPick, + TraitCandidate(_) => TraitPick, WhereClauseCandidate(ref trait_ref) => { // Only trait derived from where-clauses should // appear here, so they should not contain any @@ -1479,14 +1354,4 @@ impl<'tcx> Candidate<'tcx> { unsize: None, } } - - fn to_source(&self) -> CandidateSource { - match self.kind { - InherentImplCandidate(..) => ImplSource(self.item.container.id()), - ExtensionImplCandidate(def_id, ..) => ImplSource(def_id), - ObjectCandidate | - TraitCandidate | - WhereClauseCandidate(_) => TraitSource(self.item.container.id()), - } - } } diff --git a/src/librustc_typeck/check/method/suggest.rs b/src/librustc_typeck/check/method/suggest.rs index c8b828f3a4..90c5297b39 100644 --- a/src/librustc_typeck/check/method/suggest.rs +++ b/src/librustc_typeck/check/method/suggest.rs @@ -45,7 +45,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ty::TyFnPtr(_) => true, // If it's not a simple function, look for things which implement FnOnce _ => { - let fn_once = match tcx.lang_items.require(FnOnceTraitLangItem) { + let fn_once = match tcx.lang_items().require(FnOnceTraitLangItem) { Ok(fn_once) => fn_once, Err(..) 
=> return false, }; @@ -164,6 +164,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { MethodError::NoMatch(NoMatchData { static_candidates: static_sources, unsatisfied_predicates, out_of_scope_traits, + lev_candidate, mode, .. }) => { let tcx = self.tcx; @@ -282,6 +283,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { item_name, rcvr_expr, out_of_scope_traits); + + if let Some(lev_candidate) = lev_candidate { + err.help(&format!("did you mean `{}`?", lev_candidate.name)); + } err.emit(); } @@ -296,22 +301,6 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { err.emit(); } - MethodError::ClosureAmbiguity(trait_def_id) => { - let msg = format!("the `{}` method from the `{}` trait cannot be explicitly \ - invoked on this closure as we have not yet inferred what \ - kind of closure it is", - item_name, - self.tcx.item_path_str(trait_def_id)); - let msg = if let Some(callee) = rcvr_expr { - format!("{}; use overloaded call notation instead (e.g., `{}()`)", - msg, - self.tcx.hir.node_to_pretty_string(callee.id)) - } else { - msg - }; - self.sess().span_err(span, &msg); - } - MethodError::PrivateMatch(def, out_of_scope_traits) => { let mut err = struct_span_err!(self.tcx.sess, span, E0624, "{} `{}` is private", def.kind_name(), item_name); @@ -337,6 +326,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } err.emit(); } + + MethodError::BadReturnType => { + bug!("no return type expectations but got BadReturnType") + } } } @@ -564,14 +557,14 @@ pub fn all_traits<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> AllTraits<'a> if !external_mods.insert(def_id) { return; } - for child in tcx.sess.cstore.item_children(def_id, tcx.sess) { + for child in tcx.item_children(def_id).iter() { handle_external_def(tcx, traits, external_mods, child.def) } } _ => {} } } - for cnum in tcx.sess.cstore.crates() { + for &cnum in tcx.crates().iter() { let def_id = DefId { krate: cnum, index: CRATE_DEF_INDEX, diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index 516fae37b2..9c6a4abfbd 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -85,13 +85,12 @@ use self::method::MethodCallee; use self::TupleArgumentsFlag::*; use astconv::AstConv; -use fmt_macros::{Parser, Piece, Position}; use hir::def::{Def, CtorKind}; use hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; use rustc_back::slice::ref_slice; use rustc::infer::{self, InferCtxt, InferOk, RegionVariableOrigin}; use rustc::infer::type_variable::{TypeVariableOrigin}; -use rustc::middle::region::CodeExtent; +use rustc::middle::region; use rustc::ty::subst::{Kind, Subst, Substs}; use rustc::traits::{self, FulfillmentContext, ObligationCause, ObligationCauseCode}; use rustc::ty::{ParamTy, LvaluePreference, NoPreference, PreferMutLvalue}; @@ -129,7 +128,6 @@ use rustc::hir::map::Node; use rustc::hir::{self, PatKind}; use rustc::middle::lang_items; use rustc_back::slice; -use rustc::middle::const_val::eval_length; use rustc_const_math::ConstInt; mod autoderef; @@ -146,6 +144,7 @@ mod cast; mod closure; mod callee; mod compare_method; +mod generator_interior; mod intrinsic; mod op; @@ -205,6 +204,8 @@ pub struct Inherited<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { deferred_cast_checks: RefCell>>, + deferred_generator_interiors: RefCell)>>, + // Anonymized types found in explicit return types and their // associated fresh inference variable. 
Writeback resolves these // variables to get the concrete type, which can be used to @@ -503,6 +504,8 @@ pub struct FnCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { ret_coercion: Option>>, + yield_ty: Option>, + ps: RefCell, /// Whether the last checked node generates a divergence (e.g., @@ -601,8 +604,9 @@ impl<'a, 'gcx, 'tcx> Inherited<'a, 'gcx, 'tcx> { let tcx = infcx.tcx; let item_id = tcx.hir.as_local_node_id(def_id); let body_id = item_id.and_then(|id| tcx.hir.maybe_body_owned_by(id)); - let implicit_region_bound = body_id.map(|body| { - tcx.mk_region(ty::ReScope(CodeExtent::CallSiteScope(body))) + let implicit_region_bound = body_id.map(|body_id| { + let body = tcx.hir.body(body_id); + tcx.mk_region(ty::ReScope(region::Scope::CallSite(body.value.hir_id.local_id))) }); Inherited { @@ -614,6 +618,7 @@ impl<'a, 'gcx, 'tcx> Inherited<'a, 'gcx, 'tcx> { locals: RefCell::new(NodeMap()), deferred_call_resolutions: RefCell::new(DefIdMap()), deferred_cast_checks: RefCell::new(Vec::new()), + deferred_generator_interiors: RefCell::new(Vec::new()), anon_types: RefCell::new(NodeMap()), implicit_region_bound, body_id, @@ -734,11 +739,20 @@ pub fn provide(providers: &mut Providers) { typeck_tables_of, has_typeck_tables, closure_kind, + generator_sig, adt_destructor, ..*providers }; } +fn generator_sig<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId) + -> Option> { + let node_id = tcx.hir.as_local_node_id(def_id).unwrap(); + let hir_id = tcx.hir.node_to_hir_id(node_id); + tcx.typeck_tables_of(def_id).generator_sigs()[hir_id].map(|s| ty::Binder(s)) +} + fn closure_kind<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> ty::ClosureKind { @@ -865,7 +879,7 @@ fn typeck_tables_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env, &fn_sig); - check_fn(&inh, param_env, fn_sig, decl, id, body) + check_fn(&inh, param_env, fn_sig, decl, id, body, false).0 } else { let fcx = FnCtxt::new(&inh, param_env, body.value.id); let expected_type = tcx.type_of(def_id); @@ -887,6 +901,7 @@ fn typeck_tables_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, fcx.closure_analyze(body); fcx.select_obligations_where_possible(); fcx.check_casts(); + fcx.resolve_generator_interiors(def_id); fcx.select_all_obligations_or_error(); if fn_decl.is_some() { @@ -986,8 +1001,9 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, fn_sig: ty::FnSig<'tcx>, decl: &'gcx hir::FnDecl, fn_id: ast::NodeId, - body: &'gcx hir::Body) - -> FnCtxt<'a, 'gcx, 'tcx> + body: &'gcx hir::Body, + can_be_generator: bool) + -> (FnCtxt<'a, 'gcx, 'tcx>, Option>) { let mut fn_sig = fn_sig.clone(); @@ -1010,12 +1026,19 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, fn_sig.abi ); + let span = body.value.span; + + if body.is_generator && can_be_generator { + fcx.yield_ty = Some(fcx.next_ty_var(TypeVariableOrigin::TypeInference(span))); + } + GatherLocalsVisitor { fcx: &fcx, }.visit_body(body); // Add formal parameters. for (arg_ty, arg) in fn_sig.inputs().iter().zip(&body.arguments) { // Check the pattern. - fcx.check_pat_arg(&arg.pat, arg_ty, true); + fcx.check_pat_walk(&arg.pat, arg_ty, + ty::BindingMode::BindByValue(hir::Mutability::MutImmutable), true); // Check that argument is Sized. 
// The check for a non-trivial pattern is a hack to avoid duplicate warnings @@ -1029,6 +1052,24 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, } let fn_hir_id = fcx.tcx.hir.node_to_hir_id(fn_id); + let gen_ty = if can_be_generator && body.is_generator { + let gen_sig = ty::GenSig { + yield_ty: fcx.yield_ty.unwrap(), + return_ty: ret_ty, + }; + inherited.tables.borrow_mut().generator_sigs_mut().insert(fn_hir_id, Some(gen_sig)); + + let witness = fcx.next_ty_var(TypeVariableOrigin::MiscVariable(span)); + fcx.deferred_generator_interiors.borrow_mut().push((body.id(), witness)); + let interior = ty::GeneratorInterior::new(witness); + + inherited.tables.borrow_mut().generator_interiors_mut().insert(fn_hir_id, interior); + + Some(interior) + } else { + inherited.tables.borrow_mut().generator_sigs_mut().insert(fn_hir_id, None); + None + }; inherited.tables.borrow_mut().liberated_fn_sigs_mut().insert(fn_hir_id, fn_sig); fcx.check_return_expr(&body.value); @@ -1060,11 +1101,11 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, let mut actual_return_ty = coercion.complete(&fcx); if actual_return_ty.is_never() { actual_return_ty = fcx.next_diverging_ty_var( - TypeVariableOrigin::DivergingFn(body.value.span)); + TypeVariableOrigin::DivergingFn(span)); } - fcx.demand_suptype(body.value.span, ret_ty, actual_return_ty); + fcx.demand_suptype(span, ret_ty, actual_return_ty); - fcx + (fcx, gen_ty) } fn check_struct<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -1173,55 +1214,11 @@ pub fn check_item_type<'a,'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, it: &'tcx hir::Item } fn check_on_unimplemented<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - def_id: DefId, + trait_def_id: DefId, item: &hir::Item) { - let generics = tcx.generics_of(def_id); - if let Some(ref attr) = item.attrs.iter().find(|a| { - a.check_name("rustc_on_unimplemented") - }) { - if let Some(istring) = attr.value_str() { - let istring = istring.as_str(); - let name = tcx.item_name(def_id).as_str(); - let parser = Parser::new(&istring); - let types = &generics.types; - for token in parser { - match token { - Piece::String(_) => (), // Normal string, no need to check it - Piece::NextArgument(a) => match a.position { - // `{Self}` is allowed - Position::ArgumentNamed(s) if s == "Self" => (), - // `{ThisTraitsName}` is allowed - Position::ArgumentNamed(s) if s == name => (), - // So is `{A}` if A is a type parameter - Position::ArgumentNamed(s) => match types.iter().find(|t| { - t.name == s - }) { - Some(_) => (), - None => { - span_err!(tcx.sess, attr.span, E0230, - "there is no type parameter \ - {} on trait {}", - s, name); - } - }, - // `{:1}` and `{}` are not to be used - Position::ArgumentIs(_) => { - span_err!(tcx.sess, attr.span, E0231, - "only named substitution \ - parameters are allowed"); - } - } - } - } - } else { - struct_span_err!( - tcx.sess, attr.span, E0232, - "this attribute must have a value") - .span_label(attr.span, "attribute requires a value") - .note(&format!("eg `#[rustc_on_unimplemented = \"foo\"]`")) - .emit(); - } - } + let item_def_id = tcx.hir.local_def_id(item.id); + // an error would be reported if this fails. 
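The rewritten `check_on_unimplemented` above delegates the placeholder validation that used to live here (E0230/E0231/E0232) to `traits::OnUnimplementedDirective`. For reference, the attribute being validated looks roughly like this on a nightly with the `on_unimplemented` feature gate; the trait and parameter names are made up for the example:

    #![feature(on_unimplemented)]
    #![allow(dead_code)]

    // `{Self}` and the trait's own type parameters are the placeholders the
    // removed checker accepted; anything else used to trigger E0230.
    #[rustc_on_unimplemented = "`{Self}` cannot be frobbed with `{T}`"]
    trait Frob<T> {
        fn frob(&self, with: T);
    }

    fn main() {}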
+ let _ = traits::OnUnimplementedDirective::of_item(tcx, trait_def_id, item_def_id); } fn report_forbidden_specialization<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -1252,6 +1249,7 @@ fn report_forbidden_specialization<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, fn check_specialization_validity<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_def: &ty::TraitDef, + trait_item: &ty::AssociatedItem, impl_id: DefId, impl_item: &hir::ImplItem) { @@ -1262,7 +1260,8 @@ fn check_specialization_validity<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, hir::ImplItemKind::Method(..) => ty::AssociatedKind::Method, hir::ImplItemKind::Type(_) => ty::AssociatedKind::Type }; - let parent = ancestors.defs(tcx, impl_item.name, kind).skip(1).next() + + let parent = ancestors.defs(tcx, trait_item.name, kind, trait_def.def_id).skip(1).next() .map(|node_item| node_item.map(|parent| parent.defaultness)); if let Some(parent) = parent { @@ -1294,7 +1293,7 @@ fn check_impl_items_against_trait<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, for impl_item in impl_items() { let ty_impl_item = tcx.associated_item(tcx.hir.local_def_id(impl_item.id)); let ty_trait_item = tcx.associated_items(impl_trait_ref.def_id) - .find(|ac| ac.name == ty_impl_item.name); + .find(|ac| tcx.hygienic_eq(ty_impl_item.name, ac.name, impl_trait_ref.def_id)); // Check that impl definition matches trait definition if let Some(ty_trait_item) = ty_trait_item { @@ -1375,9 +1374,9 @@ fn check_impl_items_against_trait<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } } - } - check_specialization_validity(tcx, trait_def, impl_id, impl_item); + check_specialization_validity(tcx, trait_def, &ty_trait_item, impl_id, impl_item); + } } // Check for missing items from trait @@ -1386,7 +1385,7 @@ fn check_impl_items_against_trait<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let associated_type_overridden = overridden_associated_type.is_some(); for trait_item in tcx.associated_items(impl_trait_ref.def_id) { let is_implemented = trait_def.ancestors(tcx, impl_id) - .defs(tcx, trait_item.name, trait_item.kind) + .defs(tcx, trait_item.name, trait_item.kind, impl_trait_ref.def_id) .next() .map(|node_item| !node_item.node.is_from_trait()) .unwrap_or(false); @@ -1557,9 +1556,12 @@ pub fn check_enum<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let repr_type_ty = def.repr.discr_type().to_ty(tcx); if repr_type_ty == tcx.types.i128 || repr_type_ty == tcx.types.u128 { - if !tcx.sess.features.borrow().i128_type { + if !tcx.sess.features.borrow().repr128 { emit_feature_err(&tcx.sess.parse_sess, - "i128_type", sp, GateIssue::Language, "128-bit type is unstable"); + "repr128", + sp, + GateIssue::Language, + "repr with 128-bit type is unstable"); } } @@ -1623,7 +1625,7 @@ impl<'a, 'gcx, 'tcx> AstConv<'gcx, 'tcx> for FnCtxt<'a, 'gcx, 'tcx> { fn re_infer(&self, span: Span, def: Option<&ty::RegionParameterDef>) -> Option> { let v = match def { - Some(def) => infer::EarlyBoundRegion(span, def.name, def.issue_32330), + Some(def) => infer::EarlyBoundRegion(span, def.name), None => infer::MiscVariable(span) }; Some(self.next_region_var(v)) @@ -1666,6 +1668,10 @@ impl<'a, 'gcx, 'tcx> AstConv<'gcx, 'tcx> for FnCtxt<'a, 'gcx, 'tcx> { fn set_tainted_by_errors(&self) { self.infcx.set_tainted_by_errors() } + + fn record_ty(&self, hir_id: hir::HirId, ty: Ty<'tcx>, _span: Span) { + self.write_ty(hir_id, ty) + } } /// Controls whether the arguments are tupled. 
This is used for the call @@ -1700,6 +1706,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { param_env, err_count_on_creation: inh.tcx.sess.err_count(), ret_coercion: None, + yield_ty: None, ps: RefCell::new(UnsafetyState::function(hir::Unsafety::Normal, ast::CRATE_NODE_ID)), diverges: Cell::new(Diverges::Maybe), @@ -2005,7 +2012,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pub fn node_ty(&self, id: hir::HirId) -> Ty<'tcx> { match self.tables.borrow().node_types().get(id) { Some(&t) => t, - None if self.err_count_since_creation() != 0 => self.tcx.types.err, + None if self.is_tainted_by_errors() => self.tcx.types.err, None => { let node_id = self.tcx.hir.definitions().find_node_for_hir_id(id); bug!("no type for node {}: {} in fcx {}", @@ -2089,6 +2096,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } + fn resolve_generator_interiors(&self, def_id: DefId) { + let mut deferred_generator_interiors = self.deferred_generator_interiors.borrow_mut(); + for (body_id, witness) in deferred_generator_interiors.drain(..) { + generator_interior::resolve_interior(self, def_id, body_id, witness); + } + } + /// Apply "fallbacks" to some types /// unconstrained types get replaced with ! or () (depending on whether /// feature(never_type) is enabled), unconstrained ints with i32, and @@ -2225,7 +2239,6 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { adjusted_ty, index_ty); - // First, try built-in indexing. match (adjusted_ty.builtin_index(), &index_ty.sty) { (Some(ty), &ty::TyUint(ast::UintTy::Us)) | (Some(ty), &ty::TyInfer(ty::IntVar(_))) => { @@ -2291,13 +2304,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn resolve_lvalue_op(&self, op: LvalueOp, is_mut: bool) -> (Option, Symbol) { let (tr, name) = match (op, is_mut) { (LvalueOp::Deref, false) => - (self.tcx.lang_items.deref_trait(), "deref"), + (self.tcx.lang_items().deref_trait(), "deref"), (LvalueOp::Deref, true) => - (self.tcx.lang_items.deref_mut_trait(), "deref_mut"), + (self.tcx.lang_items().deref_mut_trait(), "deref_mut"), (LvalueOp::Index, false) => - (self.tcx.lang_items.index_trait(), "index"), + (self.tcx.lang_items().index_trait(), "index"), (LvalueOp::Index, true) => - (self.tcx.lang_items.index_mut_trait(), "index_mut"), + (self.tcx.lang_items().index_mut_trait(), "index_mut"), }; (tr, Symbol::intern(name)) } @@ -2423,7 +2436,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn parameter_count_error<'tcx>(sess: &Session, sp: Span, expected_count: usize, arg_count: usize, error_code: &str, variadic: bool, - def_span: Option) { + def_span: Option, sugg_unit: bool) { let mut err = sess.struct_span_err_with_code(sp, &format!("this function takes {}{} parameter{} but {} parameter{} supplied", if variadic {"at least "} else {""}, @@ -2433,13 +2446,23 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if arg_count == 1 {" was"} else {"s were"}), error_code); - err.span_label(sp, format!("expected {}{} parameter{}", - if variadic {"at least "} else {""}, - expected_count, - if expected_count == 1 {""} else {"s"})); if let Some(def_s) = def_span { err.span_label(def_s, "defined here"); } + if sugg_unit { + let sugg_span = sp.end_point(); + // remove closing `)` from the span + let sugg_span = sugg_span.with_hi(sugg_span.lo()); + err.span_suggestion( + sugg_span, + "expected the unit value `()`. 
You can create one with a pair of parenthesis", + String::from("()")); + } else { + err.span_label(sp, format!("expected {}{} parameter{}", + if variadic {"at least "} else {""}, + expected_count, + if expected_count == 1 {""} else {"s"})); + } err.emit(); } @@ -2448,7 +2471,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { match tuple_type.sty { ty::TyTuple(arg_types, _) if arg_types.len() != args.len() => { parameter_count_error(tcx.sess, sp_args, arg_types.len(), args.len(), - "E0057", false, def_span); + "E0057", false, def_span, false); expected_arg_tys = &[]; self.err_args(args.len()) } @@ -2477,13 +2500,21 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn_inputs.to_vec() } else { parameter_count_error(tcx.sess, sp_args, expected_arg_count, - supplied_arg_count, "E0060", true, def_span); + supplied_arg_count, "E0060", true, def_span, false); expected_arg_tys = &[]; self.err_args(supplied_arg_count) } } else { + // is the missing argument of type `()`? + let sugg_unit = if expected_arg_tys.len() == 1 && supplied_arg_count == 0 { + self.resolve_type_vars_if_possible(&expected_arg_tys[0]).is_nil() + } else if fn_inputs.len() == 1 && supplied_arg_count == 0 { + self.resolve_type_vars_if_possible(&fn_inputs[0]).is_nil() + } else { + false + }; parameter_count_error(tcx.sess, sp_args, expected_arg_count, - supplied_arg_count, "E0061", false, def_span); + supplied_arg_count, "E0061", false, def_span, sugg_unit); expected_arg_tys = &[]; self.err_args(supplied_arg_count) }; @@ -2610,7 +2641,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ast::LitKind::Str(..) => tcx.mk_static_str(), ast::LitKind::ByteStr(ref v) => { tcx.mk_imm_ref(tcx.types.re_static, - tcx.mk_array(tcx.types.u8, v.len())) + tcx.mk_array(tcx.types.u8, v.len() as u64)) } ast::LitKind::Byte(_) => tcx.types.u8, ast::LitKind::Char(_) => tcx.types.char, @@ -2751,6 +2782,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { formal_ret: Ty<'tcx>, formal_args: &[Ty<'tcx>]) -> Vec> { + let formal_ret = self.resolve_type_vars_with_obligations(formal_ret); let expected_args = expected_ret.only_has_type(self).and_then(|ret_ty| { self.fudge_regions_if_ok(&RegionVariableOrigin::Coercion(call_span), || { // Attempt to apply a subtyping relationship between the formal @@ -3070,7 +3102,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { debug!("tuple struct named {:?}", base_t); let ident = ast::Ident { name: Symbol::intern(&idx.node.to_string()), - ctxt: idx.span.ctxt.modern(), + ctxt: idx.span.ctxt().modern(), }; let (ident, def_scope) = self.tcx.adjust_ident(ident, base_def.did, self.body_id); @@ -3114,8 +3146,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if tuple_like { type_error_struct!(self.tcx().sess, expr.span, expr_t, E0612, - "attempted out-of-bounds tuple index `{}` on type `{}`", - idx.node, expr_t).emit(); + "attempted out-of-bounds tuple index `{}` on type `{}`", + idx.node, expr_t).emit(); } else { self.no_such_field_err(expr.span, idx.node, expr_t).emit(); } @@ -3193,7 +3225,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let adt_ty_hint = self.expected_inputs_for_expected_output(span, expected, adt_ty, &[adt_ty]) - .get(0).cloned().unwrap_or(adt_ty); + .get(0).cloned().unwrap_or(adt_ty); // re-link the regions that EIfEO can erase. 
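The `sugg_unit` flag threaded through `parameter_count_error` above covers the case where the single missing argument has the unit type. A small example of the situation the new suggestion targets; the function name is invented:

    fn takes_unit(x: ()) {
        let _ = x;
    }

    fn main() {
        // `takes_unit();` would be rejected with E0061 ("this function takes
        // 1 parameter but 0 parameters were supplied"); the new suggestion
        // points at inserting the unit value instead:
        takes_unit(());
    }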
self.demand_eqtype(span, adt_ty_hint, adt_ty); @@ -3231,10 +3263,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { error_happened = true; if let Some(_) = variant.find_field_named(field.name.node) { let mut err = struct_span_err!(self.tcx.sess, - field.name.span, - E0062, - "field `{}` specified more than once", - field.name.node); + field.name.span, + E0062, + "field `{}` specified more than once", + field.name.node); err.span_label(field.name.span, "used more than once"); @@ -3287,10 +3319,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { remaining_fields_names, truncated_fields_error, adt_ty) - .span_label(span, format!("missing {}{}", - remaining_fields_names, - truncated_fields_error)) - .emit(); + .span_label(span, format!("missing {}{}", + remaining_fields_names, + truncated_fields_error)) + .emit(); } } @@ -3725,13 +3757,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Only check this if not in an `if` condition, as the // mistyped comparison help is more appropriate. if !self.tcx.expr_is_lval(&lhs) { - struct_span_err!( - self.tcx.sess, expr.span, E0070, - "invalid left-hand side expression") - .span_label( - expr.span, - "left-hand of expression not valid") - .emit(); + struct_span_err!(self.tcx.sess, expr.span, E0070, + "invalid left-hand side expression") + .span_label(expr.span, "left-hand of expression not valid") + .emit(); } } } @@ -3806,7 +3835,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { hir::ExprMatch(ref discrim, ref arms, match_src) => { self.check_match(expr, &discrim, arms, expected, match_src) } - hir::ExprClosure(capture, ref decl, body_id, _) => { + hir::ExprClosure(capture, ref decl, body_id, _, _) => { self.check_expr_closure(expr, capture, &decl, body_id, expected) } hir::ExprBlock(ref body) => { @@ -3871,11 +3900,17 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } else { self.next_ty_var(TypeVariableOrigin::TypeInference(expr.span)) }; - tcx.mk_array(element_ty, args.len()) + tcx.mk_array(element_ty, args.len() as u64) } hir::ExprRepeat(ref element, count) => { - let count = eval_length(self.tcx, count, "repeat count") - .unwrap_or(0); + let count_def_id = tcx.hir.body_owner_def_id(count); + let param_env = ty::ParamEnv::empty(traits::Reveal::UserFacing); + let substs = Substs::identity_for_item(tcx.global_tcx(), count_def_id); + let count = tcx.const_eval(param_env.and((count_def_id, substs))); + + if let Err(ref err) = count { + err.report(tcx, tcx.def_span(count_def_id), "constant expression"); + } let uty = match expected { ExpectHasType(uty) => { @@ -3899,21 +3934,29 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } }; - if count > 1 { - // For [foo, ..n] where n > 1, `foo` must have - // Copy type: - let lang_item = self.tcx.require_lang_item(lang_items::CopyTraitLangItem); - self.require_type_meets(t, expr.span, traits::RepeatVec, lang_item); + if let Ok(count) = count { + let zero_or_one = count.val.to_const_int().and_then(|count| { + count.to_u64().map(|count| count <= 1) + }).unwrap_or(false); + if !zero_or_one { + // For [foo, ..n] where n > 1, `foo` must have + // Copy type: + let lang_item = self.tcx.require_lang_item(lang_items::CopyTraitLangItem); + self.require_type_meets(t, expr.span, traits::RepeatVec, lang_item); + } } if element_ty.references_error() { tcx.types.err + } else if let Ok(count) = count { + tcx.mk_ty(ty::TyArray(t, count)) } else { - tcx.mk_array(t, count) + tcx.types.err } } hir::ExprTup(ref elts) => { let flds = expected.only_has_type(self).and_then(|ty| { + let ty = 
self.resolve_type_vars_with_obligations(ty); match ty.sty { ty::TyTuple(ref flds, _) => Some(&flds[..]), _ => None @@ -3997,6 +4040,18 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } } + hir::ExprYield(ref value) => { + match self.yield_ty { + Some(ty) => { + self.check_expr_coercable_to_type(&value, ty); + } + None => { + struct_span_err!(self.tcx.sess, expr.span, E0627, + "yield statement outside of generator literal").emit(); + } + } + tcx.mk_nil() + } } } @@ -4052,6 +4107,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { (self.to_ty(qself), segment) } }; + let hir_id = self.tcx.hir.node_to_hir_id(node_id); + if let Some(cached_def) = self.tables.borrow().type_dependent_defs().get(hir_id) { + // Return directly on cache hit. This is useful to avoid doubly reporting + // errors with default match binding modes. See #44614. + return (*cached_def, Some(ty), slice::ref_slice(&**item_segment)) + } let item_name = item_segment.name; let def = match self.resolve_ufcs(span, item_name, ty, node_id) { Ok(def) => def, @@ -4068,7 +4129,6 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }; // Write back the new resolution. - let hir_id = self.tcx.hir.node_to_hir_id(node_id); self.tables.borrow_mut().type_dependent_defs_mut().insert(hir_id, def); (def, Some(ty), slice::ref_slice(&**item_segment)) } @@ -4078,7 +4138,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { init: &'gcx hir::Expr) -> Ty<'tcx> { // FIXME(tschottdorf): contains_explicit_ref_binding() must be removed - // for #42640. + // for #42640 (default match binding modes). + // + // See #44848. let ref_bindings = local.pat.contains_explicit_ref_binding(); let local_ty = self.local_ty(init.span, local.id); @@ -4110,7 +4172,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } - self.check_pat(&local.pat, t); + self.check_pat_walk(&local.pat, t, + ty::BindingMode::BindByValue(hir::Mutability::MutImmutable), + true); let pat_ty = self.node_ty(local.pat.hir_id); if pat_ty.references_error() { self.write_ty(local.hir_id, pat_ty); @@ -4448,11 +4512,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { return; } let original_span = original_sp(last_stmt.span, blk.span); - let span_semi = Span { - lo: original_span.hi - BytePos(1), - hi: original_span.hi, - ctxt: original_span.ctxt, - }; + let span_semi = original_span.with_lo(original_span.hi() - BytePos(1)); err.span_suggestion(span_semi, "consider removing this semicolon", "".to_string()); } @@ -4582,8 +4642,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { AstConv::prohibit_type_params(self, &segments[..segments.len() - poly_segments]); match def { - Def::Local(def_id) | Def::Upvar(def_id, ..) => { - let nid = self.tcx.hir.as_local_node_id(def_id).unwrap(); + Def::Local(nid) | Def::Upvar(nid, ..) => { let ty = self.local_ty(span, nid); let ty = self.normalize_associated_types_in(span, &ty); self.write_ty(self.tcx.hir.node_to_hir_id(node_id), ty); @@ -4600,6 +4659,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // a problem. 
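The new `hir::ExprYield` arm above type-checks `yield` against the enclosing generator's yield type and reports E0627 when no generator is being inferred. A minimal generator literal of the kind this code expects, assuming the unstable `generators` feature gate of this era:

    #![feature(generators)]

    fn main() {
        // `yield 1` fixes the yield type to i32; the trailing expression
        // fixes the return type to &'static str. A bare `yield` outside a
        // generator literal is rejected with E0627.
        let _gen = || {
            yield 1;
            yield 2;
            "done"
        };
    }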
self.check_path_parameter_count(span, &mut type_segment, false); self.check_path_parameter_count(span, &mut fn_segment, false); + self.check_impl_trait(span, &mut fn_segment); let (fn_start, has_self) = match (type_segment, fn_segment) { (_, Some((_, generics))) => { @@ -4620,7 +4680,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { i -= fn_start; fn_segment }; - let lifetimes = segment.map_or(&[][..], |(s, _)| &s.parameters.lifetimes[..]); + let lifetimes = segment.map_or(&[][..], |(s, _)| { + s.parameters.as_ref().map_or(&[][..], |p| &p.lifetimes[..]) + }); if let Some(lifetime) = lifetimes.get(i) { AstConv::ast_region_to_region(self, lifetime, Some(def)) @@ -4644,7 +4706,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn_segment }; let (types, infer_types) = segment.map_or((&[][..], true), |(s, _)| { - (&s.parameters.types[..], s.parameters.infer_types) + (s.parameters.as_ref().map_or(&[][..], |p| &p.types[..]), s.infer_types) }); // Skip over the lifetimes in the same segment. @@ -4721,8 +4783,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { is_method_call: bool) { let (lifetimes, types, infer_types, bindings) = segment.map_or( (&[][..], &[][..], true, &[][..]), - |(s, _)| (&s.parameters.lifetimes[..], &s.parameters.types[..], - s.parameters.infer_types, &s.parameters.bindings[..])); + |(s, _)| s.parameters.as_ref().map_or( + (&[][..], &[][..], s.infer_types, &[][..]), + |p| (&p.lifetimes[..], &p.types[..], + s.infer_types, &p.bindings[..]))); let infer_lifetimes = lifetimes.len() == 0; let count_lifetime_params = |n| { @@ -4820,6 +4884,36 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } + /// Report error if there is an explicit type parameter when using `impl Trait`. + fn check_impl_trait(&self, + span: Span, + segment: &mut Option<(&hir::PathSegment, &ty::Generics)>) { + use hir::SyntheticTyParamKind::*; + + segment.map(|(path_segment, generics)| { + let explicit = !path_segment.infer_types; + let impl_trait = generics.types.iter() + .any(|ty_param| { + match ty_param.synthetic { + Some(ImplTrait) => true, + _ => false, + } + }); + + if explicit && impl_trait { + let mut err = struct_span_err! { + self.tcx.sess, + span, + E0632, + "cannot provide explicit type parameters when `impl Trait` is \ + used in argument position." 
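`check_impl_trait` above rejects explicit type arguments for parameters introduced via `impl Trait` (E0632). A short illustration of what it forbids; `impl Trait` in argument position was still feature-gated on the nightlies of this era, which is omitted here:

    use std::fmt::Debug;

    fn show(value: impl Debug) {
        println!("{:?}", value);
    }

    fn main() {
        show(1u8); // fine: the `impl Debug` parameter is always inferred
        // show::<u8>(1u8); // error[E0632]: cannot provide explicit type
        //                  // parameters when `impl Trait` is used in
        //                  // argument position
    }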
+ }; + + err.emit(); + } + }); + } + fn structurally_resolve_type_or_else(&self, sp: Span, ty: Ty<'tcx>, f: F) -> Ty<'tcx> where F: Fn() -> Ty<'tcx> diff --git a/src/librustc_typeck/check/op.rs b/src/librustc_typeck/check/op.rs index a4e1fdaf39..a3dd81fddd 100644 --- a/src/librustc_typeck/check/op.rs +++ b/src/librustc_typeck/check/op.rs @@ -362,7 +362,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn lookup_op_method(&self, lhs_ty: Ty<'tcx>, other_tys: &[Ty<'tcx>], op: Op) -> Result, ()> { - let lang = &self.tcx.lang_items; + let lang = self.tcx.lang_items(); let span = match op { Op::Binary(op, _) => op.span, diff --git a/src/librustc_typeck/check/regionck.rs b/src/librustc_typeck/check/regionck.rs index fdbdf925e4..609af638e9 100644 --- a/src/librustc_typeck/check/regionck.rs +++ b/src/librustc_typeck/check/regionck.rs @@ -87,7 +87,7 @@ use check::FnCtxt; use middle::free_region::FreeRegionMap; use middle::mem_categorization as mc; use middle::mem_categorization::Categorization; -use middle::region::{CodeExtent, RegionMaps}; +use middle::region; use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; use rustc::traits; @@ -179,7 +179,7 @@ pub struct RegionCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { region_bound_pairs: Vec<(ty::Region<'tcx>, GenericKind<'tcx>)>, - pub region_maps: Rc, + pub region_scope_tree: Rc, free_region_map: FreeRegionMap<'tcx>, @@ -187,7 +187,7 @@ pub struct RegionCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { body_id: ast::NodeId, // call_site scope of innermost fn - call_site_scope: Option, + call_site_scope: Option, // id of innermost fn or loop repeating_scope: ast::NodeId, @@ -230,10 +230,10 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { RepeatingScope(initial_repeating_scope): RepeatingScope, initial_body_id: ast::NodeId, Subject(subject): Subject) -> RegionCtxt<'a, 'gcx, 'tcx> { - let region_maps = fcx.tcx.region_maps(subject); + let region_scope_tree = fcx.tcx.region_scope_tree(subject); RegionCtxt { fcx, - region_maps, + region_scope_tree, repeating_scope: initial_repeating_scope, body_id: initial_body_id, call_site_scope: None, @@ -243,8 +243,8 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { } } - fn set_call_site_scope(&mut self, call_site_scope: Option) - -> Option { + fn set_call_site_scope(&mut self, call_site_scope: Option) + -> Option { mem::replace(&mut self.call_site_scope, call_site_scope) } @@ -305,7 +305,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { let body_id = body.id(); - let call_site = CodeExtent::CallSiteScope(body_id); + let call_site = region::Scope::CallSite(body.value.hir_id.local_id); let old_call_site_scope = self.set_call_site_scope(Some(call_site)); let fn_sig = { @@ -330,7 +330,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { let old_body_id = self.set_body_id(body_id.node_id); self.relate_free_regions(&fn_sig_tys[..], body_id.node_id, span); - self.link_fn_args(CodeExtent::Misc(body_id.node_id), &body.arguments); + self.link_fn_args(region::Scope::Node(body.value.hir_id.local_id), &body.arguments); self.visit_body(body); self.visit_region_obligations(body_id.node_id); @@ -511,7 +511,8 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { ty::Predicate::Subtype(..) | ty::Predicate::Projection(..) | ty::Predicate::ClosureKind(..) | - ty::Predicate::ObjectSafe(..) => + ty::Predicate::ObjectSafe(..) | + ty::Predicate::ConstEvaluatable(..) 
=> vec![], ty::Predicate::WellFormed(subty) => { @@ -580,7 +581,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { fn resolve_regions_and_report_errors(&self) { self.fcx.resolve_regions_and_report_errors(self.subject_def_id, - &self.region_maps, + &self.region_scope_tree, &self.free_region_map); } @@ -610,11 +611,11 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { // that the lifetime of any regions that appear in a // variable's type enclose at least the variable's scope. - let var_scope = self.region_maps.var_scope(id); + let hir_id = self.tcx.hir.node_to_hir_id(id); + let var_scope = self.region_scope_tree.var_scope(hir_id.local_id); let var_region = self.tcx.mk_region(ty::ReScope(var_scope)); let origin = infer::BindingTypeIsNotValidAtDecl(span); - let hir_id = self.tcx.hir.node_to_hir_id(id); self.type_of_node_must_outlive(origin, hir_id, var_region); let typ = self.resolve_node_type(hir_id); @@ -668,7 +669,8 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for RegionCtxt<'a, 'gcx, 'tcx> { // scope of that expression. This also guarantees basic WF. let expr_ty = self.resolve_node_type(expr.hir_id); // the region corresponding to this expression - let expr_region = self.tcx.node_scope_region(expr.id); + let expr_region = self.tcx.mk_region(ty::ReScope( + region::Scope::Node(expr.hir_id.local_id))); self.type_must_outlive(infer::ExprTypeIsNotInScope(expr_ty, expr.span), expr_ty, expr_region); @@ -825,7 +827,7 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for RegionCtxt<'a, 'gcx, 'tcx> { intravisit::walk_expr(self, expr); } - hir::ExprClosure(.., body_id, _) => { + hir::ExprClosure(.., body_id, _, _) => { self.check_expr_fn_block(expr, body_id); } @@ -950,7 +952,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { // call occurs. // // FIXME(#6268) to support nested method calls, should be callee_id - let callee_scope = CodeExtent::Misc(call_expr.id); + let callee_scope = region::Scope::Node(call_expr.hir_id.local_id); let callee_region = self.tcx.mk_region(ty::ReScope(callee_scope)); debug!("callee_region={:?}", callee_region); @@ -979,7 +981,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { where F: for<'b> FnOnce(mc::MemCategorizationContext<'b, 'gcx, 'tcx>) -> R { f(mc::MemCategorizationContext::with_infer(&self.infcx, - &self.region_maps, + &self.region_scope_tree, &self.tables.borrow())) } @@ -1002,7 +1004,8 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { // expression. self.check_safety_of_rvalue_destructor_if_necessary(cmt.clone(), expr.span); - let expr_region = self.tcx.node_scope_region(expr.id); + let expr_region = self.tcx.mk_region(ty::ReScope( + region::Scope::Node(expr.hir_id.local_id))); for adjustment in adjustments { debug!("constrain_adjustments: adjustment={:?}, cmt={:?}", adjustment, cmt); @@ -1095,7 +1098,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { debug!("constrain_index(index_expr=?, indexed_ty={}", self.ty_to_string(indexed_ty)); - let r_index_expr = ty::ReScope(CodeExtent::Misc(index_expr.id)); + let r_index_expr = ty::ReScope(region::Scope::Node(index_expr.hir_id.local_id)); if let ty::TyRef(r_ptr, mt) = indexed_ty.sty { match mt.ty.sty { ty::TySlice(_) | ty::TyStr => { @@ -1176,7 +1179,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { /// Computes the guarantors for any ref bindings in a match and /// then ensures that the lifetime of the resulting pointer is /// linked to the lifetime of its guarantor (if any). 
- fn link_fn_args(&self, body_scope: CodeExtent, args: &[hir::Arg]) { + fn link_fn_args(&self, body_scope: region::Scope, args: &[hir::Arg]) { debug!("regionck::link_fn_args(body_scope={:?})", body_scope); for arg in args { let arg_ty = self.node_ty(arg.hir_id); @@ -1232,7 +1235,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { } adjustment::AutoBorrow::RawPtr(m) => { - let r = self.tcx.node_scope_region(expr.id); + let r = self.tcx.mk_region(ty::ReScope(region::Scope::Node(expr.hir_id.local_id))); self.link_region(expr.span, r, ty::BorrowKind::from_mutbl(m), expr_cmt); } } diff --git a/src/librustc_typeck/check/upvar.rs b/src/librustc_typeck/check/upvar.rs index 6db5c5b1cb..d179b390a2 100644 --- a/src/librustc_typeck/check/upvar.rs +++ b/src/librustc_typeck/check/upvar.rs @@ -45,7 +45,7 @@ use super::FnCtxt; use middle::expr_use_visitor as euv; use middle::mem_categorization as mc; use middle::mem_categorization::Categorization; -use rustc::ty::{self, Ty}; +use rustc::ty::{self, Ty, TyCtxt}; use rustc::infer::UpvarRegion; use syntax::ast; use syntax_pos::Span; @@ -76,10 +76,14 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for InferBorrowKindVisitor<'a, 'gcx, 'tcx> { fn visit_expr(&mut self, expr: &'gcx hir::Expr) { match expr.node { - hir::ExprClosure(cc, _, body_id, _) => { + hir::ExprClosure(cc, _, body_id, _, is_generator) => { let body = self.fcx.tcx.hir.body(body_id); self.visit_body(body); - self.fcx.analyze_closure((expr.id, expr.hir_id), expr.span, body, cc); + self.fcx.analyze_closure((expr.id, expr.hir_id), + expr.span, + body, + cc, + is_generator); } _ => { } @@ -94,22 +98,27 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { (closure_node_id, closure_hir_id): (ast::NodeId, hir::HirId), span: Span, body: &hir::Body, - capture_clause: hir::CaptureClause) { + capture_clause: hir::CaptureClause, + gen: bool) { /*! * Analysis starting point. 
*/ debug!("analyze_closure(id={:?}, body.id={:?})", closure_node_id, body.id()); - let infer_kind = match self.tables - .borrow_mut() - .closure_kinds_mut() - .entry(closure_hir_id) { - Entry::Occupied(_) => false, - Entry::Vacant(entry) => { - debug!("check_closure: adding closure {:?} as Fn", closure_node_id); - entry.insert((ty::ClosureKind::Fn, None)); - true + let infer_kind = if gen { + false + } else { + match self.tables + .borrow_mut() + .closure_kinds_mut() + .entry(closure_hir_id) { + Entry::Occupied(_) => false, + Entry::Vacant(entry) => { + debug!("check_closure: adding closure {:?} as Fn", closure_node_id); + entry.insert((ty::ClosureKind::Fn, None)); + true + } } }; @@ -117,9 +126,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.tcx.with_freevars(closure_node_id, |freevars| { for freevar in freevars { - let var_def_id = freevar.def.def_id(); let upvar_id = ty::UpvarId { - var_id: var_def_id.index, + var_id: self.tcx.hir.node_to_hir_id(freevar.var_id()), closure_expr_id: closure_def_id.index, }; debug!("seed upvar_id {:?}", upvar_id); @@ -143,7 +151,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { { let body_owner_def_id = self.tcx.hir.body_owner_def_id(body.id()); - let region_maps = &self.tcx.region_maps(body_owner_def_id); + let region_scope_tree = &self.tcx.region_scope_tree(body_owner_def_id); let mut delegate = InferBorrowKind { fcx: self, adjust_closure_kinds: FxHashMap(), @@ -152,7 +160,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { euv::ExprUseVisitor::with_infer(&mut delegate, &self.infcx, self.param_env, - region_maps, + region_scope_tree, &self.tables.borrow()) .consume_body(body); @@ -184,7 +192,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Extract the type variables UV0...UVn. let (def_id, closure_substs) = match self.node_ty(closure_hir_id).sty { - ty::TyClosure(def_id, substs) => (def_id, substs), + ty::TyClosure(def_id, substs) | + ty::TyGenerator(def_id, substs, _) => (def_id, substs), ref t => { span_bug!( span, @@ -226,11 +235,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { tcx.with_freevars(closure_id, |freevars| { freevars.iter().map(|freevar| { - let var_def_id = freevar.def.def_id(); - let var_node_id = tcx.hir.as_local_node_id(var_def_id).unwrap(); - let freevar_ty = self.node_ty(tcx.hir.node_to_hir_id(var_node_id)); + let var_node_id = freevar.var_id(); + let var_hir_id = tcx.hir.node_to_hir_id(var_node_id); + let freevar_ty = self.node_ty(var_hir_id); let upvar_id = ty::UpvarId { - var_id: var_def_id.index, + var_id: var_hir_id, closure_expr_id: closure_def_index, }; let capture = self.tables.borrow().upvar_capture(upvar_id); @@ -577,7 +586,7 @@ impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'gcx, 'tcx> { } } -fn var_name(tcx: ty::TyCtxt, var_def_index: DefIndex) -> ast::Name { - let var_node_id = tcx.hir.def_index_to_node_id(var_def_index); +fn var_name(tcx: TyCtxt, var_hir_id: hir::HirId) -> ast::Name { + let var_node_id = tcx.hir.hir_to_node_id(var_hir_id); tcx.hir.name(var_node_id) } diff --git a/src/librustc_typeck/check/wfcheck.rs b/src/librustc_typeck/check/wfcheck.rs index 69f045ab4e..ddbdd20430 100644 --- a/src/librustc_typeck/check/wfcheck.rs +++ b/src/librustc_typeck/check/wfcheck.rs @@ -449,7 +449,7 @@ impl<'a, 'gcx> CheckTypeWellFormedVisitor<'a, 'gcx> { fcx: &FnCtxt<'fcx, 'gcx, 'tcx>, method_sig: &hir::MethodSig, method: &ty::AssociatedItem, - self_ty: ty::Ty<'tcx>) + self_ty: Ty<'tcx>) { // check that the type of the method's receiver matches the // method's first parameter. 
@@ -523,7 +523,7 @@ impl<'a, 'gcx> CheckTypeWellFormedVisitor<'a, 'gcx> { let (span, name) = if index < ast_generics.lifetimes.len() { (ast_generics.lifetimes[index].lifetime.span, - ast_generics.lifetimes[index].lifetime.name) + ast_generics.lifetimes[index].lifetime.name.name()) } else { let index = index - ast_generics.lifetimes.len(); (ast_generics.ty_params[index].span, @@ -539,7 +539,7 @@ impl<'a, 'gcx> CheckTypeWellFormedVisitor<'a, 'gcx> { { let mut err = error_392(self.tcx, span, param_name); - let suggested_marker_id = self.tcx.lang_items.phantom_data(); + let suggested_marker_id = self.tcx.lang_items().phantom_data(); match suggested_marker_id { Some(def_id) => { err.help( diff --git a/src/librustc_typeck/check/writeback.rs b/src/librustc_typeck/check/writeback.rs index 36c72fc4b1..b3648d357e 100644 --- a/src/librustc_typeck/check/writeback.rs +++ b/src/librustc_typeck/check/writeback.rs @@ -45,6 +45,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { wbcx.visit_anon_types(); wbcx.visit_cast_types(); wbcx.visit_free_region_map(); + wbcx.visit_generator_sigs(); + wbcx.visit_generator_interiors(); let used_trait_imports = mem::replace(&mut self.tables.borrow_mut().used_trait_imports, DefIdSet()); @@ -164,7 +166,7 @@ impl<'cx, 'gcx, 'tcx> Visitor<'gcx> for WritebackCx<'cx, 'gcx, 'tcx> { self.visit_node_id(e.span, e.hir_id); - if let hir::ExprClosure(_, _, body, _) = e.node { + if let hir::ExprClosure(_, _, body, _, _) = e.node { let body = self.fcx.tcx.hir.body(body); for arg in &body.arguments { self.visit_node_id(e.span, arg.hir_id); @@ -195,6 +197,8 @@ impl<'cx, 'gcx, 'tcx> Visitor<'gcx> for WritebackCx<'cx, 'gcx, 'tcx> { _ => {} }; + self.visit_pat_adjustments(p.span, p.hir_id); + self.visit_node_id(p.span, p.hir_id); intravisit::walk_pat(self, p); } @@ -205,6 +209,13 @@ impl<'cx, 'gcx, 'tcx> Visitor<'gcx> for WritebackCx<'cx, 'gcx, 'tcx> { let var_ty = self.resolve(&var_ty, &l.span); self.write_ty_to_tables(l.hir_id, var_ty); } + + fn visit_ty(&mut self, hir_ty: &'gcx hir::Ty) { + intravisit::walk_ty(self, hir_ty); + let ty = self.fcx.node_ty(hir_ty.hir_id); + let ty = self.resolve(&ty, &hir_ty.span); + self.write_ty_to_tables(hir_ty.hir_id, ty); + } } impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { @@ -357,6 +368,52 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { } } + fn visit_pat_adjustments(&mut self, span: Span, hir_id: hir::HirId) { + let adjustment = self.fcx + .tables + .borrow_mut() + .pat_adjustments_mut() + .remove(hir_id); + match adjustment { + None => { + debug!("No pat_adjustments for node {:?}", hir_id); + } + + Some(adjustment) => { + let resolved_adjustment = self.resolve(&adjustment, &span); + debug!("pat_adjustments for node {:?}: {:?}", hir_id, resolved_adjustment); + self.tables.pat_adjustments_mut().insert(hir_id, resolved_adjustment); + } + } + } + + fn visit_generator_interiors(&mut self) { + let common_local_id_root = self.fcx.tables.borrow().local_id_root.unwrap(); + for (&id, interior) in self.fcx.tables.borrow().generator_interiors().iter() { + let hir_id = hir::HirId { + owner: common_local_id_root.index, + local_id: id, + }; + let interior = self.resolve(interior, &hir_id); + self.tables.generator_interiors_mut().insert(hir_id, interior); + } + } + + fn visit_generator_sigs(&mut self) { + let common_local_id_root = self.fcx.tables.borrow().local_id_root.unwrap(); + for (&id, gen_sig) in self.fcx.tables.borrow().generator_sigs().iter() { + let hir_id = hir::HirId { + owner: common_local_id_root.index, + local_id: id, + }; + let gen_sig 
= gen_sig.map(|s| ty::GenSig { + yield_ty: self.resolve(&s.yield_ty, &hir_id), + return_ty: self.resolve(&s.return_ty, &hir_id), + }); + self.tables.generator_sigs_mut().insert(hir_id, gen_sig); + } + } + fn visit_liberated_fn_sigs(&mut self) { let fcx_tables = self.fcx.tables.borrow(); debug_assert_eq!(fcx_tables.local_id_root, self.tables.local_id_root); diff --git a/src/librustc_typeck/check_unused.rs b/src/librustc_typeck/check_unused.rs index 3da154e068..0c35b5e683 100644 --- a/src/librustc_typeck/check_unused.rs +++ b/src/librustc_typeck/check_unused.rs @@ -14,8 +14,9 @@ use rustc::ty::TyCtxt; use syntax::ast; use syntax_pos::{Span, DUMMY_SP}; -use rustc::hir; +use rustc::hir::def_id::LOCAL_CRATE; use rustc::hir::itemlikevisit::ItemLikeVisitor; +use rustc::hir; use rustc::util::nodemap::DefIdSet; struct CheckVisitor<'a, 'tcx: 'a> { @@ -25,7 +26,8 @@ struct CheckVisitor<'a, 'tcx: 'a> { impl<'a, 'tcx> CheckVisitor<'a, 'tcx> { fn check_import(&self, id: ast::NodeId, span: Span) { - if !self.tcx.maybe_unused_trait_imports.contains(&id) { + let def_id = self.tcx.hir.local_def_id(id); + if !self.tcx.maybe_unused_trait_import(def_id) { return; } @@ -73,14 +75,22 @@ pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { let mut visitor = CheckVisitor { tcx, used_trait_imports }; tcx.hir.krate().visit_all_item_likes(&mut visitor); - for &(id, span) in &tcx.maybe_unused_extern_crates { - let cnum = tcx.sess.cstore.extern_mod_stmt_cnum(id).unwrap().as_def_id(); - if !tcx.is_compiler_builtins(cnum) - && !tcx.is_panic_runtime(cnum) - && !tcx.has_global_allocator(cnum) { - let lint = lint::builtin::UNUSED_EXTERN_CRATES; - let msg = "unused extern crate"; - tcx.lint_node(lint, id, span, msg); - } + for &(def_id, span) in tcx.maybe_unused_extern_crates(LOCAL_CRATE).iter() { + let cnum = tcx.extern_mod_stmt_cnum(def_id).unwrap(); + if tcx.is_compiler_builtins(cnum) { + continue + } + if tcx.is_panic_runtime(cnum) { + continue + } + if tcx.has_global_allocator(cnum) { + continue + } + assert_eq!(def_id.krate, LOCAL_CRATE); + let hir_id = tcx.hir.definitions().def_index_to_hir_id(def_id.index); + let id = tcx.hir.definitions().find_node_for_hir_id(hir_id); + let lint = lint::builtin::UNUSED_EXTERN_CRATES; + let msg = "unused extern crate"; + tcx.lint_node(lint, id, span, msg); } } diff --git a/src/librustc_typeck/coherence/builtin.rs b/src/librustc_typeck/coherence/builtin.rs index 9305eff143..fedfa51d61 100644 --- a/src/librustc_typeck/coherence/builtin.rs +++ b/src/librustc_typeck/coherence/builtin.rs @@ -12,7 +12,7 @@ //! up data structures required by type-checking/translation. 
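The reworked `check_unused` above drives the allow-by-default `unused_extern_crates` lint through the query system, skipping compiler-internal crates (compiler builtins, panic runtimes, and the crate providing the global allocator). What the lint reports, shown here with `core` only because it is always available; any never-referenced `extern crate` behaves the same:

    #![warn(unused_extern_crates)]

    // Never referenced below, so the lint reports "unused extern crate".
    extern crate core;

    fn main() {}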
use rustc::middle::free_region::FreeRegionMap; -use rustc::middle::region::RegionMaps; +use rustc::middle::region; use rustc::middle::lang_items::UnsizeTraitLangItem; use rustc::traits::{self, ObligationCause}; @@ -28,9 +28,9 @@ use rustc::hir::{self, ItemImpl}; pub fn check_trait<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_def_id: DefId) { Checker { tcx, trait_def_id } - .check(tcx.lang_items.drop_trait(), visit_implementation_of_drop) - .check(tcx.lang_items.copy_trait(), visit_implementation_of_copy) - .check(tcx.lang_items.coerce_unsized_trait(), + .check(tcx.lang_items().drop_trait(), visit_implementation_of_drop) + .check(tcx.lang_items().copy_trait(), visit_implementation_of_copy) + .check(tcx.lang_items().coerce_unsized_trait(), visit_implementation_of_coerce_unsized); } @@ -176,9 +176,9 @@ pub fn coerce_unsized_info<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl_did: DefId) -> CoerceUnsizedInfo { debug!("compute_coerce_unsized_info(impl_did={:?})", impl_did); - let coerce_unsized_trait = tcx.lang_items.coerce_unsized_trait().unwrap(); + let coerce_unsized_trait = tcx.lang_items().coerce_unsized_trait().unwrap(); - let unsize_trait = match tcx.lang_items.require(UnsizeTraitLangItem) { + let unsize_trait = match tcx.lang_items().require(UnsizeTraitLangItem) { Ok(id) => id, Err(err) => { tcx.sess.fatal(&format!("`CoerceUnsized` implementation {}", err)); @@ -390,10 +390,10 @@ pub fn coerce_unsized_info<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } // Finally, resolve all regions. - let region_maps = RegionMaps::new(); + let region_scope_tree = region::ScopeTree::default(); let mut free_regions = FreeRegionMap::new(); free_regions.relate_free_regions_from_predicates(¶m_env.caller_bounds); - infcx.resolve_regions_and_report_errors(impl_did, ®ion_maps, &free_regions); + infcx.resolve_regions_and_report_errors(impl_did, ®ion_scope_tree, &free_regions); CoerceUnsizedInfo { custom_kind: kind diff --git a/src/librustc_typeck/coherence/inherent_impls.rs b/src/librustc_typeck/coherence/inherent_impls.rs index e24d766002..15e15abfb3 100644 --- a/src/librustc_typeck/coherence/inherent_impls.rs +++ b/src/librustc_typeck/coherence/inherent_impls.rs @@ -112,6 +112,7 @@ impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for InherentCollect<'a, 'tcx> { let def_id = self.tcx.hir.local_def_id(item.id); let self_ty = self.tcx.type_of(def_id); + let lang_items = self.tcx.lang_items(); match self_ty.sty { ty::TyAdt(def, _) => { self.check_def_id(item, def.did); @@ -121,133 +122,133 @@ impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for InherentCollect<'a, 'tcx> { } ty::TyChar => { self.check_primitive_impl(def_id, - self.tcx.lang_items.char_impl(), + lang_items.char_impl(), "char", "char", item.span); } ty::TyStr => { self.check_primitive_impl(def_id, - self.tcx.lang_items.str_impl(), + lang_items.str_impl(), "str", "str", item.span); } ty::TySlice(_) => { self.check_primitive_impl(def_id, - self.tcx.lang_items.slice_impl(), + lang_items.slice_impl(), "slice", "[T]", item.span); } ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) => { self.check_primitive_impl(def_id, - self.tcx.lang_items.const_ptr_impl(), + lang_items.const_ptr_impl(), "const_ptr", "*const T", item.span); } ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => { self.check_primitive_impl(def_id, - self.tcx.lang_items.mut_ptr_impl(), + lang_items.mut_ptr_impl(), "mut_ptr", "*mut T", item.span); } ty::TyInt(ast::IntTy::I8) => { self.check_primitive_impl(def_id, - self.tcx.lang_items.i8_impl(), + lang_items.i8_impl(), "i8", "i8", 
item.span); } ty::TyInt(ast::IntTy::I16) => { self.check_primitive_impl(def_id, - self.tcx.lang_items.i16_impl(), + lang_items.i16_impl(), "i16", "i16", item.span); } ty::TyInt(ast::IntTy::I32) => { self.check_primitive_impl(def_id, - self.tcx.lang_items.i32_impl(), + lang_items.i32_impl(), "i32", "i32", item.span); } ty::TyInt(ast::IntTy::I64) => { self.check_primitive_impl(def_id, - self.tcx.lang_items.i64_impl(), + lang_items.i64_impl(), "i64", "i64", item.span); } ty::TyInt(ast::IntTy::I128) => { self.check_primitive_impl(def_id, - self.tcx.lang_items.i128_impl(), + lang_items.i128_impl(), "i128", "i128", item.span); } ty::TyInt(ast::IntTy::Is) => { self.check_primitive_impl(def_id, - self.tcx.lang_items.isize_impl(), + lang_items.isize_impl(), "isize", "isize", item.span); } ty::TyUint(ast::UintTy::U8) => { self.check_primitive_impl(def_id, - self.tcx.lang_items.u8_impl(), + lang_items.u8_impl(), "u8", "u8", item.span); } ty::TyUint(ast::UintTy::U16) => { self.check_primitive_impl(def_id, - self.tcx.lang_items.u16_impl(), + lang_items.u16_impl(), "u16", "u16", item.span); } ty::TyUint(ast::UintTy::U32) => { self.check_primitive_impl(def_id, - self.tcx.lang_items.u32_impl(), + lang_items.u32_impl(), "u32", "u32", item.span); } ty::TyUint(ast::UintTy::U64) => { self.check_primitive_impl(def_id, - self.tcx.lang_items.u64_impl(), + lang_items.u64_impl(), "u64", "u64", item.span); } ty::TyUint(ast::UintTy::U128) => { self.check_primitive_impl(def_id, - self.tcx.lang_items.u128_impl(), + lang_items.u128_impl(), "u128", "u128", item.span); } ty::TyUint(ast::UintTy::Us) => { self.check_primitive_impl(def_id, - self.tcx.lang_items.usize_impl(), + lang_items.usize_impl(), "usize", "usize", item.span); } ty::TyFloat(ast::FloatTy::F32) => { self.check_primitive_impl(def_id, - self.tcx.lang_items.f32_impl(), + lang_items.f32_impl(), "f32", "f32", item.span); } ty::TyFloat(ast::FloatTy::F64) => { self.check_primitive_impl(def_id, - self.tcx.lang_items.f64_impl(), + lang_items.f64_impl(), "f64", "f64", item.span); diff --git a/src/librustc_typeck/coherence/inherent_impls_overlap.rs b/src/librustc_typeck/coherence/inherent_impls_overlap.rs index 078ae34bc5..76dcfe36e4 100644 --- a/src/librustc_typeck/coherence/inherent_impls_overlap.rs +++ b/src/librustc_typeck/coherence/inherent_impls_overlap.rs @@ -26,7 +26,8 @@ struct InherentOverlapChecker<'a, 'tcx: 'a> { } impl<'a, 'tcx> InherentOverlapChecker<'a, 'tcx> { - fn check_for_common_items_in_impls(&self, impl1: DefId, impl2: DefId) { + fn check_for_common_items_in_impls(&self, impl1: DefId, impl2: DefId, + overlap: traits::OverlapResult) { #[derive(Copy, Clone, PartialEq)] enum Namespace { Type, @@ -50,16 +51,22 @@ impl<'a, 'tcx> InherentOverlapChecker<'a, 'tcx> { for &item2 in &impl_items2[..] 
{ if (name, namespace) == name_and_namespace(item2) { - struct_span_err!(self.tcx.sess, - self.tcx.span_of_impl(item1).unwrap(), - E0592, - "duplicate definitions with name `{}`", - name) - .span_label(self.tcx.span_of_impl(item1).unwrap(), - format!("duplicate definitions for `{}`", name)) - .span_label(self.tcx.span_of_impl(item2).unwrap(), - format!("other definition for `{}`", name)) - .emit(); + let mut err = struct_span_err!(self.tcx.sess, + self.tcx.span_of_impl(item1).unwrap(), + E0592, + "duplicate definitions with name `{}`", + name); + + err.span_label(self.tcx.span_of_impl(item1).unwrap(), + format!("duplicate definitions for `{}`", name)); + err.span_label(self.tcx.span_of_impl(item2).unwrap(), + format!("other definition for `{}`", name)); + + for cause in &overlap.intercrate_ambiguity_causes { + cause.add_intercrate_ambiguity_hint(&mut err); + } + + err.emit(); } } } @@ -71,8 +78,9 @@ impl<'a, 'tcx> InherentOverlapChecker<'a, 'tcx> { for (i, &impl1_def_id) in impls.iter().enumerate() { for &impl2_def_id in &impls[(i + 1)..] { self.tcx.infer_ctxt().enter(|infcx| { - if traits::overlapping_impls(&infcx, impl1_def_id, impl2_def_id).is_some() { - self.check_for_common_items_in_impls(impl1_def_id, impl2_def_id) + if let Some(overlap) = + traits::overlapping_impls(&infcx, impl1_def_id, impl2_def_id) { + self.check_for_common_items_in_impls(impl1_def_id, impl2_def_id, overlap) } }); } diff --git a/src/librustc_typeck/coherence/mod.rs b/src/librustc_typeck/coherence/mod.rs index 165be49f76..6109fc57b0 100644 --- a/src/librustc_typeck/coherence/mod.rs +++ b/src/librustc_typeck/coherence/mod.rs @@ -51,7 +51,7 @@ fn check_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, node_id: ast::NodeId) { fn enforce_trait_manually_implementable(tcx: TyCtxt, impl_def_id: DefId, trait_def_id: DefId) { let did = Some(trait_def_id); - let li = &tcx.lang_items; + let li = tcx.lang_items(); // Disallow *all* explicit impls of `Sized` and `Unsize` for now. if did == li.sized_trait() { diff --git a/src/librustc_typeck/collect.rs b/src/librustc_typeck/collect.rs index 8f14e765df..25a37a2c48 100644 --- a/src/librustc_typeck/collect.rs +++ b/src/librustc_typeck/collect.rs @@ -8,50 +8,21 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/* - -# Collect phase - -The collect phase of type check has the job of visiting all items, -determining their type, and writing that type into the `tcx.types` -table. Despite its name, this table does not really operate as a -*cache*, at least not for the types of items defined within the -current crate: we assume that after the collect phase, the types of -all local items will be present in the table. - -Unlike most of the types that are present in Rust, the types computed -for each item are in fact type schemes. This means that they are -generic types that may have type parameters. TypeSchemes are -represented by a pair of `Generics` and `Ty`. Type -parameters themselves are represented as `ty_param()` instances. - -The phasing of type conversion is somewhat complicated. There is no -clear set of phases we can enforce (e.g., converting traits first, -then types, or something like that) because the user can introduce -arbitrary interdependencies. So instead we generally convert things -lazilly and on demand, and include logic that checks for cycles. -Demand is driven by calls to `AstConv::get_item_type_scheme` or -`AstConv::trait_def`. 
- -Currently, we "convert" types and traits in two phases (note that -conversion only affects the types of items / enum variants / methods; -it does not e.g. compute the types of individual expressions): - -0. Intrinsics -1. Trait/Type definitions - -Conversion itself is done by simply walking each of the items in turn -and invoking an appropriate function (e.g., `trait_def_of_item` or -`convert_item`). However, it is possible that while converting an -item, we may need to compute the *type scheme* or *trait definition* -for other items. - -There are some shortcomings in this design: -- Because the item generics include defaults, cycles through type - parameter defaults are illegal even if those defaults are never - employed. This is not necessarily a bug. - -*/ +//! "Collection" is the process of determining the type and other external +//! details of each item in Rust. Collection is specifically concerned +//! with *interprocedural* things -- for example, for a function +//! definition, collection will figure out the type and signature of the +//! function, but it will not visit the *body* of the function in any way, +//! nor examine type annotations on local variables (that's the job of +//! type *checking*). +//! +//! Collecting is ultimately defined by a bundle of queries that +//! inquire after various facts about the items in the crate (e.g., +//! `type_of`, `generics_of`, `predicates_of`, etc). See the `provide` function +//! for the full set. +//! +//! At present, however, we do run collection across all items in the +//! crate as a kind of pass. This should eventually be factored away. use astconv::{AstConv, Bounds}; use lint; @@ -250,6 +221,10 @@ impl<'a, 'tcx> AstConv<'tcx, 'tcx> for ItemCtxt<'a, 'tcx> { fn set_tainted_by_errors(&self) { // no obvious place to track this, just let it go } + + fn record_ty(&self, _hir_id: hir::HirId, _ty: Ty<'tcx>, _span: Span) { + // no place to record types from signatures? + } } fn type_param_predicates<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -572,7 +547,7 @@ fn convert_enum_variant_types<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } match result { - Ok(ConstVal::Integral(x)) => Some(x), + Ok(&ty::Const { val: ConstVal::Integral(x), .. 
}) => Some(x), _ => None } } else if let Some(discr) = repr_type.disr_incr(tcx, prev_discr) { @@ -812,7 +787,8 @@ fn has_late_bound_regions<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, fn visit_lifetime(&mut self, lt: &'tcx hir::Lifetime) { if self.has_late_bound_regions.is_some() { return } - match self.tcx.named_region_map.defs.get(<.id).cloned() { + let hir_id = self.tcx.hir.node_to_hir_id(lt.id); + match self.tcx.named_region(hir_id) { Some(rl::Region::Static) | Some(rl::Region::EarlyBound(..)) => {} Some(rl::Region::LateBound(debruijn, _)) | Some(rl::Region::LateBoundAnon(debruijn, _)) @@ -830,7 +806,8 @@ fn has_late_bound_regions<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, tcx, binder_depth: 1, has_late_bound_regions: None }; for lifetime in &generics.lifetimes { - if tcx.named_region_map.late_bound.contains(&lifetime.lifetime.id) { + let hir_id = tcx.hir.node_to_hir_id(lifetime.lifetime.id); + if tcx.is_late_bound(hir_id) { return Some(lifetime.lifetime.span); } } @@ -945,6 +922,7 @@ fn generics_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, has_default: false, object_lifetime_default: rl::Set1::Empty, pure_wrt_drop: false, + synthetic: None, }); allow_defaults = true; @@ -979,18 +957,16 @@ fn generics_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let early_lifetimes = early_bound_lifetimes_from_generics(tcx, ast_generics); let regions = early_lifetimes.enumerate().map(|(i, l)| { - let issue_32330 = tcx.named_region_map.issue_32330.get(&l.lifetime.id).cloned(); ty::RegionParameterDef { - name: l.lifetime.name, + name: l.lifetime.name.name(), index: own_start + i as u32, def_id: tcx.hir.local_def_id(l.lifetime.id), pure_wrt_drop: l.pure_wrt_drop, - issue_32330: issue_32330, } }).collect::>(); - let object_lifetime_defaults = - tcx.named_region_map.object_lifetime_defaults.get(&node_id); + let hir_id = tcx.hir.node_to_hir_id(node_id); + let object_lifetime_defaults = tcx.object_lifetime_defaults(hir_id); // Now create the real type parameters. let type_start = own_start + regions.len() as u32; @@ -1016,8 +992,9 @@ fn generics_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: tcx.hir.local_def_id(p.id), has_default: p.default.is_some(), object_lifetime_default: - object_lifetime_defaults.map_or(rl::Set1::Empty, |o| o[i]), + object_lifetime_defaults.as_ref().map_or(rl::Set1::Empty, |o| o[i]), pure_wrt_drop: p.pure_wrt_drop, + synthetic: p.synthetic, } }); let mut types: Vec<_> = opt_self.into_iter().chain(types).collect(); @@ -1034,6 +1011,7 @@ fn generics_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, has_default: false, object_lifetime_default: rl::Set1::Empty, pure_wrt_drop: false, + synthetic: None, })); }); } @@ -1155,7 +1133,12 @@ fn type_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, NodeField(field) => icx.to_ty(&field.ty), - NodeExpr(&hir::Expr { node: hir::ExprClosure(..), .. }) => { + NodeExpr(&hir::Expr { node: hir::ExprClosure(.., is_generator), .. }) => { + if is_generator { + let hir_id = tcx.hir.node_to_hir_id(node_id); + return tcx.typeck_tables_of(def_id).node_id_to_type(hir_id); + } + tcx.mk_closure(def_id, Substs::for_item( tcx, def_id, |def, _| { @@ -1304,7 +1287,7 @@ fn is_unsized<'gcx: 'tcx, 'tcx>(astconv: &AstConv<'gcx, 'tcx>, } } - let kind_id = tcx.lang_items.require(SizedTraitLangItem); + let kind_id = tcx.lang_items().require(SizedTraitLangItem); match unbound { Some(ref tpb) => { // FIXME(#8559) currently requires the unbound to be built-in. 
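The rewritten module docs for `collect.rs` above describe collection as a bundle of demand-driven queries (`type_of`, `generics_of`, `predicates_of`) rather than an eager pass over every item. A minimal sketch of that memoize-on-demand shape, with toy items and types standing in for `DefId` and `Ty<'tcx>` (none of this is the real query machinery):

```rust
use std::collections::HashMap;

// Toy stand-ins: in rustc the keys would be DefIds and the values Ty<'tcx>.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum Item {
    ConstA,
    ConstB,
}

#[derive(Clone, Debug, PartialEq)]
enum Type {
    Int,
    Ref(Box<Type>),
}

/// A demand-driven "type_of" query with a memo table: nothing is computed
/// until somebody asks, and repeated asks hit the cache.
struct Collector {
    cache: HashMap<Item, Type>,
}

impl Collector {
    fn new() -> Self {
        Collector { cache: HashMap::new() }
    }

    fn type_of(&mut self, item: Item) -> Type {
        if let Some(ty) = self.cache.get(&item) {
            return ty.clone();
        }
        // "Conversion" of the item; real collection would walk the HIR here.
        let ty = match item {
            Item::ConstA => Type::Int,
            Item::ConstB => Type::Ref(Box::new(Type::Int)),
        };
        self.cache.insert(item, ty.clone());
        ty
    }
}

fn main() {
    let mut collector = Collector::new();
    let first = collector.type_of(Item::ConstB);
    let second = collector.type_of(Item::ConstB); // cache hit, no recomputation
    assert_eq!(first, second);
    println!("{:?}", collector.type_of(Item::ConstA));
}
```

The point of the sketch is only the demand-driven shape: callers pull facts as needed instead of a pass pushing them all up front, which is the direction the new docs say collection is moving in.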
@@ -1340,12 +1323,21 @@ fn early_bound_lifetimes_from_generics<'a, 'tcx>( ast_generics .lifetimes .iter() - .filter(move |l| !tcx.named_region_map.late_bound.contains(&l.lifetime.id)) + .filter(move |l| { + let hir_id = tcx.hir.node_to_hir_id(l.lifetime.id); + !tcx.is_late_bound(hir_id) + }) } fn predicates_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> ty::GenericPredicates<'tcx> { + explicit_predicates_of(tcx, def_id) +} + +fn explicit_predicates_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId) + -> ty::GenericPredicates<'tcx> { use rustc::hir::map::*; use rustc::hir::*; @@ -1444,7 +1436,7 @@ fn predicates_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let region = tcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion { def_id: tcx.hir.local_def_id(param.lifetime.id), index, - name: param.lifetime.name + name: param.lifetime.name.name(), })); index += 1; @@ -1572,7 +1564,7 @@ pub enum SizedByDefault { Yes, No, } /// a region) to ty's notion of ty param bounds, which can either be user-defined traits, or the /// built-in trait (formerly known as kind): Send. pub fn compute_bounds<'gcx: 'tcx, 'tcx>(astconv: &AstConv<'gcx, 'tcx>, - param_ty: ty::Ty<'tcx>, + param_ty: Ty<'tcx>, ast_bounds: &[hir::TyParamBound], sized_by_default: SizedByDefault, span: Span) @@ -1665,7 +1657,7 @@ fn compute_sig_of_foreign_fn_decl<'a, 'tcx>( // ABIs are handled at all correctly. if abi != abi::Abi::RustIntrinsic && abi != abi::Abi::PlatformIntrinsic && !tcx.sess.features.borrow().simd_ffi { - let check = |ast_ty: &hir::Ty, ty: ty::Ty| { + let check = |ast_ty: &hir::Ty, ty: Ty| { if ty.is_simd() { tcx.sess.struct_span_err(ast_ty.span, &format!("use of SIMD type `{}` in FFI is highly experimental and \ diff --git a/src/librustc_typeck/constrained_type_params.rs b/src/librustc_typeck/constrained_type_params.rs index 09c7487e63..5f55b9b06e 100644 --- a/src/librustc_typeck/constrained_type_params.rs +++ b/src/librustc_typeck/constrained_type_params.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use rustc::ty::{self, Ty}; +use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::fold::{TypeFoldable, TypeVisitor}; use rustc::util::nodemap::FxHashSet; @@ -86,7 +86,7 @@ impl<'tcx> TypeVisitor<'tcx> for ParameterCollector { } } -pub fn identify_constrained_type_params<'tcx>(tcx: ty::TyCtxt, +pub fn identify_constrained_type_params<'tcx>(tcx: TyCtxt, predicates: &[ty::Predicate<'tcx>], impl_trait_ref: Option>, input_parameters: &mut FxHashSet) @@ -136,7 +136,7 @@ pub fn identify_constrained_type_params<'tcx>(tcx: ty::TyCtxt, /// which is determined by 1, which requires `U`, that is determined /// by 0. I should probably pick a less tangled example, but I can't /// think of any. -pub fn setup_constraining_predicates<'tcx>(tcx: ty::TyCtxt, +pub fn setup_constraining_predicates<'tcx>(tcx: TyCtxt, predicates: &mut [ty::Predicate<'tcx>], impl_trait_ref: Option>, input_parameters: &mut FxHashSet) diff --git a/src/librustc_typeck/diagnostics.rs b/src/librustc_typeck/diagnostics.rs index cb430efd95..8df9735557 100644 --- a/src/librustc_typeck/diagnostics.rs +++ b/src/librustc_typeck/diagnostics.rs @@ -2505,50 +2505,6 @@ fn baz(x: &::A) where I: Foo {} ``` "##, -E0230: r##" -The trait has more type parameters specified than appear in its definition. 
- -Erroneous example code: - -```compile_fail,E0230 -#![feature(on_unimplemented)] -#[rustc_on_unimplemented = "Trait error on `{Self}` with `<{A},{B},{C}>`"] -// error: there is no type parameter C on trait TraitWithThreeParams -trait TraitWithThreeParams -{} -``` - -Include the correct number of type parameters and the compilation should -proceed: - -``` -#![feature(on_unimplemented)] -#[rustc_on_unimplemented = "Trait error on `{Self}` with `<{A},{B},{C}>`"] -trait TraitWithThreeParams // ok! -{} -``` -"##, - -E0232: r##" -The attribute must have a value. Erroneous code example: - -```compile_fail,E0232 -#![feature(on_unimplemented)] - -#[rustc_on_unimplemented] // error: this attribute must have a value -trait Bar {} -``` - -Please supply the missing value of the attribute. Example: - -``` -#![feature(on_unimplemented)] - -#[rustc_on_unimplemented = "foo"] // ok! -trait Bar {} -``` -"##, - E0243: r##" This error indicates that not enough type parameters were found in a type or trait. @@ -4690,7 +4646,6 @@ register_diagnostics! { E0224, // at least one non-builtin train is required for an object type E0227, // ambiguous lifetime bound, explicit lifetime bound required E0228, // explicit lifetime bound required - E0231, // only named substitution parameters are allowed // E0233, // E0234, // E0235, // structure constructor specifies a structure of type but @@ -4721,4 +4676,7 @@ register_diagnostics! { E0588, // packed struct cannot transitively contain a `[repr(align)]` struct E0592, // duplicate definitions with name `{}` // E0613, // Removed (merged with E0609) + E0627, // yield statement outside of generator literal + E0632, // cannot provide explicit type parameters when `impl Trait` is used in + // argument position. } diff --git a/src/librustc_typeck/impl_wf_check.rs b/src/librustc_typeck/impl_wf_check.rs index 14e48b9302..15708ab766 100644 --- a/src/librustc_typeck/impl_wf_check.rs +++ b/src/librustc_typeck/impl_wf_check.rs @@ -132,7 +132,7 @@ fn enforce_impl_params_are_constrained<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, !input_parameters.contains(¶m) { report_unused_parameter(tcx, lifetime.lifetime.span, - "lifetime", &lifetime.lifetime.name.to_string()); + "lifetime", &lifetime.lifetime.name.name().to_string()); } } diff --git a/src/librustc_typeck/lib.rs b/src/librustc_typeck/lib.rs index 86feea13b1..7a6ee73b9b 100644 --- a/src/librustc_typeck/lib.rs +++ b/src/librustc_typeck/lib.rs @@ -84,7 +84,6 @@ This API is completely unstable and subject to change. extern crate syntax_pos; extern crate arena; -extern crate fmt_macros; #[macro_use] extern crate rustc; extern crate rustc_platform_intrinsics as intrinsics; extern crate rustc_back; diff --git a/src/librustc_typeck/variance/constraints.rs b/src/librustc_typeck/variance/constraints.rs index 40474a7933..c1653cfb43 100644 --- a/src/librustc_typeck/variance/constraints.rs +++ b/src/librustc_typeck/variance/constraints.rs @@ -14,7 +14,8 @@ //! We walk the set of items and, for each member, generate new constraints. 
use hir::def_id::DefId; -use rustc::dep_graph::{AssertDepGraphSafe, DepKind}; +use rustc::dep_graph::{DepGraphSafe, DepKind, DepNodeColor}; +use rustc::ich::StableHashingContext; use rustc::ty::subst::Substs; use rustc::ty::{self, Ty, TyCtxt}; use syntax::ast; @@ -22,6 +23,7 @@ use rustc::hir; use rustc::hir::itemlikevisit::ItemLikeVisitor; use rustc_data_structures::transitive_relation::TransitiveRelation; +use rustc_data_structures::stable_hasher::StableHashingContextProvider; use super::terms::*; use super::terms::VarianceTerm::*; @@ -138,6 +140,16 @@ impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for ConstraintContext<'a, 'tcx> { } } +impl<'a, 'tcx> StableHashingContextProvider for ConstraintContext<'a, 'tcx> { + type ContextType = StableHashingContext<'tcx>; + + fn create_stable_hashing_context(&self) -> Self::ContextType { + self.terms_cx.tcx.create_stable_hashing_context() + } +} + +impl<'a, 'tcx> DepGraphSafe for ConstraintContext<'a, 'tcx> {} + impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { fn visit_node_helper(&mut self, id: ast::NodeId) { let tcx = self.terms_cx.tcx; @@ -150,15 +162,27 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { // See README.md for a detailed discussion // on dep-graph management. let dep_node = def_id.to_dep_node(tcx, DepKind::ItemVarianceConstraints); - tcx.dep_graph.with_task(dep_node, - AssertDepGraphSafe(self), - def_id, - visit_item_task); - fn visit_item_task<'a, 'tcx>(ccx: AssertDepGraphSafe<&mut ConstraintContext<'a, 'tcx>>, + if let Some(DepNodeColor::Green(_)) = tcx.dep_graph.node_color(&dep_node) { + // If the corresponding node has already been marked as green, the + // appropriate portion of the DepGraph has already been loaded from + // the previous graph, so we don't do any dep-tracking. Since we + // don't cache any values though, we still have to re-run the + // computation. + tcx.dep_graph.with_ignore(|| { + self.build_constraints_for_item(def_id); + }); + } else { + tcx.dep_graph.with_task(dep_node, + self, + def_id, + visit_item_task); + } + + fn visit_item_task<'a, 'tcx>(ccx: &mut ConstraintContext<'a, 'tcx>, def_id: DefId) { - ccx.0.build_constraints_for_item(def_id); + ccx.build_constraints_for_item(def_id); } } @@ -294,6 +318,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { } ty::TyFnDef(..) | + ty::TyGenerator(..) | ty::TyClosure(..) 
=> { bug!("Unexpected closure type in variance computation"); } diff --git a/src/librustc_typeck/variance/terms.rs b/src/librustc_typeck/variance/terms.rs index c624b11c5e..ac3d575b64 100644 --- a/src/librustc_typeck/variance/terms.rs +++ b/src/librustc_typeck/variance/terms.rs @@ -94,18 +94,10 @@ pub fn determine_parameters_to_be_inferred<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx> } fn lang_items(tcx: TyCtxt) -> Vec<(ast::NodeId, Vec)> { + let lang_items = tcx.lang_items(); let all = vec![ - (tcx.lang_items.phantom_data(), vec![ty::Covariant]), - (tcx.lang_items.unsafe_cell_type(), vec![ty::Invariant]), - - // Deprecated: - (tcx.lang_items.covariant_type(), vec![ty::Covariant]), - (tcx.lang_items.contravariant_type(), vec![ty::Contravariant]), - (tcx.lang_items.invariant_type(), vec![ty::Invariant]), - (tcx.lang_items.covariant_lifetime(), vec![ty::Covariant]), - (tcx.lang_items.contravariant_lifetime(), vec![ty::Contravariant]), - (tcx.lang_items.invariant_lifetime(), vec![ty::Invariant]), - + (lang_items.phantom_data(), vec![ty::Covariant]), + (lang_items.unsafe_cell_type(), vec![ty::Invariant]), ]; all.into_iter() // iterating over (Option, Variance) diff --git a/src/librustdoc/Cargo.toml b/src/librustdoc/Cargo.toml index f9400e68a1..b295b414a0 100644 --- a/src/librustdoc/Cargo.toml +++ b/src/librustdoc/Cargo.toml @@ -7,12 +7,15 @@ build = "build.rs" [lib] name = "rustdoc" path = "lib.rs" +# SNAP/stage0(cargo) +doctest = false [dependencies] env_logger = { version = "0.4", default-features = false } log = "0.3" pulldown-cmark = { version = "0.0.14", default-features = false } +html-diff = "0.0.4" [build-dependencies] build_helper = { path = "../build_helper" } -gcc = "0.3.50" +cc = "1.0" diff --git a/src/librustdoc/build.rs b/src/librustdoc/build.rs index 4189e3d2ac..97c9ca1e2d 100644 --- a/src/librustdoc/build.rs +++ b/src/librustdoc/build.rs @@ -9,12 +9,12 @@ // except according to those terms. extern crate build_helper; -extern crate gcc; +extern crate cc; fn main() { let src_dir = std::path::Path::new("../rt/hoedown/src"); build_helper::rerun_if_changed_anything_in_dir(src_dir); - let mut cfg = gcc::Config::new(); + let mut cfg = cc::Build::new(); cfg.file("../rt/hoedown/src/autolink.c") .file("../rt/hoedown/src/buffer.c") .file("../rt/hoedown/src/document.c") @@ -24,7 +24,9 @@ fn main() { .file("../rt/hoedown/src/html_smartypants.c") .file("../rt/hoedown/src/stack.c") .file("../rt/hoedown/src/version.c") + .warnings(false) .include(src_dir) + .warnings(false) .compile("libhoedown.a"); } diff --git a/src/librustdoc/clean/cfg.rs b/src/librustdoc/clean/cfg.rs index da8c3a5cf2..e3ce403f3c 100644 --- a/src/librustdoc/clean/cfg.rs +++ b/src/librustdoc/clean/cfg.rs @@ -337,7 +337,6 @@ impl<'a> fmt::Display for Html<'a> { "l4re" => "L4Re", "linux" => "Linux", "macos" => "macOS", - "nacl" => "NaCl", "netbsd" => "NetBSD", "openbsd" => "OpenBSD", "redox" => "Redox", @@ -886,4 +885,4 @@ mod test { only." ); } -} \ No newline at end of file +} diff --git a/src/librustdoc/clean/inline.rs b/src/librustdoc/clean/inline.rs index 5d39d1d27f..3a4dcc3217 100644 --- a/src/librustdoc/clean/inline.rs +++ b/src/librustdoc/clean/inline.rs @@ -120,7 +120,7 @@ pub fn load_attrs(cx: &DocContext, did: DefId) -> clean::Attributes { /// These names are used later on by HTML rendering to generate things like /// source links back to the original item. 
pub fn record_extern_fqn(cx: &DocContext, did: DefId, kind: clean::TypeKind) { - let crate_name = cx.tcx.sess.cstore.crate_name(did.krate).to_string(); + let crate_name = cx.tcx.crate_name(did.krate).to_string(); let relative = cx.tcx.def_path(did).data.into_iter().filter_map(|elem| { // extern blocks have an empty name let s = elem.data.to_string(); @@ -236,31 +236,34 @@ pub fn build_impls(cx: &DocContext, did: DefId) -> Vec { cx.populated_all_crate_impls.set(true); - for did in tcx.sess.cstore.implementations_of_trait(None) { - build_impl(cx, did, &mut impls); + for &cnum in tcx.crates().iter() { + for did in tcx.all_trait_implementations(cnum).iter() { + build_impl(cx, *did, &mut impls); + } } // Also try to inline primitive impls from other crates. + let lang_items = tcx.lang_items(); let primitive_impls = [ - tcx.lang_items.isize_impl(), - tcx.lang_items.i8_impl(), - tcx.lang_items.i16_impl(), - tcx.lang_items.i32_impl(), - tcx.lang_items.i64_impl(), - tcx.lang_items.i128_impl(), - tcx.lang_items.usize_impl(), - tcx.lang_items.u8_impl(), - tcx.lang_items.u16_impl(), - tcx.lang_items.u32_impl(), - tcx.lang_items.u64_impl(), - tcx.lang_items.u128_impl(), - tcx.lang_items.f32_impl(), - tcx.lang_items.f64_impl(), - tcx.lang_items.char_impl(), - tcx.lang_items.str_impl(), - tcx.lang_items.slice_impl(), - tcx.lang_items.const_ptr_impl(), - tcx.lang_items.mut_ptr_impl(), + lang_items.isize_impl(), + lang_items.i8_impl(), + lang_items.i16_impl(), + lang_items.i32_impl(), + lang_items.i64_impl(), + lang_items.i128_impl(), + lang_items.usize_impl(), + lang_items.u8_impl(), + lang_items.u16_impl(), + lang_items.u32_impl(), + lang_items.u64_impl(), + lang_items.u128_impl(), + lang_items.f32_impl(), + lang_items.f64_impl(), + lang_items.char_impl(), + lang_items.str_impl(), + lang_items.slice_impl(), + lang_items.const_ptr_impl(), + lang_items.mut_ptr_impl(), ]; for def_id in primitive_impls.iter().filter_map(|&def_id| def_id) { @@ -399,7 +402,7 @@ pub fn build_impl(cx: &DocContext, did: DefId, ret: &mut Vec) { clean::RegionBound(..) => unreachable!(), } }); - if trait_.def_id() == tcx.lang_items.deref_trait() { + if trait_.def_id() == tcx.lang_items().deref_trait() { super::build_deref_target_impls(cx, &trait_items, ret); } @@ -443,9 +446,9 @@ fn build_module(cx: &DocContext, did: DefId) -> clean::Module { // two namespaces, so the target may be listed twice. Make sure we only // visit each node at most once. 
let mut visited = FxHashSet(); - for item in cx.tcx.sess.cstore.item_children(did, cx.tcx.sess) { + for &item in cx.tcx.item_children(did).iter() { let def_id = item.def.def_id(); - if cx.tcx.sess.cstore.visibility(def_id) == ty::Visibility::Public { + if cx.tcx.visibility(def_id) == ty::Visibility::Public { if !visited.insert(def_id) { continue } if let Some(i) = try_inline(cx, item.def, item.ident.name) { items.extend(i) @@ -470,10 +473,10 @@ impl hir::print::PpAnn for InlinedConst { } } -fn print_inlined_const(cx: &DocContext, did: DefId) -> String { - let body = cx.tcx.sess.cstore.item_body(cx.tcx, did); +pub fn print_inlined_const(cx: &DocContext, did: DefId) -> String { + let body = cx.tcx.extern_const_body(did).body; let inlined = InlinedConst { - nested_bodies: cx.tcx.item_body_nested_bodies(did) + nested_bodies: cx.tcx.item_body_nested_bodies(did).nested_bodies }; hir::print::to_string(&inlined, |s| s.print_expr(&body.value)) } diff --git a/src/librustdoc/clean/mod.rs b/src/librustdoc/clean/mod.rs index 7d6ad5286d..424f48a17e 100644 --- a/src/librustdoc/clean/mod.rs +++ b/src/librustdoc/clean/mod.rs @@ -27,19 +27,22 @@ use syntax::ptr::P; use syntax::symbol::keywords; use syntax_pos::{self, DUMMY_SP, Pos}; +use rustc::middle::const_val::ConstVal; use rustc::middle::privacy::AccessLevels; use rustc::middle::resolve_lifetime as rl; use rustc::middle::lang_items; use rustc::hir::def::{Def, CtorKind}; use rustc::hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE}; +use rustc::traits::Reveal; use rustc::ty::subst::Substs; -use rustc::ty::{self, AdtKind}; +use rustc::ty::{self, Ty, AdtKind}; use rustc::middle::stability; use rustc::util::nodemap::{FxHashMap, FxHashSet}; use rustc_typeck::hir_ty_to_ty; use rustc::hir; +use rustc_const_math::ConstInt; use std::{mem, slice, vec}; use std::path::PathBuf; use std::rc::Rc; @@ -117,6 +120,7 @@ pub struct Crate { // These are later on moved into `CACHEKEY`, leaving the map empty. // Only here so that they can be filtered through the rustdoc passes. pub external_traits: FxHashMap, + pub masked_crates: FxHashSet, } impl<'a, 'tcx> Clean for visit_ast::RustdocVisitor<'a, 'tcx> { @@ -125,13 +129,13 @@ impl<'a, 'tcx> Clean for visit_ast::RustdocVisitor<'a, 'tcx> { { let mut r = cx.renderinfo.borrow_mut(); - r.deref_trait_did = cx.tcx.lang_items.deref_trait(); - r.deref_mut_trait_did = cx.tcx.lang_items.deref_mut_trait(); - r.owned_box_did = cx.tcx.lang_items.owned_box(); + r.deref_trait_did = cx.tcx.lang_items().deref_trait(); + r.deref_mut_trait_did = cx.tcx.lang_items().deref_mut_trait(); + r.owned_box_did = cx.tcx.lang_items().owned_box(); } let mut externs = Vec::new(); - for cnum in cx.sess().cstore.crates() { + for &cnum in cx.tcx.crates().iter() { externs.push((cnum, cnum.clean(cx))); // Analyze doc-reachability for extern items LibEmbargoVisitor::new(cx).visit_lib(cnum); @@ -141,6 +145,18 @@ impl<'a, 'tcx> Clean for visit_ast::RustdocVisitor<'a, 'tcx> { // Clean the crate, translating the entire libsyntax AST to one that is // understood by rustdoc. let mut module = self.module.clean(cx); + let mut masked_crates = FxHashSet(); + + match module.inner { + ModuleItem(ref module) => { + for it in &module.items { + if it.is_extern_crate() && it.attrs.has_doc_masked() { + masked_crates.insert(it.def_id.krate); + } + } + } + _ => unreachable!(), + } let ExternalCrate { name, src, primitives, .. 
} = LOCAL_CRATE.clean(cx); { @@ -173,6 +189,7 @@ impl<'a, 'tcx> Clean for visit_ast::RustdocVisitor<'a, 'tcx> { primitives, access_levels: Arc::new(mem::replace(&mut access_levels, Default::default())), external_traits: mem::replace(&mut external_traits, Default::default()), + masked_crates, } } } @@ -244,7 +261,7 @@ impl Clean for CrateNum { } }).collect() } else { - cx.tcx.sess.cstore.item_children(root, cx.tcx.sess).iter().map(|item| item.def) + cx.tcx.item_children(root).iter().map(|item| item.def) .filter_map(as_primitive).collect() }; @@ -326,6 +343,9 @@ impl Item { pub fn is_import(&self) -> bool { self.type_() == ItemType::Import } + pub fn is_extern_crate(&self) -> bool { + self.type_() == ItemType::ExternCrate + } pub fn is_stripped(&self) -> bool { match self.inner { StrippedItem(..) => true, _ => false } @@ -457,8 +477,8 @@ impl Clean for doctree::Module { // the outer `mod` item for the source code. let whence = { let cm = cx.sess().codemap(); - let outer = cm.lookup_char_pos(self.where_outer.lo); - let inner = cm.lookup_char_pos(self.where_inner.lo); + let outer = cm.lookup_char_pos(self.where_outer.lo()); + let inner = cm.lookup_char_pos(self.where_inner.lo()); if outer.file.start_pos == inner.file.start_pos { // mod foo { ... } self.where_outer @@ -571,6 +591,20 @@ impl Attributes { None } + pub fn has_doc_masked(&self) -> bool { + for attr in &self.other_attrs { + if !attr.check_name("doc") { continue; } + + if let Some(items) = attr.meta_item_list() { + if items.iter().filter_map(|i| i.meta_item()).any(|it| it.check_name("masked")) { + return true; + } + } + } + + false + } + pub fn from_ast(diagnostic: &::errors::Handler, attrs: &[ast::Attribute]) -> Attributes { let mut doc_strings = vec![]; let mut sp = None; @@ -672,7 +706,7 @@ impl TyParamBound { fn maybe_sized(cx: &DocContext) -> TyParamBound { let did = cx.tcx.require_lang_item(lang_items::SizedTraitLangItem); let empty = cx.tcx.intern_substs(&[]); - let path = external_path(cx, &cx.tcx.item_name(did).as_str(), + let path = external_path(cx, &cx.tcx.item_name(did), Some(did), false, vec![], empty); inline::record_extern_fqn(cx, did, TypeKind::Trait); TraitBound(PolyTrait { @@ -689,7 +723,7 @@ impl TyParamBound { fn is_sized_bound(&self, cx: &DocContext) -> bool { use rustc::hir::TraitBoundModifier as TBM; if let TyParamBound::TraitBound(PolyTrait { ref trait_, .. 
}, TBM::None) = *self { - if trait_.def_id() == cx.tcx.lang_items.sized_trait() { + if trait_.def_id() == cx.tcx.lang_items().sized_trait() { return true; } } @@ -713,7 +747,7 @@ fn external_path_params(cx: &DocContext, trait_did: Option, has_self: boo match trait_did { // Attempt to sugar an external path like Fn<(A, B,), C> to Fn(A, B) -> C - Some(did) if cx.tcx.lang_items.fn_trait_kind(did).is_some() => { + Some(did) if cx.tcx.lang_items().fn_trait_kind(did).is_some() => { assert_eq!(types.len(), 1); let inputs = match types[0].sty { ty::TyTuple(ref tys, _) => tys.iter().map(|t| t.clean(cx)).collect(), @@ -763,7 +797,7 @@ fn external_path(cx: &DocContext, name: &str, trait_did: Option, has_self impl<'tcx> Clean for ty::TraitRef<'tcx> { fn clean(&self, cx: &DocContext) -> TyParamBound { inline::record_extern_fqn(cx, self.def_id, TypeKind::Trait); - let path = external_path(cx, &cx.tcx.item_name(self.def_id).as_str(), + let path = external_path(cx, &cx.tcx.item_name(self.def_id), Some(self.def_id), true, vec![], self.substs); debug!("ty::TraitRef\n subst: {:?}\n", self.substs); @@ -830,7 +864,8 @@ impl Lifetime { impl Clean for hir::Lifetime { fn clean(&self, cx: &DocContext) -> Lifetime { - let def = cx.tcx.named_region_map.defs.get(&self.id).cloned(); + let hir_id = cx.tcx.hir.node_to_hir_id(self.id); + let def = cx.tcx.named_region(hir_id); match def { Some(rl::Region::EarlyBound(_, node_id)) | Some(rl::Region::LateBound(_, node_id)) | @@ -841,7 +876,7 @@ impl Clean for hir::Lifetime { } _ => {} } - Lifetime(self.name.to_string()) + Lifetime(self.name.name().to_string()) } } @@ -849,14 +884,14 @@ impl Clean for hir::LifetimeDef { fn clean(&self, _: &DocContext) -> Lifetime { if self.bounds.len() > 0 { let mut s = format!("{}: {}", - self.lifetime.name.to_string(), - self.bounds[0].name.to_string()); + self.lifetime.name.name(), + self.bounds[0].name.name()); for bound in self.bounds.iter().skip(1) { - s.push_str(&format!(" + {}", bound.name.to_string())); + s.push_str(&format!(" + {}", bound.name.name())); } Lifetime(s) } else { - Lifetime(self.lifetime.name.to_string()) + Lifetime(self.lifetime.name.name().to_string()) } } } @@ -933,6 +968,7 @@ impl<'a> Clean for ty::Predicate<'a> { Predicate::WellFormed(_) => panic!("not user writable"), Predicate::ObjectSafe(_) => panic!("not user writable"), Predicate::ClosureKind(..) => panic!("not user writable"), + Predicate::ConstEvaluatable(..) => panic!("not user writable"), } } } @@ -973,7 +1009,7 @@ impl<'tcx> Clean for ty::OutlivesPredicate, ty: } } -impl<'tcx> Clean for ty::OutlivesPredicate, ty::Region<'tcx>> { +impl<'tcx> Clean for ty::OutlivesPredicate, ty::Region<'tcx>> { fn clean(&self, cx: &DocContext) -> WherePredicate { let ty::OutlivesPredicate(ref ty, ref lt) = *self; @@ -1554,7 +1590,7 @@ pub enum Type { BareFunction(Box), Tuple(Vec), Slice(Box), - Array(Box, usize), + Array(Box, String), Never, Unique(Box), RawPointer(Mutability, Box), @@ -1651,6 +1687,16 @@ impl GetDefId for Type { fn def_id(&self) -> Option { match *self { ResolvedPath { did, .. } => Some(did), + Primitive(p) => ::html::render::cache().primitive_locations.get(&p).cloned(), + BorrowedRef { type_: box Generic(..), .. } => + Primitive(PrimitiveType::Reference).def_id(), + BorrowedRef { ref type_, .. } => type_.def_id(), + Tuple(..) => Primitive(PrimitiveType::Tuple).def_id(), + BareFunction(..) => Primitive(PrimitiveType::Fn).def_id(), + Slice(..) => Primitive(PrimitiveType::Slice).def_id(), + Array(..) 
=> Primitive(PrimitiveType::Array).def_id(), + RawPointer(..) => Primitive(PrimitiveType::RawPointer).def_id(), + QPath { ref self_type, .. } => self_type.def_id(), _ => None, } } @@ -1771,9 +1817,22 @@ impl Clean for hir::Ty { type_: box m.ty.clean(cx)} } TySlice(ref ty) => Slice(box ty.clean(cx)), - TyArray(ref ty, length) => { - use rustc::middle::const_val::eval_length; - let n = eval_length(cx.tcx, length, "array length").unwrap(); + TyArray(ref ty, n) => { + let def_id = cx.tcx.hir.body_owner_def_id(n); + let param_env = ty::ParamEnv::empty(Reveal::UserFacing); + let substs = Substs::identity_for_item(cx.tcx, def_id); + let n = cx.tcx.const_eval(param_env.and((def_id, substs))).unwrap(); + let n = if let ConstVal::Integral(ConstInt::Usize(n)) = n.val { + n.to_string() + } else if let ConstVal::Unevaluated(def_id, _) = n.val { + if let Some(node_id) = cx.tcx.hir.as_local_node_id(def_id) { + print_const_expr(cx, cx.tcx.hir.body_owned_by(node_id)) + } else { + inline::print_inlined_const(cx, def_id) + } + } else { + format!("{:?}", n) + }; Array(box ty.clean(cx), n) }, TyTup(ref tys) => Tuple(tys.clean(cx)), @@ -1793,24 +1852,27 @@ impl Clean for hir::Ty { }; if let Some(&hir::ItemTy(ref ty, ref generics)) = alias { - let provided_params = &path.segments.last().unwrap().parameters; + let provided_params = &path.segments.last().unwrap(); let mut ty_substs = FxHashMap(); let mut lt_substs = FxHashMap(); - for (i, ty_param) in generics.ty_params.iter().enumerate() { - let ty_param_def = Def::TyParam(cx.tcx.hir.local_def_id(ty_param.id)); - if let Some(ty) = provided_params.types.get(i).cloned() { - ty_substs.insert(ty_param_def, ty.unwrap().clean(cx)); - } else if let Some(default) = ty_param.default.clone() { - ty_substs.insert(ty_param_def, default.unwrap().clean(cx)); + provided_params.with_parameters(|provided_params| { + for (i, ty_param) in generics.ty_params.iter().enumerate() { + let ty_param_def = Def::TyParam(cx.tcx.hir.local_def_id(ty_param.id)); + if let Some(ty) = provided_params.types.get(i).cloned() { + ty_substs.insert(ty_param_def, ty.unwrap().clean(cx)); + } else if let Some(default) = ty_param.default.clone() { + ty_substs.insert(ty_param_def, default.unwrap().clean(cx)); + } } - } - for (i, lt_param) in generics.lifetimes.iter().enumerate() { - if let Some(lt) = provided_params.lifetimes.get(i).cloned() { - if !lt.is_elided() { - lt_substs.insert(lt_param.lifetime.id, lt.clean(cx)); + for (i, lt_param) in generics.lifetimes.iter().enumerate() { + if let Some(lt) = provided_params.lifetimes.get(i).cloned() { + if !lt.is_elided() { + let lt_def_id = cx.tcx.hir.local_def_id(lt_param.lifetime.id); + lt_substs.insert(lt_def_id, lt.clean(cx)); + } } } - } + }); return cx.enter_alias(ty_substs, lt_substs, || ty.clean(cx)); } resolve_type(cx, path.clean(cx), self.id) @@ -1873,7 +1935,7 @@ impl Clean for hir::Ty { } } -impl<'tcx> Clean for ty::Ty<'tcx> { +impl<'tcx> Clean for Ty<'tcx> { fn clean(&self, cx: &DocContext) -> Type { match self.sty { ty::TyNever => Never, @@ -1884,7 +1946,20 @@ impl<'tcx> Clean for ty::Ty<'tcx> { ty::TyFloat(float_ty) => Primitive(float_ty.into()), ty::TyStr => Primitive(PrimitiveType::Str), ty::TySlice(ty) => Slice(box ty.clean(cx)), - ty::TyArray(ty, n) => Array(box ty.clean(cx), n), + ty::TyArray(ty, n) => { + let n = if let ConstVal::Integral(ConstInt::Usize(n)) = n.val { + n.to_string() + } else if let ConstVal::Unevaluated(def_id, _) = n.val { + if let Some(node_id) = cx.tcx.hir.as_local_node_id(def_id) { + print_const_expr(cx, 
cx.tcx.hir.body_owned_by(node_id)) + } else { + inline::print_inlined_const(cx, def_id) + } + } else { + format!("{:?}", n) + }; + Array(box ty.clean(cx), n) + } ty::TyRawPtr(mt) => RawPointer(mt.mutbl.clean(cx), box mt.ty.clean(cx)), ty::TyRef(r, mt) => BorrowedRef { lifetime: r.clean(cx), @@ -1914,7 +1989,7 @@ impl<'tcx> Clean for ty::Ty<'tcx> { AdtKind::Enum => TypeKind::Enum, }; inline::record_extern_fqn(cx, did, kind); - let path = external_path(cx, &cx.tcx.item_name(did).as_str(), + let path = external_path(cx, &cx.tcx.item_name(did), None, false, vec![], substs); ResolvedPath { path, @@ -1932,7 +2007,7 @@ impl<'tcx> Clean for ty::Ty<'tcx> { reg.clean(cx).map(|b| typarams.push(RegionBound(b))); for did in obj.auto_traits() { let empty = cx.tcx.intern_substs(&[]); - let path = external_path(cx, &cx.tcx.item_name(did).as_str(), + let path = external_path(cx, &cx.tcx.item_name(did), Some(did), false, vec![], empty); inline::record_extern_fqn(cx, did, TypeKind::Trait); let bound = TraitBound(PolyTrait { @@ -1955,7 +2030,7 @@ impl<'tcx> Clean for ty::Ty<'tcx> { }); } - let path = external_path(cx, &cx.tcx.item_name(did).as_str(), Some(did), + let path = external_path(cx, &cx.tcx.item_name(did), Some(did), false, bindings, principal.0.substs); ResolvedPath { path, @@ -1984,7 +2059,7 @@ impl<'tcx> Clean for ty::Ty<'tcx> { }).collect()) } - ty::TyClosure(..) => Tuple(vec![]), // FIXME(pcwalton) + ty::TyClosure(..) | ty::TyGenerator(..) => Tuple(vec![]), // FIXME(pcwalton) ty::TyInfer(..) => panic!("TyInfer"), ty::TyError => panic!("TyError"), @@ -2251,8 +2326,8 @@ impl Clean for syntax_pos::Span { let cm = cx.sess().codemap(); let filename = cm.span_to_filename(*self); - let lo = cm.lookup_char_pos(self.lo); - let hi = cm.lookup_char_pos(self.hi); + let lo = cm.lookup_char_pos(self.lo()); + let hi = cm.lookup_char_pos(self.hi()); Span { filename: filename.to_string(), loline: lo.line, @@ -2346,7 +2421,7 @@ impl Clean for hir::PathSegment { fn clean(&self, cx: &DocContext) -> PathSegment { PathSegment { name: self.name.clean(cx), - params: self.parameters.clean(cx) + params: self.with_parameters(|parameters| parameters.clean(cx)) } } } @@ -2416,7 +2491,7 @@ impl Clean for hir::BareFnTy { type_params: Vec::new(), where_predicates: Vec::new() }, - decl: (&*self.decl, &[][..]).clean(cx), + decl: (&*self.decl, &self.arg_names[..]).clean(cx), abi: self.abi, } } @@ -2525,7 +2600,7 @@ impl Clean> for doctree::Impl { // If this impl block is an implementation of the Deref trait, then we // need to try inlining the target's inherent impl blocks as well. 
- if trait_.def_id() == cx.tcx.lang_items.deref_trait() { + if trait_.def_id() == cx.tcx.lang_items().deref_trait() { build_deref_target_impls(cx, &items, &mut ret); } @@ -2581,27 +2656,27 @@ fn build_deref_target_impls(cx: &DocContext, } }; let did = match primitive { - Isize => tcx.lang_items.isize_impl(), - I8 => tcx.lang_items.i8_impl(), - I16 => tcx.lang_items.i16_impl(), - I32 => tcx.lang_items.i32_impl(), - I64 => tcx.lang_items.i64_impl(), - I128 => tcx.lang_items.i128_impl(), - Usize => tcx.lang_items.usize_impl(), - U8 => tcx.lang_items.u8_impl(), - U16 => tcx.lang_items.u16_impl(), - U32 => tcx.lang_items.u32_impl(), - U64 => tcx.lang_items.u64_impl(), - U128 => tcx.lang_items.u128_impl(), - F32 => tcx.lang_items.f32_impl(), - F64 => tcx.lang_items.f64_impl(), - Char => tcx.lang_items.char_impl(), + Isize => tcx.lang_items().isize_impl(), + I8 => tcx.lang_items().i8_impl(), + I16 => tcx.lang_items().i16_impl(), + I32 => tcx.lang_items().i32_impl(), + I64 => tcx.lang_items().i64_impl(), + I128 => tcx.lang_items().i128_impl(), + Usize => tcx.lang_items().usize_impl(), + U8 => tcx.lang_items().u8_impl(), + U16 => tcx.lang_items().u16_impl(), + U32 => tcx.lang_items().u32_impl(), + U64 => tcx.lang_items().u64_impl(), + U128 => tcx.lang_items().u128_impl(), + F32 => tcx.lang_items().f32_impl(), + F64 => tcx.lang_items().f64_impl(), + Char => tcx.lang_items().char_impl(), Bool => None, - Str => tcx.lang_items.str_impl(), - Slice => tcx.lang_items.slice_impl(), - Array => tcx.lang_items.slice_impl(), + Str => tcx.lang_items().str_impl(), + Slice => tcx.lang_items().slice_impl(), + Array => tcx.lang_items().slice_impl(), Tuple => None, - RawPointer => tcx.lang_items.const_ptr_impl(), + RawPointer => tcx.lang_items().const_ptr_impl(), Reference => None, Fn => None, }; diff --git a/src/librustdoc/clean/simplify.rs b/src/librustdoc/clean/simplify.rs index be02d24e44..dd36b28bb3 100644 --- a/src/librustdoc/clean/simplify.rs +++ b/src/librustdoc/clean/simplify.rs @@ -15,13 +15,8 @@ //! the AST (e.g. see all of `clean::inline`), but this is not always a //! non-lossy transformation. The current format of storage for where clauses //! for functions and such is simply a list of predicates. One example of this -//! is that the AST predicate of: -//! -//! where T: Trait -//! -//! is encoded as: -//! -//! where T: Trait, ::Foo = Bar +//! is that the AST predicate of: `where T: Trait` is encoded as: +//! `where T: Trait, ::Foo = Bar`. //! //! This module attempts to reconstruct the original where and/or parameter //! bounds by special casing scenarios such as these. Fun! 
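The `simplify` docs above note that an associated-type binding written inside a bound is stored internally in a flattened form (roughly a plain trait bound plus a separate projection equality such as `<T as Trait>::Foo = Bar`), and that this pass re-sugars the flattened form for display. A small, compilable illustration of the surface syntax in question; the internal split is only described in the comments and is an approximation, not rustc's actual predicate types.

```rust
// Surface syntax: a single bound carrying an associated-type binding.
// Internally the compiler stores this as two predicates, roughly
// `I: Iterator` plus the projection equality `<I as Iterator>::Item == u32`;
// rustdoc's simplify pass folds that flattened form back into the sugar below.
fn sum_u32s<I>(iter: I) -> u32
where
    I: Iterator<Item = u32>,
{
    iter.sum()
}

fn main() {
    assert_eq!(sum_u32s(vec![1u32, 2, 3].into_iter()), 6);
}
```

Writing the constraint as `I: Iterator<Item = u32>` and letting the compiler split it internally is exactly what allows rustdoc to show readers the compact form rather than the desugared projection.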
diff --git a/src/librustdoc/core.rs b/src/librustdoc/core.rs index 58de0e1cae..2ecb7b546f 100644 --- a/src/librustdoc/core.rs +++ b/src/librustdoc/core.rs @@ -11,7 +11,6 @@ use rustc_lint; use rustc_driver::{driver, target_features, abort_on_err}; use rustc_driver::pretty::ReplaceBodyWithLoop; -use rustc::dep_graph::DepGraph; use rustc::session::{self, config}; use rustc::hir::def_id::DefId; use rustc::hir::def::Def; @@ -25,7 +24,7 @@ use rustc_trans::back::link; use rustc_resolve as resolve; use rustc_metadata::cstore::CStore; -use syntax::{ast, codemap}; +use syntax::codemap; use syntax::feature_gate::UnstableFeatures; use syntax::fold::Folder; use errors; @@ -66,7 +65,7 @@ pub struct DocContext<'a, 'tcx: 'a> { /// Table type parameter definition -> substituted type pub ty_substs: RefCell>, /// Table node id of lifetime parameter definition -> substituted lifetime - pub lt_substs: RefCell>, + pub lt_substs: RefCell>, } impl<'a, 'tcx> DocContext<'a, 'tcx> { @@ -78,7 +77,7 @@ impl<'a, 'tcx> DocContext<'a, 'tcx> { /// the substitutions for a type alias' RHS. pub fn enter_alias(&self, ty_substs: FxHashMap, - lt_substs: FxHashMap, + lt_substs: FxHashMap, f: F) -> R where F: FnOnce() -> R { let (old_tys, old_lts) = @@ -144,11 +143,9 @@ pub fn run_core(search_paths: SearchPaths, false, Some(codemap.clone())); - let dep_graph = DepGraph::new(false); - let _ignore = dep_graph.in_ignore(); - let cstore = Rc::new(CStore::new(&dep_graph, box rustc_trans::LlvmMetadataLoader)); + let cstore = Rc::new(CStore::new(box rustc_trans::LlvmMetadataLoader)); let mut sess = session::build_session_( - sessopts, &dep_graph, cpath, diagnostic_handler, codemap, cstore.clone() + sessopts, cpath, diagnostic_handler, codemap, ); rustc_trans::init(&sess); rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess)); @@ -178,15 +175,22 @@ pub fn run_core(search_paths: SearchPaths, let arena = DroplessArena::new(); let arenas = GlobalArenas::new(); - let hir_map = hir_map::map_crate(&mut hir_forest, defs); + let hir_map = hir_map::map_crate(&sess, &*cstore, &mut hir_forest, &defs); + let output_filenames = driver::build_output_filenames(&input, + &None, + &None, + &[], + &sess); abort_on_err(driver::phase_3_run_analysis_passes(&sess, + &*cstore, hir_map, analysis, resolutions, &arena, &arenas, &name, + &output_filenames, |tcx, analysis, _, result| { if let Err(_) = result { sess.fatal("Compilation failed, aborting rustdoc"); @@ -214,7 +218,7 @@ pub fn run_core(search_paths: SearchPaths, debug!("crate: {:?}", tcx.hir.krate()); let krate = { - let mut v = RustdocVisitor::new(&ctxt); + let mut v = RustdocVisitor::new(&*cstore, &ctxt); v.visit(tcx.hir.krate()); v.clean(&ctxt) }; diff --git a/src/librustdoc/html/format.rs b/src/librustdoc/html/format.rs index 988890ffed..6303fd662b 100644 --- a/src/librustdoc/html/format.rs +++ b/src/librustdoc/html/format.rs @@ -228,7 +228,7 @@ impl<'a> fmt::Display for WhereClause<'a> { } if end_newline { - //add a space so stripping
<br> tags and breaking spaces still renders properly + // add a space so stripping <br>
tags and breaking spaces still renders properly if f.alternate() { clause.push(' '); } else { @@ -633,7 +633,7 @@ fn fmt_type(t: &clean::Type, f: &mut fmt::Formatter, use_absolute: bool) -> fmt: fmt::Display::fmt(t, f)?; primitive_link(f, PrimitiveType::Slice, "]") } - clean::Array(ref t, n) => { + clean::Array(ref t, ref n) => { primitive_link(f, PrimitiveType::Array, "[")?; fmt::Display::fmt(t, f)?; primitive_link(f, PrimitiveType::Array, &format!("; {}]", n)) diff --git a/src/librustdoc/html/highlight.rs b/src/librustdoc/html/highlight.rs index eb27fa3abf..98863b229b 100644 --- a/src/librustdoc/html/highlight.rs +++ b/src/librustdoc/html/highlight.rs @@ -34,12 +34,18 @@ use syntax_pos::Span; /// Highlights `src`, returning the HTML output. pub fn render_with_highlighting(src: &str, class: Option<&str>, id: Option<&str>, - extension: Option<&str>) -> String { + extension: Option<&str>, + tooltip: Option<(&str, &str)>) -> String { debug!("highlighting: ================\n{}\n==============", src); let sess = parse::ParseSess::new(FilePathMapping::empty()); let fm = sess.codemap().new_filemap("".to_string(), src.to_string()); let mut out = Vec::new(); + if let Some((tooltip, class)) = tooltip { + write!(out, "
⚠{}
", + class, tooltip).unwrap(); + } write_header(class, id, &mut out).unwrap(); let mut classifier = Classifier::new(lexer::StringReader::new(&sess, fm), sess.codemap()); @@ -172,6 +178,21 @@ impl<'a> Classifier<'a> { } } + /// Gets the next token out of the lexer, emitting fatal errors if lexing fails. + fn try_next_token(&mut self) -> io::Result { + match self.lexer.try_next_token() { + Ok(tas) => Ok(tas), + Err(_) => { + self.lexer.emit_fatal_errors(); + self.lexer.sess.span_diagnostic + .struct_warn("Backing out of syntax highlighting") + .note("You probably did not intend to render this as a rust code-block") + .emit(); + Err(io::Error::new(io::ErrorKind::Other, "")) + } + } + } + /// Exhausts the `lexer` writing the output into `out`. /// /// The general structure for this method is to iterate over each token, @@ -183,18 +204,7 @@ impl<'a> Classifier<'a> { out: &mut W) -> io::Result<()> { loop { - let next = match self.lexer.try_next_token() { - Ok(tas) => tas, - Err(_) => { - self.lexer.emit_fatal_errors(); - self.lexer.sess.span_diagnostic - .struct_warn("Backing out of syntax highlighting") - .note("You probably did not intend to render this as a rust code-block") - .emit(); - return Err(io::Error::new(io::ErrorKind::Other, "")); - } - }; - + let next = self.try_next_token()?; if next.tok == token::Eof { break; } @@ -239,8 +249,8 @@ impl<'a> Classifier<'a> { token::BinOpEq(..) | token::FatArrow => Class::Op, // Miscellaneous, no highlighting. - token::Dot | token::DotDot | token::DotDotDot | token::Comma | token::Semi | - token::Colon | token::ModSep | token::LArrow | token::OpenDelim(_) | + token::Dot | token::DotDot | token::DotDotDot | token::DotDotEq | token::Comma | + token::Semi | token::Colon | token::ModSep | token::LArrow | token::OpenDelim(_) | token::CloseDelim(token::Brace) | token::CloseDelim(token::Paren) | token::CloseDelim(token::NoDelim) => Class::None, @@ -255,13 +265,37 @@ impl<'a> Classifier<'a> { } } - // This is the start of an attribute. We're going to want to + // This might be the start of an attribute. We're going to want to // continue highlighting it as an attribute until the ending ']' is // seen, so skip out early. Down below we terminate the attribute // span when we see the ']'. token::Pound => { - self.in_attribute = true; - out.enter_span(Class::Attribute)?; + // We can't be sure that our # begins an attribute (it could + // just be appearing in a macro) until we read either `#![` or + // `#[` from the input stream. + // + // We don't want to start highlighting as an attribute until + // we're confident there is going to be a ] coming up, as + // otherwise # tokens in macros highlight the rest of the input + // as an attribute. + + // Case 1: #![inner_attribute] + if self.lexer.peek().tok == token::Not { + self.try_next_token()?; // NOTE: consumes `!` token! + if self.lexer.peek().tok == token::OpenDelim(token::Bracket) { + self.in_attribute = true; + out.enter_span(Class::Attribute)?; + } + out.string("#", Class::None, None)?; + out.string("!", Class::None, None)?; + return Ok(()); + } + + // Case 2: #[outer_attribute] + if self.lexer.peek().tok == token::OpenDelim(token::Bracket) { + self.in_attribute = true; + out.enter_span(Class::Attribute)?; + } out.string("#", Class::None, None)?; return Ok(()); } @@ -319,7 +353,7 @@ impl<'a> Classifier<'a> { token::Lifetime(..) => Class::Lifetime, token::Underscore | token::Eof | token::Interpolated(..) 
| - token::Tilde | token::At => Class::None, + token::Tilde | token::At | token::DotEq => Class::None, }; // Anything that didn't return above is the simple case where we the diff --git a/src/librustdoc/html/markdown.rs b/src/librustdoc/html/markdown.rs index 2d14c02bf8..80d1f0b01c 100644 --- a/src/librustdoc/html/markdown.rs +++ b/src/librustdoc/html/markdown.rs @@ -16,10 +16,12 @@ //! of `fmt::Display`. Example usage: //! //! ``` -//! use rustdoc::html::markdown::Markdown; +//! #![feature(rustc_private)] +//! +//! use rustdoc::html::markdown::{RenderType, Markdown}; //! //! let s = "My *markdown* _text_"; -//! let html = format!("{}", Markdown(s)); +//! let html = format!("{}", Markdown(s, RenderType::Pulldown)); //! // ... something using html //! ``` @@ -158,10 +160,15 @@ impl<'a, I: Iterator>> Iterator for CodeBlocks<'a, I> { fn next(&mut self) -> Option { let event = self.inner.next(); + let compile_fail; + let ignore; if let Some(Event::Start(Tag::CodeBlock(lang))) = event { - if !LangString::parse(&lang).rust { + let parse_result = LangString::parse(&lang); + if !parse_result.rust { return Some(Event::Start(Tag::CodeBlock(lang))); } + compile_fail = parse_result.compile_fail; + ignore = parse_result.ignore; } else { return event; } @@ -220,11 +227,22 @@ impl<'a, I: Iterator>> Iterator for CodeBlocks<'a, I> { url, test_escaped, channel )) }); + let tooltip = if ignore { + Some(("Be careful when using this code, it's not being tested!", "ignore")) + } else if compile_fail { + Some(("This code doesn't compile so be extra careful!", "compile_fail")) + } else { + None + }; s.push_str(&highlight::render_with_highlighting( &text, - Some("rust-example-rendered"), + Some(&format!("rust-example-rendered{}", + if ignore { " ignore" } + else if compile_fail { " compile_fail" } + else { "" })), None, - playground_button.as_ref().map(String::as_str))); + playground_button.as_ref().map(String::as_str), + tooltip)); Some(Event::Html(s.into())) }) } @@ -554,12 +572,18 @@ pub fn render(w: &mut fmt::Formatter, let origtext = str::from_utf8(text).unwrap(); let origtext = origtext.trim_left(); debug!("docblock: ==============\n{:?}\n=======", text); + let mut compile_fail = false; + let mut ignore = false; + let rendered = if lang.is_null() || origtext.is_empty() { false } else { let rlang = (*lang).as_bytes(); let rlang = str::from_utf8(rlang).unwrap(); - if !LangString::parse(rlang).rust { + let parse_result = LangString::parse(rlang); + compile_fail = parse_result.compile_fail; + ignore = parse_result.ignore; + if !parse_result.rust { (my_opaque.dfltblk)(ob, orig_text, lang, opaque as *const hoedown_renderer_data, line); @@ -614,11 +638,22 @@ pub fn render(w: &mut fmt::Formatter, url, test_escaped, channel )) }); + let tooltip = if ignore { + Some(("Be careful when using this code, it's not being tested!", "ignore")) + } else if compile_fail { + Some(("This code doesn't compile so be extra careful!", "compile_fail")) + } else { + None + }; s.push_str(&highlight::render_with_highlighting( &text, - Some("rust-example-rendered"), + Some(&format!("rust-example-rendered{}", + if ignore { " ignore" } + else if compile_fail { " compile_fail" } + else { "" })), None, - playground_button.as_ref().map(String::as_str))); + playground_button.as_ref().map(String::as_str), + tooltip)); hoedown_buffer_put(ob, s.as_ptr(), s.len()); }) } @@ -909,10 +944,8 @@ impl LangString { let mut seen_rust_tags = false; let mut seen_other_tags = false; let mut data = LangString::all_false(); - let mut allow_compile_fail = false; 
let mut allow_error_code_check = false; if UnstableFeatures::from_environment().is_nightly_build() { - allow_compile_fail = true; allow_error_code_check = true; } @@ -936,7 +969,7 @@ impl LangString { data.test_harness = true; seen_rust_tags = !seen_other_tags || seen_rust_tags; } - "compile_fail" if allow_compile_fail => { + "compile_fail" => { data.compile_fail = true; seen_rust_tags = !seen_other_tags || seen_rust_tags; data.no_run = true; diff --git a/src/librustdoc/html/render.rs b/src/librustdoc/html/render.rs index 5457f69cb6..a3f446885f 100644 --- a/src/librustdoc/html/render.rs +++ b/src/librustdoc/html/render.rs @@ -54,16 +54,14 @@ use externalfiles::ExternalHtml; use serialize::json::{ToJson, Json, as_json}; use syntax::{abi, ast}; -use syntax::feature_gate::UnstableFeatures; use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId}; use rustc::middle::privacy::AccessLevels; use rustc::middle::stability; use rustc::hir; use rustc::util::nodemap::{FxHashMap, FxHashSet}; -use rustc::session::config::nightly_options::is_nightly_build; use rustc_data_structures::flock; -use clean::{self, AttributesExt, GetDefId, SelfTy, Mutability}; +use clean::{self, AttributesExt, GetDefId, SelfTy, Mutability, Span}; use doctree; use fold::DocFolder; use html::escape::Escape; @@ -75,6 +73,8 @@ use html::item_type::ItemType; use html::markdown::{self, Markdown, MarkdownHtml, MarkdownSummaryLine, RenderType}; use html::{highlight, layout}; +use html_diff; + /// A pair of name and its optional document. pub type NameDoc = (String, Option); @@ -122,6 +122,9 @@ pub struct SharedContext { /// The given user css file which allow to customize the generated /// documentation theme. pub css_file_extension: Option, + /// Warnings for the user if rendering would differ using different markdown + /// parsers. + pub markdown_warnings: RefCell)>>, } /// Indicates where an external crate can be found. @@ -263,6 +266,7 @@ pub struct Cache { deref_trait_did: Option, deref_mut_trait_did: Option, owned_box_did: Option, + masked_crates: FxHashSet, // In rare case where a structure is defined in one module but implemented // in another, if the implementing module is parsed before defining module, @@ -455,6 +459,7 @@ pub fn run(mut krate: clean::Crate, krate: krate.name.clone(), }, css_file_extension: css_file_extension.clone(), + markdown_warnings: RefCell::new(vec![]), }; // If user passed in `--playground-url` arg, we fill in crate name here @@ -534,6 +539,7 @@ pub fn run(mut krate: clean::Crate, deref_trait_did, deref_mut_trait_did, owned_box_did, + masked_crates: mem::replace(&mut krate.masked_crates, FxHashSet()), typarams: external_typarams, }; @@ -577,8 +583,134 @@ pub fn run(mut krate: clean::Crate, write_shared(&cx, &krate, &*cache, index)?; + let scx = cx.shared.clone(); + // And finally render the whole crate's documentation - cx.krate(krate) + let result = cx.krate(krate); + + let markdown_warnings = scx.markdown_warnings.borrow(); + if !markdown_warnings.is_empty() { + let mut intro_msg = false; + for &(ref span, ref text, ref diffs) in &*markdown_warnings { + for d in diffs { + render_difference(d, &mut intro_msg, span, text); + } + } + } + + result +} + +// A short, single-line view of `s`. +fn concise_str(mut s: &str) -> String { + if s.contains('\n') { + s = s.lines().next().expect("Impossible! 
We just found a newline"); + } + if s.len() > 70 { + let mut lo = 50; + let mut hi = s.len() - 20; + while !s.is_char_boundary(lo) { + lo -= 1; + } + while !s.is_char_boundary(hi) { + hi += 1; + } + return format!("{} ... {}", &s[..lo], &s[hi..]); + } + s.to_owned() +} + +// Returns short versions of s1 and s2, starting from where the strings differ. +fn concise_compared_strs(s1: &str, s2: &str) -> (String, String) { + let s1 = s1.trim(); + let s2 = s2.trim(); + if !s1.contains('\n') && !s2.contains('\n') && s1.len() <= 70 && s2.len() <= 70 { + return (s1.to_owned(), s2.to_owned()); + } + + let mut start_byte = 0; + for (c1, c2) in s1.chars().zip(s2.chars()) { + if c1 != c2 { + break; + } + + start_byte += c1.len_utf8(); + } + + if start_byte == 0 { + return (concise_str(s1), concise_str(s2)); + } + + let s1 = &s1[start_byte..]; + let s2 = &s2[start_byte..]; + (format!("...{}", concise_str(s1)), format!("...{}", concise_str(s2))) +} + + +fn print_message(msg: &str, intro_msg: &mut bool, span: &Span, text: &str) { + if !*intro_msg { + println!("WARNING: documentation for this crate may be rendered \ + differently using the new Pulldown renderer."); + println!(" See https://github.com/rust-lang/rust/issues/44229 for details."); + *intro_msg = true; + } + println!("WARNING: rendering difference in `{}`", concise_str(text)); + println!(" --> {}:{}:{}", span.filename, span.loline, span.locol); + println!("{}", msg); +} + +fn render_difference(diff: &html_diff::Difference, intro_msg: &mut bool, span: &Span, text: &str) { + match *diff { + html_diff::Difference::NodeType { ref elem, ref opposite_elem } => { + print_message(&format!(" {} Types differ: expected: `{}`, found: `{}`", + elem.path, elem.element_name, opposite_elem.element_name), + intro_msg, span, text); + } + html_diff::Difference::NodeName { ref elem, ref opposite_elem } => { + print_message(&format!(" {} Tags differ: expected: `{}`, found: `{}`", + elem.path, elem.element_name, opposite_elem.element_name), + intro_msg, span, text); + } + html_diff::Difference::NodeAttributes { ref elem, + ref elem_attributes, + ref opposite_elem_attributes, + .. } => { + print_message(&format!(" {} Attributes differ in `{}`: expected: `{:?}`, \ + found: `{:?}`", + elem.path, elem.element_name, elem_attributes, + opposite_elem_attributes), + intro_msg, span, text); + } + html_diff::Difference::NodeText { ref elem, ref elem_text, ref opposite_elem_text, .. 
} => { + if elem_text.split("\n") + .zip(opposite_elem_text.split("\n")) + .any(|(a, b)| a.trim() != b.trim()) { + let (s1, s2) = concise_compared_strs(elem_text, opposite_elem_text); + print_message(&format!(" {} Text differs:\n expected: `{}`\n \ + found: `{}`", + elem.path, s1, s2), + intro_msg, span, text); + } + } + html_diff::Difference::NotPresent { ref elem, ref opposite_elem } => { + if let Some(ref elem) = *elem { + print_message(&format!(" {} One element is missing: expected: `{}`", + elem.path, elem.element_name), + intro_msg, span, text); + } else if let Some(ref elem) = *opposite_elem { + if elem.element_name.is_empty() { + print_message(&format!(" {} One element is missing: expected: `{}`", + elem.path, concise_str(&elem.element_content)), + intro_msg, span, text); + } else { + print_message(&format!(" {} Unexpected element `{}`: found: `{}`", + elem.path, elem.element_name, + concise_str(&elem.element_content)), + intro_msg, span, text); + } + } + } + } } /// Build the search index from the collected metadata @@ -1004,12 +1136,16 @@ impl DocFolder for Cache { // Collect all the implementors of traits. if let clean::ImplItem(ref i) = item.inner { - if let Some(did) = i.trait_.def_id() { - self.implementors.entry(did).or_insert(vec![]).push(Implementor { - def_id: item.def_id, - stability: item.stability.clone(), - impl_: i.clone(), - }); + if !self.masked_crates.contains(&item.def_id.krate) { + if let Some(did) = i.trait_.def_id() { + if i.for_.def_id().map_or(true, |d| !self.masked_crates.contains(&d.krate)) { + self.implementors.entry(did).or_insert(vec![]).push(Implementor { + def_id: item.def_id, + stability: item.stability.clone(), + impl_: i.clone(), + }); + } + } } } @@ -1171,18 +1307,24 @@ impl DocFolder for Cache { // primitive rather than always to a struct/enum. // Note: matching twice to restrict the lifetime of the `i` borrow. let did = if let clean::Item { inner: clean::ImplItem(ref i), .. } = item { - match i.for_ { - clean::ResolvedPath { did, .. } | - clean::BorrowedRef { - type_: box clean::ResolvedPath { did, .. }, .. - } => { - Some(did) - } - ref t => { - t.primitive_type().and_then(|t| { - self.primitive_locations.get(&t).cloned() - }) + let masked_trait = i.trait_.def_id().map_or(false, + |d| self.masked_crates.contains(&d.krate)); + if !masked_trait { + match i.for_ { + clean::ResolvedPath { did, .. } | + clean::BorrowedRef { + type_: box clean::ResolvedPath { did, .. }, .. + } => { + Some(did) + } + ref t => { + t.primitive_type().and_then(|t| { + self.primitive_locations.get(&t).cloned() + }) + } } + } else { + None } } else { unreachable!() @@ -1523,8 +1665,7 @@ impl<'a> fmt::Display for Item<'a> { } else { write!(fmt, "Module ")?; }, - clean::FunctionItem(..) | clean::ForeignFunctionItem(..) => - write!(fmt, "Function ")?, + clean::FunctionItem(..) | clean::ForeignFunctionItem(..) => write!(fmt, "Function ")?, clean::TraitItem(..) => write!(fmt, "Trait ")?, clean::StructItem(..) => write!(fmt, "Struct ")?, clean::UnionItem(..) => write!(fmt, "Union ")?, @@ -1532,8 +1673,7 @@ impl<'a> fmt::Display for Item<'a> { clean::TypedefItem(..) => write!(fmt, "Type Definition ")?, clean::MacroItem(..) => write!(fmt, "Macro ")?, clean::PrimitiveItem(..) => write!(fmt, "Primitive Type ")?, - clean::StaticItem(..) | clean::ForeignStaticItem(..) => - write!(fmt, "Static ")?, + clean::StaticItem(..) | clean::ForeignStaticItem(..) => write!(fmt, "Static ")?, clean::ConstantItem(..) => write!(fmt, "Constant ")?, _ => { // We don't generate pages for any other type. 
@@ -1641,12 +1781,54 @@ fn plain_summary_line(s: Option<&str>) -> String { fn document(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item) -> fmt::Result { document_stability(w, cx, item)?; let prefix = render_assoc_const_value(item); - document_full(w, item, cx.render_type, &prefix)?; + document_full(w, item, cx, &prefix)?; Ok(()) } +/// Render md_text as markdown. Warns the user if there are difference in +/// rendering between Pulldown and Hoedown. +fn render_markdown(w: &mut fmt::Formatter, + md_text: &str, + span: Span, + render_type: RenderType, + prefix: &str, + scx: &SharedContext) + -> fmt::Result { + // We only emit warnings if the user has opted-in to Pulldown rendering. + let output = if render_type == RenderType::Pulldown { + // Save the state of USED_ID_MAP so it only gets updated once even + // though we're rendering twice. + let orig_used_id_map = USED_ID_MAP.with(|map| map.borrow().clone()); + let hoedown_output = format!("{}", Markdown(md_text, RenderType::Hoedown)); + USED_ID_MAP.with(|map| *map.borrow_mut() = orig_used_id_map); + let pulldown_output = format!("{}", Markdown(md_text, RenderType::Pulldown)); + let mut differences = html_diff::get_differences(&pulldown_output, &hoedown_output); + differences.retain(|s| { + match *s { + html_diff::Difference::NodeText { ref elem_text, + ref opposite_elem_text, + .. } + if elem_text.split_whitespace().eq(opposite_elem_text.split_whitespace()) => { + false + } + _ => true, + } + }); + + if !differences.is_empty() { + scx.markdown_warnings.borrow_mut().push((span, md_text.to_owned(), differences)); + } + + pulldown_output + } else { + format!("{}", Markdown(md_text, RenderType::Hoedown)) + }; + + write!(w, "
<div class='docblock'>{}{}</div>
", prefix, output) +} + fn document_short(w: &mut fmt::Formatter, item: &clean::Item, link: AssocItemLink, - render_type: RenderType, prefix: &str) -> fmt::Result { + cx: &Context, prefix: &str) -> fmt::Result { if let Some(s) = item.doc_value() { let markdown = if s.contains('\n') { format!("{} [Read more]({})", @@ -1654,7 +1836,7 @@ fn document_short(w: &mut fmt::Formatter, item: &clean::Item, link: AssocItemLin } else { format!("{}", &plain_summary_line(Some(s))) }; - write!(w, "
<div class='docblock'>{}{}</div>
", prefix, Markdown(&markdown, render_type))?; + render_markdown(w, &markdown, item.source.clone(), cx.render_type, prefix, &cx.shared)?; } else if !prefix.is_empty() { write!(w, "
<div class='docblock'>{}</div>
", prefix)?; } @@ -1669,6 +1851,7 @@ fn render_assoc_const_value(item: &clean::Item) -> String { None, None, None, + None, ) } _ => String::new(), @@ -1676,9 +1859,9 @@ fn render_assoc_const_value(item: &clean::Item) -> String { } fn document_full(w: &mut fmt::Formatter, item: &clean::Item, - render_type: RenderType, prefix: &str) -> fmt::Result { + cx: &Context, prefix: &str) -> fmt::Result { if let Some(s) = item.doc_value() { - write!(w, "
<div class='docblock'>{}{}</div>
", prefix, Markdown(s, render_type))?; + render_markdown(w, s, item.source.clone(), cx.render_type, prefix, &cx.shared)?; } else if !prefix.is_empty() { write!(w, "
<div class='docblock'>{}</div>
", prefix)?; } @@ -2044,14 +2227,9 @@ fn item_static(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, fn item_function(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, f: &clean::Function) -> fmt::Result { - // FIXME(#24111): remove when `const_fn` is stabilized - let vis_constness = match UnstableFeatures::from_environment() { - UnstableFeatures::Allow => f.constness, - _ => hir::Constness::NotConst - }; let name_len = format!("{}{}{}{:#}fn {}{:#}", VisSpace(&it.visibility), - ConstnessSpace(vis_constness), + ConstnessSpace(f.constness), UnsafetySpace(f.unsafety), AbiSpace(f.abi), it.name.as_ref().unwrap(), @@ -2061,7 +2239,7 @@ fn item_function(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, write!(w, "{vis}{constness}{unsafety}{abi}fn \ {name}{generics}{decl}{where_clause}", vis = VisSpace(&it.visibility), - constness = ConstnessSpace(vis_constness), + constness = ConstnessSpace(f.constness), unsafety = UnsafetySpace(f.unsafety), abi = AbiSpace(f.abi), name = it.name.as_ref().unwrap(), @@ -2075,6 +2253,18 @@ fn item_function(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, document(w, cx, it) } +fn implementor2item<'a>(cache: &'a Cache, imp : &Implementor) -> Option<&'a clean::Item> { + if let Some(t_did) = imp.impl_.for_.def_id() { + if let Some(impl_item) = cache.impls.get(&t_did).and_then(|i| i.iter() + .find(|i| i.impl_item.def_id == imp.def_id)) + { + let i = &impl_item.impl_item; + return Some(i); + } + } + None +} + fn item_trait(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, t: &clean::Trait) -> fmt::Result { let mut bounds = String::new(); @@ -2246,12 +2436,12 @@ fn item_trait(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)?; let cache = cache(); - write!(w, " + let impl_header = "

Implementors

    - ")?; + "; if let Some(implementors) = cache.implementors.get(&it.def_id) { // The DefId is for the first Type found with that name. The bool is // if any Types with the same name but different DefId have been found. @@ -2273,8 +2463,42 @@ fn item_trait(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, } } - for implementor in implementors { - write!(w, "
  • ")?; + let (local, foreign) = implementors.iter() + .partition::, _>(|i| i.impl_.for_.def_id() + .map_or(true, |d| cache.paths.contains_key(&d))); + + if !foreign.is_empty() { + write!(w, " +

    + Implementations on Foreign Types +

    + ")?; + + for implementor in foreign { + if let Some(i) = implementor2item(&cache, implementor) { + let impl_ = Impl { impl_item: i.clone() }; + let assoc_link = AssocItemLink::GotoSource( + i.def_id, &implementor.impl_.provided_trait_methods + ); + render_impl(w, cx, &impl_, assoc_link, + RenderMode::Normal, i.stable_since(), false)?; + } + } + } + + write!(w, "{}", impl_header)?; + + for implementor in local { + write!(w, "
  • ")?; + if let Some(item) = implementor2item(&cache, implementor) { + if let Some(l) = (Item { cx, item }).src_href() { + write!(w, "
    ")?; + write!(w, "[src]", + l, "goto source code")?; + write!(w, "
    ")?; + } + } + write!(w, "")?; // If there's already another implementor that has the same abbridged name, use the // full path, for example in `std::iter::ExactSizeIterator` let use_absolute = match implementor.impl_.for_ { @@ -2295,6 +2519,10 @@ fn item_trait(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, } writeln!(w, "
  • ")?; } + } else { + // even without any implementations to write in, we still want the heading and list, so the + // implementors javascript file pulled in below has somewhere to write the impls into + write!(w, "{}", impl_header)?; } write!(w, "
")?; write!(w, r#" + + + + + + diff --git a/src/tools/clippy/util/gh-pages/versions.html b/src/tools/clippy/util/gh-pages/versions.html new file mode 100644 index 0000000000..310a387369 --- /dev/null +++ b/src/tools/clippy/util/gh-pages/versions.html @@ -0,0 +1,86 @@ + + + + + + + Clippy + + + + + +
+ + +
+ + + + +
+
+ + + + + + + + + + diff --git a/src/tools/clippy/util/lintlib.py b/src/tools/clippy/util/lintlib.py new file mode 100644 index 0000000000..c46706352b --- /dev/null +++ b/src/tools/clippy/util/lintlib.py @@ -0,0 +1,95 @@ +# Common utils for the several housekeeping scripts. + +import os +import re +import collections + +import logging as log +log.basicConfig(level=log.INFO, format='%(levelname)s: %(message)s') + +Lint = collections.namedtuple('Lint', 'name level doc sourcefile') +Config = collections.namedtuple('Config', 'name ty doc default') + +lintname_re = re.compile(r'''pub\s+([A-Z_][A-Z_0-9]*)''') +level_re = re.compile(r'''(Forbid|Deny|Warn|Allow)''') +conf_re = re.compile(r'''define_Conf! {\n([^}]*)\n}''', re.MULTILINE) +confvar_re = re.compile( + r'''/// Lint: (\w+). (.*).*\n\s*\([^,]+,\s+"([^"]+)",\s+([^=\)]+)=>\s+(.*)\),''', re.MULTILINE) + + +def parse_lints(lints, filepath): + last_comment = [] + comment = True + + with open(filepath) as fp: + for line in fp: + if comment: + if line.startswith("/// "): + last_comment.append(line[4:]) + elif line.startswith("///"): + last_comment.append(line[3:]) + elif line.startswith("declare_lint!"): + comment = False + deprecated = False + restriction = False + elif line.startswith("declare_restriction_lint!"): + comment = False + deprecated = False + restriction = True + elif line.startswith("declare_deprecated_lint!"): + comment = False + deprecated = True + else: + last_comment = [] + if not comment: + m = lintname_re.search(line) + if m: + name = m.group(1).lower() + + if deprecated: + level = "Deprecated" + elif restriction: + level = "Allow" + else: + while True: + m = level_re.search(line) + if m: + level = m.group(0) + break + line = next(fp) + + log.info("found %s with level %s in %s", + name, level, filepath) + lints.append(Lint(name, level, last_comment, filepath)) + last_comment = [] + comment = True + if "}" in line: + log.warn("Warning: missing Lint-Name in %s", filepath) + comment = True + + +def parse_configs(path): + configs = {} + with open(os.path.join(path, 'utils/conf.rs')) as fp: + contents = fp.read() + + match = re.search(conf_re, contents) + confvars = re.findall(confvar_re, match.group(1)) + + for (lint, doc, name, default, ty) in confvars: + configs[lint.lower()] = Config(name, ty, doc, default) + + return configs + + +def parse_all(path="clippy_lints/src"): + lints = [] + for filename in os.listdir(path): + if filename.endswith(".rs"): + parse_lints(lints, os.path.join(path, filename)) + log.info("got %s lints", len(lints)) + + configs = parse_configs(path) + log.info("got %d configs", len(configs)) + + return lints, configs diff --git a/src/tools/clippy/util/update_lints.py b/src/tools/clippy/util/update_lints.py new file mode 100755 index 0000000000..6c08f575d0 --- /dev/null +++ b/src/tools/clippy/util/update_lints.py @@ -0,0 +1,227 @@ +#!/usr/bin/env python +# Generate a Markdown table of all lints, and put it in README.md. +# With -n option, only print the new table to stdout. +# With -c option, print a warning and set exit status to 1 if a file would be +# changed. + +import os +import re +import sys + +declare_lint_re = re.compile(r''' + declare_lint! \s* [{(] \s* + pub \s+ (?P[A-Z_][A-Z_0-9]*) \s*,\s* + (?PForbid|Deny|Warn|Allow) \s*,\s* + " (?P(?:[^"\\]+|\\.)*) " \s* [})] +''', re.VERBOSE | re.DOTALL) + +declare_deprecated_lint_re = re.compile(r''' + declare_deprecated_lint! 
\s* [{(] \s* + pub \s+ (?P[A-Z_][A-Z_0-9]*) \s*,\s* + " (?P(?:[^"\\]+|\\.)*) " \s* [})] +''', re.VERBOSE | re.DOTALL) + +declare_restriction_lint_re = re.compile(r''' + declare_restriction_lint! \s* [{(] \s* + pub \s+ (?P[A-Z_][A-Z_0-9]*) \s*,\s* + " (?P(?:[^"\\]+|\\.)*) " \s* [})] +''', re.VERBOSE | re.DOTALL) + +nl_escape_re = re.compile(r'\\\n\s*') + +docs_link = 'https://rust-lang-nursery.github.io/rust-clippy/master/index.html' + + +def collect(lints, deprecated_lints, restriction_lints, fn): + """Collect all lints from a file. + + Adds entries to the lints list as `(module, name, level, desc)`. + """ + with open(fn) as fp: + code = fp.read() + for match in declare_lint_re.finditer(code): + # remove \-newline escapes from description string + desc = nl_escape_re.sub('', match.group('desc')) + lints.append((os.path.splitext(os.path.basename(fn))[0], + match.group('name').lower(), + match.group('level').lower(), + desc.replace('\\"', '"'))) + + for match in declare_deprecated_lint_re.finditer(code): + # remove \-newline escapes from description string + desc = nl_escape_re.sub('', match.group('desc')) + deprecated_lints.append((os.path.splitext(os.path.basename(fn))[0], + match.group('name').lower(), + desc.replace('\\"', '"'))) + + for match in declare_restriction_lint_re.finditer(code): + # remove \-newline escapes from description string + desc = nl_escape_re.sub('', match.group('desc')) + restriction_lints.append((os.path.splitext(os.path.basename(fn))[0], + match.group('name').lower(), + "allow", + desc.replace('\\"', '"'))) + + +def gen_group(lints, levels=None): + """Write lint group (list of all lints in the form module::NAME).""" + if levels: + lints = [tup for tup in lints if tup[2] in levels] + for (module, name, _, _) in sorted(lints): + yield ' %s::%s,\n' % (module, name.upper()) + + +def gen_mods(lints): + """Declare modules""" + + for module in sorted(set(lint[0] for lint in lints)): + yield 'pub mod %s;\n' % module + + +def gen_deprecated(lints): + """Declare deprecated lints""" + + for lint in lints: + yield ' store.register_removed(\n' + yield ' "%s",\n' % lint[1] + yield ' "%s",\n' % lint[2] + yield ' );\n' + + +def replace_region(fn, region_start, region_end, callback, + replace_start=True, write_back=True): + """Replace a region in a file delimited by two lines matching regexes. + + A callback is called to write the new region. If `replace_start` is true, + the start delimiter line is replaced as well. The end delimiter line is + never replaced. 
+ """ + # read current content + with open(fn) as fp: + lines = list(fp) + + # replace old region with new region + new_lines = [] + in_old_region = False + for line in lines: + if in_old_region: + if re.search(region_end, line): + in_old_region = False + new_lines.extend(callback()) + new_lines.append(line) + elif re.search(region_start, line): + if not replace_start: + new_lines.append(line) + # old region starts here + in_old_region = True + else: + new_lines.append(line) + + # write back to file + if write_back: + with open(fn, 'w') as fp: + fp.writelines(new_lines) + + # if something changed, return true + return lines != new_lines + + +def main(print_only=False, check=False): + lints = [] + deprecated_lints = [] + restriction_lints = [] + + # check directory + if not os.path.isfile('clippy_lints/src/lib.rs'): + print('Error: call this script from clippy checkout directory!') + return + + # collect all lints from source files + for fn in os.listdir('clippy_lints/src'): + if fn.endswith('.rs'): + collect(lints, deprecated_lints, restriction_lints, + os.path.join('clippy_lints', 'src', fn)) + + # determine version + with open('Cargo.toml') as fp: + for line in fp: + if line.startswith('version ='): + clippy_version = line.split()[2].strip('"') + break + else: + print('Error: version not found in Cargo.toml!') + return + + if print_only: + sys.stdout.writelines(gen_table(lints + restriction_lints)) + return + + # update the lint counter in README.md + changed = replace_region( + 'README.md', + r'^\[There are \d+ lints included in this crate\]\(https://rust-lang-nursery.github.io/rust-clippy/master/index.html\)$', "", + lambda: ['[There are %d lints included in this crate](https://rust-lang-nursery.github.io/rust-clippy/master/index.html)\n' % + (len(lints) + len(restriction_lints))], + write_back=not check) + + # update the links in the CHANGELOG + changed |= replace_region( + 'CHANGELOG.md', + "", + "", + lambda: ["[`{0}`]: {1}#{0}\n".format(l[1], docs_link) for l in + sorted(lints + restriction_lints + deprecated_lints, + key=lambda l: l[1])], + replace_start=False, write_back=not check) + + # update version of clippy_lints in Cargo.toml + changed |= replace_region( + 'Cargo.toml', r'# begin automatic update', '# end automatic update', + lambda: ['clippy_lints = { version = "%s", path = "clippy_lints" }\n' % + clippy_version], + replace_start=False, write_back=not check) + + # update version of clippy_lints in Cargo.toml + changed |= replace_region( + 'clippy_lints/Cargo.toml', r'# begin automatic update', '# end automatic update', + lambda: ['version = "%s"\n' % clippy_version], + replace_start=False, write_back=not check) + + # update the `pub mod` list + changed |= replace_region( + 'clippy_lints/src/lib.rs', r'begin lints modules', r'end lints modules', + lambda: gen_mods(lints + restriction_lints), + replace_start=False, write_back=not check) + + # same for "clippy" lint collection + changed |= replace_region( + 'clippy_lints/src/lib.rs', r'reg.register_lint_group\("clippy"', r'\]\);', + lambda: gen_group(lints, levels=('warn', 'deny')), + replace_start=False, write_back=not check) + + # same for "deprecated" lint collection + changed |= replace_region( + 'clippy_lints/src/lib.rs', r'let mut store', r'end deprecated lints', + lambda: gen_deprecated(deprecated_lints), + replace_start=False, + write_back=not check) + + # same for "clippy_pedantic" lint collection + changed |= replace_region( + 'clippy_lints/src/lib.rs', r'reg.register_lint_group\("clippy_pedantic"', r'\]\);', + 
lambda: gen_group(lints, levels=('allow',)), + replace_start=False, write_back=not check) + + # same for "clippy_restrictions" lint collection + changed |= replace_region( + 'clippy_lints/src/lib.rs', r'reg.register_lint_group\("clippy_restrictions"', + r'\]\);', lambda: gen_group(restriction_lints), + replace_start=False, write_back=not check) + + if check and changed: + print('Please run util/update_lints.py to regenerate lints lists.') + return 1 + + +if __name__ == '__main__': + sys.exit(main(print_only='-n' in sys.argv, check='-c' in sys.argv)) diff --git a/src/tools/compiletest/src/common.rs b/src/tools/compiletest/src/common.rs index 0d6b350a1d..cee7e52c7f 100644 --- a/src/tools/compiletest/src/common.rs +++ b/src/tools/compiletest/src/common.rs @@ -83,117 +83,117 @@ impl fmt::Display for Mode { #[derive(Clone)] pub struct Config { - // The library paths required for running the compiler + /// The library paths required for running the compiler pub compile_lib_path: PathBuf, - // The library paths required for running compiled programs + /// The library paths required for running compiled programs pub run_lib_path: PathBuf, - // The rustc executable + /// The rustc executable pub rustc_path: PathBuf, - // The rustdoc executable + /// The rustdoc executable pub rustdoc_path: Option, - // The python executable to use for LLDB + /// The python executable to use for LLDB pub lldb_python: String, - // The python executable to use for htmldocck + /// The python executable to use for htmldocck pub docck_python: String, - // The llvm FileCheck binary path + /// The llvm FileCheck binary path pub llvm_filecheck: Option, - // The valgrind path + /// The valgrind path pub valgrind_path: Option, - // Whether to fail if we can't run run-pass-valgrind tests under valgrind - // (or, alternatively, to silently run them like regular run-pass tests). + /// Whether to fail if we can't run run-pass-valgrind tests under valgrind + /// (or, alternatively, to silently run them like regular run-pass tests). 
pub force_valgrind: bool, - // The directory containing the tests to run + /// The directory containing the tests to run pub src_base: PathBuf, - // The directory where programs should be built + /// The directory where programs should be built pub build_base: PathBuf, - // The name of the stage being built (stage1, etc) + /// The name of the stage being built (stage1, etc) pub stage_id: String, - // The test mode, compile-fail, run-fail, run-pass + /// The test mode, compile-fail, run-fail, run-pass pub mode: Mode, - // Run ignored tests + /// Run ignored tests pub run_ignored: bool, - // Only run tests that match this filter + /// Only run tests that match this filter pub filter: Option, - // Exactly match the filter, rather than a substring + /// Exactly match the filter, rather than a substring pub filter_exact: bool, - // Write out a parseable log of tests that were run + /// Write out a parseable log of tests that were run pub logfile: Option, - // A command line to prefix program execution with, - // for running under valgrind + /// A command line to prefix program execution with, + /// for running under valgrind pub runtool: Option, - // Flags to pass to the compiler when building for the host + /// Flags to pass to the compiler when building for the host pub host_rustcflags: Option, - // Flags to pass to the compiler when building for the target + /// Flags to pass to the compiler when building for the target pub target_rustcflags: Option, - // Target system to be tested + /// Target system to be tested pub target: String, - // Host triple for the compiler being invoked + /// Host triple for the compiler being invoked pub host: String, - // Path to / name of the GDB executable + /// Path to / name of the GDB executable pub gdb: Option, - // Version of GDB, encoded as ((major * 1000) + minor) * 1000 + patch + /// Version of GDB, encoded as ((major * 1000) + minor) * 1000 + patch pub gdb_version: Option, - // Whether GDB has native rust support + /// Whether GDB has native rust support pub gdb_native_rust: bool, - // Version of LLDB + /// Version of LLDB pub lldb_version: Option, - // Version of LLVM + /// Version of LLVM pub llvm_version: Option, - // Is LLVM a system LLVM + /// Is LLVM a system LLVM pub system_llvm: bool, - // Path to the android tools + /// Path to the android tools pub android_cross_path: PathBuf, - // Extra parameter to run adb on arm-linux-androideabi + /// Extra parameter to run adb on arm-linux-androideabi pub adb_path: String, - // Extra parameter to run test suite on arm-linux-androideabi + /// Extra parameter to run test suite on arm-linux-androideabi pub adb_test_dir: String, - // status whether android device available or not + /// status whether android device available or not pub adb_device_status: bool, - // the path containing LLDB's Python module + /// the path containing LLDB's Python module pub lldb_python_dir: Option, - // Explain what's going on + /// Explain what's going on pub verbose: bool, - // Print one character per test instead of one line + /// Print one character per test instead of one line pub quiet: bool, - // Whether to use colors in test. + /// Whether to use colors in test. 
pub color: ColorConfig, - // where to find the remote test client process, if we're using it + /// where to find the remote test client process, if we're using it pub remote_test_client: Option, // Configuration for various run-make tests frobbing things like C compilers diff --git a/src/tools/compiletest/src/main.rs b/src/tools/compiletest/src/main.rs index 15216f52d9..26c447d01d 100644 --- a/src/tools/compiletest/src/main.rs +++ b/src/tools/compiletest/src/main.rs @@ -517,7 +517,7 @@ pub fn make_test_name(config: &Config, testpaths: &TestPaths) -> test::TestName // // run-pass/foo/bar/baz.rs let path = - PathBuf::from(config.mode.to_string()) + PathBuf::from(config.src_base.file_name().unwrap()) .join(&testpaths.relative_dir) .join(&testpaths.file.file_name().unwrap()); test::DynTestName(format!("[{}] {}", config.mode, path.display())) diff --git a/src/tools/compiletest/src/runtest.rs b/src/tools/compiletest/src/runtest.rs index d2a0c776b3..10ef326d9d 100644 --- a/src/tools/compiletest/src/runtest.rs +++ b/src/tools/compiletest/src/runtest.rs @@ -2174,8 +2174,6 @@ actual:\n\ } fn run_ui_test(&self) { - println!("ui: {}", self.testpaths.file.display()); - let proc_res = self.compile_test(); let expected_stderr_path = self.expected_output_path("stderr"); diff --git a/src/tools/miri/.editorconfig b/src/tools/miri/.editorconfig new file mode 100644 index 0000000000..3c1f41bdcc --- /dev/null +++ b/src/tools/miri/.editorconfig @@ -0,0 +1,25 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# editorconfig.org + +root = true + + +[*] +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +indent_style = space +indent_size = 4 + +[*.rs] +indent_style = space +indent_size = 4 + +[*.toml] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/src/tools/miri/.travis.yml b/src/tools/miri/.travis.yml new file mode 100644 index 0000000000..86577702e9 --- /dev/null +++ b/src/tools/miri/.travis.yml @@ -0,0 +1,45 @@ +language: rust +rust: +- nightly +before_script: +- export PATH=$HOME/.local/bin:$PATH +- rustup target add i686-unknown-linux-gnu +- rustup target add i686-pc-windows-gnu +- rustup target add i686-pc-windows-msvc +- rustup component add rust-src +- cargo install --git https://github.com/japaric/xargo.git +- export RUST_SYSROOT=$HOME/rust +script: +- set -e +- | + # get ourselves a MIR-ful libstd + xargo/build.sh +- | + # Test plain miri + cargo build --locked --release --all-features && + cargo test --locked --release --all-features --all && + cargo install --locked --all-features +- | + # Test cargo miri + cd cargo-miri-test && + cargo miri && + cargo miri test && + cd .. +- | + # and run all tests with full mir + MIRI_SYSROOT=~/.xargo/HOST cargo test --locked --release +- | + # test that the rustc_tests binary compiles + cd rustc_tests && + cargo build --locked --release && + cd .. 
+notifications: + email: + on_success: never +branches: + only: + - master +env: + global: + - RUST_TEST_NOCAPTURE=1 + - TRAVIS_CARGO_NIGHTLY_FEATURE="" diff --git a/src/tools/miri/Cargo.lock b/src/tools/miri/Cargo.lock new file mode 100644 index 0000000000..2e0f1e3562 --- /dev/null +++ b/src/tools/miri/Cargo.lock @@ -0,0 +1,388 @@ +[root] +name = "rustc_miri" +version = "0.1.0" +dependencies = [ + "backtrace 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "log_settings 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "aho-corasick" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "backtrace" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "backtrace-sys 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "dbghelp-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.30 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-demangle 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "backtrace-sys" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "gcc 0.3.53 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.30 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "bitflags" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "byteorder" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cargo_metadata" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "cfg-if" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "compiletest_rs" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", + "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "conv" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "custom_derive 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "custom_derive" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "dbghelp-sys" +version = 
"0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "dtoa" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "env_logger" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "gcc" +version = "0.3.53" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "itoa" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "kernel32-sys" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "lazy_static" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "libc" +version = "0.2.30" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "log" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "log_settings" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "magenta" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "conv 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "magenta-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "magenta-sys" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "memchr" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.30 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "miri" +version = "0.1.0" +dependencies = [ + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cargo_metadata 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "compiletest_rs 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "log_settings 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_miri 0.1.0", +] + +[[package]] +name = "num-traits" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "quote" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rand" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.30 (registry+https://github.com/rust-lang/crates.io-index)", + "magenta 0.1.1 
(registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "thread_local 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex-syntax" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rustc-demangle" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rustc-serialize" +version = "0.3.24" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "serde" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "serde_derive" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive_internals 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_derive_internals" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", + "synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_json" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "dtoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "syn" +version = "0.11.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "synom" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tempdir" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "thread_local" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-xid" +version = "0.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "unreachable" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+dependencies = [ + "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "utf8-ranges" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi-build" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[metadata] +"checksum aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "500909c4f87a9e52355b26626d890833e9e1d53ac566db76c36faa984b889699" +"checksum backtrace 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "99f2ce94e22b8e664d95c57fff45b98a966c2252b60691d0b7aeeccd88d70983" +"checksum backtrace-sys 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "afccc5772ba333abccdf60d55200fa3406f8c59dcf54d5f7998c9107d3799c7c" +"checksum bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d" +"checksum byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff81738b726f5d099632ceaffe7fb65b90212e8dce59d518729e7e8634032d3d" +"checksum cargo_metadata 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "be1057b8462184f634c3a208ee35b0f935cfd94b694b26deadccd98732088d7b" +"checksum cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c819a1287eb618df47cc647173c5c4c66ba19d888a6e50d605672aed3140de" +"checksum compiletest_rs 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "2741d378feb7a434dba54228c89a70b4e427fee521de67cdda3750b8a0265f5a" +"checksum conv 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "78ff10625fd0ac447827aa30ea8b861fead473bb60aeb73af6c1c58caf0d1299" +"checksum custom_derive 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "ef8ae57c4978a2acd8b869ce6b9ca1dfe817bff704c220209fdef2c0b75a01b9" +"checksum dbghelp-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "97590ba53bcb8ac28279161ca943a924d1fd4a8fb3fa63302591647c4fc5b850" +"checksum dtoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "09c3753c3db574d215cba4ea76018483895d7bff25a31b49ba45db21c48e50ab" +"checksum env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3ddf21e73e016298f5cb37d6ef8e8da8e39f91f9ec8b0df44b7deb16a9f8cd5b" +"checksum gcc 0.3.53 (registry+https://github.com/rust-lang/crates.io-index)" = "e8310f7e9c890398b0e80e301c4f474e9918d2b27fca8f48486ca775fa9ffc5a" +"checksum itoa 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f74cf6ca1bdbc28496a2b9798ab7fccc2ca5a42cace95bb2b219577216a5fb90" +"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" +"checksum lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3b37545ab726dd833ec6420aaba8231c5b320814b9029ad585555d2a03e94fbf" +"checksum libc 0.2.30 (registry+https://github.com/rust-lang/crates.io-index)" = "2370ca07ec338939e356443dac2296f581453c35fe1e3a3ed06023c49435f915" +"checksum log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b" +"checksum log_settings 0.1.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "3d382732ea0fbc09790c4899db3255bdea0fc78b54bf234bd18a63bb603915b6" +"checksum magenta 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4bf0336886480e671965f794bc9b6fce88503563013d1bfb7a502c81fe3ac527" +"checksum magenta-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "40d014c7011ac470ae28e2f76a02bfea4a8480f73e701353b49ad7a8d75f4699" +"checksum memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1dbccc0e46f1ea47b9f17e6d67c5a96bd27030519c519c9c91327e31275a47b4" +"checksum num-traits 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "99843c856d68d8b4313b03a17e33c4bb42ae8f6610ea81b28abe076ac721b9b0" +"checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" +"checksum rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "eb250fd207a4729c976794d03db689c9be1d634ab5a1c9da9492a13d8fecbcdf" +"checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b" +"checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db" +"checksum rustc-demangle 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "aee45432acc62f7b9a108cc054142dac51f979e69e71ddce7d6fc7adf29e817e" +"checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" +"checksum serde 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)" = "f7726f29ddf9731b17ff113c461e362c381d9d69433f79de4f3dd572488823e9" +"checksum serde_derive 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)" = "cf823e706be268e73e7747b147aa31c8f633ab4ba31f115efb57e5047c3a76dd" +"checksum serde_derive_internals 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "37aee4e0da52d801acfbc0cc219eb1eda7142112339726e427926a6f6ee65d3a" +"checksum serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "48b04779552e92037212c3615370f6bd57a40ebba7f20e554ff9f55e41a69a7b" +"checksum syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d3b891b9015c88c576343b9b3e41c2c11a51c219ef067b264bd9c8aa9b441dad" +"checksum synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6" +"checksum tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "87974a6f5c1dfb344d733055601650059a3363de2a6104819293baff662132d6" +"checksum thread_local 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1697c4b57aeeb7a536b647165a2825faddffb1d3bad386d507709bd51a90bb14" +"checksum unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f860d7d29cf02cb2f3f359fd35991af3d30bac52c57d265a3c461074cb4dc" +"checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56" +"checksum utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "662fab6525a98beff2921d7f61a39e7d59e0b425ebc7d0d9e66d316e55124122" +"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" +"checksum winapi 0.2.8 
(registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" +"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" diff --git a/src/tools/miri/Cargo.toml b/src/tools/miri/Cargo.toml new file mode 100644 index 0000000000..5dbf4521c9 --- /dev/null +++ b/src/tools/miri/Cargo.toml @@ -0,0 +1,39 @@ +[package] +authors = ["Scott Olson "] +description = "An experimental interpreter for Rust MIR." +license = "MIT/Apache-2.0" +name = "miri" +repository = "https://github.com/solson/miri" +version = "0.1.0" +build = "build.rs" + +[[bin]] +doc = false +name = "miri" +path = "miri/bin/miri.rs" + +[[bin]] +doc = false +name = "cargo-miri" +path = "miri/bin/cargo-miri.rs" +required-features = ["cargo_miri"] + +[lib] +path = "miri/lib.rs" + +[dependencies] +byteorder = { version = "1.1", features = ["i128"]} +env_logger = "0.4.3" +log = "0.3.6" +log_settings = "0.1.1" +cargo_metadata = { version = "0.2", optional = true } +rustc_miri = { path = "src/librustc_mir" } + +[features] +cargo_miri = ["cargo_metadata"] + +[dev-dependencies] +compiletest_rs = { version = "0.2.10", features = ["tmp"] } + +[workspace] +exclude = ["xargo", "cargo-miri-test", "rustc_tests"] diff --git a/src/tools/miri/LICENSE-APACHE b/src/tools/miri/LICENSE-APACHE new file mode 100644 index 0000000000..a32595fa70 --- /dev/null +++ b/src/tools/miri/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright 2016 The Miri Developers + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/src/vendor/unicode-segmentation/LICENSE-MIT b/src/tools/miri/LICENSE-MIT similarity index 95% rename from src/vendor/unicode-segmentation/LICENSE-MIT rename to src/tools/miri/LICENSE-MIT index e69282e381..1f9d89a586 100644 --- a/src/vendor/unicode-segmentation/LICENSE-MIT +++ b/src/tools/miri/LICENSE-MIT @@ -1,4 +1,4 @@ -Copyright (c) 2015 The Rust Project Developers +Copyright (c) 2016 The Miri Developers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated diff --git a/src/tools/miri/README.md b/src/tools/miri/README.md new file mode 100644 index 0000000000..8edaba77fb --- /dev/null +++ b/src/tools/miri/README.md @@ -0,0 +1,103 @@ +# Miri [[slides](https://solson.me/miri-slides.pdf)] [[report](https://solson.me/miri-report.pdf)] [![Build Status](https://travis-ci.org/solson/miri.svg?branch=master)](https://travis-ci.org/solson/miri) + + +An experimental interpreter for [Rust][rust]'s [mid-level intermediate +representation][mir] (MIR). 
This project began as part of my work for the +undergraduate research course at the [University of Saskatchewan][usask]. + +## Installing Rust + +I recommend that you install [rustup][rustup] and then use it to install the +current Rust nightly version: + +```sh +rustup update nightly +``` + +You should also make `nightly` the default version for your Miri directory by +running the following command while you're in it. If you don't do this, you can +run the later `cargo` commands by using `cargo +nightly` instead. + +```sh +rustup override add nightly +``` + +## Building Miri + +```sh +cargo build +``` + +If Miri fails to build, it's likely because a change in the latest nightly +compiler broke it. You could try an older nightly with `rustup update +nightly-<date>` where `<date>` is a few days or weeks ago, e.g. `2016-05-20` for +May 20th. Otherwise, you could notify me in an issue or on IRC. Or, if you know +how to fix it, you could send a PR. :smile: + +## Running tests + +```sh +cargo run --bin miri tests/run-pass/vecs.rs # Or whatever test you like. +``` + +## Debugging + +You can get detailed, statement-by-statement traces by setting the `MIRI_LOG` +environment variable to `trace`. These traces are indented based on call stack +depth. You can get a much less verbose set of information with other logging +levels such as `warn`. + +## Running miri on your own project('s test suite) + +Install miri as a cargo subcommand with `cargo install --debug`. +Then, inside your own project, use `cargo +nightly miri` to run your project, if it is +a bin project, or run `cargo +nightly miri test` to run all tests in your project +through miri. + +## Running miri with full libstd + +By default, libstd does not contain the MIR of non-polymorphic functions. When +miri hits a call to such a function, execution terminates. To fix this, it is +possible to compile libstd with full MIR: + +```sh +rustup component add rust-src +cargo install xargo +cd xargo/ +RUSTFLAGS='-Zalways-encode-mir' xargo build +``` + +Now you can run miri against the libstd compiled by xargo: + +```sh +MIRI_SYSROOT=~/.xargo/HOST cargo run --bin miri tests/run-pass-fullmir/vecs.rs +``` + +Notice that you will have to re-run the last step of the preparations above when +your toolchain changes (e.g., when you update the nightly). + +## Contributing and getting help + +Check out the issues on this GitHub repository for some ideas. There's lots that +needs to be done that I haven't documented in the issues yet, however. For more +ideas or help with running or hacking on Miri, you can contact me (`scott`) on +Mozilla IRC in any of the Rust IRC channels (`#rust`, `#rust-offtopic`, etc). + +## License + +Licensed under either of + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or + http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or + http://opensource.org/licenses/MIT) at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you shall be dual licensed as above, without any +additional terms or conditions.
+ +[rust]: https://www.rust-lang.org/ +[mir]: https://github.com/rust-lang/rfcs/blob/master/text/1211-mir.md +[usask]: https://www.usask.ca/ +[rustup]: https://www.rustup.rs diff --git a/src/tools/miri/appveyor.yml b/src/tools/miri/appveyor.yml new file mode 100644 index 0000000000..2fa7a74c7c --- /dev/null +++ b/src/tools/miri/appveyor.yml @@ -0,0 +1,35 @@ +environment: + global: + PROJECT_NAME: miri + matrix: + - TARGET: i686-pc-windows-msvc + MSYS2_BITS: 32 + - TARGET: x86_64-pc-windows-msvc + MSYS2_BITS: 64 + +install: + - set PATH=C:\Program Files\Git\mingw64\bin;%PATH% + - curl -sSf -o rustup-init.exe https://win.rustup.rs/ + - rustup-init.exe -y --default-host %TARGET% --default-toolchain nightly + - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin;C:\Users\appveyor\.rustup\toolchains\nightly-%TARGET%\bin + - if defined MSYS2_BITS set PATH=%PATH%;C:\msys64\mingw%MSYS2_BITS%\bin + - rustc -V + - cargo -V + - rustup component add rust-src + - cargo install --git https://github.com/japaric/xargo.git + - cd xargo + - set RUSTFLAGS=-Zalways-encode-mir -Zmir-emit-validate=1 + - xargo build + - set RUSTFLAGS= + - cd .. + +build: false + +test_script: + - set RUST_BACKTRACE=1 + - cargo build --locked --release + - cargo test --locked --release + +notifications: + - provider: Email + on_build_success: false diff --git a/src/tools/miri/benches/fibonacci.rs b/src/tools/miri/benches/fibonacci.rs new file mode 100644 index 0000000000..90b231a32b --- /dev/null +++ b/src/tools/miri/benches/fibonacci.rs @@ -0,0 +1,26 @@ +#![feature(test, rustc_private)] + +extern crate test; +use test::Bencher; +mod helpers; +use helpers::*; + +#[bench] +fn fib(bencher: &mut Bencher) { + bencher.iter(|| { fibonacci_helper::main(); }) +} + +#[bench] +fn fib_miri(bencher: &mut Bencher) { + miri_helper::run("fibonacci_helper", bencher); +} + +#[bench] +fn fib_iter(bencher: &mut Bencher) { + bencher.iter(|| { fibonacci_helper_iterative::main(); }) +} + +#[bench] +fn fib_iter_miri(bencher: &mut Bencher) { + miri_helper::run("fibonacci_helper_iterative", bencher); +} diff --git a/src/tools/miri/benches/helpers/fibonacci_helper.rs b/src/tools/miri/benches/helpers/fibonacci_helper.rs new file mode 100644 index 0000000000..586f1ce7da --- /dev/null +++ b/src/tools/miri/benches/helpers/fibonacci_helper.rs @@ -0,0 +1,8 @@ +#[inline(never)] +pub fn main() { + assert_eq!(fib(10), 55); +} + +fn fib(n: usize) -> usize { + if n <= 2 { 1 } else { fib(n - 1) + fib(n - 2) } +} diff --git a/src/tools/miri/benches/helpers/fibonacci_helper_iterative.rs b/src/tools/miri/benches/helpers/fibonacci_helper_iterative.rs new file mode 100644 index 0000000000..59283be482 --- /dev/null +++ b/src/tools/miri/benches/helpers/fibonacci_helper_iterative.rs @@ -0,0 +1,15 @@ +#[inline(never)] +pub fn main() { + assert_eq!(fib(10), 55); +} + +fn fib(n: usize) -> usize { + let mut a = 0; + let mut b = 1; + for _ in 0..n { + let c = a; + a = b; + b = c + b; + } + a +} diff --git a/src/tools/miri/benches/helpers/miri_helper.rs b/src/tools/miri/benches/helpers/miri_helper.rs new file mode 100644 index 0000000000..6657ba1199 --- /dev/null +++ b/src/tools/miri/benches/helpers/miri_helper.rs @@ -0,0 +1,75 @@ +extern crate getopts; +extern crate miri; +extern crate rustc; +extern crate rustc_driver; +extern crate test; + +use self::miri::eval_main; +use self::rustc::session::Session; +use self::rustc_driver::{driver, CompilerCalls, Compilation}; +use std::cell::RefCell; +use std::rc::Rc; +use test::Bencher; + +pub struct MiriCompilerCalls<'a>(Rc>); + +fn 
find_sysroot() -> String { + // Taken from https://github.com/Manishearth/rust-clippy/pull/911. + let home = option_env!("RUSTUP_HOME").or(option_env!("MULTIRUST_HOME")); + let toolchain = option_env!("RUSTUP_TOOLCHAIN").or(option_env!("MULTIRUST_TOOLCHAIN")); + match (home, toolchain) { + (Some(home), Some(toolchain)) => format!("{}/toolchains/{}", home, toolchain), + _ => { + option_env!("RUST_SYSROOT") + .expect( + "need to specify RUST_SYSROOT env var or use rustup or multirust", + ) + .to_owned() + } + } +} + +pub fn run(filename: &str, bencher: &mut Bencher) { + let args = &[ + "miri".to_string(), + format!("benches/helpers/{}.rs", filename), + "--sysroot".to_string(), + find_sysroot(), + ]; + let compiler_calls = &mut MiriCompilerCalls(Rc::new(RefCell::new(bencher))); + rustc_driver::run_compiler(args, compiler_calls, None, None); +} + +impl<'a> CompilerCalls<'a> for MiriCompilerCalls<'a> { + fn build_controller( + &mut self, + _: &Session, + _: &getopts::Matches, + ) -> driver::CompileController<'a> { + let mut control: driver::CompileController<'a> = driver::CompileController::basic(); + + let bencher = self.0.clone(); + + control.after_analysis.stop = Compilation::Stop; + control.after_analysis.callback = Box::new(move |state| { + state.session.abort_if_errors(); + + let tcx = state.tcx.unwrap(); + let (entry_node_id, _) = state.session.entry_fn.borrow().expect( + "no main or start function found", + ); + let entry_def_id = tcx.map.local_def_id(entry_node_id); + + let memory_size = 100 * 1024 * 1024; // 100MB + let step_limit = 1000_000; + let stack_limit = 100; + bencher.borrow_mut().iter(|| { + eval_main(tcx, entry_def_id, memory_size, step_limit, stack_limit); + }); + + state.session.abort_if_errors(); + }); + + control + } +} diff --git a/src/tools/miri/benches/helpers/mod.rs b/src/tools/miri/benches/helpers/mod.rs new file mode 100644 index 0000000000..27504a2cc0 --- /dev/null +++ b/src/tools/miri/benches/helpers/mod.rs @@ -0,0 +1,7 @@ +// This module gets included in multiple crates, and they each only use part of it. 
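+// That is why the blanket `allow(dead_code)` attribute below is needed.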
+#![allow(dead_code)] + +pub mod fibonacci_helper; +pub mod fibonacci_helper_iterative; +pub mod miri_helper; +pub mod smoke_helper; diff --git a/src/tools/miri/benches/helpers/repeat.rs b/src/tools/miri/benches/helpers/repeat.rs new file mode 100644 index 0000000000..0e8c5980b8 --- /dev/null +++ b/src/tools/miri/benches/helpers/repeat.rs @@ -0,0 +1,4 @@ +fn main() { + let data: [u8; 1024] = [42; 1024]; + assert_eq!(data.len(), 1024); +} diff --git a/src/tools/miri/benches/helpers/repeat_manual.rs b/src/tools/miri/benches/helpers/repeat_manual.rs new file mode 100644 index 0000000000..6ef6f724ef --- /dev/null +++ b/src/tools/miri/benches/helpers/repeat_manual.rs @@ -0,0 +1,7 @@ +fn main() { + let mut data: [u8; 1024] = unsafe { std::mem::uninitialized() }; + for i in 0..data.len() { + unsafe { std::ptr::write(&mut data[i], 0); } + } + assert_eq!(data.len(), 1024); +} diff --git a/src/tools/miri/benches/helpers/smoke_helper.rs b/src/tools/miri/benches/helpers/smoke_helper.rs new file mode 100644 index 0000000000..e81db817ae --- /dev/null +++ b/src/tools/miri/benches/helpers/smoke_helper.rs @@ -0,0 +1,2 @@ +#[inline(never)] +pub fn main() {} diff --git a/src/tools/miri/benches/repeat.rs b/src/tools/miri/benches/repeat.rs new file mode 100644 index 0000000000..f5920e83d9 --- /dev/null +++ b/src/tools/miri/benches/repeat.rs @@ -0,0 +1,16 @@ +#![feature(test, rustc_private)] + +extern crate test; +use test::Bencher; +mod helpers; +use helpers::*; + +#[bench] +fn repeat(bencher: &mut Bencher) { + miri_helper::run("repeat", bencher); +} + +#[bench] +fn repeat_manual(bencher: &mut Bencher) { + miri_helper::run("repeat_manual", bencher); +} diff --git a/src/tools/miri/benches/smoke.rs b/src/tools/miri/benches/smoke.rs new file mode 100644 index 0000000000..1dbc4fed82 --- /dev/null +++ b/src/tools/miri/benches/smoke.rs @@ -0,0 +1,35 @@ +#![feature(test, rustc_private)] + +extern crate test; +use test::Bencher; +mod helpers; +use helpers::*; + +#[bench] +fn noop(bencher: &mut Bencher) { + bencher.iter(|| { smoke_helper::main(); }) +} + +/* +// really slow +#[bench] +fn noop_miri_full(bencher: &mut Bencher) { + let path = std::env::var("RUST_SYSROOT").expect("env variable `RUST_SYSROOT` not set"); + bencher.iter(|| { + let mut process = std::process::Command::new("target/release/miri"); + process.arg("benches/smoke_helper.rs") + .arg("--sysroot").arg(&path); + let output = process.output().unwrap(); + if !output.status.success() { + println!("{}", String::from_utf8(output.stdout).unwrap()); + println!("{}", String::from_utf8(output.stderr).unwrap()); + panic!("failed to run miri"); + } + }) +} +*/ + +#[bench] +fn noop_miri_interpreter(bencher: &mut Bencher) { + miri_helper::run("smoke_helper", bencher); +} diff --git a/src/tools/miri/build.rs b/src/tools/miri/build.rs new file mode 100644 index 0000000000..2f74f7f4f6 --- /dev/null +++ b/src/tools/miri/build.rs @@ -0,0 +1,8 @@ +use std::env; + +fn main() { + // Forward the profile to the main compilation + println!("cargo:rustc-env=PROFILE={}", env::var("PROFILE").unwrap()); + // Don't rebuild miri even if nothing changed + println!("cargo:rerun-if-changed=build.rs"); +} diff --git a/src/tools/miri/cargo-miri-test/Cargo.lock b/src/tools/miri/cargo-miri-test/Cargo.lock new file mode 100644 index 0000000000..8b2387fa64 --- /dev/null +++ b/src/tools/miri/cargo-miri-test/Cargo.lock @@ -0,0 +1,14 @@ +[root] +name = "cargo-miri-test" +version = "0.1.0" +dependencies = [ + "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + 
+[[package]] +name = "byteorder" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[metadata] +"checksum byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c40977b0ee6b9885c9013cd41d9feffdd22deb3bb4dc3a71d901cc7a77de18c8" diff --git a/src/tools/miri/cargo-miri-test/Cargo.toml b/src/tools/miri/cargo-miri-test/Cargo.toml new file mode 100644 index 0000000000..5fbe923f23 --- /dev/null +++ b/src/tools/miri/cargo-miri-test/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "cargo-miri-test" +version = "0.1.0" +authors = ["Oliver Schneider "] + +[dependencies] +byteorder = "1.0" \ No newline at end of file diff --git a/src/tools/miri/cargo-miri-test/src/main.rs b/src/tools/miri/cargo-miri-test/src/main.rs new file mode 100644 index 0000000000..07b0e4cee4 --- /dev/null +++ b/src/tools/miri/cargo-miri-test/src/main.rs @@ -0,0 +1,9 @@ +extern crate byteorder; + +use byteorder::{BigEndian, ByteOrder}; + +fn main() { + let buf = &[1,2,3,4]; + let n = ::read_u32(buf); + assert_eq!(n, 0x01020304); +} diff --git a/src/tools/miri/cargo-miri-test/tests/foo.rs b/src/tools/miri/cargo-miri-test/tests/foo.rs new file mode 100644 index 0000000000..fb7fad21c9 --- /dev/null +++ b/src/tools/miri/cargo-miri-test/tests/foo.rs @@ -0,0 +1,4 @@ +#[test] +fn bar() { + assert_eq!(4, 4); +} diff --git a/src/tools/miri/miri/bin/cargo-miri.rs b/src/tools/miri/miri/bin/cargo-miri.rs new file mode 100644 index 0000000000..06d5b3e997 --- /dev/null +++ b/src/tools/miri/miri/bin/cargo-miri.rs @@ -0,0 +1,212 @@ +extern crate cargo_metadata; + +use std::path::{PathBuf, Path}; +use std::io::Write; +use std::process::Command; + + +const CARGO_MIRI_HELP: &str = r#"Interprets bin crates + +Usage: + cargo miri [options] [--] [...] + +Common options: + -h, --help Print this message + --features Features to compile for the package + -V, --version Print version info and exit + +Other options are the same as `cargo rustc`. + +The feature `cargo-miri` is automatically defined for convenience. You can use +it to configure the resource limits + + #![cfg_attr(feature = "cargo-miri", memory_size = 42)] + +available resource limits are `memory_size`, `step_limit`, `stack_limit` +"#; + +fn show_help() { + println!("{}", CARGO_MIRI_HELP); +} + +fn show_version() { + println!("{}", env!("CARGO_PKG_VERSION")); +} + +fn main() { + // Check for version and help flags even when invoked as 'cargo-miri' + if std::env::args().any(|a| a == "--help" || a == "-h") { + show_help(); + return; + } + if std::env::args().any(|a| a == "--version" || a == "-V") { + show_version(); + return; + } + + if let Some("miri") = std::env::args().nth(1).as_ref().map(AsRef::as_ref) { + // this arm is when `cargo miri` is called + + let test = std::env::args().nth(2).map_or(false, |text| text == "test"); + let skip = if test { 3 } else { 2 }; + + let manifest_path_arg = std::env::args().skip(skip).find(|val| { + val.starts_with("--manifest-path=") + }); + + let mut metadata = if let Ok(metadata) = cargo_metadata::metadata( + manifest_path_arg.as_ref().map(AsRef::as_ref), + ) + { + metadata + } else { + let _ = std::io::stderr().write_fmt(format_args!( + "error: Could not obtain cargo metadata." 
+ )); + std::process::exit(101); + }; + + let manifest_path = manifest_path_arg.map(|arg| { + PathBuf::from(Path::new(&arg["--manifest-path=".len()..])) + }); + + let current_dir = std::env::current_dir(); + + let package_index = metadata + .packages + .iter() + .position(|package| { + let package_manifest_path = Path::new(&package.manifest_path); + if let Some(ref manifest_path) = manifest_path { + package_manifest_path == manifest_path + } else { + let current_dir = current_dir.as_ref().expect( + "could not read current directory", + ); + let package_manifest_directory = package_manifest_path.parent().expect( + "could not find parent directory of package manifest", + ); + package_manifest_directory == current_dir + } + }) + .expect("could not find matching package"); + let package = metadata.packages.remove(package_index); + for target in package.targets { + let args = std::env::args().skip(skip); + let kind = target.kind.get(0).expect( + "badly formatted cargo metadata: target::kind is an empty array", + ); + if test && kind == "test" { + if let Err(code) = process( + vec!["--test".to_string(), target.name].into_iter().chain( + args, + ), + ) + { + std::process::exit(code); + } + } else if !test && kind == "bin" { + if let Err(code) = process( + vec!["--bin".to_string(), target.name].into_iter().chain( + args, + ), + ) + { + std::process::exit(code); + } + } + } + } else { + // this arm is executed when cargo-miri runs `cargo rustc` with the `RUSTC` env var set to itself + + let home = option_env!("RUSTUP_HOME").or(option_env!("MULTIRUST_HOME")); + let toolchain = option_env!("RUSTUP_TOOLCHAIN").or(option_env!("MULTIRUST_TOOLCHAIN")); + let sys_root = if let (Some(home), Some(toolchain)) = (home, toolchain) { + format!("{}/toolchains/{}", home, toolchain) + } else { + option_env!("RUST_SYSROOT") + .map(|s| s.to_owned()) + .or_else(|| { + Command::new("rustc") + .arg("--print") + .arg("sysroot") + .output() + .ok() + .and_then(|out| String::from_utf8(out.stdout).ok()) + .map(|s| s.trim().to_owned()) + }) + .expect("need to specify RUST_SYSROOT env var during miri compilation, or use rustup or multirust") + }; + + // this conditional check for the --sysroot flag is there so users can call `cargo-miri` directly + // without having to pass --sysroot or anything + let mut args: Vec = if std::env::args().any(|s| s == "--sysroot") { + std::env::args().skip(1).collect() + } else { + std::env::args() + .skip(1) + .chain(Some("--sysroot".to_owned())) + .chain(Some(sys_root)) + .collect() + }; + + // this check ensures that dependencies are built but not interpreted and the final crate is + // interpreted but not built + let miri_enabled = std::env::args().any(|s| s == "-Zno-trans"); + + let mut command = if miri_enabled { + let mut path = std::env::current_exe().expect("current executable path invalid"); + path.set_file_name("miri"); + Command::new(path) + } else { + Command::new("rustc") + }; + + args.extend_from_slice(&["-Z".to_owned(), "always-encode-mir".to_owned()]); + args.extend_from_slice(&["--cfg".to_owned(), r#"feature="cargo-miri""#.to_owned()]); + + match command.args(&args).status() { + Ok(exit) => { + if !exit.success() { + std::process::exit(exit.code().unwrap_or(42)); + } + } + Err(ref e) if miri_enabled => panic!("error during miri run: {:?}", e), + Err(ref e) => panic!("error during rustc call: {:?}", e), + } + } +} + +fn process(old_args: I) -> Result<(), i32> +where + I: Iterator, +{ + let mut args = vec!["rustc".to_owned()]; + + let mut found_dashes = false; + for arg in 
old_args { + found_dashes |= arg == "--"; + args.push(arg); + } + if !found_dashes { + args.push("--".to_owned()); + } + args.push("-Zno-trans".to_owned()); + args.push("--cfg".to_owned()); + args.push(r#"feature="cargo-miri""#.to_owned()); + + let path = std::env::current_exe().expect("current executable path invalid"); + let exit_status = std::process::Command::new("cargo") + .args(&args) + .env("RUSTC", path) + .spawn() + .expect("could not run cargo") + .wait() + .expect("failed to wait for cargo?"); + + if exit_status.success() { + Ok(()) + } else { + Err(exit_status.code().unwrap_or(-1)) + } +} diff --git a/src/tools/miri/miri/bin/miri.rs b/src/tools/miri/miri/bin/miri.rs new file mode 100644 index 0000000000..d38f63610a --- /dev/null +++ b/src/tools/miri/miri/bin/miri.rs @@ -0,0 +1,265 @@ +#![feature(rustc_private, i128_type)] + +extern crate getopts; +extern crate miri; +extern crate rustc; +extern crate rustc_driver; +extern crate rustc_errors; +extern crate env_logger; +extern crate log_settings; +extern crate syntax; +extern crate log; + +use rustc::session::Session; +use rustc::middle::cstore::CrateStore; +use rustc_driver::{Compilation, CompilerCalls, RustcDefaultCalls}; +use rustc_driver::driver::{CompileState, CompileController}; +use rustc::session::config::{self, Input, ErrorOutputType}; +use rustc::hir::{self, itemlikevisit}; +use rustc::ty::TyCtxt; +use syntax::ast::{self, MetaItemKind, NestedMetaItemKind}; +use std::path::PathBuf; + +struct MiriCompilerCalls { + default: RustcDefaultCalls, +} + +impl<'a> CompilerCalls<'a> for MiriCompilerCalls { + fn early_callback( + &mut self, + matches: &getopts::Matches, + sopts: &config::Options, + cfg: &ast::CrateConfig, + descriptions: &rustc_errors::registry::Registry, + output: ErrorOutputType, + ) -> Compilation { + self.default.early_callback( + matches, + sopts, + cfg, + descriptions, + output, + ) + } + fn no_input( + &mut self, + matches: &getopts::Matches, + sopts: &config::Options, + cfg: &ast::CrateConfig, + odir: &Option, + ofile: &Option, + descriptions: &rustc_errors::registry::Registry, + ) -> Option<(Input, Option)> { + self.default.no_input( + matches, + sopts, + cfg, + odir, + ofile, + descriptions, + ) + } + fn late_callback( + &mut self, + matches: &getopts::Matches, + sess: &Session, + cstore: &CrateStore, + input: &Input, + odir: &Option, + ofile: &Option, + ) -> Compilation { + self.default.late_callback(matches, sess, cstore, input, odir, ofile) + } + fn build_controller( + &mut self, + sess: &Session, + matches: &getopts::Matches, + ) -> CompileController<'a> { + let mut control = self.default.build_controller(sess, matches); + control.after_hir_lowering.callback = Box::new(after_hir_lowering); + control.after_analysis.callback = Box::new(after_analysis); + if sess.target.target != sess.host { + // only fully compile targets on the host. linking will fail for cross-compilation. 
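+ // Miri itself only interprets MIR, which is ready once analysis is done, so nothing more is needed for such targets.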
+ control.after_analysis.stop = Compilation::Stop; + } + control + } +} + +fn after_hir_lowering(state: &mut CompileState) { + let attr = ( + String::from("miri"), + syntax::feature_gate::AttributeType::Whitelisted, + ); + state.session.plugin_attributes.borrow_mut().push(attr); +} + +fn after_analysis<'a, 'tcx>(state: &mut CompileState<'a, 'tcx>) { + state.session.abort_if_errors(); + + let tcx = state.tcx.unwrap(); + let limits = resource_limits_from_attributes(state); + + if std::env::args().any(|arg| arg == "--test") { + struct Visitor<'a, 'tcx: 'a>( + miri::ResourceLimits, + TyCtxt<'a, 'tcx, 'tcx>, + &'a CompileState<'a, 'tcx> + ); + impl<'a, 'tcx: 'a, 'hir> itemlikevisit::ItemLikeVisitor<'hir> for Visitor<'a, 'tcx> { + fn visit_item(&mut self, i: &'hir hir::Item) { + if let hir::Item_::ItemFn(_, _, _, _, _, body_id) = i.node { + if i.attrs.iter().any(|attr| { + attr.name().map_or(false, |n| n == "test") + }) + { + let did = self.1.hir.body_owner_def_id(body_id); + println!( + "running test: {}", + self.1.hir.def_path(did).to_string(self.1) + ); + miri::eval_main(self.1, did, None, self.0); + self.2.session.abort_if_errors(); + } + } + } + fn visit_trait_item(&mut self, _trait_item: &'hir hir::TraitItem) {} + fn visit_impl_item(&mut self, _impl_item: &'hir hir::ImplItem) {} + } + state.hir_crate.unwrap().visit_all_item_likes( + &mut Visitor(limits, tcx, state), + ); + } else if let Some((entry_node_id, _)) = *state.session.entry_fn.borrow() { + let entry_def_id = tcx.hir.local_def_id(entry_node_id); + let start_wrapper = tcx.lang_items().start_fn().and_then(|start_fn| { + if tcx.is_mir_available(start_fn) { + Some(start_fn) + } else { + None + } + }); + miri::eval_main(tcx, entry_def_id, start_wrapper, limits); + + state.session.abort_if_errors(); + } else { + println!("no main function found, assuming auxiliary build"); + } +} + +fn resource_limits_from_attributes(state: &CompileState) -> miri::ResourceLimits { + let mut limits = miri::ResourceLimits::default(); + let krate = state.hir_crate.as_ref().unwrap(); + let err_msg = "miri attributes need to be in the form `miri(key = value)`"; + let extract_int = |lit: &syntax::ast::Lit| -> u128 { + match lit.node { + syntax::ast::LitKind::Int(i, _) => i, + _ => { + state.session.span_fatal( + lit.span, + "expected an integer literal", + ) + } + } + }; + + for attr in krate.attrs.iter().filter(|a| { + a.name().map_or(false, |n| n == "miri") + }) + { + if let Some(items) = attr.meta_item_list() { + for item in items { + if let NestedMetaItemKind::MetaItem(ref inner) = item.node { + if let MetaItemKind::NameValue(ref value) = inner.node { + match &inner.name().as_str()[..] 
{ + "memory_size" => limits.memory_size = extract_int(value) as u64, + "step_limit" => limits.step_limit = extract_int(value) as u64, + "stack_limit" => limits.stack_limit = extract_int(value) as usize, + _ => state.session.span_err(item.span, "unknown miri attribute"), + } + } else { + state.session.span_err(inner.span, err_msg); + } + } else { + state.session.span_err(item.span, err_msg); + } + } + } else { + state.session.span_err(attr.span, err_msg); + } + } + limits +} + +fn init_logger() { + let format = |record: &log::LogRecord| { + if record.level() == log::LogLevel::Trace { + // prepend spaces to indent the final string + let indentation = log_settings::settings().indentation; + format!( + "{lvl}:{module}:{indent: String { + if let Ok(sysroot) = std::env::var("MIRI_SYSROOT") { + return sysroot; + } + + // Taken from https://github.com/Manishearth/rust-clippy/pull/911. + let home = option_env!("RUSTUP_HOME").or(option_env!("MULTIRUST_HOME")); + let toolchain = option_env!("RUSTUP_TOOLCHAIN").or(option_env!("MULTIRUST_TOOLCHAIN")); + match (home, toolchain) { + (Some(home), Some(toolchain)) => format!("{}/toolchains/{}", home, toolchain), + _ => { + option_env!("RUST_SYSROOT") + .expect( + "need to specify RUST_SYSROOT env var or use rustup or multirust", + ) + .to_owned() + } + } +} + +fn main() { + init_logger(); + let mut args: Vec = std::env::args().collect(); + + let sysroot_flag = String::from("--sysroot"); + if !args.contains(&sysroot_flag) { + args.push(sysroot_flag); + args.push(find_sysroot()); + } + + // Make sure we always have all the MIR (e.g. for auxilary builds in unit tests). + args.push("-Zalways-encode-mir".to_owned()); + + rustc_driver::run_compiler(&args, &mut MiriCompilerCalls { + default: RustcDefaultCalls, + }, None, None); +} diff --git a/src/tools/miri/miri/fn_call.rs b/src/tools/miri/miri/fn_call.rs new file mode 100644 index 0000000000..36d9c0b481 --- /dev/null +++ b/src/tools/miri/miri/fn_call.rs @@ -0,0 +1,653 @@ +use rustc::ty::{self, Ty}; +use rustc::hir::def_id::{DefId, CRATE_DEF_INDEX}; +use rustc::mir; +use syntax::attr; +use syntax::abi::Abi; +use syntax::codemap::Span; + +use std::mem; + +use rustc_miri::interpret::*; + +use super::{TlsKey, EvalContext}; + +use tls::MemoryExt; + +use super::memory::MemoryKind; + +pub trait EvalContextExt<'tcx> { + fn call_c_abi( + &mut self, + def_id: DefId, + args: &[ValTy<'tcx>], + dest: Lvalue, + dest_ty: Ty<'tcx>, + dest_block: mir::BasicBlock, + ) -> EvalResult<'tcx>; + + fn resolve_path(&self, path: &[&str]) -> EvalResult<'tcx, ty::Instance<'tcx>>; + + fn call_missing_fn( + &mut self, + instance: ty::Instance<'tcx>, + destination: Option<(Lvalue, mir::BasicBlock)>, + args: &[ValTy<'tcx>], + sig: ty::FnSig<'tcx>, + path: String, + ) -> EvalResult<'tcx>; + + fn eval_fn_call( + &mut self, + instance: ty::Instance<'tcx>, + destination: Option<(Lvalue, mir::BasicBlock)>, + args: &[ValTy<'tcx>], + span: Span, + sig: ty::FnSig<'tcx>, + ) -> EvalResult<'tcx, bool>; +} + +impl<'a, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'tcx, super::Evaluator> { + fn eval_fn_call( + &mut self, + instance: ty::Instance<'tcx>, + destination: Option<(Lvalue, mir::BasicBlock)>, + args: &[ValTy<'tcx>], + span: Span, + sig: ty::FnSig<'tcx>, + ) -> EvalResult<'tcx, bool> { + trace!("eval_fn_call: {:#?}, {:#?}", instance, destination); + + let mir = match self.load_mir(instance.def) { + Ok(mir) => mir, + Err(EvalError { kind: EvalErrorKind::NoMirFor(path), .. 
}) => { + self.call_missing_fn( + instance, + destination, + args, + sig, + path, + )?; + return Ok(true); + } + Err(other) => return Err(other), + }; + + let (return_lvalue, return_to_block) = match destination { + Some((lvalue, block)) => (lvalue, StackPopCleanup::Goto(block)), + None => (Lvalue::undef(), StackPopCleanup::None), + }; + + self.push_stack_frame( + instance, + span, + mir, + return_lvalue, + return_to_block, + )?; + + Ok(false) + } + + fn call_c_abi( + &mut self, + def_id: DefId, + args: &[ValTy<'tcx>], + dest: Lvalue, + dest_ty: Ty<'tcx>, + dest_block: mir::BasicBlock, + ) -> EvalResult<'tcx> { + let attrs = self.tcx.get_attrs(def_id); + let link_name = match attr::first_attr_value_str_by_name(&attrs, "link_name") { + Some(name) => name.as_str(), + None => self.tcx.item_name(def_id), + }; + + match &link_name[..] { + "malloc" => { + let size = self.value_to_primval(args[0])?.to_u64()?; + if size == 0 { + self.write_null(dest, dest_ty)?; + } else { + let align = self.memory.pointer_size(); + let ptr = self.memory.allocate(size, align, MemoryKind::C.into())?; + self.write_primval(dest, PrimVal::Ptr(ptr), dest_ty)?; + } + } + + "free" => { + let ptr = args[0].into_ptr(&mut self.memory)?; + if !ptr.is_null()? { + self.memory.deallocate( + ptr.to_ptr()?, + None, + MemoryKind::C.into(), + )?; + } + } + + "syscall" => { + // TODO: read `syscall` ids like `sysconf` ids and + // figure out some way to actually process some of them + // + // libc::syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), GRND_NONBLOCK) + // is called if a `HashMap` is created the regular way. + match self.value_to_primval(args[0])?.to_u64()? { + 318 | 511 => { + return err!(Unimplemented( + "miri does not support random number generators".to_owned(), + )) + } + id => { + return err!(Unimplemented( + format!("miri does not support syscall id {}", id), + )) + } + } + } + + "dlsym" => { + let _handle = args[0].into_ptr(&mut self.memory)?; + let symbol = args[1].into_ptr(&mut self.memory)?.to_ptr()?; + let symbol_name = self.memory.read_c_str(symbol)?; + let err = format!("bad c unicode symbol: {:?}", symbol_name); + let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err); + return err!(Unimplemented(format!( + "miri does not support dynamically loading libraries (requested symbol: {})", + symbol_name + ))); + } + + "__rust_maybe_catch_panic" => { + // fn __rust_maybe_catch_panic(f: fn(*mut u8), data: *mut u8, data_ptr: *mut usize, vtable_ptr: *mut usize) -> u32 + // We abort on panic, so not much is going on here, but we still have to call the closure + let u8_ptr_ty = self.tcx.mk_mut_ptr(self.tcx.types.u8); + let f = args[0].into_ptr(&mut self.memory)?.to_ptr()?; + let data = args[1].into_ptr(&mut self.memory)?; + let f_instance = self.memory.get_fn(f)?; + self.write_null(dest, dest_ty)?; + + // Now we make a function call. TODO: Consider making this re-usable? EvalContext::step does sth. similar for the TLS dtors, + // and of course eval_main. + let mir = self.load_mir(f_instance.def)?; + self.push_stack_frame( + f_instance, + mir.span, + mir, + Lvalue::undef(), + StackPopCleanup::Goto(dest_block), + )?; + + let arg_local = self.frame().mir.args_iter().next().ok_or( + EvalErrorKind::AbiViolation( + "Argument to __rust_maybe_catch_panic does not take enough arguments." 
+ .to_owned(), + ), + )?; + let arg_dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; + self.write_ptr(arg_dest, data, u8_ptr_ty)?; + + // We ourselves return 0 + self.write_null(dest, dest_ty)?; + + // Don't fall through + return Ok(()); + } + + "__rust_start_panic" => { + return err!(Panic); + } + + "memcmp" => { + let left = args[0].into_ptr(&mut self.memory)?; + let right = args[1].into_ptr(&mut self.memory)?; + let n = self.value_to_primval(args[2])?.to_u64()?; + + let result = { + let left_bytes = self.memory.read_bytes(left, n)?; + let right_bytes = self.memory.read_bytes(right, n)?; + + use std::cmp::Ordering::*; + match left_bytes.cmp(right_bytes) { + Less => -1i8, + Equal => 0, + Greater => 1, + } + }; + + self.write_primval( + dest, + PrimVal::Bytes(result as u128), + dest_ty, + )?; + } + + "memrchr" => { + let ptr = args[0].into_ptr(&mut self.memory)?; + let val = self.value_to_primval(args[1])?.to_u64()? as u8; + let num = self.value_to_primval(args[2])?.to_u64()?; + if let Some(idx) = self.memory.read_bytes(ptr, num)?.iter().rev().position( + |&c| c == val, + ) + { + let new_ptr = ptr.offset(num - idx as u64 - 1, &self)?; + self.write_ptr(dest, new_ptr, dest_ty)?; + } else { + self.write_null(dest, dest_ty)?; + } + } + + "memchr" => { + let ptr = args[0].into_ptr(&mut self.memory)?; + let val = self.value_to_primval(args[1])?.to_u64()? as u8; + let num = self.value_to_primval(args[2])?.to_u64()?; + if let Some(idx) = self.memory.read_bytes(ptr, num)?.iter().position( + |&c| c == val, + ) + { + let new_ptr = ptr.offset(idx as u64, &self)?; + self.write_ptr(dest, new_ptr, dest_ty)?; + } else { + self.write_null(dest, dest_ty)?; + } + } + + "getenv" => { + let result = { + let name_ptr = args[0].into_ptr(&mut self.memory)?.to_ptr()?; + let name = self.memory.read_c_str(name_ptr)?; + match self.machine_data.env_vars.get(name) { + Some(&var) => PrimVal::Ptr(var), + None => PrimVal::Bytes(0), + } + }; + self.write_primval(dest, result, dest_ty)?; + } + + "unsetenv" => { + let mut success = None; + { + let name_ptr = args[0].into_ptr(&mut self.memory)?; + if !name_ptr.is_null()? { + let name = self.memory.read_c_str(name_ptr.to_ptr()?)?; + if !name.is_empty() && !name.contains(&b'=') { + success = Some(self.machine_data.env_vars.remove(name)); + } + } + } + if let Some(old) = success { + if let Some(var) = old { + self.memory.deallocate(var, None, MemoryKind::Env.into())?; + } + self.write_null(dest, dest_ty)?; + } else { + self.write_primval(dest, PrimVal::from_i128(-1), dest_ty)?; + } + } + + "setenv" => { + let mut new = None; + { + let name_ptr = args[0].into_ptr(&mut self.memory)?; + let value_ptr = args[1].into_ptr(&mut self.memory)?.to_ptr()?; + let value = self.memory.read_c_str(value_ptr)?; + if !name_ptr.is_null()? 
{ + let name = self.memory.read_c_str(name_ptr.to_ptr()?)?; + if !name.is_empty() && !name.contains(&b'=') { + new = Some((name.to_owned(), value.to_owned())); + } + } + } + if let Some((name, value)) = new { + // +1 for the null terminator + let value_copy = self.memory.allocate( + (value.len() + 1) as u64, + 1, + MemoryKind::Env.into(), + )?; + self.memory.write_bytes(value_copy.into(), &value)?; + let trailing_zero_ptr = value_copy.offset(value.len() as u64, &self)?.into(); + self.memory.write_bytes(trailing_zero_ptr, &[0])?; + if let Some(var) = self.machine_data.env_vars.insert( + name.to_owned(), + value_copy, + ) + { + self.memory.deallocate(var, None, MemoryKind::Env.into())?; + } + self.write_null(dest, dest_ty)?; + } else { + self.write_primval(dest, PrimVal::from_i128(-1), dest_ty)?; + } + } + + "write" => { + let fd = self.value_to_primval(args[0])?.to_u64()?; + let buf = args[1].into_ptr(&mut self.memory)?; + let n = self.value_to_primval(args[2])?.to_u64()?; + trace!("Called write({:?}, {:?}, {:?})", fd, buf, n); + let result = if fd == 1 || fd == 2 { + // stdout/stderr + use std::io::{self, Write}; + + let buf_cont = self.memory.read_bytes(buf, n)?; + let res = if fd == 1 { + io::stdout().write(buf_cont) + } else { + io::stderr().write(buf_cont) + }; + match res { + Ok(n) => n as isize, + Err(_) => -1, + } + } else { + warn!("Ignored output to FD {}", fd); + n as isize // pretend it all went well + }; // now result is the value we return back to the program + self.write_primval( + dest, + PrimVal::Bytes(result as u128), + dest_ty, + )?; + } + + "strlen" => { + let ptr = args[0].into_ptr(&mut self.memory)?.to_ptr()?; + let n = self.memory.read_c_str(ptr)?.len(); + self.write_primval(dest, PrimVal::Bytes(n as u128), dest_ty)?; + } + + // Some things needed for sys::thread initialization to go through + "signal" | "sigaction" | "sigaltstack" => { + self.write_primval(dest, PrimVal::Bytes(0), dest_ty)?; + } + + "sysconf" => { + let name = self.value_to_primval(args[0])?.to_u64()?; + trace!("sysconf() called with name {}", name); + // cache the sysconf integers via miri's global cache + let paths = &[ + (&["libc", "_SC_PAGESIZE"], PrimVal::Bytes(4096)), + (&["libc", "_SC_GETPW_R_SIZE_MAX"], PrimVal::from_i128(-1)), + ]; + let mut result = None; + for &(path, path_value) in paths { + if let Ok(instance) = self.resolve_path(path) { + let cid = GlobalId { + instance, + promoted: None, + }; + // compute global if not cached + let val = match self.globals.get(&cid).cloned() { + Some(ptr) => self.value_to_primval(ValTy { value: Value::ByRef(ptr), ty: args[0].ty })?.to_u64()?, + None => eval_body_as_primval(self.tcx, instance)?.0.to_u64()?, + }; + if val == name { + result = Some(path_value); + break; + } + } + } + if let Some(result) = result { + self.write_primval(dest, result, dest_ty)?; + } else { + return err!(Unimplemented( + format!("Unimplemented sysconf name: {}", name), + )); + } + } + + // Hook pthread calls that go to the thread-local storage memory subsystem + "pthread_key_create" => { + let key_ptr = args[0].into_ptr(&mut self.memory)?; + + // Extract the function type out of the signature (that seems easier than constructing it ourselves...) 
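+ // A null pointer for the destructor argument simply means that no destructor was registered for this key.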
+ let dtor = match args[1].into_ptr(&mut self.memory)?.into_inner_primval() { + PrimVal::Ptr(dtor_ptr) => Some(self.memory.get_fn(dtor_ptr)?), + PrimVal::Bytes(0) => None, + PrimVal::Bytes(_) => return err!(ReadBytesAsPointer), + PrimVal::Undef => return err!(ReadUndefBytes), + }; + + // Figure out how large a pthread TLS key actually is. This is libc::pthread_key_t. + let key_type = args[0].ty.builtin_deref(true, ty::LvaluePreference::NoPreference) + .ok_or(EvalErrorKind::AbiViolation("Wrong signature used for pthread_key_create: First argument must be a raw pointer.".to_owned()))?.ty; + let key_size = { + let layout = self.type_layout(key_type)?; + layout.size(&self.tcx.data_layout) + }; + + // Create key and write it into the memory where key_ptr wants it + let key = self.memory.create_tls_key(dtor) as u128; + if key_size.bits() < 128 && key >= (1u128 << key_size.bits() as u128) { + return err!(OutOfTls); + } + self.memory.write_primval( + key_ptr.to_ptr()?, + PrimVal::Bytes(key), + key_size.bytes(), + false, + )?; + + // Return success (0) + self.write_null(dest, dest_ty)?; + } + "pthread_key_delete" => { + // The conversion into TlsKey here is a little fishy, but should work as long as usize >= libc::pthread_key_t + let key = self.value_to_primval(args[0])?.to_u64()? as TlsKey; + self.memory.delete_tls_key(key)?; + // Return success (0) + self.write_null(dest, dest_ty)?; + } + "pthread_getspecific" => { + // The conversion into TlsKey here is a little fishy, but should work as long as usize >= libc::pthread_key_t + let key = self.value_to_primval(args[0])?.to_u64()? as TlsKey; + let ptr = self.memory.load_tls(key)?; + self.write_ptr(dest, ptr, dest_ty)?; + } + "pthread_setspecific" => { + // The conversion into TlsKey here is a little fishy, but should work as long as usize >= libc::pthread_key_t + let key = self.value_to_primval(args[0])?.to_u64()? as TlsKey; + let new_ptr = args[1].into_ptr(&mut self.memory)?; + self.memory.store_tls(key, new_ptr)?; + + // Return success (0) + self.write_null(dest, dest_ty)?; + } + + // Stub out all the other pthread calls to just return 0 + link_name if link_name.starts_with("pthread_") => { + info!("ignoring C ABI call: {}", link_name); + self.write_null(dest, dest_ty)?; + } + + _ => { + return err!(Unimplemented( + format!("can't call C ABI function: {}", link_name), + )); + } + } + + // Since we pushed no stack frame, the main loop will act + // as if the call just completed and it's returning to the + // current frame. + self.dump_local(dest); + self.goto_block(dest_block); + Ok(()) + } + + /// Get an instance for a path. 
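+ /// The path is given as crate-rooted segments, e.g. `&["libc", "_SC_PAGESIZE"]` as used by the `sysconf` shim above.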
+ fn resolve_path(&self, path: &[&str]) -> EvalResult<'tcx, ty::Instance<'tcx>> { + self.tcx + .crates() + .iter() + .find(|&&krate| self.tcx.original_crate_name(krate) == path[0]) + .and_then(|krate| { + let krate = DefId { + krate: *krate, + index: CRATE_DEF_INDEX, + }; + let mut items = self.tcx.item_children(krate); + let mut path_it = path.iter().skip(1).peekable(); + + while let Some(segment) = path_it.next() { + for item in mem::replace(&mut items, Default::default()).iter() { + if item.ident.name == *segment { + if path_it.peek().is_none() { + return Some(ty::Instance::mono(self.tcx, item.def.def_id())); + } + + items = self.tcx.item_children(item.def.def_id()); + break; + } + } + } + None + }) + .ok_or_else(|| { + let path = path.iter().map(|&s| s.to_owned()).collect(); + EvalErrorKind::PathNotFound(path).into() + }) + } + + fn call_missing_fn( + &mut self, + instance: ty::Instance<'tcx>, + destination: Option<(Lvalue, mir::BasicBlock)>, + args: &[ValTy<'tcx>], + sig: ty::FnSig<'tcx>, + path: String, + ) -> EvalResult<'tcx> { + // In some cases in non-MIR libstd-mode, not having a destination is legit. Handle these early. + match &path[..] { + "std::panicking::rust_panic_with_hook" | + "core::panicking::panic_fmt::::panic_impl" | + "std::rt::begin_panic_fmt" => return err!(Panic), + _ => {} + } + + let dest_ty = sig.output(); + let (dest, dest_block) = destination.ok_or_else( + || EvalErrorKind::NoMirFor(path.clone()), + )?; + + if sig.abi == Abi::C { + // An external C function + // TODO: That functions actually has a similar preamble to what follows here. May make sense to + // unify these two mechanisms for "hooking into missing functions". + self.call_c_abi( + instance.def_id(), + args, + dest, + dest_ty, + dest_block, + )?; + return Ok(()); + } + + match &path[..] { + // Allocators are magic. They have no MIR, even when the rest of libstd does. 
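+ // They are emulated here directly on top of miri's own memory (see the allocate/deallocate/reallocate calls below).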
+ "alloc::heap::::__rust_alloc" => { + let size = self.value_to_primval(args[0])?.to_u64()?; + let align = self.value_to_primval(args[1])?.to_u64()?; + if size == 0 { + return err!(HeapAllocZeroBytes); + } + if !align.is_power_of_two() { + return err!(HeapAllocNonPowerOfTwoAlignment(align)); + } + let ptr = self.memory.allocate(size, align, MemoryKind::Rust.into())?; + self.write_primval(dest, PrimVal::Ptr(ptr), dest_ty)?; + } + "alloc::heap::::__rust_alloc_zeroed" => { + let size = self.value_to_primval(args[0])?.to_u64()?; + let align = self.value_to_primval(args[1])?.to_u64()?; + if size == 0 { + return err!(HeapAllocZeroBytes); + } + if !align.is_power_of_two() { + return err!(HeapAllocNonPowerOfTwoAlignment(align)); + } + let ptr = self.memory.allocate(size, align, MemoryKind::Rust.into())?; + self.memory.write_repeat(ptr.into(), 0, size)?; + self.write_primval(dest, PrimVal::Ptr(ptr), dest_ty)?; + } + "alloc::heap::::__rust_dealloc" => { + let ptr = args[0].into_ptr(&mut self.memory)?.to_ptr()?; + let old_size = self.value_to_primval(args[1])?.to_u64()?; + let align = self.value_to_primval(args[2])?.to_u64()?; + if old_size == 0 { + return err!(HeapAllocZeroBytes); + } + if !align.is_power_of_two() { + return err!(HeapAllocNonPowerOfTwoAlignment(align)); + } + self.memory.deallocate( + ptr, + Some((old_size, align)), + MemoryKind::Rust.into(), + )?; + } + "alloc::heap::::__rust_realloc" => { + let ptr = args[0].into_ptr(&mut self.memory)?.to_ptr()?; + let old_size = self.value_to_primval(args[1])?.to_u64()?; + let old_align = self.value_to_primval(args[2])?.to_u64()?; + let new_size = self.value_to_primval(args[3])?.to_u64()?; + let new_align = self.value_to_primval(args[4])?.to_u64()?; + if old_size == 0 || new_size == 0 { + return err!(HeapAllocZeroBytes); + } + if !old_align.is_power_of_two() { + return err!(HeapAllocNonPowerOfTwoAlignment(old_align)); + } + if !new_align.is_power_of_two() { + return err!(HeapAllocNonPowerOfTwoAlignment(new_align)); + } + let new_ptr = self.memory.reallocate( + ptr, + old_size, + old_align, + new_size, + new_align, + MemoryKind::Rust.into(), + )?; + self.write_primval(dest, PrimVal::Ptr(new_ptr), dest_ty)?; + } + + // A Rust function is missing, which means we are running with MIR missing for libstd (or other dependencies). + // Still, we can make many things mostly work by "emulating" or ignoring some functions. + "std::io::_print" => { + warn!( + "Ignoring output. To run programs that print, make sure you have a libstd with full MIR." + ); + } + "std::thread::Builder::new" => { + return err!(Unimplemented("miri does not support threading".to_owned())) + } + "std::env::args" => { + return err!(Unimplemented( + "miri does not support program arguments".to_owned(), + )) + } + "std::panicking::panicking" | + "std::rt::panicking" => { + // we abort on panic -> `std::rt::panicking` always returns false + let bool = self.tcx.types.bool; + self.write_primval(dest, PrimVal::from_bool(false), bool)?; + } + "std::sys::imp::c::::AddVectoredExceptionHandler" | + "std::sys::imp::c::::SetThreadStackGuarantee" => { + let usize = self.tcx.types.usize; + // any non zero value works for the stdlib. This is just used for stackoverflows anyway + self.write_primval(dest, PrimVal::Bytes(1), usize)?; + }, + _ => return err!(NoMirFor(path)), + } + + // Since we pushed no stack frame, the main loop will act + // as if the call just completed and it's returning to the + // current frame. 
+ self.dump_local(dest); + self.goto_block(dest_block); + return Ok(()); + } +} diff --git a/src/tools/miri/miri/helpers.rs b/src/tools/miri/miri/helpers.rs new file mode 100644 index 0000000000..809e5ebfac --- /dev/null +++ b/src/tools/miri/miri/helpers.rs @@ -0,0 +1,73 @@ +use rustc_miri::interpret::{Pointer, EvalResult, PrimVal, EvalContext}; + +use rustc::ty::Ty; + +pub trait EvalContextExt<'tcx> { + fn wrapping_pointer_offset( + &self, + ptr: Pointer, + pointee_ty: Ty<'tcx>, + offset: i64, + ) -> EvalResult<'tcx, Pointer>; + + fn pointer_offset( + &self, + ptr: Pointer, + pointee_ty: Ty<'tcx>, + offset: i64, + ) -> EvalResult<'tcx, Pointer>; +} + +impl<'a, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'tcx, super::Evaluator> { + fn wrapping_pointer_offset( + &self, + ptr: Pointer, + pointee_ty: Ty<'tcx>, + offset: i64, + ) -> EvalResult<'tcx, Pointer> { + // FIXME: assuming here that type size is < i64::max_value() + let pointee_size = self.type_size(pointee_ty)?.expect( + "cannot offset a pointer to an unsized type", + ) as i64; + let offset = offset.overflowing_mul(pointee_size).0; + ptr.wrapping_signed_offset(offset, self) + } + + fn pointer_offset( + &self, + ptr: Pointer, + pointee_ty: Ty<'tcx>, + offset: i64, + ) -> EvalResult<'tcx, Pointer> { + // This function raises an error if the offset moves the pointer outside of its allocation. We consider + // ZSTs their own huge allocation that doesn't overlap with anything (and nothing moves in there because the size is 0). + // We also consider the NULL pointer its own separate allocation, and all the remaining integers pointers their own + // allocation. + + if ptr.is_null()? { + // NULL pointers must only be offset by 0 + return if offset == 0 { + Ok(ptr) + } else { + err!(InvalidNullPointerUsage) + }; + } + // FIXME: assuming here that type size is < i64::max_value() + let pointee_size = self.type_size(pointee_ty)?.expect( + "cannot offset a pointer to an unsized type", + ) as i64; + return if let Some(offset) = offset.checked_mul(pointee_size) { + let ptr = ptr.signed_offset(offset, self)?; + // Do not do bounds-checking for integers; they can never alias a normal pointer anyway. + if let PrimVal::Ptr(ptr) = ptr.into_inner_primval() { + self.memory.check_bounds(ptr, false)?; + } else if ptr.is_null()? { + // We moved *to* a NULL pointer. That seems wrong, LLVM considers the NULL pointer its own small allocation. Reject this, for now. 
+ return err!(InvalidNullPointerUsage); + } + Ok(ptr) + } else { + err!(OverflowingMath) + }; + } +} diff --git a/src/tools/miri/miri/intrinsic.rs b/src/tools/miri/miri/intrinsic.rs new file mode 100644 index 0000000000..bcff3b4aa9 --- /dev/null +++ b/src/tools/miri/miri/intrinsic.rs @@ -0,0 +1,685 @@ +use rustc::mir; +use rustc::traits::Reveal; +use rustc::ty::layout::Layout; +use rustc::ty::{self, Ty}; + +use rustc_miri::interpret::{EvalResult, Lvalue, LvalueExtra, PrimVal, PrimValKind, Value, Pointer, + HasMemory, AccessKind, EvalContext, PtrAndAlign, ValTy}; + +use helpers::EvalContextExt as HelperEvalContextExt; + +pub trait EvalContextExt<'tcx> { + fn call_intrinsic( + &mut self, + instance: ty::Instance<'tcx>, + args: &[ValTy<'tcx>], + dest: Lvalue, + dest_ty: Ty<'tcx>, + dest_layout: &'tcx Layout, + target: mir::BasicBlock, + ) -> EvalResult<'tcx>; +} + +impl<'a, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'tcx, super::Evaluator> { + fn call_intrinsic( + &mut self, + instance: ty::Instance<'tcx>, + args: &[ValTy<'tcx>], + dest: Lvalue, + dest_ty: Ty<'tcx>, + dest_layout: &'tcx Layout, + target: mir::BasicBlock, + ) -> EvalResult<'tcx> { + let substs = instance.substs; + + let intrinsic_name = &self.tcx.item_name(instance.def_id())[..]; + match intrinsic_name { + "align_offset" => { + // FIXME: return a real value in case the target allocation has an + // alignment bigger than the one requested + self.write_primval(dest, PrimVal::Bytes(u128::max_value()), dest_ty)?; + }, + + "add_with_overflow" => { + self.intrinsic_with_overflow( + mir::BinOp::Add, + args[0], + args[1], + dest, + dest_ty, + )? + } + + "sub_with_overflow" => { + self.intrinsic_with_overflow( + mir::BinOp::Sub, + args[0], + args[1], + dest, + dest_ty, + )? + } + + "mul_with_overflow" => { + self.intrinsic_with_overflow( + mir::BinOp::Mul, + args[0], + args[1], + dest, + dest_ty, + )? + } + + "arith_offset" => { + let offset = self.value_to_primval(args[1])?.to_i128()? as i64; + let ptr = args[0].into_ptr(&self.memory)?; + let result_ptr = self.wrapping_pointer_offset(ptr, substs.type_at(0), offset)?; + self.write_ptr(dest, result_ptr, dest_ty)?; + } + + "assume" => { + let cond = self.value_to_primval(args[0])?.to_bool()?; + if !cond { + return err!(AssumptionNotHeld); + } + } + + "atomic_load" | + "atomic_load_relaxed" | + "atomic_load_acq" | + "volatile_load" => { + let ptr = args[0].into_ptr(&self.memory)?; + let valty = ValTy { + value: Value::by_ref(ptr), + ty: substs.type_at(0), + }; + self.write_value(valty, dest)?; + } + + "atomic_store" | + "atomic_store_relaxed" | + "atomic_store_rel" | + "volatile_store" => { + let ty = substs.type_at(0); + let dest = args[0].into_ptr(&self.memory)?; + self.write_value_to_ptr(args[1].value, dest, ty)?; + } + + "atomic_fence_acq" => { + // we are inherently singlethreaded and singlecored, this is a nop + } + + _ if intrinsic_name.starts_with("atomic_xchg") => { + let ty = substs.type_at(0); + let ptr = args[0].into_ptr(&self.memory)?; + let change = self.value_to_primval(args[1])?; + let old = self.read_value(ptr, ty)?; + let old = match old { + Value::ByVal(val) => val, + Value::ByRef { .. } => bug!("just read the value, can't be byref"), + Value::ByValPair(..) 
=> bug!("atomic_xchg doesn't work with nonprimitives"), + }; + self.write_primval(dest, old, ty)?; + self.write_primval( + Lvalue::from_primval_ptr(ptr), + change, + ty, + )?; + } + + _ if intrinsic_name.starts_with("atomic_cxchg") => { + let ty = substs.type_at(0); + let ptr = args[0].into_ptr(&self.memory)?; + let expect_old = self.value_to_primval(args[1])?; + let change = self.value_to_primval(args[2])?; + let old = self.read_value(ptr, ty)?; + let old = match old { + Value::ByVal(val) => val, + Value::ByRef { .. } => bug!("just read the value, can't be byref"), + Value::ByValPair(..) => bug!("atomic_cxchg doesn't work with nonprimitives"), + }; + let (val, _) = self.binary_op(mir::BinOp::Eq, old, ty, expect_old, ty)?; + let dest = self.force_allocation(dest)?.to_ptr()?; + self.write_pair_to_ptr(old, val, dest, dest_ty)?; + self.write_primval( + Lvalue::from_primval_ptr(ptr), + change, + ty, + )?; + } + + "atomic_or" | + "atomic_or_acq" | + "atomic_or_rel" | + "atomic_or_acqrel" | + "atomic_or_relaxed" | + "atomic_xor" | + "atomic_xor_acq" | + "atomic_xor_rel" | + "atomic_xor_acqrel" | + "atomic_xor_relaxed" | + "atomic_and" | + "atomic_and_acq" | + "atomic_and_rel" | + "atomic_and_acqrel" | + "atomic_and_relaxed" | + "atomic_xadd" | + "atomic_xadd_acq" | + "atomic_xadd_rel" | + "atomic_xadd_acqrel" | + "atomic_xadd_relaxed" | + "atomic_xsub" | + "atomic_xsub_acq" | + "atomic_xsub_rel" | + "atomic_xsub_acqrel" | + "atomic_xsub_relaxed" => { + let ty = substs.type_at(0); + let ptr = args[0].into_ptr(&self.memory)?; + let change = self.value_to_primval(args[1])?; + let old = self.read_value(ptr, ty)?; + let old = match old { + Value::ByVal(val) => val, + Value::ByRef { .. } => bug!("just read the value, can't be byref"), + Value::ByValPair(..) => { + bug!("atomic_xadd_relaxed doesn't work with nonprimitives") + } + }; + self.write_primval(dest, old, ty)?; + let op = match intrinsic_name.split('_').nth(1).unwrap() { + "or" => mir::BinOp::BitOr, + "xor" => mir::BinOp::BitXor, + "and" => mir::BinOp::BitAnd, + "xadd" => mir::BinOp::Add, + "xsub" => mir::BinOp::Sub, + _ => bug!(), + }; + // FIXME: what do atomics do on overflow? + let (val, _) = self.binary_op(op, old, ty, change, ty)?; + self.write_primval(Lvalue::from_primval_ptr(ptr), val, ty)?; + } + + "breakpoint" => unimplemented!(), // halt miri + + "copy" | + "copy_nonoverlapping" => { + let elem_ty = substs.type_at(0); + let elem_size = self.type_size(elem_ty)?.expect("cannot copy unsized value"); + let count = self.value_to_primval(args[2])?.to_u64()?; + if count * elem_size != 0 { + // TODO: We do not even validate alignment for the 0-bytes case. libstd relies on this in vec::IntoIter::next. + // Also see the write_bytes intrinsic. + let elem_align = self.type_align(elem_ty)?; + let src = args[0].into_ptr(&self.memory)?; + let dest = args[1].into_ptr(&self.memory)?; + self.memory.copy( + src, + dest, + count * elem_size, + elem_align, + intrinsic_name.ends_with("_nonoverlapping"), + )?; + } + } + + "ctpop" | "cttz" | "cttz_nonzero" | "ctlz" | "ctlz_nonzero" | "bswap" => { + let ty = substs.type_at(0); + let num = self.value_to_primval(args[0])?.to_bytes()?; + let kind = self.ty_to_primval_kind(ty)?; + let num = if intrinsic_name.ends_with("_nonzero") { + if num == 0 { + return err!(Intrinsic(format!("{} called on 0", intrinsic_name))); + } + numeric_intrinsic(intrinsic_name.trim_right_matches("_nonzero"), num, kind)? + } else { + numeric_intrinsic(intrinsic_name, num, kind)? 
+ }; + self.write_primval(dest, num, ty)?; + } + + "discriminant_value" => { + let ty = substs.type_at(0); + let adt_ptr = args[0].into_ptr(&self.memory)?.to_ptr()?; + let discr_val = self.read_discriminant_value(adt_ptr, ty)?; + self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?; + } + + "sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" | + "log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" => { + let f = self.value_to_primval(args[0])?.to_f32()?; + let f = match intrinsic_name { + "sinf32" => f.sin(), + "fabsf32" => f.abs(), + "cosf32" => f.cos(), + "sqrtf32" => f.sqrt(), + "expf32" => f.exp(), + "exp2f32" => f.exp2(), + "logf32" => f.ln(), + "log10f32" => f.log10(), + "log2f32" => f.log2(), + "floorf32" => f.floor(), + "ceilf32" => f.ceil(), + "truncf32" => f.trunc(), + _ => bug!(), + }; + self.write_primval(dest, PrimVal::from_f32(f), dest_ty)?; + } + + "sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" | + "log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" => { + let f = self.value_to_primval(args[0])?.to_f64()?; + let f = match intrinsic_name { + "sinf64" => f.sin(), + "fabsf64" => f.abs(), + "cosf64" => f.cos(), + "sqrtf64" => f.sqrt(), + "expf64" => f.exp(), + "exp2f64" => f.exp2(), + "logf64" => f.ln(), + "log10f64" => f.log10(), + "log2f64" => f.log2(), + "floorf64" => f.floor(), + "ceilf64" => f.ceil(), + "truncf64" => f.trunc(), + _ => bug!(), + }; + self.write_primval(dest, PrimVal::from_f64(f), dest_ty)?; + } + + "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => { + let ty = substs.type_at(0); + let a = self.value_to_primval(args[0])?; + let b = self.value_to_primval(args[1])?; + let op = match intrinsic_name { + "fadd_fast" => mir::BinOp::Add, + "fsub_fast" => mir::BinOp::Sub, + "fmul_fast" => mir::BinOp::Mul, + "fdiv_fast" => mir::BinOp::Div, + "frem_fast" => mir::BinOp::Rem, + _ => bug!(), + }; + let result = self.binary_op(op, a, ty, b, ty)?; + self.write_primval(dest, result.0, dest_ty)?; + } + + "likely" | "unlikely" | "forget" => {} + + "init" => { + let size = self.type_size(dest_ty)?.expect("cannot zero unsized value"); + let init = |this: &mut Self, val: Value| { + let zero_val = match val { + Value::ByRef(PtrAndAlign { ptr, .. }) => { + // These writes have no alignment restriction anyway. + this.memory.write_repeat(ptr, 0, size)?; + val + } + // TODO(solson): Revisit this, it's fishy to check for Undef here. + Value::ByVal(PrimVal::Undef) => { + match this.ty_to_primval_kind(dest_ty) { + Ok(_) => Value::ByVal(PrimVal::Bytes(0)), + Err(_) => { + let ptr = this.alloc_ptr_with_substs(dest_ty, substs)?; + let ptr = Pointer::from(PrimVal::Ptr(ptr)); + this.memory.write_repeat(ptr, 0, size)?; + Value::by_ref(ptr) + } + } + } + Value::ByVal(_) => Value::ByVal(PrimVal::Bytes(0)), + Value::ByValPair(..) => { + Value::ByValPair(PrimVal::Bytes(0), PrimVal::Bytes(0)) + } + }; + Ok(zero_val) + }; + match dest { + Lvalue::Local { frame, local } => self.modify_local(frame, local, init)?, + Lvalue::Ptr { + ptr: PtrAndAlign { ptr, aligned: true }, + extra: LvalueExtra::None, + } => self.memory.write_repeat(ptr, 0, size)?, + Lvalue::Ptr { .. 
} => { + bug!("init intrinsic tried to write to fat or unaligned ptr target") + } + } + } + + "min_align_of" => { + let elem_ty = substs.type_at(0); + let elem_align = self.type_align(elem_ty)?; + let align_val = PrimVal::from_u128(elem_align as u128); + self.write_primval(dest, align_val, dest_ty)?; + } + + "pref_align_of" => { + let ty = substs.type_at(0); + let layout = self.type_layout(ty)?; + let align = layout.align(&self.tcx.data_layout).pref(); + let align_val = PrimVal::from_u128(align as u128); + self.write_primval(dest, align_val, dest_ty)?; + } + + "move_val_init" => { + let ty = substs.type_at(0); + let ptr = args[0].into_ptr(&self.memory)?; + self.write_value_to_ptr(args[1].value, ptr, ty)?; + } + + "needs_drop" => { + let ty = substs.type_at(0); + let env = ty::ParamEnv::empty(Reveal::All); + let needs_drop = ty.needs_drop(self.tcx, env); + self.write_primval( + dest, + PrimVal::from_bool(needs_drop), + dest_ty, + )?; + } + + "offset" => { + let offset = self.value_to_primval(args[1])?.to_i128()? as i64; + let ptr = args[0].into_ptr(&self.memory)?; + let result_ptr = self.pointer_offset(ptr, substs.type_at(0), offset)?; + self.write_ptr(dest, result_ptr, dest_ty)?; + } + + "overflowing_sub" => { + self.intrinsic_overflowing( + mir::BinOp::Sub, + args[0], + args[1], + dest, + dest_ty, + )?; + } + + "overflowing_mul" => { + self.intrinsic_overflowing( + mir::BinOp::Mul, + args[0], + args[1], + dest, + dest_ty, + )?; + } + + "overflowing_add" => { + self.intrinsic_overflowing( + mir::BinOp::Add, + args[0], + args[1], + dest, + dest_ty, + )?; + } + + "powf32" => { + let f = self.value_to_primval(args[0])?.to_f32()?; + let f2 = self.value_to_primval(args[1])?.to_f32()?; + self.write_primval( + dest, + PrimVal::from_f32(f.powf(f2)), + dest_ty, + )?; + } + + "powf64" => { + let f = self.value_to_primval(args[0])?.to_f64()?; + let f2 = self.value_to_primval(args[1])?.to_f64()?; + self.write_primval( + dest, + PrimVal::from_f64(f.powf(f2)), + dest_ty, + )?; + } + + "fmaf32" => { + let a = self.value_to_primval(args[0])?.to_f32()?; + let b = self.value_to_primval(args[1])?.to_f32()?; + let c = self.value_to_primval(args[2])?.to_f32()?; + self.write_primval( + dest, + PrimVal::from_f32(a * b + c), + dest_ty, + )?; + } + + "fmaf64" => { + let a = self.value_to_primval(args[0])?.to_f64()?; + let b = self.value_to_primval(args[1])?.to_f64()?; + let c = self.value_to_primval(args[2])?.to_f64()?; + self.write_primval( + dest, + PrimVal::from_f64(a * b + c), + dest_ty, + )?; + } + + "powif32" => { + let f = self.value_to_primval(args[0])?.to_f32()?; + let i = self.value_to_primval(args[1])?.to_i128()?; + self.write_primval( + dest, + PrimVal::from_f32(f.powi(i as i32)), + dest_ty, + )?; + } + + "powif64" => { + let f = self.value_to_primval(args[0])?.to_f64()?; + let i = self.value_to_primval(args[1])?.to_i128()?; + self.write_primval( + dest, + PrimVal::from_f64(f.powi(i as i32)), + dest_ty, + )?; + } + + "size_of" => { + let ty = substs.type_at(0); + let size = self.type_size(ty)?.expect( + "size_of intrinsic called on unsized value", + ) as u128; + self.write_primval(dest, PrimVal::from_u128(size), dest_ty)?; + } + + "size_of_val" => { + let ty = substs.type_at(0); + let (size, _) = self.size_and_align_of_dst(ty, args[0].value)?; + self.write_primval( + dest, + PrimVal::from_u128(size as u128), + dest_ty, + )?; + } + + "min_align_of_val" | + "align_of_val" => { + let ty = substs.type_at(0); + let (_, align) = self.size_and_align_of_dst(ty, args[0].value)?; + self.write_primval( + dest, + 
PrimVal::from_u128(align as u128), + dest_ty, + )?; + } + + "type_name" => { + let ty = substs.type_at(0); + let ty_name = ty.to_string(); + let value = self.str_to_value(&ty_name)?; + self.write_value(ValTy { value, ty: dest_ty }, dest)?; + } + "type_id" => { + let ty = substs.type_at(0); + let n = self.tcx.type_id_hash(ty); + self.write_primval(dest, PrimVal::Bytes(n as u128), dest_ty)?; + } + + "transmute" => { + let src_ty = substs.type_at(0); + let ptr = self.force_allocation(dest)?.to_ptr()?; + self.write_maybe_aligned_mut( + /*aligned*/ + false, + |ectx| { + ectx.write_value_to_ptr(args[0].value, ptr.into(), src_ty) + }, + )?; + } + + "unchecked_shl" => { + let bits = self.type_size(dest_ty)?.expect( + "intrinsic can't be called on unsized type", + ) as u128 * 8; + let rhs = self.value_to_primval(args[1])? + .to_bytes()?; + if rhs >= bits { + return err!(Intrinsic( + format!("Overflowing shift by {} in unchecked_shl", rhs), + )); + } + self.intrinsic_overflowing( + mir::BinOp::Shl, + args[0], + args[1], + dest, + dest_ty, + )?; + } + + "unchecked_shr" => { + let bits = self.type_size(dest_ty)?.expect( + "intrinsic can't be called on unsized type", + ) as u128 * 8; + let rhs = self.value_to_primval(args[1])? + .to_bytes()?; + if rhs >= bits { + return err!(Intrinsic( + format!("Overflowing shift by {} in unchecked_shr", rhs), + )); + } + self.intrinsic_overflowing( + mir::BinOp::Shr, + args[0], + args[1], + dest, + dest_ty, + )?; + } + + "unchecked_div" => { + let rhs = self.value_to_primval(args[1])? + .to_bytes()?; + if rhs == 0 { + return err!(Intrinsic(format!("Division by 0 in unchecked_div"))); + } + self.intrinsic_overflowing( + mir::BinOp::Div, + args[0], + args[1], + dest, + dest_ty, + )?; + } + + "unchecked_rem" => { + let rhs = self.value_to_primval(args[1])? + .to_bytes()?; + if rhs == 0 { + return err!(Intrinsic(format!("Division by 0 in unchecked_rem"))); + } + self.intrinsic_overflowing( + mir::BinOp::Rem, + args[0], + args[1], + dest, + dest_ty, + )?; + } + + "uninit" => { + let size = dest_layout.size(&self.tcx.data_layout).bytes(); + let uninit = |this: &mut Self, val: Value| match val { + Value::ByRef(PtrAndAlign { ptr, .. }) => { + this.memory.mark_definedness(ptr, size, false)?; + Ok(val) + } + _ => Ok(Value::ByVal(PrimVal::Undef)), + }; + match dest { + Lvalue::Local { frame, local } => self.modify_local(frame, local, uninit)?, + Lvalue::Ptr { + ptr: PtrAndAlign { ptr, aligned: true }, + extra: LvalueExtra::None, + } => self.memory.mark_definedness(ptr, size, false)?, + Lvalue::Ptr { .. } => { + bug!("uninit intrinsic tried to write to fat or unaligned ptr target") + } + } + } + + "write_bytes" => { + let ty = substs.type_at(0); + let ty_align = self.type_align(ty)?; + let val_byte = self.value_to_primval(args[1])?.to_u128()? as u8; + let size = self.type_size(ty)?.expect( + "write_bytes() type must be sized", + ); + let ptr = args[0].into_ptr(&self.memory)?; + let count = self.value_to_primval(args[2])?.to_u64()?; + if count > 0 { + // HashMap relies on write_bytes on a NULL ptr with count == 0 to work + // TODO: Should we, at least, validate the alignment? 
(Also see the copy intrinsic) + self.memory.check_align(ptr, ty_align, Some(AccessKind::Write))?; + self.memory.write_repeat(ptr, val_byte, size * count)?; + } + } + + name => return err!(Unimplemented(format!("unimplemented intrinsic: {}", name))), + } + + self.goto_block(target); + + // Since we pushed no stack frame, the main loop will act + // as if the call just completed and it's returning to the + // current frame. + Ok(()) + } +} + +fn numeric_intrinsic<'tcx>( + name: &str, + bytes: u128, + kind: PrimValKind, +) -> EvalResult<'tcx, PrimVal> { + macro_rules! integer_intrinsic { + ($method:ident) => ({ + use rustc_miri::interpret::PrimValKind::*; + let result_bytes = match kind { + I8 => (bytes as i8).$method() as u128, + U8 => (bytes as u8).$method() as u128, + I16 => (bytes as i16).$method() as u128, + U16 => (bytes as u16).$method() as u128, + I32 => (bytes as i32).$method() as u128, + U32 => (bytes as u32).$method() as u128, + I64 => (bytes as i64).$method() as u128, + U64 => (bytes as u64).$method() as u128, + I128 => (bytes as i128).$method() as u128, + U128 => bytes.$method() as u128, + _ => bug!("invalid `{}` argument: {:?}", name, bytes), + }; + + PrimVal::Bytes(result_bytes) + }); + } + + let result_val = match name { + "bswap" => integer_intrinsic!(swap_bytes), + "ctlz" => integer_intrinsic!(leading_zeros), + "ctpop" => integer_intrinsic!(count_ones), + "cttz" => integer_intrinsic!(trailing_zeros), + _ => bug!("not a numeric intrinsic: {}", name), + }; + + Ok(result_val) +} diff --git a/src/tools/miri/miri/lib.rs b/src/tools/miri/miri/lib.rs new file mode 100644 index 0000000000..428724f7de --- /dev/null +++ b/src/tools/miri/miri/lib.rs @@ -0,0 +1,270 @@ +#![feature( + i128_type, + rustc_private, +)] + +// From rustc. +#[macro_use] +extern crate log; +#[macro_use] +extern crate rustc; +extern crate syntax; + +use rustc::ty::{self, TyCtxt}; +use rustc::ty::layout::Layout; +use rustc::hir::def_id::DefId; +use rustc::mir; + +use syntax::ast::Mutability; +use syntax::codemap::Span; + +use std::collections::{HashMap, BTreeMap}; + +#[macro_use] +extern crate rustc_miri; +pub use rustc_miri::interpret::*; + +mod fn_call; +mod operator; +mod intrinsic; +mod helpers; +mod memory; +mod tls; + +use fn_call::EvalContextExt as MissingFnsEvalContextExt; +use operator::EvalContextExt as OperatorEvalContextExt; +use intrinsic::EvalContextExt as IntrinsicEvalContextExt; +use tls::EvalContextExt as TlsEvalContextExt; + +pub fn eval_main<'a, 'tcx: 'a>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + main_id: DefId, + start_wrapper: Option, + limits: ResourceLimits, +) { + fn run_main<'a, 'tcx: 'a>( + ecx: &mut rustc_miri::interpret::EvalContext<'a, 'tcx, Evaluator>, + main_id: DefId, + start_wrapper: Option, + ) -> EvalResult<'tcx> { + let main_instance = ty::Instance::mono(ecx.tcx, main_id); + let main_mir = ecx.load_mir(main_instance.def)?; + let mut cleanup_ptr = None; // Pointer to be deallocated when we are done + + if !main_mir.return_ty.is_nil() || main_mir.arg_count != 0 { + return err!(Unimplemented( + "miri does not support main functions without `fn()` type signatures" + .to_owned(), + )); + } + + if let Some(start_id) = start_wrapper { + let start_instance = ty::Instance::mono(ecx.tcx, start_id); + let start_mir = ecx.load_mir(start_instance.def)?; + + if start_mir.arg_count != 3 { + return err!(AbiViolation(format!( + "'start' lang item should have three arguments, but has {}", + start_mir.arg_count + ))); + } + + // Return value + let size = ecx.tcx.data_layout.pointer_size.bytes(); + let 
align = ecx.tcx.data_layout.pointer_align.abi(); + let ret_ptr = ecx.memory_mut().allocate(size, align, MemoryKind::Stack)?; + cleanup_ptr = Some(ret_ptr); + + // Push our stack frame + ecx.push_stack_frame( + start_instance, + start_mir.span, + start_mir, + Lvalue::from_ptr(ret_ptr), + StackPopCleanup::None, + )?; + + let mut args = ecx.frame().mir.args_iter(); + + // First argument: pointer to main() + let main_ptr = ecx.memory_mut().create_fn_alloc(main_instance); + let dest = ecx.eval_lvalue(&mir::Lvalue::Local(args.next().unwrap()))?; + let main_ty = main_instance.def.def_ty(ecx.tcx); + let main_ptr_ty = ecx.tcx.mk_fn_ptr(main_ty.fn_sig(ecx.tcx)); + ecx.write_value( + ValTy { + value: Value::ByVal(PrimVal::Ptr(main_ptr)), + ty: main_ptr_ty, + }, + dest, + )?; + + // Second argument (argc): 1 + let dest = ecx.eval_lvalue(&mir::Lvalue::Local(args.next().unwrap()))?; + let ty = ecx.tcx.types.isize; + ecx.write_primval(dest, PrimVal::Bytes(1), ty)?; + + // FIXME: extract main source file path + // Third argument (argv): &[b"foo"] + let dest = ecx.eval_lvalue(&mir::Lvalue::Local(args.next().unwrap()))?; + let ty = ecx.tcx.mk_imm_ptr(ecx.tcx.mk_imm_ptr(ecx.tcx.types.u8)); + let foo = ecx.memory.allocate_cached(b"foo\0")?; + let ptr_size = ecx.memory.pointer_size(); + let foo_ptr = ecx.memory.allocate(ptr_size * 1, ptr_size, MemoryKind::UninitializedStatic)?; + ecx.memory.write_primval(foo_ptr.into(), PrimVal::Ptr(foo.into()), ptr_size, false)?; + ecx.memory.mark_static_initalized(foo_ptr.alloc_id, Mutability::Immutable)?; + ecx.write_ptr(dest, foo_ptr.into(), ty)?; + } else { + ecx.push_stack_frame( + main_instance, + main_mir.span, + main_mir, + Lvalue::undef(), + StackPopCleanup::None, + )?; + } + + while ecx.step()? {} + ecx.run_tls_dtors()?; + if let Some(cleanup_ptr) = cleanup_ptr { + ecx.memory_mut().deallocate( + cleanup_ptr, + None, + MemoryKind::Stack, + )?; + } + Ok(()) + } + + let mut ecx = EvalContext::new(tcx, limits, Default::default(), Default::default()); + match run_main(&mut ecx, main_id, start_wrapper) { + Ok(()) => { + let leaks = ecx.memory().leak_report(); + if leaks != 0 { + tcx.sess.err("the evaluated program leaked memory"); + } + } + Err(mut e) => { + ecx.report(&mut e); + } + } +} + +pub struct Evaluator; +#[derive(Default)] +pub struct EvaluatorData { + /// Environment variables set by `setenv` + /// Miri does not expose env vars from the host to the emulated program + pub(crate) env_vars: HashMap, MemoryPointer>, +} + +pub type TlsKey = usize; + +#[derive(Copy, Clone, Debug)] +pub struct TlsEntry<'tcx> { + data: Pointer, // Will eventually become a map from thread IDs to `Pointer`s, if we ever support more than one thread. + dtor: Option>, +} + +#[derive(Default)] +pub struct MemoryData<'tcx> { + /// The Key to use for the next thread-local allocation. + next_thread_local: TlsKey, + + /// pthreads-style thread-local storage. 
+ thread_local: BTreeMap>, +} + +impl<'tcx> Machine<'tcx> for Evaluator { + type Data = EvaluatorData; + type MemoryData = MemoryData<'tcx>; + type MemoryKinds = memory::MemoryKind; + + /// Returns Ok() when the function was handled, fail otherwise + fn eval_fn_call<'a>( + ecx: &mut EvalContext<'a, 'tcx, Self>, + instance: ty::Instance<'tcx>, + destination: Option<(Lvalue, mir::BasicBlock)>, + args: &[ValTy<'tcx>], + span: Span, + sig: ty::FnSig<'tcx>, + ) -> EvalResult<'tcx, bool> { + ecx.eval_fn_call(instance, destination, args, span, sig) + } + + fn call_intrinsic<'a>( + ecx: &mut rustc_miri::interpret::EvalContext<'a, 'tcx, Self>, + instance: ty::Instance<'tcx>, + args: &[ValTy<'tcx>], + dest: Lvalue, + dest_ty: ty::Ty<'tcx>, + dest_layout: &'tcx Layout, + target: mir::BasicBlock, + ) -> EvalResult<'tcx> { + ecx.call_intrinsic(instance, args, dest, dest_ty, dest_layout, target) + } + + fn try_ptr_op<'a>( + ecx: &rustc_miri::interpret::EvalContext<'a, 'tcx, Self>, + bin_op: mir::BinOp, + left: PrimVal, + left_ty: ty::Ty<'tcx>, + right: PrimVal, + right_ty: ty::Ty<'tcx>, + ) -> EvalResult<'tcx, Option<(PrimVal, bool)>> { + ecx.ptr_op(bin_op, left, left_ty, right, right_ty) + } + + fn mark_static_initialized(m: memory::MemoryKind) -> EvalResult<'tcx> { + use memory::MemoryKind::*; + match m { + // FIXME: This could be allowed, but not for env vars set during miri execution + Env => err!(Unimplemented("statics can't refer to env vars".to_owned())), + _ => Ok(()), + } + } + + fn box_alloc<'a>( + ecx: &mut EvalContext<'a, 'tcx, Self>, + ty: ty::Ty<'tcx>, + ) -> EvalResult<'tcx, PrimVal> { + // FIXME: call the `exchange_malloc` lang item if available + let size = ecx.type_size(ty)?.expect("box only works with sized types"); + let align = ecx.type_align(ty)?; + if size == 0 { + Ok(PrimVal::Bytes(align.into())) + } else { + ecx.memory + .allocate(size, align, MemoryKind::Machine(memory::MemoryKind::Rust)) + .map(PrimVal::Ptr) + } + } + + fn global_item_with_linkage<'a>( + ecx: &mut EvalContext<'a, 'tcx, Self>, + instance: ty::Instance<'tcx>, + mutability: Mutability, + ) -> EvalResult<'tcx> { + // FIXME: check that it's `#[linkage = "extern_weak"]` + trace!("Initializing an extern global with NULL"); + let ptr_size = ecx.memory.pointer_size(); + let ptr = ecx.memory.allocate( + ptr_size, + ptr_size, + MemoryKind::UninitializedStatic, + )?; + ecx.memory.write_ptr_sized_unsigned(ptr, PrimVal::Bytes(0))?; + ecx.memory.mark_static_initalized(ptr.alloc_id, mutability)?; + ecx.globals.insert( + GlobalId { + instance, + promoted: None, + }, + PtrAndAlign { + ptr: ptr.into(), + aligned: true, + }, + ); + Ok(()) + } +} diff --git a/src/tools/miri/miri/memory.rs b/src/tools/miri/miri/memory.rs new file mode 100644 index 0000000000..110540c0cf --- /dev/null +++ b/src/tools/miri/miri/memory.rs @@ -0,0 +1,16 @@ + +#[derive(Debug, PartialEq, Copy, Clone)] +pub enum MemoryKind { + /// Error if deallocated any other way than `rust_deallocate` + Rust, + /// Error if deallocated any other way than `free` + C, + /// Part of env var emulation + Env, +} + +impl Into<::rustc_miri::interpret::MemoryKind> for MemoryKind { + fn into(self) -> ::rustc_miri::interpret::MemoryKind { + ::rustc_miri::interpret::MemoryKind::Machine(self) + } +} diff --git a/src/tools/miri/miri/operator.rs b/src/tools/miri/miri/operator.rs new file mode 100644 index 0000000000..6d68aadf96 --- /dev/null +++ b/src/tools/miri/miri/operator.rs @@ -0,0 +1,175 @@ +use rustc::ty; +use rustc::mir; + +use rustc_miri::interpret::*; + +use 
helpers::EvalContextExt as HelperEvalContextExt; + +pub trait EvalContextExt<'tcx> { + fn ptr_op( + &self, + bin_op: mir::BinOp, + left: PrimVal, + left_ty: ty::Ty<'tcx>, + right: PrimVal, + right_ty: ty::Ty<'tcx>, + ) -> EvalResult<'tcx, Option<(PrimVal, bool)>>; + + fn ptr_int_arithmetic( + &self, + bin_op: mir::BinOp, + left: MemoryPointer, + right: i128, + signed: bool, + ) -> EvalResult<'tcx, (PrimVal, bool)>; +} + +impl<'a, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'tcx, super::Evaluator> { + fn ptr_op( + &self, + bin_op: mir::BinOp, + left: PrimVal, + left_ty: ty::Ty<'tcx>, + right: PrimVal, + right_ty: ty::Ty<'tcx>, + ) -> EvalResult<'tcx, Option<(PrimVal, bool)>> { + use rustc_miri::interpret::PrimValKind::*; + use rustc::mir::BinOp::*; + let usize = PrimValKind::from_uint_size(self.memory.pointer_size()); + let isize = PrimValKind::from_int_size(self.memory.pointer_size()); + let left_kind = self.ty_to_primval_kind(left_ty)?; + let right_kind = self.ty_to_primval_kind(right_ty)?; + match bin_op { + Offset if left_kind == Ptr && right_kind == usize => { + let pointee_ty = left_ty + .builtin_deref(true, ty::LvaluePreference::NoPreference) + .expect("Offset called on non-ptr type") + .ty; + let ptr = self.pointer_offset( + left.into(), + pointee_ty, + right.to_bytes()? as i64, + )?; + Ok(Some((ptr.into_inner_primval(), false))) + } + // These work on anything + Eq if left_kind == right_kind => { + let result = match (left, right) { + (PrimVal::Bytes(left), PrimVal::Bytes(right)) => left == right, + (PrimVal::Ptr(left), PrimVal::Ptr(right)) => left == right, + (PrimVal::Undef, _) | + (_, PrimVal::Undef) => return err!(ReadUndefBytes), + _ => false, + }; + Ok(Some((PrimVal::from_bool(result), false))) + } + Ne if left_kind == right_kind => { + let result = match (left, right) { + (PrimVal::Bytes(left), PrimVal::Bytes(right)) => left != right, + (PrimVal::Ptr(left), PrimVal::Ptr(right)) => left != right, + (PrimVal::Undef, _) | + (_, PrimVal::Undef) => return err!(ReadUndefBytes), + _ => true, + }; + Ok(Some((PrimVal::from_bool(result), false))) + } + // These need both pointers to be in the same allocation + Lt | Le | Gt | Ge | Sub + if left_kind == right_kind && + (left_kind == Ptr || left_kind == usize || left_kind == isize) && + left.is_ptr() && right.is_ptr() => { + let left = left.to_ptr()?; + let right = right.to_ptr()?; + if left.alloc_id == right.alloc_id { + let res = match bin_op { + Lt => left.offset < right.offset, + Le => left.offset <= right.offset, + Gt => left.offset > right.offset, + Ge => left.offset >= right.offset, + Sub => { + return self.binary_op( + Sub, + PrimVal::Bytes(left.offset as u128), + self.tcx.types.usize, + PrimVal::Bytes(right.offset as u128), + self.tcx.types.usize, + ).map(Some) + } + _ => bug!("We already established it has to be one of these operators."), + }; + Ok(Some((PrimVal::from_bool(res), false))) + } else { + // Both are pointers, but from different allocations. + err!(InvalidPointerMath) + } + } + // These work if one operand is a pointer, the other an integer + Add | BitAnd | Sub + if left_kind == right_kind && (left_kind == usize || left_kind == isize) && + left.is_ptr() && right.is_bytes() => { + // Cast to i128 is fine as we checked the kind to be ptr-sized + self.ptr_int_arithmetic( + bin_op, + left.to_ptr()?, + right.to_bytes()? 
as i128, + left_kind == isize, + ).map(Some) + } + Add | BitAnd + if left_kind == right_kind && (left_kind == usize || left_kind == isize) && + left.is_bytes() && right.is_ptr() => { + // This is a commutative operation, just swap the operands + self.ptr_int_arithmetic( + bin_op, + right.to_ptr()?, + left.to_bytes()? as i128, + left_kind == isize, + ).map(Some) + } + _ => Ok(None), + } + } + + fn ptr_int_arithmetic( + &self, + bin_op: mir::BinOp, + left: MemoryPointer, + right: i128, + signed: bool, + ) -> EvalResult<'tcx, (PrimVal, bool)> { + use rustc::mir::BinOp::*; + + fn map_to_primval((res, over): (MemoryPointer, bool)) -> (PrimVal, bool) { + (PrimVal::Ptr(res), over) + } + + Ok(match bin_op { + Sub => + // The only way this can overflow is by underflowing, so signdeness of the right operands does not matter + map_to_primval(left.overflowing_signed_offset(-right, self)), + Add if signed => + map_to_primval(left.overflowing_signed_offset(right, self)), + Add if !signed => + map_to_primval(left.overflowing_offset(right as u64, self)), + + BitAnd if !signed => { + let base_mask : u64 = !(self.memory.get(left.alloc_id)?.align - 1); + let right = right as u64; + if right & base_mask == base_mask { + // Case 1: The base address bits are all preserved, i.e., right is all-1 there + (PrimVal::Ptr(MemoryPointer::new(left.alloc_id, left.offset & right)), false) + } else if right & base_mask == 0 { + // Case 2: The base address bits are all taken away, i.e., right is all-0 there + (PrimVal::from_u128((left.offset & right) as u128), false) + } else { + return err!(ReadPointerAsBytes); + } + } + + _ => { + let msg = format!("unimplemented binary op on pointer {:?}: {:?}, {:?} ({})", bin_op, left, right, if signed { "signed" } else { "unsigned" }); + return err!(Unimplemented(msg)); + } + }) + } +} diff --git a/src/tools/miri/miri/tls.rs b/src/tools/miri/miri/tls.rs new file mode 100644 index 0000000000..e592478f6f --- /dev/null +++ b/src/tools/miri/miri/tls.rs @@ -0,0 +1,142 @@ +use rustc::{ty, mir}; + +use super::{TlsKey, TlsEntry, EvalResult, EvalErrorKind, Pointer, Memory, Evaluator, Lvalue, + StackPopCleanup, EvalContext}; + +pub trait MemoryExt<'tcx> { + fn create_tls_key(&mut self, dtor: Option>) -> TlsKey; + fn delete_tls_key(&mut self, key: TlsKey) -> EvalResult<'tcx>; + fn load_tls(&mut self, key: TlsKey) -> EvalResult<'tcx, Pointer>; + fn store_tls(&mut self, key: TlsKey, new_data: Pointer) -> EvalResult<'tcx>; + fn fetch_tls_dtor( + &mut self, + key: Option, + ) -> EvalResult<'tcx, Option<(ty::Instance<'tcx>, Pointer, TlsKey)>>; +} + +pub trait EvalContextExt<'tcx> { + fn run_tls_dtors(&mut self) -> EvalResult<'tcx>; +} + +impl<'a, 'tcx: 'a> MemoryExt<'tcx> for Memory<'a, 'tcx, Evaluator> { + fn create_tls_key(&mut self, dtor: Option>) -> TlsKey { + let new_key = self.data.next_thread_local; + self.data.next_thread_local += 1; + self.data.thread_local.insert( + new_key, + TlsEntry { + data: Pointer::null(), + dtor, + }, + ); + trace!("New TLS key allocated: {} with dtor {:?}", new_key, dtor); + return new_key; + } + + fn delete_tls_key(&mut self, key: TlsKey) -> EvalResult<'tcx> { + return match self.data.thread_local.remove(&key) { + Some(_) => { + trace!("TLS key {} removed", key); + Ok(()) + } + None => err!(TlsOutOfBounds), + }; + } + + fn load_tls(&mut self, key: TlsKey) -> EvalResult<'tcx, Pointer> { + return match self.data.thread_local.get(&key) { + Some(&TlsEntry { data, .. 
}) => { + trace!("TLS key {} loaded: {:?}", key, data); + Ok(data) + } + None => err!(TlsOutOfBounds), + }; + } + + fn store_tls(&mut self, key: TlsKey, new_data: Pointer) -> EvalResult<'tcx> { + return match self.data.thread_local.get_mut(&key) { + Some(&mut TlsEntry { ref mut data, .. }) => { + trace!("TLS key {} stored: {:?}", key, new_data); + *data = new_data; + Ok(()) + } + None => err!(TlsOutOfBounds), + }; + } + + /// Returns a dtor, its argument and its index, if one is supposed to run + /// + /// An optional destructor function may be associated with each key value. + /// At thread exit, if a key value has a non-NULL destructor pointer, + /// and the thread has a non-NULL value associated with that key, + /// the value of the key is set to NULL, and then the function pointed + /// to is called with the previously associated value as its sole argument. + /// The order of destructor calls is unspecified if more than one destructor + /// exists for a thread when it exits. + /// + /// If, after all the destructors have been called for all non-NULL values + /// with associated destructors, there are still some non-NULL values with + /// associated destructors, then the process is repeated. + /// If, after at least {PTHREAD_DESTRUCTOR_ITERATIONS} iterations of destructor + /// calls for outstanding non-NULL values, there are still some non-NULL values + /// with associated destructors, implementations may stop calling destructors, + /// or they may continue calling destructors until no non-NULL values with + /// associated destructors exist, even though this might result in an infinite loop. + fn fetch_tls_dtor( + &mut self, + key: Option, + ) -> EvalResult<'tcx, Option<(ty::Instance<'tcx>, Pointer, TlsKey)>> { + use std::collections::Bound::*; + let start = match key { + Some(key) => Excluded(key), + None => Unbounded, + }; + for (&key, &mut TlsEntry { ref mut data, dtor }) in + self.data.thread_local.range_mut((start, Unbounded)) + { + if !data.is_null()? { + if let Some(dtor) = dtor { + let ret = Some((dtor, *data, key)); + *data = Pointer::null(); + return Ok(ret); + } + } + } + return Ok(None); + } +} + +impl<'a, 'tcx: 'a> EvalContextExt<'tcx> for EvalContext<'a, 'tcx, Evaluator> { + fn run_tls_dtors(&mut self) -> EvalResult<'tcx> { + let mut dtor = self.memory.fetch_tls_dtor(None)?; + // FIXME: replace loop by some structure that works with stepping + while let Some((instance, ptr, key)) = dtor { + trace!("Running TLS dtor {:?} on {:?}", instance, ptr); + // TODO: Potentially, this has to support all the other possible instances? + // See eval_fn_call in interpret/terminator/mod.rs + let mir = self.load_mir(instance.def)?; + self.push_stack_frame( + instance, + mir.span, + mir, + Lvalue::undef(), + StackPopCleanup::None, + )?; + let arg_local = self.frame().mir.args_iter().next().ok_or( + EvalErrorKind::AbiViolation("TLS dtor does not take enough arguments.".to_owned()), + )?; + let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; + let ty = self.tcx.mk_mut_ptr(self.tcx.types.u8); + self.write_ptr(dest, ptr, ty)?; + + // step until out of stackframes + while self.step()? {} + + dtor = match self.memory.fetch_tls_dtor(Some(key))? 
{ + dtor @ Some(_) => dtor, + None => self.memory.fetch_tls_dtor(None)?, + }; + } + Ok(()) + } +} diff --git a/src/tools/miri/rustc_tests/Cargo.lock b/src/tools/miri/rustc_tests/Cargo.lock new file mode 100644 index 0000000000..a1e273a96b --- /dev/null +++ b/src/tools/miri/rustc_tests/Cargo.lock @@ -0,0 +1,217 @@ +[root] +name = "rustc_tests" +version = "0.1.0" +dependencies = [ + "miri 0.1.0", +] + +[[package]] +name = "aho-corasick" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "backtrace" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "backtrace-sys 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "dbghelp-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.30 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-demangle 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "backtrace-sys" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "gcc 0.3.53 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.30 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "byteorder" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cfg-if" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "dbghelp-sys" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "env_logger" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "gcc" +version = "0.3.53" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "kernel32-sys" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "lazy_static" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "libc" +version = "0.2.30" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "log" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "log_settings" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "memchr" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.30 
(registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "miri" +version = "0.1.0" +dependencies = [ + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "log_settings 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_miri 0.1.0", +] + +[[package]] +name = "regex" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "thread_local 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex-syntax" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rustc-demangle" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rustc_miri" +version = "0.1.0" +dependencies = [ + "backtrace 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "log_settings 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "thread_local" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unreachable" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "utf8-ranges" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi-build" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[metadata] +"checksum aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "500909c4f87a9e52355b26626d890833e9e1d53ac566db76c36faa984b889699" +"checksum backtrace 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "99f2ce94e22b8e664d95c57fff45b98a966c2252b60691d0b7aeeccd88d70983" +"checksum backtrace-sys 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "afccc5772ba333abccdf60d55200fa3406f8c59dcf54d5f7998c9107d3799c7c" +"checksum byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff81738b726f5d099632ceaffe7fb65b90212e8dce59d518729e7e8634032d3d" +"checksum cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c819a1287eb618df47cc647173c5c4c66ba19d888a6e50d605672aed3140de" 
+"checksum dbghelp-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "97590ba53bcb8ac28279161ca943a924d1fd4a8fb3fa63302591647c4fc5b850" +"checksum env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3ddf21e73e016298f5cb37d6ef8e8da8e39f91f9ec8b0df44b7deb16a9f8cd5b" +"checksum gcc 0.3.53 (registry+https://github.com/rust-lang/crates.io-index)" = "e8310f7e9c890398b0e80e301c4f474e9918d2b27fca8f48486ca775fa9ffc5a" +"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" +"checksum lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3b37545ab726dd833ec6420aaba8231c5b320814b9029ad585555d2a03e94fbf" +"checksum libc 0.2.30 (registry+https://github.com/rust-lang/crates.io-index)" = "2370ca07ec338939e356443dac2296f581453c35fe1e3a3ed06023c49435f915" +"checksum log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b" +"checksum log_settings 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3d382732ea0fbc09790c4899db3255bdea0fc78b54bf234bd18a63bb603915b6" +"checksum memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1dbccc0e46f1ea47b9f17e6d67c5a96bd27030519c519c9c91327e31275a47b4" +"checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b" +"checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db" +"checksum rustc-demangle 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "aee45432acc62f7b9a108cc054142dac51f979e69e71ddce7d6fc7adf29e817e" +"checksum thread_local 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1697c4b57aeeb7a536b647165a2825faddffb1d3bad386d507709bd51a90bb14" +"checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56" +"checksum utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "662fab6525a98beff2921d7f61a39e7d59e0b425ebc7d0d9e66d316e55124122" +"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" +"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" +"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" diff --git a/src/tools/miri/rustc_tests/Cargo.toml b/src/tools/miri/rustc_tests/Cargo.toml new file mode 100644 index 0000000000..736f062976 --- /dev/null +++ b/src/tools/miri/rustc_tests/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "rustc_tests" +version = "0.1.0" +authors = ["Oliver Schneider "] + +[dependencies] +miri = { path = ".." 
} diff --git a/src/tools/miri/rustc_tests/src/main.rs b/src/tools/miri/rustc_tests/src/main.rs new file mode 100644 index 0000000000..819721c1cd --- /dev/null +++ b/src/tools/miri/rustc_tests/src/main.rs @@ -0,0 +1,292 @@ +#![feature(rustc_private, i128_type)] +extern crate miri; +extern crate getopts; +extern crate rustc; +extern crate rustc_driver; +extern crate rustc_errors; +extern crate syntax; + +use std::path::{PathBuf, Path}; +use std::io::Write; +use std::sync::{Mutex, Arc}; +use std::io; + + +use rustc::session::Session; +use rustc::middle::cstore::CrateStore; +use rustc_driver::{Compilation, CompilerCalls, RustcDefaultCalls}; +use rustc_driver::driver::{CompileState, CompileController}; +use rustc::session::config::{self, Input, ErrorOutputType}; +use rustc::hir::{self, itemlikevisit}; +use rustc::ty::TyCtxt; +use syntax::ast; + +struct MiriCompilerCalls { + default: RustcDefaultCalls, + /// whether we are building for the host + host_target: bool, +} + +impl<'a> CompilerCalls<'a> for MiriCompilerCalls { + fn early_callback( + &mut self, + matches: &getopts::Matches, + sopts: &config::Options, + cfg: &ast::CrateConfig, + descriptions: &rustc_errors::registry::Registry, + output: ErrorOutputType + ) -> Compilation { + self.default.early_callback(matches, sopts, cfg, descriptions, output) + } + fn no_input( + &mut self, + matches: &getopts::Matches, + sopts: &config::Options, + cfg: &ast::CrateConfig, + odir: &Option, + ofile: &Option, + descriptions: &rustc_errors::registry::Registry + ) -> Option<(Input, Option)> { + self.default.no_input(matches, sopts, cfg, odir, ofile, descriptions) + } + fn late_callback( + &mut self, + matches: &getopts::Matches, + sess: &Session, + cstore: &CrateStore, + input: &Input, + odir: &Option, + ofile: &Option + ) -> Compilation { + self.default.late_callback(matches, sess, cstore, input, odir, ofile) + } + fn build_controller(&mut self, sess: &Session, matches: &getopts::Matches) -> CompileController<'a> { + let mut control = self.default.build_controller(sess, matches); + control.after_hir_lowering.callback = Box::new(after_hir_lowering); + control.after_analysis.callback = Box::new(after_analysis); + if !self.host_target { + // only fully compile targets on the host + control.after_analysis.stop = Compilation::Stop; + } + control + } +} + +fn after_hir_lowering(state: &mut CompileState) { + let attr = (String::from("miri"), syntax::feature_gate::AttributeType::Whitelisted); + state.session.plugin_attributes.borrow_mut().push(attr); +} + +fn after_analysis<'a, 'tcx>(state: &mut CompileState<'a, 'tcx>) { + state.session.abort_if_errors(); + + let tcx = state.tcx.unwrap(); + let limits = Default::default(); + + if std::env::args().any(|arg| arg == "--test") { + struct Visitor<'a, 'tcx: 'a>(miri::ResourceLimits, TyCtxt<'a, 'tcx, 'tcx>, &'a CompileState<'a, 'tcx>); + impl<'a, 'tcx: 'a, 'hir> itemlikevisit::ItemLikeVisitor<'hir> for Visitor<'a, 'tcx> { + fn visit_item(&mut self, i: &'hir hir::Item) { + if let hir::Item_::ItemFn(_, _, _, _, _, body_id) = i.node { + if i.attrs.iter().any(|attr| attr.name().map_or(false, |n| n == "test")) { + let did = self.1.hir.body_owner_def_id(body_id); + println!("running test: {}", self.1.hir.def_path(did).to_string(self.1)); + miri::eval_main(self.1, did, None, self.0); + self.2.session.abort_if_errors(); + } + } + } + fn visit_trait_item(&mut self, _trait_item: &'hir hir::TraitItem) {} + fn visit_impl_item(&mut self, _impl_item: &'hir hir::ImplItem) {} + } + state.hir_crate.unwrap().visit_all_item_likes(&mut 
Visitor(limits, tcx, state)); + } else if let Some((entry_node_id, _)) = *state.session.entry_fn.borrow() { + let entry_def_id = tcx.hir.local_def_id(entry_node_id); + let start_wrapper = tcx.lang_items().start_fn().and_then(|start_fn| + if tcx.is_mir_available(start_fn) { Some(start_fn) } else { None }); + miri::eval_main(tcx, entry_def_id, start_wrapper, limits); + + state.session.abort_if_errors(); + } else { + println!("no main function found, assuming auxiliary build"); + } +} + +fn main() { + let path = option_env!("MIRI_RUSTC_TEST") + .map(String::from) + .unwrap_or_else(|| { + std::env::var("MIRI_RUSTC_TEST") + .expect("need to set MIRI_RUSTC_TEST to path of rustc tests") + }); + + let mut mir_not_found = Vec::new(); + let mut crate_not_found = Vec::new(); + let mut success = 0; + let mut failed = Vec::new(); + let mut c_abi_fns = Vec::new(); + let mut abi = Vec::new(); + let mut unsupported = Vec::new(); + let mut unimplemented_intrinsic = Vec::new(); + let mut limits = Vec::new(); + let mut files: Vec<_> = std::fs::read_dir(path).unwrap().collect(); + while let Some(file) = files.pop() { + let file = file.unwrap(); + let path = file.path(); + if file.metadata().unwrap().is_dir() { + if !path.to_str().unwrap().ends_with("auxiliary") { + // add subdirs recursively + files.extend(std::fs::read_dir(path).unwrap()); + } + continue; + } + if !file.metadata().unwrap().is_file() || !path.to_str().unwrap().ends_with(".rs") { + continue; + } + let stderr = std::io::stderr(); + write!(stderr.lock(), "test [miri-pass] {} ... ", path.display()).unwrap(); + let mut host_target = false; + let mut args: Vec = std::env::args().filter(|arg| { + if arg == "--miri_host_target" { + host_target = true; + false // remove the flag, rustc doesn't know it + } else { + true + } + }).collect(); + // file to process + args.push(path.display().to_string()); + + let sysroot_flag = String::from("--sysroot"); + if !args.contains(&sysroot_flag) { + args.push(sysroot_flag); + args.push(Path::new(&std::env::var("HOME").unwrap()).join(".xargo").join("HOST").display().to_string()); + } + + args.push("-Zmir-opt-level=3".to_owned()); + // for auxilary builds in unit tests + args.push("-Zalways-encode-mir".to_owned()); + + // A threadsafe buffer for writing. 
+ #[derive(Default, Clone)] + struct BufWriter(Arc>>); + + impl Write for BufWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.0.lock().unwrap().write(buf) + } + fn flush(&mut self) -> io::Result<()> { + self.0.lock().unwrap().flush() + } + } + let buf = BufWriter::default(); + let output = buf.clone(); + let result = std::panic::catch_unwind(|| { + rustc_driver::run_compiler(&args, &mut MiriCompilerCalls { + default: RustcDefaultCalls, + host_target, + }, None, Some(Box::new(buf))); + }); + + match result { + Ok(()) => { + success += 1; + writeln!(stderr.lock(), "ok").unwrap() + }, + Err(_) => { + let output = output.0.lock().unwrap(); + let output_err = std::str::from_utf8(&output).unwrap(); + if let Some(text) = output_err.splitn(2, "no mir for `").nth(1) { + let end = text.find('`').unwrap(); + mir_not_found.push(text[..end].to_string()); + writeln!(stderr.lock(), "NO MIR FOR `{}`", &text[..end]).unwrap(); + } else if let Some(text) = output_err.splitn(2, "can't find crate for `").nth(1) { + let end = text.find('`').unwrap(); + crate_not_found.push(text[..end].to_string()); + writeln!(stderr.lock(), "CAN'T FIND CRATE FOR `{}`", &text[..end]).unwrap(); + } else { + for text in output_err.split("error: ").skip(1) { + let end = text.find('\n').unwrap_or(text.len()); + let c_abi = "can't call C ABI function: "; + let unimplemented_intrinsic_s = "unimplemented intrinsic: "; + let unsupported_s = "miri does not support "; + let abi_s = "can't handle function with "; + let limit_s = "reached the configured maximum "; + if text.starts_with(c_abi) { + c_abi_fns.push(text[c_abi.len()..end].to_string()); + } else if text.starts_with(unimplemented_intrinsic_s) { + unimplemented_intrinsic.push(text[unimplemented_intrinsic_s.len()..end].to_string()); + } else if text.starts_with(unsupported_s) { + unsupported.push(text[unsupported_s.len()..end].to_string()); + } else if text.starts_with(abi_s) { + abi.push(text[abi_s.len()..end].to_string()); + } else if text.starts_with(limit_s) { + limits.push(text[limit_s.len()..end].to_string()); + } else if text.find("aborting").is_none() { + failed.push(text[..end].to_string()); + } + } + writeln!(stderr.lock(), "stderr: \n {}", output_err).unwrap(); + } + } + } + } + let stderr = std::io::stderr(); + let mut stderr = stderr.lock(); + writeln!(stderr, "{} success, {} no mir, {} crate not found, {} failed, \ + {} C fn, {} ABI, {} unsupported, {} intrinsic", + success, mir_not_found.len(), crate_not_found.len(), failed.len(), + c_abi_fns.len(), abi.len(), unsupported.len(), unimplemented_intrinsic.len()).unwrap(); + writeln!(stderr, "# The \"other reasons\" errors").unwrap(); + writeln!(stderr, "(sorted, deduplicated)").unwrap(); + print_vec(&mut stderr, failed); + + writeln!(stderr, "# can't call C ABI function").unwrap(); + print_vec(&mut stderr, c_abi_fns); + + writeln!(stderr, "# unsupported ABI").unwrap(); + print_vec(&mut stderr, abi); + + writeln!(stderr, "# unsupported").unwrap(); + print_vec(&mut stderr, unsupported); + + writeln!(stderr, "# unimplemented intrinsics").unwrap(); + print_vec(&mut stderr, unimplemented_intrinsic); + + writeln!(stderr, "# mir not found").unwrap(); + print_vec(&mut stderr, mir_not_found); + + writeln!(stderr, "# crate not found").unwrap(); + print_vec(&mut stderr, crate_not_found); +} + +fn print_vec(stderr: &mut W, v: Vec) { + writeln!(stderr, "```").unwrap(); + for (n, s) in vec_to_hist(v).into_iter().rev() { + writeln!(stderr, "{:4} {}", n, s).unwrap(); + } + writeln!(stderr, "```").unwrap(); +} + +fn 
vec_to_hist(mut v: Vec) -> Vec<(usize, T)> { + v.sort(); + let mut v = v.into_iter(); + let mut result = Vec::new(); + let mut current = v.next(); + 'outer: while let Some(current_val) = current { + let mut n = 1; + for next in &mut v { + if next == current_val { + n += 1; + } else { + result.push((n, current_val)); + current = Some(next); + continue 'outer; + } + } + result.push((n, current_val)); + break; + } + result.sort(); + result +} diff --git a/src/tools/miri/src/librustc_mir/Cargo.toml b/src/tools/miri/src/librustc_mir/Cargo.toml new file mode 100644 index 0000000000..c72de828c8 --- /dev/null +++ b/src/tools/miri/src/librustc_mir/Cargo.toml @@ -0,0 +1,19 @@ +[package] +authors = ["Scott Olson "] +description = "An experimental interpreter for Rust MIR." +license = "MIT/Apache-2.0" +name = "rustc_miri" +repository = "https://github.com/solson/miri" +version = "0.1.0" +workspace = "../.." + +[lib] +path = "lib.rs" + +[dependencies] +byteorder = { version = "1.1", features = ["i128"]} +log = "0.3.6" +log_settings = "0.1.1" +lazy_static = "0.2.8" +regex = "0.2.2" +backtrace = "0.3.3" diff --git a/src/tools/miri/src/librustc_mir/interpret/cast.rs b/src/tools/miri/src/librustc_mir/interpret/cast.rs new file mode 100644 index 0000000000..5ae7c9da31 --- /dev/null +++ b/src/tools/miri/src/librustc_mir/interpret/cast.rs @@ -0,0 +1,122 @@ +use rustc::ty::{self, Ty}; +use syntax::ast::{FloatTy, IntTy, UintTy}; + +use super::{PrimVal, EvalContext, EvalResult, MemoryPointer, PointerArithmetic, Machine}; + +impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { + pub(super) fn cast_primval( + &self, + val: PrimVal, + src_ty: Ty<'tcx>, + dest_ty: Ty<'tcx>, + ) -> EvalResult<'tcx, PrimVal> { + trace!("Casting {:?}: {:?} to {:?}", val, src_ty, dest_ty); + let src_kind = self.ty_to_primval_kind(src_ty)?; + + match val { + PrimVal::Undef => Ok(PrimVal::Undef), + PrimVal::Ptr(ptr) => self.cast_from_ptr(ptr, dest_ty), + val @ PrimVal::Bytes(_) => { + use super::PrimValKind::*; + match src_kind { + F32 => self.cast_from_float(val.to_f32()? as f64, dest_ty), + F64 => self.cast_from_float(val.to_f64()?, dest_ty), + + I8 | I16 | I32 | I64 | I128 => { + self.cast_from_signed_int(val.to_i128()?, dest_ty) + } + + Bool | Char | U8 | U16 | U32 | U64 | U128 | FnPtr | Ptr => { + self.cast_from_int(val.to_u128()?, dest_ty, false) + } + } + } + } + } + + fn cast_from_signed_int(&self, val: i128, ty: ty::Ty<'tcx>) -> EvalResult<'tcx, PrimVal> { + self.cast_from_int(val as u128, ty, val < 0) + } + + fn int_to_int(&self, v: i128, ty: IntTy) -> u128 { + match ty { + IntTy::I8 => v as i8 as u128, + IntTy::I16 => v as i16 as u128, + IntTy::I32 => v as i32 as u128, + IntTy::I64 => v as i64 as u128, + IntTy::I128 => v as u128, + IntTy::Is => { + let ty = self.tcx.sess.target.isize_ty; + self.int_to_int(v, ty) + } + } + } + fn int_to_uint(&self, v: u128, ty: UintTy) -> u128 { + match ty { + UintTy::U8 => v as u8 as u128, + UintTy::U16 => v as u16 as u128, + UintTy::U32 => v as u32 as u128, + UintTy::U64 => v as u64 as u128, + UintTy::U128 => v, + UintTy::Us => { + let ty = self.tcx.sess.target.usize_ty; + self.int_to_uint(v, ty) + } + } + } + + fn cast_from_int( + &self, + v: u128, + ty: ty::Ty<'tcx>, + negative: bool, + ) -> EvalResult<'tcx, PrimVal> { + trace!("cast_from_int: {}, {}, {}", v, ty, negative); + use rustc::ty::TypeVariants::*; + match ty.sty { + // Casts to bool are not permitted by rustc, no need to handle them here. 
+ TyInt(ty) => Ok(PrimVal::Bytes(self.int_to_int(v as i128, ty))), + TyUint(ty) => Ok(PrimVal::Bytes(self.int_to_uint(v, ty))), + + TyFloat(FloatTy::F64) if negative => Ok(PrimVal::from_f64(v as i128 as f64)), + TyFloat(FloatTy::F64) => Ok(PrimVal::from_f64(v as f64)), + TyFloat(FloatTy::F32) if negative => Ok(PrimVal::from_f32(v as i128 as f32)), + TyFloat(FloatTy::F32) => Ok(PrimVal::from_f32(v as f32)), + + TyChar if v as u8 as u128 == v => Ok(PrimVal::Bytes(v)), + TyChar => err!(InvalidChar(v)), + + // No alignment check needed for raw pointers. But we have to truncate to target ptr size. + TyRawPtr(_) => Ok(PrimVal::Bytes(self.memory.truncate_to_ptr(v).0 as u128)), + + _ => err!(Unimplemented(format!("int to {:?} cast", ty))), + } + } + + fn cast_from_float(&self, val: f64, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> { + use rustc::ty::TypeVariants::*; + match ty.sty { + // Casting negative floats to unsigned integers yields zero. + TyUint(_) if val < 0.0 => self.cast_from_int(0, ty, false), + TyInt(_) if val < 0.0 => self.cast_from_int(val as i128 as u128, ty, true), + + TyInt(_) | ty::TyUint(_) => self.cast_from_int(val as u128, ty, false), + + TyFloat(FloatTy::F64) => Ok(PrimVal::from_f64(val)), + TyFloat(FloatTy::F32) => Ok(PrimVal::from_f32(val as f32)), + _ => err!(Unimplemented(format!("float to {:?} cast", ty))), + } + } + + fn cast_from_ptr(&self, ptr: MemoryPointer, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> { + use rustc::ty::TypeVariants::*; + match ty.sty { + // Casting to a reference or fn pointer is not permitted by rustc, no need to support it here. + TyRawPtr(_) | + TyInt(IntTy::Is) | + TyUint(UintTy::Us) => Ok(PrimVal::Ptr(ptr)), + TyInt(_) | TyUint(_) => err!(ReadPointerAsBytes), + _ => err!(Unimplemented(format!("ptr to {:?} cast", ty))), + } + } +} diff --git a/src/tools/miri/src/librustc_mir/interpret/const_eval.rs b/src/tools/miri/src/librustc_mir/interpret/const_eval.rs new file mode 100644 index 0000000000..7fa28dccba --- /dev/null +++ b/src/tools/miri/src/librustc_mir/interpret/const_eval.rs @@ -0,0 +1,258 @@ +use rustc::traits::Reveal; +use rustc::ty::{self, TyCtxt, Ty, Instance, layout}; +use rustc::mir; + +use syntax::ast::Mutability; +use syntax::codemap::Span; + +use super::{EvalResult, EvalError, EvalErrorKind, GlobalId, Lvalue, Value, PrimVal, EvalContext, + StackPopCleanup, PtrAndAlign, MemoryKind, ValTy}; + +use rustc_const_math::ConstInt; + +use std::fmt; +use std::error::Error; + +pub fn eval_body_as_primval<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + instance: Instance<'tcx>, +) -> EvalResult<'tcx, (PrimVal, Ty<'tcx>)> { + let limits = super::ResourceLimits::default(); + let mut ecx = EvalContext::::new(tcx, limits, (), ()); + let cid = GlobalId { + instance, + promoted: None, + }; + if ecx.tcx.has_attr(instance.def_id(), "linkage") { + return Err(ConstEvalError::NotConst("extern global".to_string()).into()); + } + + let mir = ecx.load_mir(instance.def)?; + if !ecx.globals.contains_key(&cid) { + let size = ecx.type_size_with_substs(mir.return_ty, instance.substs)? 
+ .expect("unsized global"); + let align = ecx.type_align_with_substs(mir.return_ty, instance.substs)?; + let ptr = ecx.memory.allocate( + size, + align, + MemoryKind::UninitializedStatic, + )?; + let aligned = !ecx.is_packed(mir.return_ty)?; + ecx.globals.insert( + cid, + PtrAndAlign { + ptr: ptr.into(), + aligned, + }, + ); + let mutable = !mir.return_ty.is_freeze( + ecx.tcx, + ty::ParamEnv::empty(Reveal::All), + mir.span, + ); + let mutability = if mutable { + Mutability::Mutable + } else { + Mutability::Immutable + }; + let cleanup = StackPopCleanup::MarkStatic(mutability); + let name = ty::tls::with(|tcx| tcx.item_path_str(instance.def_id())); + trace!("const_eval: pushing stack frame for global: {}", name); + ecx.push_stack_frame( + instance, + mir.span, + mir, + Lvalue::from_ptr(ptr), + cleanup, + )?; + + while ecx.step()? {} + } + let value = Value::ByRef(*ecx.globals.get(&cid).expect("global not cached")); + let valty = ValTy { + value, + ty: mir.return_ty, + }; + Ok((ecx.value_to_primval(valty)?, mir.return_ty)) +} + +pub fn eval_body_as_integer<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + instance: Instance<'tcx>, +) -> EvalResult<'tcx, ConstInt> { + let (prim, ty) = eval_body_as_primval(tcx, instance)?; + let prim = prim.to_bytes()?; + use syntax::ast::{IntTy, UintTy}; + use rustc::ty::TypeVariants::*; + use rustc_const_math::{ConstIsize, ConstUsize}; + Ok(match ty.sty { + TyInt(IntTy::I8) => ConstInt::I8(prim as i128 as i8), + TyInt(IntTy::I16) => ConstInt::I16(prim as i128 as i16), + TyInt(IntTy::I32) => ConstInt::I32(prim as i128 as i32), + TyInt(IntTy::I64) => ConstInt::I64(prim as i128 as i64), + TyInt(IntTy::I128) => ConstInt::I128(prim as i128), + TyInt(IntTy::Is) => ConstInt::Isize( + ConstIsize::new(prim as i128 as i64, tcx.sess.target.isize_ty) + .expect("miri should already have errored"), + ), + TyUint(UintTy::U8) => ConstInt::U8(prim as u8), + TyUint(UintTy::U16) => ConstInt::U16(prim as u16), + TyUint(UintTy::U32) => ConstInt::U32(prim as u32), + TyUint(UintTy::U64) => ConstInt::U64(prim as u64), + TyUint(UintTy::U128) => ConstInt::U128(prim), + TyUint(UintTy::Us) => ConstInt::Usize( + ConstUsize::new(prim as u64, tcx.sess.target.usize_ty) + .expect("miri should already have errored"), + ), + _ => { + return Err( + ConstEvalError::NeedsRfc( + "evaluating anything other than isize/usize during typeck".to_string(), + ).into(), + ) + } + }) +} + +struct CompileTimeFunctionEvaluator; + +impl<'tcx> Into> for ConstEvalError { + fn into(self) -> EvalError<'tcx> { + EvalErrorKind::MachineError(Box::new(self)).into() + } +} + +#[derive(Clone, Debug)] +enum ConstEvalError { + NeedsRfc(String), + NotConst(String), +} + +impl fmt::Display for ConstEvalError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::ConstEvalError::*; + match *self { + NeedsRfc(ref msg) => { + write!( + f, + "\"{}\" needs an rfc before being allowed inside constants", + msg + ) + } + NotConst(ref msg) => write!(f, "Cannot evaluate within constants: \"{}\"", msg), + } + } +} + +impl Error for ConstEvalError { + fn description(&self) -> &str { + use self::ConstEvalError::*; + match *self { + NeedsRfc(_) => "this feature needs an rfc before being allowed inside constants", + NotConst(_) => "this feature is not compatible with constant evaluation", + } + } + + fn cause(&self) -> Option<&Error> { + None + } +} + +impl<'tcx> super::Machine<'tcx> for CompileTimeFunctionEvaluator { + type Data = (); + type MemoryData = (); + type MemoryKinds = !; + fn eval_fn_call<'a>( + ecx: &mut 
EvalContext<'a, 'tcx, Self>, + instance: ty::Instance<'tcx>, + destination: Option<(Lvalue, mir::BasicBlock)>, + _args: &[ValTy<'tcx>], + span: Span, + _sig: ty::FnSig<'tcx>, + ) -> EvalResult<'tcx, bool> { + if !ecx.tcx.is_const_fn(instance.def_id()) { + return Err( + ConstEvalError::NotConst(format!("calling non-const fn `{}`", instance)).into(), + ); + } + let mir = match ecx.load_mir(instance.def) { + Ok(mir) => mir, + Err(EvalError { kind: EvalErrorKind::NoMirFor(path), .. }) => { + // some simple things like `malloc` might get accepted in the future + return Err( + ConstEvalError::NeedsRfc(format!("calling extern function `{}`", path)) + .into(), + ); + } + Err(other) => return Err(other), + }; + let (return_lvalue, return_to_block) = match destination { + Some((lvalue, block)) => (lvalue, StackPopCleanup::Goto(block)), + None => (Lvalue::undef(), StackPopCleanup::None), + }; + + ecx.push_stack_frame( + instance, + span, + mir, + return_lvalue, + return_to_block, + )?; + + Ok(false) + } + + fn call_intrinsic<'a>( + _ecx: &mut EvalContext<'a, 'tcx, Self>, + _instance: ty::Instance<'tcx>, + _args: &[ValTy<'tcx>], + _dest: Lvalue, + _dest_ty: Ty<'tcx>, + _dest_layout: &'tcx layout::Layout, + _target: mir::BasicBlock, + ) -> EvalResult<'tcx> { + Err( + ConstEvalError::NeedsRfc("calling intrinsics".to_string()).into(), + ) + } + + fn try_ptr_op<'a>( + _ecx: &EvalContext<'a, 'tcx, Self>, + _bin_op: mir::BinOp, + left: PrimVal, + _left_ty: Ty<'tcx>, + right: PrimVal, + _right_ty: Ty<'tcx>, + ) -> EvalResult<'tcx, Option<(PrimVal, bool)>> { + if left.is_bytes() && right.is_bytes() { + Ok(None) + } else { + Err( + ConstEvalError::NeedsRfc("Pointer arithmetic or comparison".to_string()).into(), + ) + } + } + + fn mark_static_initialized(m: !) -> EvalResult<'tcx> { + m + } + + fn box_alloc<'a>( + _ecx: &mut EvalContext<'a, 'tcx, Self>, + _ty: ty::Ty<'tcx>, + ) -> EvalResult<'tcx, PrimVal> { + Err( + ConstEvalError::NeedsRfc("Heap allocations via `box` keyword".to_string()).into(), + ) + } + + fn global_item_with_linkage<'a>( + _ecx: &mut EvalContext<'a, 'tcx, Self>, + _instance: ty::Instance<'tcx>, + _mutability: Mutability, + ) -> EvalResult<'tcx> { + Err( + ConstEvalError::NotConst("statics with `linkage` attribute".to_string()).into(), + ) + } +} diff --git a/src/tools/miri/src/librustc_mir/interpret/error.rs b/src/tools/miri/src/librustc_mir/interpret/error.rs new file mode 100644 index 0000000000..96911c10cc --- /dev/null +++ b/src/tools/miri/src/librustc_mir/interpret/error.rs @@ -0,0 +1,313 @@ +use std::error::Error; +use std::{fmt, env}; + +use rustc::mir; +use rustc::ty::{FnSig, Ty, layout}; + +use super::{ + MemoryPointer, Lock, AccessKind +}; + +use rustc_const_math::ConstMathErr; +use syntax::codemap::Span; +use backtrace::Backtrace; + +#[derive(Debug)] +pub struct EvalError<'tcx> { + pub kind: EvalErrorKind<'tcx>, + pub backtrace: Option, +} + +impl<'tcx> From> for EvalError<'tcx> { + fn from(kind: EvalErrorKind<'tcx>) -> Self { + let backtrace = match env::var("RUST_BACKTRACE") { + Ok(ref val) if !val.is_empty() => Some(Backtrace::new_unresolved()), + _ => None + }; + EvalError { + kind, + backtrace, + } + } +} + +#[derive(Debug)] +pub enum EvalErrorKind<'tcx> { + /// This variant is used by machines to signal their own errors that do not + /// match an existing variant + MachineError(Box), + FunctionPointerTyMismatch(FnSig<'tcx>, FnSig<'tcx>), + NoMirFor(String), + UnterminatedCString(MemoryPointer), + DanglingPointerDeref, + DoubleFree, + InvalidMemoryAccess, + 
InvalidFunctionPointer, + InvalidBool, + InvalidDiscriminant, + PointerOutOfBounds { + ptr: MemoryPointer, + access: bool, + allocation_size: u64, + }, + InvalidNullPointerUsage, + ReadPointerAsBytes, + ReadBytesAsPointer, + InvalidPointerMath, + ReadUndefBytes, + DeadLocal, + InvalidBoolOp(mir::BinOp), + Unimplemented(String), + DerefFunctionPointer, + ExecuteMemory, + ArrayIndexOutOfBounds(Span, u64, u64), + Math(Span, ConstMathErr), + Intrinsic(String), + OverflowingMath, + InvalidChar(u128), + OutOfMemory { + allocation_size: u64, + memory_size: u64, + memory_usage: u64, + }, + ExecutionTimeLimitReached, + StackFrameLimitReached, + OutOfTls, + TlsOutOfBounds, + AbiViolation(String), + AlignmentCheckFailed { + required: u64, + has: u64, + }, + MemoryLockViolation { + ptr: MemoryPointer, + len: u64, + frame: usize, + access: AccessKind, + lock: Lock, + }, + MemoryAcquireConflict { + ptr: MemoryPointer, + len: u64, + kind: AccessKind, + lock: Lock, + }, + InvalidMemoryLockRelease { + ptr: MemoryPointer, + len: u64, + frame: usize, + lock: Lock, + }, + DeallocatedLockedMemory { + ptr: MemoryPointer, + lock: Lock, + }, + ValidationFailure(String), + CalledClosureAsFunction, + VtableForArgumentlessMethod, + ModifiedConstantMemory, + AssumptionNotHeld, + InlineAsm, + TypeNotPrimitive(Ty<'tcx>), + ReallocatedWrongMemoryKind(String, String), + DeallocatedWrongMemoryKind(String, String), + ReallocateNonBasePtr, + DeallocateNonBasePtr, + IncorrectAllocationInformation, + Layout(layout::LayoutError<'tcx>), + HeapAllocZeroBytes, + HeapAllocNonPowerOfTwoAlignment(u64), + Unreachable, + Panic, + ReadFromReturnPointer, + PathNotFound(Vec), +} + +pub type EvalResult<'tcx, T = ()> = Result>; + +impl<'tcx> Error for EvalError<'tcx> { + fn description(&self) -> &str { + use self::EvalErrorKind::*; + match self.kind { + MachineError(ref inner) => inner.description(), + FunctionPointerTyMismatch(..) => + "tried to call a function through a function pointer of a different type", + InvalidMemoryAccess => + "tried to access memory through an invalid pointer", + DanglingPointerDeref => + "dangling pointer was dereferenced", + DoubleFree => + "tried to deallocate dangling pointer", + InvalidFunctionPointer => + "tried to use a function pointer after offsetting it", + InvalidBool => + "invalid boolean value read", + InvalidDiscriminant => + "invalid enum discriminant value read", + PointerOutOfBounds { .. } => + "pointer offset outside bounds of allocation", + InvalidNullPointerUsage => + "invalid use of NULL pointer", + MemoryLockViolation { .. } => + "memory access conflicts with lock", + MemoryAcquireConflict { .. } => + "new memory lock conflicts with existing lock", + ValidationFailure(..) => + "type validation failed", + InvalidMemoryLockRelease { .. } => + "invalid attempt to release write lock", + DeallocatedLockedMemory { .. } => + "tried to deallocate memory in conflict with a lock", + ReadPointerAsBytes => + "a raw memory access tried to access part of a pointer value as raw bytes", + ReadBytesAsPointer => + "a memory access tried to interpret some bytes as a pointer", + InvalidPointerMath => + "attempted to do invalid arithmetic on pointers that would leak base addresses, e.g. 
comparing pointers into different allocations", + ReadUndefBytes => + "attempted to read undefined bytes", + DeadLocal => + "tried to access a dead local variable", + InvalidBoolOp(_) => + "invalid boolean operation", + Unimplemented(ref msg) => msg, + DerefFunctionPointer => + "tried to dereference a function pointer", + ExecuteMemory => + "tried to treat a memory pointer as a function pointer", + ArrayIndexOutOfBounds(..) => + "array index out of bounds", + Math(..) => + "mathematical operation failed", + Intrinsic(..) => + "intrinsic failed", + OverflowingMath => + "attempted to do overflowing math", + NoMirFor(..) => + "mir not found", + InvalidChar(..) => + "tried to interpret an invalid 32-bit value as a char", + OutOfMemory{..} => + "could not allocate more memory", + ExecutionTimeLimitReached => + "reached the configured maximum execution time", + StackFrameLimitReached => + "reached the configured maximum number of stack frames", + OutOfTls => + "reached the maximum number of representable TLS keys", + TlsOutOfBounds => + "accessed an invalid (unallocated) TLS key", + AbiViolation(ref msg) => msg, + AlignmentCheckFailed{..} => + "tried to execute a misaligned read or write", + CalledClosureAsFunction => + "tried to call a closure through a function pointer", + VtableForArgumentlessMethod => + "tried to call a vtable function without arguments", + ModifiedConstantMemory => + "tried to modify constant memory", + AssumptionNotHeld => + "`assume` argument was false", + InlineAsm => + "miri does not support inline assembly", + TypeNotPrimitive(_) => + "expected primitive type, got nonprimitive", + ReallocatedWrongMemoryKind(_, _) => + "tried to reallocate memory from one kind to another", + DeallocatedWrongMemoryKind(_, _) => + "tried to deallocate memory of the wrong kind", + ReallocateNonBasePtr => + "tried to reallocate with a pointer not to the beginning of an existing object", + DeallocateNonBasePtr => + "tried to deallocate with a pointer not to the beginning of an existing object", + IncorrectAllocationInformation => + "tried to deallocate or reallocate using incorrect alignment or size", + Layout(_) => + "rustc layout computation failed", + UnterminatedCString(_) => + "attempted to get length of a null terminated string, but no null found before end of allocation", + HeapAllocZeroBytes => + "tried to re-, de- or allocate zero bytes on the heap", + HeapAllocNonPowerOfTwoAlignment(_) => + "tried to re-, de-, or allocate heap memory with alignment that is not a power of two", + Unreachable => + "entered unreachable code", + Panic => + "the evaluated program panicked", + ReadFromReturnPointer => + "tried to read from the return pointer", + EvalErrorKind::PathNotFound(_) => + "a path could not be resolved, maybe the crate is not loaded", + } + } + + fn cause(&self) -> Option<&Error> { + use self::EvalErrorKind::*; + match self.kind { + MachineError(ref inner) => Some(&**inner), + _ => None, + } + } +} + +impl<'tcx> fmt::Display for EvalError<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::EvalErrorKind::*; + match self.kind { + PointerOutOfBounds { ptr, access, allocation_size } => { + write!(f, "{} at offset {}, outside bounds of allocation {} which has size {}", + if access { "memory access" } else { "pointer computed" }, + ptr.offset, ptr.alloc_id, allocation_size) + }, + MemoryLockViolation { ptr, len, frame, access, ref lock } => { + write!(f, "{:?} access by frame {} at {:?}, size {}, is in conflict with lock {:?}", + access, frame, ptr, len, lock) 
+ } + MemoryAcquireConflict { ptr, len, kind, ref lock } => { + write!(f, "new {:?} lock at {:?}, size {}, is in conflict with lock {:?}", + kind, ptr, len, lock) + } + InvalidMemoryLockRelease { ptr, len, frame, ref lock } => { + write!(f, "frame {} tried to release memory write lock at {:?}, size {}, but cannot release lock {:?}", + frame, ptr, len, lock) + } + DeallocatedLockedMemory { ptr, ref lock } => { + write!(f, "tried to deallocate memory at {:?} in conflict with lock {:?}", + ptr, lock) + } + ValidationFailure(ref err) => { + write!(f, "type validation failed: {}", err) + } + NoMirFor(ref func) => write!(f, "no mir for `{}`", func), + FunctionPointerTyMismatch(sig, got) => + write!(f, "tried to call a function with sig {} through a function pointer of type {}", sig, got), + ArrayIndexOutOfBounds(span, len, index) => + write!(f, "index out of bounds: the len is {} but the index is {} at {:?}", len, index, span), + ReallocatedWrongMemoryKind(ref old, ref new) => + write!(f, "tried to reallocate memory from {} to {}", old, new), + DeallocatedWrongMemoryKind(ref old, ref new) => + write!(f, "tried to deallocate {} memory but gave {} as the kind", old, new), + Math(span, ref err) => + write!(f, "{:?} at {:?}", err, span), + Intrinsic(ref err) => + write!(f, "{}", err), + InvalidChar(c) => + write!(f, "tried to interpret an invalid 32-bit value as a char: {}", c), + OutOfMemory { allocation_size, memory_size, memory_usage } => + write!(f, "tried to allocate {} more bytes, but only {} bytes are free of the {} byte memory", + allocation_size, memory_size - memory_usage, memory_size), + AlignmentCheckFailed { required, has } => + write!(f, "tried to access memory with alignment {}, but alignment {} is required", + has, required), + TypeNotPrimitive(ty) => + write!(f, "expected primitive type, got {}", ty), + Layout(ref err) => + write!(f, "rustc layout computation failed: {:?}", err), + PathNotFound(ref path) => + write!(f, "Cannot find path {:?}", path), + MachineError(ref inner) => + write!(f, "machine error: {}", inner), + _ => write!(f, "{}", self.description()), + } + } +} diff --git a/src/tools/miri/src/librustc_mir/interpret/eval_context.rs b/src/tools/miri/src/librustc_mir/interpret/eval_context.rs new file mode 100644 index 0000000000..8fb63b3cb2 --- /dev/null +++ b/src/tools/miri/src/librustc_mir/interpret/eval_context.rs @@ -0,0 +1,2539 @@ +use std::collections::{HashMap, HashSet}; +use std::fmt::Write; + +use rustc::hir::def_id::DefId; +use rustc::hir::map::definitions::DefPathData; +use rustc::middle::const_val::ConstVal; +use rustc::middle::region; +use rustc::mir; +use rustc::traits::Reveal; +use rustc::ty::layout::{self, Layout, Size, Align, HasDataLayout}; +use rustc::ty::subst::{Subst, Substs, Kind}; +use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; +use rustc_data_structures::indexed_vec::Idx; +use syntax::codemap::{self, DUMMY_SP}; +use syntax::ast::Mutability; +use syntax::abi::Abi; + +use super::{EvalError, EvalResult, EvalErrorKind, GlobalId, Lvalue, LvalueExtra, Memory, + MemoryPointer, HasMemory, MemoryKind, operator, PrimVal, PrimValKind, Value, Pointer, + ValidationQuery, Machine}; + +pub struct EvalContext<'a, 'tcx: 'a, M: Machine<'tcx>> { + /// Stores data required by the `Machine` + pub machine_data: M::Data, + + /// The results of the type checker, from rustc. + pub tcx: TyCtxt<'a, 'tcx, 'tcx>, + + /// The virtual memory system. 
+    pub memory: Memory<'a, 'tcx, M>,
+
+    /// Lvalues that were suspended by the validation subsystem, and will be recovered later
+    pub(crate) suspended: HashMap<DynamicLifetime, Vec<ValidationQuery<'tcx>>>,
+
+    /// Precomputed statics, constants and promoteds.
+    pub globals: HashMap<GlobalId<'tcx>, PtrAndAlign>,
+
+    /// The virtual call stack.
+    pub(crate) stack: Vec<Frame<'tcx>>,
+
+    /// The maximum number of stack frames allowed
+    pub(crate) stack_limit: usize,
+
+    /// The maximum number of operations that may be executed.
+    /// This prevents infinite loops and huge computations from freezing up const eval.
+    /// Remove once halting problem is solved.
+    pub(crate) steps_remaining: u64,
+}
+
+/// A stack frame.
+pub struct Frame<'tcx> {
+    ////////////////////////////////////////////////////////////////////////////////
+    // Function and callsite information
+    ////////////////////////////////////////////////////////////////////////////////
+    /// The MIR for the function called on this frame.
+    pub mir: &'tcx mir::Mir<'tcx>,
+
+    /// The def_id and substs of the current function
+    pub instance: ty::Instance<'tcx>,
+
+    /// The span of the call site.
+    pub span: codemap::Span,
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Return lvalue and locals
+    ////////////////////////////////////////////////////////////////////////////////
+    /// The block to return to when returning from the current stack frame
+    pub return_to_block: StackPopCleanup,
+
+    /// The location where the result of the current stack frame should be written to.
+    pub return_lvalue: Lvalue,
+
+    /// The list of locals for this stack frame, stored in order as
+    /// `[arguments..., variables..., temporaries...]`. The locals are stored as `Option`s.
+    /// `None` represents a local that is currently dead, while a live local
+    /// can either directly contain `PrimVal` or refer to some part of an `Allocation`.
+    ///
+    /// Before being initialized, arguments are `Value::ByVal(PrimVal::Undef)` and other locals are `None`.
+    pub locals: Vec<Option<Value>>,
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Current position within the function
+    ////////////////////////////////////////////////////////////////////////////////
+    /// The block that is currently executed (or will be executed after the above call stacks
+    /// return).
+    pub block: mir::BasicBlock,
+
+    /// The index of the currently evaluated statement.
+    pub stmt: usize,
+}
+
+#[derive(Clone, Debug, Eq, PartialEq, Hash)]
+pub enum StackPopCleanup {
+    /// The stackframe existed to compute the initial value of a static/constant, make sure it
+    /// isn't modifiable afterwards in case of constants.
+ /// In case of `static mut`, mark the memory to ensure it's never marked as immutable through + /// references or deallocated + MarkStatic(Mutability), + /// A regular stackframe added due to a function call will need to get forwarded to the next + /// block + Goto(mir::BasicBlock), + /// The main function and diverging functions have nowhere to return to + None, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub struct DynamicLifetime { + pub frame: usize, + pub region: Option, // "None" indicates "until the function ends" +} + +#[derive(Copy, Clone, Debug)] +pub struct ResourceLimits { + pub memory_size: u64, + pub step_limit: u64, + pub stack_limit: usize, +} + +impl Default for ResourceLimits { + fn default() -> Self { + ResourceLimits { + memory_size: 100 * 1024 * 1024, // 100 MB + step_limit: 1_000_000, + stack_limit: 100, + } + } +} + +#[derive(Copy, Clone, Debug)] +pub struct TyAndPacked<'tcx> { + pub ty: Ty<'tcx>, + pub packed: bool, +} + +#[derive(Copy, Clone, Debug)] +pub struct ValTy<'tcx> { + pub value: Value, + pub ty: Ty<'tcx>, +} + +impl<'tcx> ::std::ops::Deref for ValTy<'tcx> { + type Target = Value; + fn deref(&self) -> &Value { + &self.value + } +} + +#[derive(Copy, Clone, Debug)] +pub struct PtrAndAlign { + pub ptr: Pointer, + /// Remember whether this lvalue is *supposed* to be aligned. + pub aligned: bool, +} + +impl PtrAndAlign { + pub fn to_ptr<'tcx>(self) -> EvalResult<'tcx, MemoryPointer> { + self.ptr.to_ptr() + } + pub fn offset<'tcx, C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> { + Ok(PtrAndAlign { + ptr: self.ptr.offset(i, cx)?, + aligned: self.aligned, + }) + } +} + +impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { + pub fn new( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + limits: ResourceLimits, + machine_data: M::Data, + memory_data: M::MemoryData, + ) -> Self { + EvalContext { + machine_data, + tcx, + memory: Memory::new(&tcx.data_layout, limits.memory_size, memory_data), + suspended: HashMap::new(), + globals: HashMap::new(), + stack: Vec::new(), + stack_limit: limits.stack_limit, + steps_remaining: limits.step_limit, + } + } + + pub fn alloc_ptr(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, MemoryPointer> { + let substs = self.substs(); + self.alloc_ptr_with_substs(ty, substs) + } + + pub fn alloc_ptr_with_substs( + &mut self, + ty: Ty<'tcx>, + substs: &'tcx Substs<'tcx>, + ) -> EvalResult<'tcx, MemoryPointer> { + let size = self.type_size_with_substs(ty, substs)?.expect( + "cannot alloc memory for unsized type", + ); + let align = self.type_align_with_substs(ty, substs)?; + self.memory.allocate(size, align, MemoryKind::Stack) + } + + pub fn memory(&self) -> &Memory<'a, 'tcx, M> { + &self.memory + } + + pub fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M> { + &mut self.memory + } + + pub fn stack(&self) -> &[Frame<'tcx>] { + &self.stack + } + + #[inline] + pub fn cur_frame(&self) -> usize { + assert!(self.stack.len() > 0); + self.stack.len() - 1 + } + + pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> { + let ptr = self.memory.allocate_cached(s.as_bytes())?; + Ok(Value::ByValPair( + PrimVal::Ptr(ptr), + PrimVal::from_u128(s.len() as u128), + )) + } + + pub(super) fn const_to_value(&mut self, const_val: &ConstVal<'tcx>) -> EvalResult<'tcx, Value> { + use rustc::middle::const_val::ConstVal::*; + + let primval = match *const_val { + Integral(const_int) => PrimVal::Bytes(const_int.to_u128_unchecked()), + + Float(val) => PrimVal::Bytes(val.bits), + + Bool(b) => PrimVal::from_bool(b), + Char(c) => 
PrimVal::from_char(c), + + Str(ref s) => return self.str_to_value(s), + + ByteStr(ref bs) => { + let ptr = self.memory.allocate_cached(bs.data)?; + PrimVal::Ptr(ptr) + } + + Unevaluated(def_id, substs) => { + let instance = self.resolve_associated_const(def_id, substs); + let cid = GlobalId { + instance, + promoted: None, + }; + return Ok(Value::ByRef(*self.globals.get(&cid).expect("static/const not cached"))); + } + + Aggregate(..) | + Variant(_) => bug!("should not have aggregate or variant constants in MIR"), + // function items are zero sized and thus have no readable value + Function(..) => PrimVal::Undef, + }; + + Ok(Value::ByVal(primval)) + } + + pub(super) fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { + // generics are weird, don't run this function on a generic + assert!(!ty.needs_subst()); + ty.is_sized(self.tcx, ty::ParamEnv::empty(Reveal::All), DUMMY_SP) + } + + pub fn load_mir( + &self, + instance: ty::InstanceDef<'tcx>, + ) -> EvalResult<'tcx, &'tcx mir::Mir<'tcx>> { + trace!("load mir {:?}", instance); + match instance { + ty::InstanceDef::Item(def_id) => { + self.tcx.maybe_optimized_mir(def_id).ok_or_else(|| { + EvalErrorKind::NoMirFor(self.tcx.item_path_str(def_id)).into() + }) + } + _ => Ok(self.tcx.instance_mir(instance)), + } + } + + pub fn monomorphize(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { + // miri doesn't care about lifetimes, and will choke on some crazy ones + // let's simply get rid of them + let without_lifetimes = self.tcx.erase_regions(&ty); + let substituted = without_lifetimes.subst(self.tcx, substs); + let substituted = self.tcx.normalize_associated_type(&substituted); + substituted + } + + /// Return the size and aligment of the value at the given type. + /// Note that the value does not matter if the type is sized. For unsized types, + /// the value has to be a fat pointer, and we only care about the "extra" data in it. + pub fn size_and_align_of_dst( + &mut self, + ty: ty::Ty<'tcx>, + value: Value, + ) -> EvalResult<'tcx, (u64, u64)> { + if let Some(size) = self.type_size(ty)? { + Ok((size as u64, self.type_align(ty)? as u64)) + } else { + match ty.sty { + ty::TyAdt(..) | ty::TyTuple(..) => { + // First get the size of all statically known fields. + // Don't use type_of::sizing_type_of because that expects t to be sized, + // and it also rounds up to alignment, which we want to avoid, + // as the unsized field's alignment could be smaller. + assert!(!ty.is_simd()); + let layout = self.type_layout(ty)?; + debug!("DST {} layout: {:?}", ty, layout); + + let (sized_size, sized_align) = match *layout { + ty::layout::Layout::Univariant { ref variant, .. } => { + ( + variant.offsets.last().map_or(0, |o| o.bytes()), + variant.align, + ) + } + _ => { + bug!( + "size_and_align_of_dst: expcted Univariant for `{}`, found {:#?}", + ty, + layout + ); + } + }; + debug!( + "DST {} statically sized prefix size: {} align: {:?}", + ty, + sized_size, + sized_align + ); + + // Recurse to get the size of the dynamically sized field (must be + // the last field). + let (unsized_size, unsized_align) = match ty.sty { + ty::TyAdt(def, substs) => { + let last_field = def.struct_variant().fields.last().unwrap(); + let field_ty = self.field_ty(substs, last_field); + self.size_and_align_of_dst(field_ty, value)? + } + ty::TyTuple(ref types, _) => { + let field_ty = types.last().unwrap(); + let field_ty = self.tcx.normalize_associated_type(field_ty); + self.size_and_align_of_dst(field_ty, value)? 
+ } + _ => bug!("We already checked that we know this type"), + }; + + // FIXME (#26403, #27023): We should be adding padding + // to `sized_size` (to accommodate the `unsized_align` + // required of the unsized field that follows) before + // summing it with `sized_size`. (Note that since #26403 + // is unfixed, we do not yet add the necessary padding + // here. But this is where the add would go.) + + // Return the sum of sizes and max of aligns. + let size = sized_size + unsized_size; + + // Choose max of two known alignments (combined value must + // be aligned according to more restrictive of the two). + let align = + sized_align.max(Align::from_bytes(unsized_align, unsized_align).unwrap()); + + // Issue #27023: must add any necessary padding to `size` + // (to make it a multiple of `align`) before returning it. + // + // Namely, the returned size should be, in C notation: + // + // `size + ((size & (align-1)) ? align : 0)` + // + // emulated via the semi-standard fast bit trick: + // + // `(size + (align-1)) & -align` + + let size = Size::from_bytes(size).abi_align(align).bytes(); + Ok((size, align.abi())) + } + ty::TyDynamic(..) => { + let (_, vtable) = value.into_ptr_vtable_pair(&mut self.memory)?; + // the second entry in the vtable is the dynamic size of the object. + self.read_size_and_align_from_vtable(vtable) + } + + ty::TySlice(_) | ty::TyStr => { + let elem_ty = ty.sequence_element_type(self.tcx); + let elem_size = self.type_size(elem_ty)?.expect( + "slice element must be sized", + ) as u64; + let (_, len) = value.into_slice(&mut self.memory)?; + let align = self.type_align(elem_ty)?; + Ok((len * elem_size, align as u64)) + } + + _ => bug!("size_of_val::<{:?}>", ty), + } + } + } + + /// Returns the normalized type of a struct field + fn field_ty(&self, param_substs: &Substs<'tcx>, f: &ty::FieldDef) -> ty::Ty<'tcx> { + self.tcx.normalize_associated_type( + &f.ty(self.tcx, param_substs), + ) + } + + pub fn type_size(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, Option> { + self.type_size_with_substs(ty, self.substs()) + } + + pub fn type_align(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, u64> { + self.type_align_with_substs(ty, self.substs()) + } + + pub fn type_size_with_substs( + &self, + ty: Ty<'tcx>, + substs: &'tcx Substs<'tcx>, + ) -> EvalResult<'tcx, Option> { + let layout = self.type_layout_with_substs(ty, substs)?; + if layout.is_unsized() { + Ok(None) + } else { + Ok(Some(layout.size(&self.tcx.data_layout).bytes())) + } + } + + pub fn type_align_with_substs( + &self, + ty: Ty<'tcx>, + substs: &'tcx Substs<'tcx>, + ) -> EvalResult<'tcx, u64> { + self.type_layout_with_substs(ty, substs).map(|layout| { + layout.align(&self.tcx.data_layout).abi() + }) + } + + pub fn type_layout(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, &'tcx Layout> { + self.type_layout_with_substs(ty, self.substs()) + } + + fn type_layout_with_substs( + &self, + ty: Ty<'tcx>, + substs: &'tcx Substs<'tcx>, + ) -> EvalResult<'tcx, &'tcx Layout> { + // TODO(solson): Is this inefficient? Needs investigation. 
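// Editorial note, not part of the upstream patch: a minimal, self-contained
// sketch of the "round `size` up to a multiple of `align`" bit trick that
// `size_and_align_of_dst` above relies on (via `Size::abi_align`). The helper
// name is made up for illustration; `align` is assumed to be a power of two.
fn round_up_to_align(size: u64, align: u64) -> u64 {
    debug_assert!(align.is_power_of_two());
    // `(size + (align - 1)) & -align`, written with an unsigned mask
    (size + align - 1) & !(align - 1)
}
// e.g. round_up_to_align(5, 8) == 8 and round_up_to_align(16, 8) == 16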
+ let ty = self.monomorphize(ty, substs); + + ty.layout(self.tcx, ty::ParamEnv::empty(Reveal::All)) + .map_err(|layout| EvalErrorKind::Layout(layout).into()) + } + + pub fn push_stack_frame( + &mut self, + instance: ty::Instance<'tcx>, + span: codemap::Span, + mir: &'tcx mir::Mir<'tcx>, + return_lvalue: Lvalue, + return_to_block: StackPopCleanup, + ) -> EvalResult<'tcx> { + ::log_settings::settings().indentation += 1; + + /// Return the set of locals that have a storage annotation anywhere + fn collect_storage_annotations<'tcx>(mir: &'tcx mir::Mir<'tcx>) -> HashSet { + use rustc::mir::StatementKind::*; + + let mut set = HashSet::new(); + for block in mir.basic_blocks() { + for stmt in block.statements.iter() { + match stmt.kind { + StorageLive(local) | + StorageDead(local) => { + set.insert(local); + } + _ => {} + } + } + } + set + } + + // Subtract 1 because `local_decls` includes the ReturnMemoryPointer, but we don't store a local + // `Value` for that. + let num_locals = mir.local_decls.len() - 1; + + let locals = { + let annotated_locals = collect_storage_annotations(mir); + let mut locals = vec![None; num_locals]; + for i in 0..num_locals { + let local = mir::Local::new(i + 1); + if !annotated_locals.contains(&local) { + locals[i] = Some(Value::ByVal(PrimVal::Undef)); + } + } + locals + }; + + self.stack.push(Frame { + mir, + block: mir::START_BLOCK, + return_to_block, + return_lvalue, + locals, + span, + instance, + stmt: 0, + }); + + self.memory.cur_frame = self.cur_frame(); + + if self.stack.len() > self.stack_limit { + err!(StackFrameLimitReached) + } else { + Ok(()) + } + } + + pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> { + ::log_settings::settings().indentation -= 1; + self.end_region(None)?; + let frame = self.stack.pop().expect( + "tried to pop a stack frame, but there were none", + ); + if !self.stack.is_empty() { + // TODO: Is this the correct time to start considering these accesses as originating from the returned-to stack frame? + self.memory.cur_frame = self.cur_frame(); + } + match frame.return_to_block { + StackPopCleanup::MarkStatic(mutable) => { + if let Lvalue::Ptr { ptr, .. } = frame.return_lvalue { + // FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions + self.memory.mark_static_initalized( + ptr.to_ptr()?.alloc_id, + mutable, + )? + } else { + bug!("StackPopCleanup::MarkStatic on: {:?}", frame.return_lvalue); + } + } + StackPopCleanup::Goto(target) => self.goto_block(target), + StackPopCleanup::None => {} + } + // deallocate all locals that are backed by an allocation + for local in frame.locals { + self.deallocate_local(local)?; + } + + Ok(()) + } + + pub fn deallocate_local(&mut self, local: Option) -> EvalResult<'tcx> { + if let Some(Value::ByRef(ptr)) = local { + trace!("deallocating local"); + let ptr = ptr.to_ptr()?; + self.memory.dump_alloc(ptr.alloc_id); + match self.memory.get(ptr.alloc_id)?.kind { + // for a constant like `const FOO: &i32 = &1;` the local containing + // the `1` is referred to by the global. 
We transitively marked everything + // the global refers to as static itself, so we don't free it here + MemoryKind::Static => {} + MemoryKind::Stack => self.memory.deallocate(ptr, None, MemoryKind::Stack)?, + other => bug!("local contained non-stack memory: {:?}", other), + } + }; + Ok(()) + } + + pub fn assign_discr_and_fields( + &mut self, + dest: Lvalue, + dest_ty: Ty<'tcx>, + discr_offset: u64, + operands: &[mir::Operand<'tcx>], + discr_val: u128, + variant_idx: usize, + discr_size: u64, + discr_signed: bool, + ) -> EvalResult<'tcx> { + // FIXME(solson) + let dest_ptr = self.force_allocation(dest)?.to_ptr()?; + + let discr_dest = dest_ptr.offset(discr_offset, &self)?; + self.memory.write_primval(discr_dest, PrimVal::Bytes(discr_val), discr_size, discr_signed)?; + + let dest = Lvalue::Ptr { + ptr: PtrAndAlign { + ptr: dest_ptr.into(), + aligned: true, + }, + extra: LvalueExtra::DowncastVariant(variant_idx), + }; + + self.assign_fields(dest, dest_ty, operands) + } + + pub fn assign_fields( + &mut self, + dest: Lvalue, + dest_ty: Ty<'tcx>, + operands: &[mir::Operand<'tcx>], + ) -> EvalResult<'tcx> { + if self.type_size(dest_ty)? == Some(0) { + // zst assigning is a nop + return Ok(()); + } + if self.ty_to_primval_kind(dest_ty).is_ok() { + assert_eq!(operands.len(), 1); + let value = self.eval_operand(&operands[0])?; + return self.write_value(value, dest); + } + for (field_index, operand) in operands.iter().enumerate() { + let value = self.eval_operand(operand)?; + let field_dest = self.lvalue_field(dest, mir::Field::new(field_index), dest_ty, value.ty)?; + self.write_value(value, field_dest)?; + } + Ok(()) + } + + /// Evaluate an assignment statement. + /// + /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue + /// type writes its results directly into the memory specified by the lvalue. + pub(super) fn eval_rvalue_into_lvalue( + &mut self, + rvalue: &mir::Rvalue<'tcx>, + lvalue: &mir::Lvalue<'tcx>, + ) -> EvalResult<'tcx> { + let dest = self.eval_lvalue(lvalue)?; + let dest_ty = self.lvalue_ty(lvalue); + let dest_layout = self.type_layout(dest_ty)?; + + use rustc::mir::Rvalue::*; + match *rvalue { + Use(ref operand) => { + let value = self.eval_operand(operand)?.value; + let valty = ValTy { + value, + ty: dest_ty, + }; + self.write_value(valty, dest)?; + } + + BinaryOp(bin_op, ref left, ref right) => { + let left = self.eval_operand(left)?; + let right = self.eval_operand(right)?; + if self.intrinsic_overflowing( + bin_op, + left, + right, + dest, + dest_ty, + )? + { + // There was an overflow in an unchecked binop. Right now, we consider this an error and bail out. + // The rationale is that the reason rustc emits unchecked binops in release mode (vs. the checked binops + // it emits in debug mode) is performance, but it doesn't cost us any performance in miri. + // If, however, the compiler ever starts transforming unchecked intrinsics into unchecked binops, + // we have to go back to just ignoring the overflow here. 
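// Editorial note, not part of the upstream patch: the behaviour chosen here,
// shown with std's checked arithmetic on plain integers. A `None` from
// `checked_add` corresponds to the `OverflowingMath` error returned below;
// the function and its operands are hypothetical.
fn add_or_error(lhs: i32, rhs: i32) -> Result<i32, &'static str> {
    lhs.checked_add(rhs).ok_or("attempted to do overflowing math")
}
// e.g. add_or_error(i32::max_value(), 1) == Err("attempted to do overflowing math")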
+ return err!(OverflowingMath); + } + } + + CheckedBinaryOp(bin_op, ref left, ref right) => { + let left = self.eval_operand(left)?; + let right = self.eval_operand(right)?; + self.intrinsic_with_overflow( + bin_op, + left, + right, + dest, + dest_ty, + )?; + } + + UnaryOp(un_op, ref operand) => { + let val = self.eval_operand_to_primval(operand)?; + let kind = self.ty_to_primval_kind(dest_ty)?; + self.write_primval( + dest, + operator::unary_op(un_op, val, kind)?, + dest_ty, + )?; + } + + // Skip everything for zsts + Aggregate(..) if self.type_size(dest_ty)? == Some(0) => {} + + Aggregate(ref kind, ref operands) => { + self.inc_step_counter_and_check_limit(operands.len() as u64)?; + use rustc::ty::layout::Layout::*; + match *dest_layout { + Univariant { ref variant, .. } => { + self.write_maybe_aligned_mut(!variant.packed, |ecx| { + ecx.assign_fields(dest, dest_ty, operands) + })?; + } + + Array { .. } => { + self.assign_fields(dest, dest_ty, operands)?; + } + + General { + discr, + ref variants, + .. + } => { + if let mir::AggregateKind::Adt(adt_def, variant, _, _) = **kind { + let discr_val = adt_def + .discriminants(self.tcx) + .nth(variant) + .expect("broken mir: Adt variant id invalid") + .to_u128_unchecked(); + let discr_size = discr.size().bytes(); + + self.assign_discr_and_fields( + dest, + dest_ty, + variants[variant].offsets[0].bytes(), + operands, + discr_val, + variant, + discr_size, + false, + )?; + } else { + bug!("tried to assign {:?} to Layout::General", kind); + } + } + + RawNullablePointer { nndiscr, .. } => { + if let mir::AggregateKind::Adt(_, variant, _, _) = **kind { + if nndiscr == variant as u64 { + assert_eq!(operands.len(), 1); + let operand = &operands[0]; + let value = self.eval_operand(operand)?; + self.write_value(value, dest)?; + } else { + if let Some(operand) = operands.get(0) { + assert_eq!(operands.len(), 1); + let operand_ty = self.operand_ty(operand); + assert_eq!(self.type_size(operand_ty)?, Some(0)); + } + self.write_null(dest, dest_ty)?; + } + } else { + bug!("tried to assign {:?} to Layout::RawNullablePointer", kind); + } + } + + StructWrappedNullablePointer { + nndiscr, + ref discrfield_source, + ref nonnull, + .. + } => { + if let mir::AggregateKind::Adt(_, variant, _, _) = **kind { + if nndiscr == variant as u64 { + self.write_maybe_aligned_mut(!nonnull.packed, |ecx| { + ecx.assign_fields(dest, dest_ty, operands) + })?; + } else { + for operand in operands { + let operand_ty = self.operand_ty(operand); + assert_eq!(self.type_size(operand_ty)?, Some(0)); + } + self.write_struct_wrapped_null_pointer( + dest_ty, + nndiscr, + discrfield_source, + dest, + )?; + } + } else { + bug!("tried to assign {:?} to Layout::RawNullablePointer", kind); + } + } + + CEnum { .. } => { + assert_eq!(operands.len(), 0); + if let mir::AggregateKind::Adt(adt_def, variant, _, _) = **kind { + let n = adt_def + .discriminants(self.tcx) + .nth(variant) + .expect("broken mir: Adt variant index invalid") + .to_u128_unchecked(); + self.write_primval(dest, PrimVal::Bytes(n), dest_ty)?; + } else { + bug!("tried to assign {:?} to Layout::CEnum", kind); + } + } + + Vector { count, .. 
} => { + debug_assert_eq!(count, operands.len() as u64); + self.assign_fields(dest, dest_ty, operands)?; + } + + UntaggedUnion { ref variants } => { + assert_eq!(operands.len(), 1); + let operand = &operands[0]; + let value = self.eval_operand(operand)?; + self.write_maybe_aligned_mut(!variants.packed, |ecx| { + ecx.write_value(value, dest) + })?; + } + + _ => { + return err!(Unimplemented(format!( + "can't handle destination layout {:?} when assigning {:?}", + dest_layout, + kind + ))); + } + } + } + + Repeat(ref operand, _) => { + let (elem_ty, length) = match dest_ty.sty { + ty::TyArray(elem_ty, n) => (elem_ty, n.val.to_const_int().unwrap().to_u64().unwrap()), + _ => { + bug!( + "tried to assign array-repeat to non-array type {:?}", + dest_ty + ) + } + }; + self.inc_step_counter_and_check_limit(length)?; + let elem_size = self.type_size(elem_ty)?.expect( + "repeat element type must be sized", + ); + let value = self.eval_operand(operand)?.value; + + // FIXME(solson) + let dest = Pointer::from(self.force_allocation(dest)?.to_ptr()?); + + for i in 0..length { + let elem_dest = dest.offset(i * elem_size, &self)?; + self.write_value_to_ptr(value, elem_dest, elem_ty)?; + } + } + + Len(ref lvalue) => { + // FIXME(CTFE): don't allow computing the length of arrays in const eval + let src = self.eval_lvalue(lvalue)?; + let ty = self.lvalue_ty(lvalue); + let (_, len) = src.elem_ty_and_len(ty); + self.write_primval( + dest, + PrimVal::from_u128(len as u128), + dest_ty, + )?; + } + + Ref(_, _, ref lvalue) => { + let src = self.eval_lvalue(lvalue)?; + // We ignore the alignment of the lvalue here -- special handling for packed structs ends + // at the `&` operator. + let (ptr, extra) = self.force_allocation(src)?.to_ptr_extra_aligned(); + + let val = match extra { + LvalueExtra::None => ptr.ptr.to_value(), + LvalueExtra::Length(len) => ptr.ptr.to_value_with_len(len), + LvalueExtra::Vtable(vtable) => ptr.ptr.to_value_with_vtable(vtable), + LvalueExtra::DowncastVariant(..) => { + bug!("attempted to take a reference to an enum downcast lvalue") + } + }; + let valty = ValTy { + value: val, + ty: dest_ty, + }; + self.write_value(valty, dest)?; + } + + NullaryOp(mir::NullOp::Box, ty) => { + let ptr = M::box_alloc(self, ty)?; + self.write_primval(dest, ptr, dest_ty)?; + } + + NullaryOp(mir::NullOp::SizeOf, ty) => { + let size = self.type_size(ty)?.expect( + "SizeOf nullary MIR operator called for unsized type", + ); + self.write_primval( + dest, + PrimVal::from_u128(size as u128), + dest_ty, + )?; + } + + Cast(kind, ref operand, cast_ty) => { + debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest_ty); + use rustc::mir::CastKind::*; + match kind { + Unsize => { + let src = self.eval_operand(operand)?; + self.unsize_into(src.value, src.ty, dest, dest_ty)?; + } + + Misc => { + let src = self.eval_operand(operand)?; + if self.type_is_fat_ptr(src.ty) { + match (src.value, self.type_is_fat_ptr(dest_ty)) { + (Value::ByRef { .. 
}, _) | + (Value::ByValPair(..), true) => { + let valty = ValTy { + value: src.value, + ty: dest_ty, + }; + self.write_value(valty, dest)?; + } + (Value::ByValPair(data, _), false) => { + let valty = ValTy { + value: Value::ByVal(data), + ty: dest_ty, + }; + self.write_value(valty, dest)?; + } + (Value::ByVal(_), _) => bug!("expected fat ptr"), + } + } else { + let src_val = self.value_to_primval(src)?; + let dest_val = self.cast_primval(src_val, src.ty, dest_ty)?; + let valty = ValTy { + value: Value::ByVal(dest_val), + ty: dest_ty, + }; + self.write_value(valty, dest)?; + } + } + + ReifyFnPointer => { + match self.operand_ty(operand).sty { + ty::TyFnDef(def_id, substs) => { + let instance = resolve(self.tcx, def_id, substs); + let fn_ptr = self.memory.create_fn_alloc(instance); + let valty = ValTy { + value: Value::ByVal(PrimVal::Ptr(fn_ptr)), + ty: dest_ty, + }; + self.write_value(valty, dest)?; + } + ref other => bug!("reify fn pointer on {:?}", other), + } + } + + UnsafeFnPointer => { + match dest_ty.sty { + ty::TyFnPtr(_) => { + let mut src = self.eval_operand(operand)?; + src.ty = dest_ty; + self.write_value(src, dest)?; + } + ref other => bug!("fn to unsafe fn cast on {:?}", other), + } + } + + ClosureFnPointer => { + match self.operand_ty(operand).sty { + ty::TyClosure(def_id, substs) => { + let instance = resolve_closure( + self.tcx, + def_id, + substs, + ty::ClosureKind::FnOnce, + ); + let fn_ptr = self.memory.create_fn_alloc(instance); + let valty = ValTy { + value: Value::ByVal(PrimVal::Ptr(fn_ptr)), + ty: dest_ty, + }; + self.write_value(valty, dest)?; + } + ref other => bug!("closure fn pointer on {:?}", other), + } + } + } + } + + Discriminant(ref lvalue) => { + let lval = self.eval_lvalue(lvalue)?; + let ty = self.lvalue_ty(lvalue); + let ptr = self.force_allocation(lval)?.to_ptr()?; + let discr_val = self.read_discriminant_value(ptr, ty)?; + if let ty::TyAdt(adt_def, _) = ty.sty { + trace!("Read discriminant {}, valid discriminants {:?}", discr_val, adt_def.discriminants(self.tcx).collect::>()); + if adt_def.discriminants(self.tcx).all(|v| { + discr_val != v.to_u128_unchecked() + }) + { + return err!(InvalidDiscriminant); + } + self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?; + } else { + bug!("rustc only generates Rvalue::Discriminant for enums"); + } + } + } + + if log_enabled!(::log::LogLevel::Trace) { + self.dump_local(dest); + } + + Ok(()) + } + + pub(crate) fn write_struct_wrapped_null_pointer( + &mut self, + dest_ty: ty::Ty<'tcx>, + nndiscr: u64, + discrfield_source: &layout::FieldPath, + dest: Lvalue, + ) -> EvalResult<'tcx> { + let (offset, TyAndPacked { ty, packed }) = self.nonnull_offset_and_ty( + dest_ty, + nndiscr, + discrfield_source, + )?; + let nonnull = self.force_allocation(dest)?.to_ptr()?.offset( + offset.bytes(), + &self, + )?; + trace!("struct wrapped nullable pointer type: {}", ty); + // only the pointer part of a fat pointer is used for this space optimization + let discr_size = self.type_size(ty)?.expect( + "bad StructWrappedNullablePointer discrfield", + ); + self.memory.write_maybe_aligned_mut(!packed, |mem| { + // We're writing 0, signedness does not matter + mem.write_primval(nonnull, PrimVal::Bytes(0), discr_size, false) + }) + } + + pub(super) fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool { + match ty.sty { + ty::TyRawPtr(ref tam) | + ty::TyRef(_, ref tam) => !self.type_is_sized(tam.ty), + ty::TyAdt(def, _) if def.is_box() => !self.type_is_sized(ty.boxed_ty()), + _ => false, + } + } + + pub(super) fn nonnull_offset_and_ty( + 
&self, + ty: Ty<'tcx>, + nndiscr: u64, + discrfield: &[u32], + ) -> EvalResult<'tcx, (Size, TyAndPacked<'tcx>)> { + // Skip the constant 0 at the start meant for LLVM GEP and the outer non-null variant + let path = discrfield.iter().skip(2).map(|&i| i as usize); + + // Handle the field index for the outer non-null variant. + let (inner_offset, inner_ty) = match ty.sty { + ty::TyAdt(adt_def, substs) => { + let variant = &adt_def.variants[nndiscr as usize]; + let index = discrfield[1]; + let field = &variant.fields[index as usize]; + ( + self.get_field_offset(ty, index as usize)?, + field.ty(self.tcx, substs), + ) + } + _ => bug!("non-enum for StructWrappedNullablePointer: {}", ty), + }; + + self.field_path_offset_and_ty(inner_offset, inner_ty, path) + } + + fn field_path_offset_and_ty>( + &self, + mut offset: Size, + mut ty: Ty<'tcx>, + path: I, + ) -> EvalResult<'tcx, (Size, TyAndPacked<'tcx>)> { + // Skip the initial 0 intended for LLVM GEP. + let mut packed = false; + for field_index in path { + let field_offset = self.get_field_offset(ty, field_index)?; + trace!( + "field_path_offset_and_ty: {}, {}, {:?}, {:?}", + field_index, + ty, + field_offset, + offset + ); + let field_ty = self.get_field_ty(ty, field_index)?; + ty = field_ty.ty; + packed = packed || field_ty.packed; + offset = offset + .checked_add(field_offset, &self.tcx.data_layout) + .unwrap(); + } + + Ok((offset, TyAndPacked { ty, packed })) + } + fn get_fat_field( + &self, + pointee_ty: Ty<'tcx>, + field_index: usize, + ) -> EvalResult<'tcx, Ty<'tcx>> { + match (field_index, &self.tcx.struct_tail(pointee_ty).sty) { + (1, &ty::TyStr) | + (1, &ty::TySlice(_)) => Ok(self.tcx.types.usize), + (1, &ty::TyDynamic(..)) | + (0, _) => Ok(self.tcx.mk_imm_ptr(self.tcx.types.u8)), + _ => bug!("invalid fat pointee type: {}", pointee_ty), + } + } + + /// Returns the field type and whether the field is packed + pub fn get_field_ty( + &self, + ty: Ty<'tcx>, + field_index: usize, + ) -> EvalResult<'tcx, TyAndPacked<'tcx>> { + match ty.sty { + ty::TyAdt(adt_def, _) if adt_def.is_box() => Ok(TyAndPacked { + ty: self.get_fat_field(ty.boxed_ty(), field_index)?, + packed: false, + }), + ty::TyAdt(adt_def, substs) if adt_def.is_enum() => { + use rustc::ty::layout::Layout::*; + match *self.type_layout(ty)? { + RawNullablePointer { nndiscr, .. } => Ok(TyAndPacked { + ty: adt_def.variants[nndiscr as usize].fields[field_index].ty( + self.tcx, + substs, + ), + packed: false, + }), + StructWrappedNullablePointer { + nndiscr, + ref nonnull, + .. + } => { + let ty = adt_def.variants[nndiscr as usize].fields[field_index].ty( + self.tcx, + substs, + ); + Ok(TyAndPacked { + ty, + packed: nonnull.packed, + }) + } + // mir optimizations treat single variant enums as structs + General { .. } if adt_def.variants.len() == 1 => Ok(TyAndPacked { + ty: adt_def.variants[0].fields[field_index].ty(self.tcx, substs), + packed: false, + }), + _ => { + err!(Unimplemented(format!( + "get_field_ty can't handle enum type: {:?}, {:?}", + ty, + ty.sty + ))) + } + } + } + ty::TyAdt(adt_def, substs) => { + let variant_def = adt_def.struct_variant(); + use rustc::ty::layout::Layout::*; + match *self.type_layout(ty)? { + UntaggedUnion { ref variants } => Ok(TyAndPacked { + ty: variant_def.fields[field_index].ty(self.tcx, substs), + packed: variants.packed, + }), + Univariant { ref variant, .. 
} => Ok(TyAndPacked { + ty: variant_def.fields[field_index].ty(self.tcx, substs), + packed: variant.packed, + }), + _ => { + err!(Unimplemented(format!( + "get_field_ty can't handle struct type: {:?}, {:?}", + ty, + ty.sty + ))) + } + } + } + + ty::TyTuple(fields, _) => Ok(TyAndPacked { + ty: fields[field_index], + packed: false, + }), + + ty::TyRef(_, ref tam) | + ty::TyRawPtr(ref tam) => Ok(TyAndPacked { + ty: self.get_fat_field(tam.ty, field_index)?, + packed: false, + }), + + ty::TyArray(ref inner, _) => Ok(TyAndPacked { + ty: inner, + packed: false, + }), + + ty::TyClosure(def_id, ref closure_substs) => Ok(TyAndPacked { + ty: closure_substs.upvar_tys(def_id, self.tcx).nth(field_index).unwrap(), + packed: false, + }), + + _ => { + err!(Unimplemented( + format!("can't handle type: {:?}, {:?}", ty, ty.sty), + )) + } + } + } + + fn get_field_offset(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, Size> { + // Also see lvalue_field in lvalue.rs, which handles more cases but needs an actual value at the given type + let layout = self.type_layout(ty)?; + + use rustc::ty::layout::Layout::*; + match *layout { + Univariant { ref variant, .. } => Ok(variant.offsets[field_index]), + FatPointer { .. } => { + let bytes = field_index as u64 * self.memory.pointer_size(); + Ok(Size::from_bytes(bytes)) + } + StructWrappedNullablePointer { ref nonnull, .. } => Ok(nonnull.offsets[field_index]), + UntaggedUnion { .. } => Ok(Size::from_bytes(0)), + // mir optimizations treat single variant enums as structs + General { ref variants, .. } if variants.len() == 1 => Ok(variants[0].offsets[field_index]), + _ => { + let msg = format!( + "get_field_offset: can't handle type: {:?}, with layout: {:?}", + ty, + layout + ); + err!(Unimplemented(msg)) + } + } + } + + pub fn get_field_count(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, u64> { + let layout = self.type_layout(ty)?; + + use rustc::ty::layout::Layout::*; + match *layout { + Univariant { ref variant, .. } => Ok(variant.offsets.len() as u64), + FatPointer { .. } => Ok(2), + StructWrappedNullablePointer { ref nonnull, .. } => Ok(nonnull.offsets.len() as u64), + Vector { count, .. } | + Array { count, .. } => Ok(count), + Scalar { .. } => Ok(0), + UntaggedUnion { .. } => Ok(1), + _ => { + let msg = format!( + "get_field_count: can't handle type: {:?}, with layout: {:?}", + ty, + layout + ); + err!(Unimplemented(msg)) + } + } + } + + pub(super) fn eval_operand_to_primval( + &mut self, + op: &mir::Operand<'tcx>, + ) -> EvalResult<'tcx, PrimVal> { + let valty = self.eval_operand(op)?; + self.value_to_primval(valty) + } + + pub(crate) fn operands_to_args( + &mut self, + ops: &[mir::Operand<'tcx>], + ) -> EvalResult<'tcx, Vec>> { + ops.into_iter() + .map(|op| self.eval_operand(op)) + .collect() + } + + pub fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> { + use rustc::mir::Operand::*; + match *op { + Consume(ref lvalue) => { + Ok(ValTy { + value: self.eval_and_read_lvalue(lvalue)?, + ty: self.operand_ty(op), + }) + }, + + Constant(ref constant) => { + use rustc::mir::Literal; + let mir::Constant { ref literal, .. 
} = **constant; + let value = match *literal { + Literal::Value { ref value } => self.const_to_value(&value.val)?, + + Literal::Promoted { index } => { + let cid = GlobalId { + instance: self.frame().instance, + promoted: Some(index), + }; + Value::ByRef(*self.globals.get(&cid).expect("promoted not cached")) + } + }; + + Ok(ValTy { + value, + ty: self.operand_ty(op), + }) + } + } + } + + pub fn read_discriminant_value( + &self, + adt_ptr: MemoryPointer, + adt_ty: Ty<'tcx>, + ) -> EvalResult<'tcx, u128> { + use rustc::ty::layout::Layout::*; + let adt_layout = self.type_layout(adt_ty)?; + //trace!("read_discriminant_value {:#?}", adt_layout); + + let discr_val = match *adt_layout { + General { discr, .. } => { + let discr_size = discr.size().bytes(); + self.memory.read_primval(adt_ptr, discr_size, false)?.to_bytes()? + } + + CEnum { + discr, + signed, + .. + } => { + let discr_size = discr.size().bytes(); + self.memory.read_primval(adt_ptr, discr_size, signed)?.to_bytes()? + } + + RawNullablePointer { nndiscr, value } => { + let discr_size = value.size(&self.tcx.data_layout).bytes(); + trace!("rawnullablepointer with size {}", discr_size); + self.read_nonnull_discriminant_value( + adt_ptr, + nndiscr as u128, + discr_size, + )? + } + + StructWrappedNullablePointer { + nndiscr, + ref discrfield_source, + .. + } => { + let (offset, TyAndPacked { ty, packed }) = self.nonnull_offset_and_ty( + adt_ty, + nndiscr, + discrfield_source, + )?; + let nonnull = adt_ptr.offset(offset.bytes(), &*self)?; + trace!("struct wrapped nullable pointer type: {}", ty); + // only the pointer part of a fat pointer is used for this space optimization + let discr_size = self.type_size(ty)?.expect( + "bad StructWrappedNullablePointer discrfield", + ); + self.read_maybe_aligned(!packed, |ectx| { + ectx.read_nonnull_discriminant_value(nonnull, nndiscr as u128, discr_size) + })? + } + + // The discriminant_value intrinsic returns 0 for non-sum types. + Array { .. } | + FatPointer { .. } | + Scalar { .. } | + Univariant { .. } | + Vector { .. } | + UntaggedUnion { .. } => 0, + }; + + Ok(discr_val) + } + + fn read_nonnull_discriminant_value( + &self, + ptr: MemoryPointer, + nndiscr: u128, + discr_size: u64, + ) -> EvalResult<'tcx, u128> { + trace!( + "read_nonnull_discriminant_value: {:?}, {}, {}", + ptr, + nndiscr, + discr_size + ); + // We are only interested in 0 vs. non-0, the sign does not matter for this + let null = match self.memory.read_primval(ptr, discr_size, false)? { + PrimVal::Bytes(0) => true, + PrimVal::Bytes(_) | + PrimVal::Ptr(..) => false, + PrimVal::Undef => return err!(ReadUndefBytes), + }; + assert!(nndiscr == 0 || nndiscr == 1); + Ok(if !null { nndiscr } else { 1 - nndiscr }) + } + + pub fn read_global_as_value(&self, gid: GlobalId) -> Value { + Value::ByRef(*self.globals.get(&gid).expect("global not cached")) + } + + pub fn operand_ty(&self, operand: &mir::Operand<'tcx>) -> Ty<'tcx> { + self.monomorphize(operand.ty(self.mir(), self.tcx), self.substs()) + } + + fn copy(&mut self, src: Pointer, dest: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx> { + let size = self.type_size(ty)?.expect( + "cannot copy from an unsized type", + ); + let align = self.type_align(ty)?; + self.memory.copy(src, dest, size, align, false)?; + Ok(()) + } + + pub fn is_packed(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, bool> { + let layout = self.type_layout(ty)?; + use rustc::ty::layout::Layout::*; + Ok(match *layout { + Univariant { ref variant, .. } => variant.packed, + + StructWrappedNullablePointer { ref nonnull, .. 
} => nonnull.packed, + + UntaggedUnion { ref variants } => variants.packed, + + // can only apply #[repr(packed)] to struct and union + _ => false, + }) + } + + pub fn force_allocation(&mut self, lvalue: Lvalue) -> EvalResult<'tcx, Lvalue> { + let new_lvalue = match lvalue { + Lvalue::Local { frame, local } => { + // -1 since we don't store the return value + match self.stack[frame].locals[local.index() - 1] { + None => return err!(DeadLocal), + Some(Value::ByRef(ptr)) => { + Lvalue::Ptr { + ptr, + extra: LvalueExtra::None, + } + } + Some(val) => { + let ty = self.stack[frame].mir.local_decls[local].ty; + let ty = self.monomorphize(ty, self.stack[frame].instance.substs); + let substs = self.stack[frame].instance.substs; + let ptr = self.alloc_ptr_with_substs(ty, substs)?; + self.stack[frame].locals[local.index() - 1] = + Some(Value::by_ref(ptr.into())); // it stays live + self.write_value_to_ptr(val, ptr.into(), ty)?; + Lvalue::from_ptr(ptr) + } + } + } + Lvalue::Ptr { .. } => lvalue, + }; + Ok(new_lvalue) + } + + /// ensures this Value is not a ByRef + pub(super) fn follow_by_ref_value( + &self, + value: Value, + ty: Ty<'tcx>, + ) -> EvalResult<'tcx, Value> { + match value { + Value::ByRef(PtrAndAlign { ptr, aligned }) => { + self.read_maybe_aligned(aligned, |ectx| ectx.read_value(ptr, ty)) + } + other => Ok(other), + } + } + + pub fn value_to_primval( + &self, + ValTy { value, ty } : ValTy<'tcx>, + ) -> EvalResult<'tcx, PrimVal> { + match self.follow_by_ref_value(value, ty)? { + Value::ByRef { .. } => bug!("follow_by_ref_value can't result in `ByRef`"), + + Value::ByVal(primval) => { + // TODO: Do we really want insta-UB here? + self.ensure_valid_value(primval, ty)?; + Ok(primval) + } + + Value::ByValPair(..) => bug!("value_to_primval can't work with fat pointers"), + } + } + + pub fn write_null(&mut self, dest: Lvalue, dest_ty: Ty<'tcx>) -> EvalResult<'tcx> { + self.write_primval(dest, PrimVal::Bytes(0), dest_ty) + } + + pub fn write_ptr(&mut self, dest: Lvalue, val: Pointer, dest_ty: Ty<'tcx>) -> EvalResult<'tcx> { + let valty = ValTy { + value: val.to_value(), + ty: dest_ty, + }; + self.write_value(valty, dest) + } + + pub fn write_primval( + &mut self, + dest: Lvalue, + val: PrimVal, + dest_ty: Ty<'tcx>, + ) -> EvalResult<'tcx> { + let valty = ValTy { + value: Value::ByVal(val), + ty: dest_ty, + }; + self.write_value(valty, dest) + } + + pub fn write_value( + &mut self, + ValTy { value: src_val, ty: dest_ty } : ValTy<'tcx>, + dest: Lvalue, + ) -> EvalResult<'tcx> { + //trace!("Writing {:?} to {:?} at type {:?}", src_val, dest, dest_ty); + // Note that it is really important that the type here is the right one, and matches the type things are read at. + // In case `src_val` is a `ByValPair`, we don't do any magic here to handle padding properly, which is only + // correct if we never look at this data with the wrong type. + + match dest { + Lvalue::Ptr { + ptr: PtrAndAlign { ptr, aligned }, + extra, + } => { + assert_eq!(extra, LvalueExtra::None); + self.write_maybe_aligned_mut( + aligned, + |ectx| ectx.write_value_to_ptr(src_val, ptr, dest_ty), + ) + } + + Lvalue::Local { frame, local } => { + let dest = self.stack[frame].get_local(local)?; + self.write_value_possibly_by_val( + src_val, + |this, val| this.stack[frame].set_local(local, val), + dest, + dest_ty, + ) + } + } + } + + // The cases here can be a bit subtle. Read carefully! 
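// Editorial note, not part of the upstream patch: a toy, module-level model of
// the aliasing rule that `write_value_possibly_by_val` below enforces. A
// `ByRef`-like slot is backed by storage that other handles may alias, so a
// write must go through that storage; a `ByVal`-like slot is owned exclusively
// and can simply be overwritten. All names here are made up for illustration.
use std::cell::Cell;
use std::rc::Rc;

enum Slot {
    Backed(Rc<Cell<i32>>), // analogous to Value::ByRef: other handles may alias the storage
    Plain(i32),            // analogous to Value::ByVal: owned exclusively by this slot
}

fn write_slot(slot: &mut Slot, new: i32) {
    match *slot {
        // writes must go through the shared storage so aliases observe them
        Slot::Backed(ref cell) => cell.set(new),
        // nothing else can see a plain value, so overwrite it in place
        Slot::Plain(ref mut v) => *v = new,
    }
}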
+ fn write_value_possibly_by_val EvalResult<'tcx>>( + &mut self, + src_val: Value, + write_dest: F, + old_dest_val: Value, + dest_ty: Ty<'tcx>, + ) -> EvalResult<'tcx> { + if let Value::ByRef(PtrAndAlign { + ptr: dest_ptr, + aligned, + }) = old_dest_val + { + // If the value is already `ByRef` (that is, backed by an `Allocation`), + // then we must write the new value into this allocation, because there may be + // other pointers into the allocation. These other pointers are logically + // pointers into the local variable, and must be able to observe the change. + // + // Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we + // knew for certain that there were no outstanding pointers to this allocation. + self.write_maybe_aligned_mut(aligned, |ectx| { + ectx.write_value_to_ptr(src_val, dest_ptr, dest_ty) + })?; + + } else if let Value::ByRef(PtrAndAlign { + ptr: src_ptr, + aligned, + }) = src_val + { + // If the value is not `ByRef`, then we know there are no pointers to it + // and we can simply overwrite the `Value` in the locals array directly. + // + // In this specific case, where the source value is `ByRef`, we must duplicate + // the allocation, because this is a by-value operation. It would be incorrect + // if they referred to the same allocation, since then a change to one would + // implicitly change the other. + // + // It is a valid optimization to attempt reading a primitive value out of the + // source and write that into the destination without making an allocation, so + // we do so here. + self.read_maybe_aligned_mut(aligned, |ectx| { + if let Ok(Some(src_val)) = ectx.try_read_value(src_ptr, dest_ty) { + write_dest(ectx, src_val)?; + } else { + let dest_ptr = ectx.alloc_ptr(dest_ty)?.into(); + ectx.copy(src_ptr, dest_ptr, dest_ty)?; + write_dest(ectx, Value::by_ref(dest_ptr))?; + } + Ok(()) + })?; + + } else { + // Finally, we have the simple case where neither source nor destination are + // `ByRef`. We may simply copy the source value over the the destintion. + write_dest(self, src_val)?; + } + Ok(()) + } + + pub fn write_value_to_ptr( + &mut self, + value: Value, + dest: Pointer, + dest_ty: Ty<'tcx>, + ) -> EvalResult<'tcx> { + match value { + Value::ByRef(PtrAndAlign { ptr, aligned }) => { + self.read_maybe_aligned_mut(aligned, |ectx| ectx.copy(ptr, dest, dest_ty)) + } + Value::ByVal(primval) => { + let size = self.type_size(dest_ty)?.expect("dest type must be sized"); + if size == 0 { + assert!(primval.is_undef()); + Ok(()) + } else { + // TODO: Do we need signedness? + self.memory.write_primval(dest.to_ptr()?, primval, size, false) + } + } + Value::ByValPair(a, b) => self.write_pair_to_ptr(a, b, dest.to_ptr()?, dest_ty), + } + } + + pub fn write_pair_to_ptr( + &mut self, + a: PrimVal, + b: PrimVal, + ptr: MemoryPointer, + mut ty: Ty<'tcx>, + ) -> EvalResult<'tcx> { + let mut packed = false; + while self.get_field_count(ty)? 
== 1 { + let field = self.get_field_ty(ty, 0)?; + ty = field.ty; + packed = packed || field.packed; + } + assert_eq!(self.get_field_count(ty)?, 2); + let field_0 = self.get_field_offset(ty, 0)?; + let field_1 = self.get_field_offset(ty, 1)?; + let field_0_ty = self.get_field_ty(ty, 0)?; + let field_1_ty = self.get_field_ty(ty, 1)?; + assert_eq!( + field_0_ty.packed, + field_1_ty.packed, + "the two fields must agree on being packed" + ); + packed = packed || field_0_ty.packed; + let field_0_size = self.type_size(field_0_ty.ty)?.expect( + "pair element type must be sized", + ); + let field_1_size = self.type_size(field_1_ty.ty)?.expect( + "pair element type must be sized", + ); + let field_0_ptr = ptr.offset(field_0.bytes(), &self)?.into(); + let field_1_ptr = ptr.offset(field_1.bytes(), &self)?.into(); + // TODO: What about signedess? + self.write_maybe_aligned_mut(!packed, |ectx| { + ectx.memory.write_primval(field_0_ptr, a, field_0_size, false) + })?; + self.write_maybe_aligned_mut(!packed, |ectx| { + ectx.memory.write_primval(field_1_ptr, b, field_1_size, false) + })?; + Ok(()) + } + + pub fn ty_to_primval_kind(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimValKind> { + use syntax::ast::FloatTy; + + let kind = match ty.sty { + ty::TyBool => PrimValKind::Bool, + ty::TyChar => PrimValKind::Char, + + ty::TyInt(int_ty) => { + use syntax::ast::IntTy::*; + let size = match int_ty { + I8 => 1, + I16 => 2, + I32 => 4, + I64 => 8, + I128 => 16, + Is => self.memory.pointer_size(), + }; + PrimValKind::from_int_size(size) + } + + ty::TyUint(uint_ty) => { + use syntax::ast::UintTy::*; + let size = match uint_ty { + U8 => 1, + U16 => 2, + U32 => 4, + U64 => 8, + U128 => 16, + Us => self.memory.pointer_size(), + }; + PrimValKind::from_uint_size(size) + } + + ty::TyFloat(FloatTy::F32) => PrimValKind::F32, + ty::TyFloat(FloatTy::F64) => PrimValKind::F64, + + ty::TyFnPtr(_) => PrimValKind::FnPtr, + + ty::TyRef(_, ref tam) | + ty::TyRawPtr(ref tam) if self.type_is_sized(tam.ty) => PrimValKind::Ptr, + + ty::TyAdt(def, _) if def.is_box() => PrimValKind::Ptr, + + ty::TyAdt(def, substs) => { + use rustc::ty::layout::Layout::*; + match *self.type_layout(ty)? { + CEnum { discr, signed, .. } => { + let size = discr.size().bytes(); + if signed { + PrimValKind::from_int_size(size) + } else { + PrimValKind::from_uint_size(size) + } + } + + RawNullablePointer { value, .. } => { + use rustc::ty::layout::Primitive::*; + match value { + // TODO(solson): Does signedness matter here? What should the sign be? + Int(int) => PrimValKind::from_uint_size(int.size().bytes()), + F32 => PrimValKind::F32, + F64 => PrimValKind::F64, + Pointer => PrimValKind::Ptr, + } + } + + // represent single field structs as their single field + Univariant { .. } => { + // enums with just one variant are no different, but `.struct_variant()` doesn't work for enums + let variant = &def.variants[0]; + // FIXME: also allow structs with only a single non zst field + if variant.fields.len() == 1 { + return self.ty_to_primval_kind(variant.fields[0].ty(self.tcx, substs)); + } else { + return err!(TypeNotPrimitive(ty)); + } + } + + _ => return err!(TypeNotPrimitive(ty)), + } + } + + _ => return err!(TypeNotPrimitive(ty)), + }; + + Ok(kind) + } + + fn ensure_valid_value(&self, val: PrimVal, ty: Ty<'tcx>) -> EvalResult<'tcx> { + match ty.sty { + ty::TyBool if val.to_bytes()? > 1 => err!(InvalidBool), + + ty::TyChar if ::std::char::from_u32(val.to_bytes()? as u32).is_none() => { + err!(InvalidChar(val.to_bytes()? 
as u32 as u128)) + } + + _ => Ok(()), + } + } + + pub fn read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> { + if let Some(val) = self.try_read_value(ptr, ty)? { + Ok(val) + } else { + bug!("primitive read failed for type: {:?}", ty); + } + } + + pub(crate) fn read_ptr( + &self, + ptr: MemoryPointer, + pointee_ty: Ty<'tcx>, + ) -> EvalResult<'tcx, Value> { + let ptr_size = self.memory.pointer_size(); + let p : Pointer = self.memory.read_ptr_sized_unsigned(ptr)?.into(); + if self.type_is_sized(pointee_ty) { + Ok(p.to_value()) + } else { + trace!("reading fat pointer extra of type {}", pointee_ty); + let extra = ptr.offset(ptr_size, self)?; + match self.tcx.struct_tail(pointee_ty).sty { + ty::TyDynamic(..) => Ok(p.to_value_with_vtable( + self.memory.read_ptr_sized_unsigned(extra)?.to_ptr()?, + )), + ty::TySlice(..) | ty::TyStr => Ok( + p.to_value_with_len(self.memory.read_ptr_sized_unsigned(extra)?.to_bytes()? as u64), + ), + _ => bug!("unsized primval ptr read from {:?}", pointee_ty), + } + } + } + + fn try_read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Option> { + use syntax::ast::FloatTy; + + let ptr = ptr.to_ptr()?; + let val = match ty.sty { + ty::TyBool => { + let val = self.memory.read_primval(ptr, 1, false)?; + let val = match val { + PrimVal::Bytes(0) => false, + PrimVal::Bytes(1) => true, + // TODO: This seems a little overeager, should reading at bool type already be UB? + _ => return err!(InvalidBool), + }; + PrimVal::from_bool(val) + } + ty::TyChar => { + let c = self.memory.read_primval(ptr, 4, false)?.to_bytes()? as u32; + match ::std::char::from_u32(c) { + Some(ch) => PrimVal::from_char(ch), + None => return err!(InvalidChar(c as u128)), + } + } + + ty::TyInt(int_ty) => { + use syntax::ast::IntTy::*; + let size = match int_ty { + I8 => 1, + I16 => 2, + I32 => 4, + I64 => 8, + I128 => 16, + Is => self.memory.pointer_size(), + }; + self.memory.read_primval(ptr, size, true)? + } + + ty::TyUint(uint_ty) => { + use syntax::ast::UintTy::*; + let size = match uint_ty { + U8 => 1, + U16 => 2, + U32 => 4, + U64 => 8, + U128 => 16, + Us => self.memory.pointer_size(), + }; + self.memory.read_primval(ptr, size, false)? + } + + ty::TyFloat(FloatTy::F32) => PrimVal::Bytes(self.memory.read_primval(ptr, 4, false)?.to_bytes()?), + ty::TyFloat(FloatTy::F64) => PrimVal::Bytes(self.memory.read_primval(ptr, 8, false)?.to_bytes()?), + + ty::TyFnPtr(_) => self.memory.read_ptr_sized_unsigned(ptr)?, + ty::TyRef(_, ref tam) | + ty::TyRawPtr(ref tam) => return self.read_ptr(ptr, tam.ty).map(Some), + + ty::TyAdt(def, _) => { + if def.is_box() { + return self.read_ptr(ptr, ty.boxed_ty()).map(Some); + } + use rustc::ty::layout::Layout::*; + if let CEnum { discr, signed, .. } = *self.type_layout(ty)? { + let size = discr.size().bytes(); + self.memory.read_primval(ptr, size, signed)? 
+ } else { + return Ok(None); + } + } + + _ => return Ok(None), + }; + + Ok(Some(Value::ByVal(val))) + } + + pub fn frame(&self) -> &Frame<'tcx> { + self.stack.last().expect("no call frames exist") + } + + pub(super) fn frame_mut(&mut self) -> &mut Frame<'tcx> { + self.stack.last_mut().expect("no call frames exist") + } + + pub(super) fn mir(&self) -> &'tcx mir::Mir<'tcx> { + self.frame().mir + } + + pub(super) fn substs(&self) -> &'tcx Substs<'tcx> { + self.frame().instance.substs + } + + fn unsize_into_ptr( + &mut self, + src: Value, + src_ty: Ty<'tcx>, + dest: Lvalue, + dest_ty: Ty<'tcx>, + sty: Ty<'tcx>, + dty: Ty<'tcx>, + ) -> EvalResult<'tcx> { + // A -> A conversion + let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty); + + match (&src_pointee_ty.sty, &dest_pointee_ty.sty) { + (&ty::TyArray(_, length), &ty::TySlice(_)) => { + let ptr = src.into_ptr(&self.memory)?; + // u64 cast is from usize to u64, which is always good + let valty = ValTy { + value: ptr.to_value_with_len(length.val.to_const_int().unwrap().to_u64().unwrap() ), + ty: dest_ty, + }; + self.write_value(valty, dest) + } + (&ty::TyDynamic(..), &ty::TyDynamic(..)) => { + // For now, upcasts are limited to changes in marker + // traits, and hence never actually require an actual + // change to the vtable. + let valty = ValTy { + value: src, + ty: dest_ty, + }; + self.write_value(valty, dest) + } + (_, &ty::TyDynamic(ref data, _)) => { + let trait_ref = data.principal().unwrap().with_self_ty( + self.tcx, + src_pointee_ty, + ); + let trait_ref = self.tcx.erase_regions(&trait_ref); + let vtable = self.get_vtable(src_pointee_ty, trait_ref)?; + let ptr = src.into_ptr(&self.memory)?; + let valty = ValTy { + value: ptr.to_value_with_vtable(vtable), + ty: dest_ty, + }; + self.write_value(valty, dest) + } + + _ => bug!("invalid unsizing {:?} -> {:?}", src_ty, dest_ty), + } + } + + fn unsize_into( + &mut self, + src: Value, + src_ty: Ty<'tcx>, + dest: Lvalue, + dest_ty: Ty<'tcx>, + ) -> EvalResult<'tcx> { + match (&src_ty.sty, &dest_ty.sty) { + (&ty::TyRef(_, ref s), &ty::TyRef(_, ref d)) | + (&ty::TyRef(_, ref s), &ty::TyRawPtr(ref d)) | + (&ty::TyRawPtr(ref s), &ty::TyRawPtr(ref d)) => { + self.unsize_into_ptr(src, src_ty, dest, dest_ty, s.ty, d.ty) + } + (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) => { + if def_a.is_box() || def_b.is_box() { + if !def_a.is_box() || !def_b.is_box() { + panic!("invalid unsizing between {:?} -> {:?}", src_ty, dest_ty); + } + return self.unsize_into_ptr( + src, + src_ty, + dest, + dest_ty, + src_ty.boxed_ty(), + dest_ty.boxed_ty(), + ); + } + if self.ty_to_primval_kind(src_ty).is_ok() { + // TODO: We ignore the packed flag here + let sty = self.get_field_ty(src_ty, 0)?.ty; + let dty = self.get_field_ty(dest_ty, 0)?.ty; + return self.unsize_into(src, sty, dest, dty); + } + // unsizing of generic struct with pointer fields + // Example: `Arc` -> `Arc` + // here we need to increase the size of every &T thin ptr field to a fat ptr + + assert_eq!(def_a, def_b); + + let src_fields = def_a.variants[0].fields.iter(); + let dst_fields = def_b.variants[0].fields.iter(); + + //let src = adt::MaybeSizedValue::sized(src); + //let dst = adt::MaybeSizedValue::sized(dst); + let src_ptr = match src { + Value::ByRef(PtrAndAlign { ptr, aligned: true }) => ptr, + // TODO: Is it possible for unaligned pointers to occur here? 
+ _ => bug!("expected aligned pointer, got {:?}", src), + }; + + // FIXME(solson) + let dest = self.force_allocation(dest)?.to_ptr()?; + let iter = src_fields.zip(dst_fields).enumerate(); + for (i, (src_f, dst_f)) in iter { + let src_fty = self.field_ty(substs_a, src_f); + let dst_fty = self.field_ty(substs_b, dst_f); + if self.type_size(dst_fty)? == Some(0) { + continue; + } + let src_field_offset = self.get_field_offset(src_ty, i)?.bytes(); + let dst_field_offset = self.get_field_offset(dest_ty, i)?.bytes(); + let src_f_ptr = src_ptr.offset(src_field_offset, &self)?; + let dst_f_ptr = dest.offset(dst_field_offset, &self)?; + if src_fty == dst_fty { + self.copy(src_f_ptr, dst_f_ptr.into(), src_fty)?; + } else { + self.unsize_into( + Value::by_ref(src_f_ptr), + src_fty, + Lvalue::from_ptr(dst_f_ptr), + dst_fty, + )?; + } + } + Ok(()) + } + _ => { + bug!( + "unsize_into: invalid conversion: {:?} -> {:?}", + src_ty, + dest_ty + ) + } + } + } + + pub fn dump_local(&self, lvalue: Lvalue) { + // Debug output + match lvalue { + Lvalue::Local { frame, local } => { + let mut allocs = Vec::new(); + let mut msg = format!("{:?}", local); + if frame != self.cur_frame() { + write!(msg, " ({} frames up)", self.cur_frame() - frame).unwrap(); + } + write!(msg, ":").unwrap(); + + match self.stack[frame].get_local(local) { + Err(EvalError { kind: EvalErrorKind::DeadLocal, .. }) => { + write!(msg, " is dead").unwrap(); + } + Err(err) => { + panic!("Failed to access local: {:?}", err); + } + Ok(Value::ByRef(PtrAndAlign { ptr, aligned })) => { + match ptr.into_inner_primval() { + PrimVal::Ptr(ptr) => { + write!(msg, " by {}ref:", if aligned { "" } else { "unaligned " }) + .unwrap(); + allocs.push(ptr.alloc_id); + } + ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(), + } + } + Ok(Value::ByVal(val)) => { + write!(msg, " {:?}", val).unwrap(); + if let PrimVal::Ptr(ptr) = val { + allocs.push(ptr.alloc_id); + } + } + Ok(Value::ByValPair(val1, val2)) => { + write!(msg, " ({:?}, {:?})", val1, val2).unwrap(); + if let PrimVal::Ptr(ptr) = val1 { + allocs.push(ptr.alloc_id); + } + if let PrimVal::Ptr(ptr) = val2 { + allocs.push(ptr.alloc_id); + } + } + } + + trace!("{}", msg); + self.memory.dump_allocs(allocs); + } + Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned }, .. } => { + match ptr.into_inner_primval() { + PrimVal::Ptr(ptr) => { + trace!("by {}ref:", if aligned { "" } else { "unaligned " }); + self.memory.dump_alloc(ptr.alloc_id); + } + ptr => trace!(" integral by ref: {:?}", ptr), + } + } + } + } + + /// Convenience function to ensure correct usage of locals + pub fn modify_local(&mut self, frame: usize, local: mir::Local, f: F) -> EvalResult<'tcx> + where + F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>, + { + let val = self.stack[frame].get_local(local)?; + let new_val = f(self, val)?; + self.stack[frame].set_local(local, new_val)?; + // FIXME(solson): Run this when setting to Undef? (See previous version of this code.) 
+ // if let Value::ByRef(ptr) = self.stack[frame].get_local(local) { + // self.memory.deallocate(ptr)?; + // } + Ok(()) + } + + pub fn report(&self, e: &mut EvalError) { + if let Some(ref mut backtrace) = e.backtrace { + let mut trace_text = "\n\nAn error occurred in miri:\n".to_string(); + let mut skip_init = true; + backtrace.resolve(); + 'frames: for (i, frame) in backtrace.frames().iter().enumerate() { + for symbol in frame.symbols() { + if let Some(name) = symbol.name() { + // unmangle the symbol via `to_string` + let name = name.to_string(); + if name.starts_with("miri::after_analysis") { + // don't report initialization gibberish + break 'frames; + } else if name.starts_with("backtrace::capture::Backtrace::new") + // debug mode produces funky symbol names + || name.starts_with("backtrace::capture::{{impl}}::new") + { + // don't report backtrace internals + skip_init = false; + continue 'frames; + } + } + } + if skip_init { + continue; + } + for symbol in frame.symbols() { + write!(trace_text, "{}: ", i).unwrap(); + if let Some(name) = symbol.name() { + write!(trace_text, "{}\n", name).unwrap(); + } else { + write!(trace_text, "\n").unwrap(); + } + write!(trace_text, "\tat ").unwrap(); + if let Some(file_path) = symbol.filename() { + write!(trace_text, "{}", file_path.display()).unwrap(); + } else { + write!(trace_text, "").unwrap(); + } + if let Some(line) = symbol.lineno() { + write!(trace_text, ":{}\n", line).unwrap(); + } else { + write!(trace_text, "\n").unwrap(); + } + } + } + error!("{}", trace_text); + } + if let Some(frame) = self.stack().last() { + let block = &frame.mir.basic_blocks()[frame.block]; + let span = if frame.stmt < block.statements.len() { + block.statements[frame.stmt].source_info.span + } else { + block.terminator().source_info.span + }; + let mut err = self.tcx.sess.struct_span_err(span, &e.to_string()); + for &Frame { instance, span, .. } in self.stack().iter().rev() { + if self.tcx.def_key(instance.def_id()).disambiguated_data.data == + DefPathData::ClosureExpr + { + err.span_note(span, "inside call to closure"); + continue; + } + err.span_note(span, &format!("inside call to {}", instance)); + } + err.emit(); + } else { + self.tcx.sess.err(&e.to_string()); + } + } +} + +impl<'tcx> Frame<'tcx> { + pub fn get_local(&self, local: mir::Local) -> EvalResult<'tcx, Value> { + // Subtract 1 because we don't store a value for the ReturnPointer, the local with index 0. + self.locals[local.index() - 1].ok_or(EvalErrorKind::DeadLocal.into()) + } + + fn set_local(&mut self, local: mir::Local, value: Value) -> EvalResult<'tcx> { + // Subtract 1 because we don't store a value for the ReturnPointer, the local with index 0. + match self.locals[local.index() - 1] { + None => err!(DeadLocal), + Some(ref mut local) => { + *local = value; + Ok(()) + } + } + } + + pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, Option> { + trace!("{:?} is now live", local); + + let old = self.locals[local.index() - 1]; + self.locals[local.index() - 1] = Some(Value::ByVal(PrimVal::Undef)); // StorageLive *always* kills the value that's currently stored + return Ok(old); + } + + /// Returns the old value of the local + pub fn storage_dead(&mut self, local: mir::Local) -> EvalResult<'tcx, Option> { + trace!("{:?} is now dead", local); + + let old = self.locals[local.index() - 1]; + self.locals[local.index() - 1] = None; + return Ok(old); + } +} + +// TODO(solson): Upstream these methods into rustc::ty::layout. 
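+// Note on the `Frame::get_local`/`set_local` index shift above: `locals` has no slot for
+// the return pointer `_0`, so MIR local `_i` (for `i >= 1`) lives at `locals[i - 1]`.
+// For example, a frame whose MIR declares `_0`, `_1` and `_2` stores only two slots:
+// `_1` maps to `locals[0]` and `_2` to `locals[1]`.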
+ +pub(super) trait IntegerExt { + fn size(self) -> Size; +} + +impl IntegerExt for layout::Integer { + fn size(self) -> Size { + use rustc::ty::layout::Integer::*; + match self { + I1 | I8 => Size::from_bits(8), + I16 => Size::from_bits(16), + I32 => Size::from_bits(32), + I64 => Size::from_bits(64), + I128 => Size::from_bits(128), + } + } +} + +pub fn is_inhabited<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.uninhabited_from(&mut HashMap::default(), tcx).is_empty() +} + +/// FIXME: expose trans::monomorphize::resolve_closure +pub fn resolve_closure<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId, + substs: ty::ClosureSubsts<'tcx>, + requested_kind: ty::ClosureKind, +) -> ty::Instance<'tcx> { + let actual_kind = tcx.closure_kind(def_id); + match needs_fn_once_adapter_shim(actual_kind, requested_kind) { + Ok(true) => fn_once_adapter_instance(tcx, def_id, substs), + _ => ty::Instance::new(def_id, substs.substs), + } +} + +fn fn_once_adapter_instance<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + closure_did: DefId, + substs: ty::ClosureSubsts<'tcx>, +) -> ty::Instance<'tcx> { + debug!("fn_once_adapter_shim({:?}, {:?})", closure_did, substs); + let fn_once = tcx.lang_items().fn_once_trait().unwrap(); + let call_once = tcx.associated_items(fn_once) + .find(|it| it.kind == ty::AssociatedKind::Method) + .unwrap() + .def_id; + let def = ty::InstanceDef::ClosureOnceShim { call_once }; + + let self_ty = tcx.mk_closure_from_closure_substs(closure_did, substs); + + let sig = tcx.fn_sig(closure_did).subst(tcx, substs.substs); + let sig = tcx.erase_late_bound_regions_and_normalize(&sig); + assert_eq!(sig.inputs().len(), 1); + let substs = tcx.mk_substs( + [Kind::from(self_ty), Kind::from(sig.inputs()[0])] + .iter() + .cloned(), + ); + + debug!("fn_once_adapter_shim: self_ty={:?} sig={:?}", self_ty, sig); + ty::Instance { def, substs } +} + +fn needs_fn_once_adapter_shim( + actual_closure_kind: ty::ClosureKind, + trait_closure_kind: ty::ClosureKind, +) -> Result { + match (actual_closure_kind, trait_closure_kind) { + (ty::ClosureKind::Fn, ty::ClosureKind::Fn) | + (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) | + (ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce) => { + // No adapter needed. + Ok(false) + } + (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => { + // The closure fn `llfn` is a `fn(&self, ...)`. We want a + // `fn(&mut self, ...)`. In fact, at trans time, these are + // basically the same thing, so we can just return llfn. + Ok(false) + } + (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) | + (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => { + // The closure fn `llfn` is a `fn(&self, ...)` or `fn(&mut + // self, ...)`. We want a `fn(self, ...)`. We can produce + // this by doing something like: + // + // fn call_once(self, ...) { call_mut(&self, ...) } + // fn call_once(mut self, ...) { call_mut(&mut self, ...) } + // + // These are both the same at trans time. + Ok(true) + } + _ => Err(()), + } +} + +/// The point where linking happens. Resolve a (def_id, substs) +/// pair to an instance. 
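+// Informal summary of the cases handled below: an intrinsic resolves to
+// `InstanceDef::Intrinsic`, `drop_in_place::<T>` resolves to `InstanceDef::DropGlue`
+// (with or without a type, depending on whether `T` actually needs drop glue), a trait
+// item is dispatched through `resolve_associated_item`, and everything else becomes a
+// plain `InstanceDef::Item`.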
+pub fn resolve<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId, + substs: &'tcx Substs<'tcx>, +) -> ty::Instance<'tcx> { + debug!("resolve(def_id={:?}, substs={:?})", def_id, substs); + let result = if let Some(trait_def_id) = tcx.trait_of_item(def_id) { + debug!(" => associated item, attempting to find impl"); + let item = tcx.associated_item(def_id); + resolve_associated_item(tcx, &item, trait_def_id, substs) + } else { + let item_type = def_ty(tcx, def_id, substs); + let def = match item_type.sty { + ty::TyFnDef(..) + if { + let f = item_type.fn_sig(tcx); + f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic + } => { + debug!(" => intrinsic"); + ty::InstanceDef::Intrinsic(def_id) + } + _ => { + if Some(def_id) == tcx.lang_items().drop_in_place_fn() { + let ty = substs.type_at(0); + if needs_drop_glue(tcx, ty) { + debug!(" => nontrivial drop glue"); + ty::InstanceDef::DropGlue(def_id, Some(ty)) + } else { + debug!(" => trivial drop glue"); + ty::InstanceDef::DropGlue(def_id, None) + } + } else { + debug!(" => free item"); + ty::InstanceDef::Item(def_id) + } + } + }; + ty::Instance { def, substs } + }; + debug!( + "resolve(def_id={:?}, substs={:?}) = {}", + def_id, + substs, + result + ); + result +} + +pub fn needs_drop_glue<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, t: Ty<'tcx>) -> bool { + assert!(t.is_normalized_for_trans()); + + let t = tcx.erase_regions(&t); + + // FIXME (#22815): note that type_needs_drop conservatively + // approximates in some cases and may say a type expression + // requires drop glue when it actually does not. + // + // (In this case it is not clear whether any harm is done, i.e. + // erroneously returning `true` in some cases where we could have + // returned `false` does not appear unsound. The impact on + // code quality is unknown at this time.) + + let env = ty::ParamEnv::empty(Reveal::All); + if !t.needs_drop(tcx, env) { + return false; + } + match t.sty { + ty::TyAdt(def, _) if def.is_box() => { + let typ = t.boxed_ty(); + if !typ.needs_drop(tcx, env) && type_is_sized(tcx, typ) { + let layout = t.layout(tcx, ty::ParamEnv::empty(Reveal::All)).unwrap(); + // `Box` does not allocate. 
+ layout.size(&tcx.data_layout).bytes() != 0 + } else { + true + } + } + _ => true, + } +} + +fn resolve_associated_item<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + trait_item: &ty::AssociatedItem, + trait_id: DefId, + rcvr_substs: &'tcx Substs<'tcx>, +) -> ty::Instance<'tcx> { + let def_id = trait_item.def_id; + debug!( + "resolve_associated_item(trait_item={:?}, \ + trait_id={:?}, \ + rcvr_substs={:?})", + def_id, + trait_id, + rcvr_substs + ); + + let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs); + let vtbl = tcx.trans_fulfill_obligation(DUMMY_SP, ty::Binder(trait_ref)); + + // Now that we know which impl is being used, we can dispatch to + // the actual function: + match vtbl { + ::rustc::traits::VtableImpl(impl_data) => { + let (def_id, substs) = + ::rustc::traits::find_associated_item(tcx, trait_item, rcvr_substs, &impl_data); + let substs = tcx.erase_regions(&substs); + ty::Instance::new(def_id, substs) + } + ::rustc::traits::VtableGenerator(closure_data) => { + ty::Instance { + def: ty::InstanceDef::Item(closure_data.closure_def_id), + substs: closure_data.substs.substs + } + } + ::rustc::traits::VtableClosure(closure_data) => { + let trait_closure_kind = tcx.lang_items().fn_trait_kind(trait_id).unwrap(); + resolve_closure( + tcx, + closure_data.closure_def_id, + closure_data.substs, + trait_closure_kind, + ) + } + ::rustc::traits::VtableFnPointer(ref data) => { + ty::Instance { + def: ty::InstanceDef::FnPtrShim(trait_item.def_id, data.fn_ty), + substs: rcvr_substs, + } + } + ::rustc::traits::VtableObject(ref data) => { + let index = tcx.get_vtable_index_of_object_method(data, def_id); + ty::Instance { + def: ty::InstanceDef::Virtual(def_id, index), + substs: rcvr_substs, + } + } + ::rustc::traits::VtableBuiltin(..) if Some(trait_id) == tcx.lang_items().clone_trait() => { + ty::Instance { + def: ty::InstanceDef::CloneShim(def_id, trait_ref.self_ty()), + substs: rcvr_substs + } + } + _ => bug!("static call to invalid vtable: {:?}", vtbl), + } +} + +pub fn def_ty<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId, + substs: &'tcx Substs<'tcx>, +) -> Ty<'tcx> { + let ty = tcx.type_of(def_id); + apply_param_substs(tcx, substs, &ty) +} + +/// Monomorphizes a type from the AST by first applying the in-scope +/// substitutions and then normalizing any associated types. 
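+// Hypothetical example of what this does: given `fn head<I: Iterator>(it: I) -> I::Item`
+// and the substitution `I = ::std::vec::IntoIter<u8>`, substituting turns the return type
+// into `<::std::vec::IntoIter<u8> as Iterator>::Item`, and the normalization step then
+// resolves that projection to `u8`.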
+pub fn apply_param_substs<'a, 'tcx, T>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_substs: &Substs<'tcx>, + value: &T, +) -> T +where + T: ::rustc::infer::TransNormalize<'tcx>, +{ + debug!( + "apply_param_substs(param_substs={:?}, value={:?})", + param_substs, + value + ); + let substituted = value.subst(tcx, param_substs); + let substituted = tcx.erase_regions(&substituted); + AssociatedTypeNormalizer { tcx }.fold(&substituted) +} + + +struct AssociatedTypeNormalizer<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, +} + +impl<'a, 'tcx> AssociatedTypeNormalizer<'a, 'tcx> { + fn fold>(&mut self, value: &T) -> T { + if !value.has_projections() { + value.clone() + } else { + value.fold_with(self) + } + } +} + +impl<'a, 'tcx> ::rustc::ty::fold::TypeFolder<'tcx, 'tcx> for AssociatedTypeNormalizer<'a, 'tcx> { + fn tcx<'c>(&'c self) -> TyCtxt<'c, 'tcx, 'tcx> { + self.tcx + } + + fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { + if !ty.has_projections() { + ty + } else { + self.tcx.normalize_associated_type(&ty) + } + } +} + +fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + // generics are weird, don't run this function on a generic + assert!(!ty.needs_subst()); + ty.is_sized(tcx, ty::ParamEnv::empty(Reveal::All), DUMMY_SP) +} + +pub fn resolve_drop_in_place<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + ty: Ty<'tcx>, +) -> ty::Instance<'tcx> { + let def_id = tcx.require_lang_item(::rustc::middle::lang_items::DropInPlaceFnLangItem); + let substs = tcx.intern_substs(&[Kind::from(ty)]); + resolve(tcx, def_id, substs) +} diff --git a/src/tools/miri/src/librustc_mir/interpret/lvalue.rs b/src/tools/miri/src/librustc_mir/interpret/lvalue.rs new file mode 100644 index 0000000000..7fb6ac4209 --- /dev/null +++ b/src/tools/miri/src/librustc_mir/interpret/lvalue.rs @@ -0,0 +1,506 @@ +use rustc::mir; +use rustc::ty::layout::{Size, Align}; +use rustc::ty::{self, Ty}; +use rustc_data_structures::indexed_vec::Idx; + +use super::{EvalResult, EvalContext, MemoryPointer, PrimVal, Value, Pointer, Machine, PtrAndAlign, ValTy}; + +#[derive(Copy, Clone, Debug)] +pub enum Lvalue { + /// An lvalue referring to a value allocated in the `Memory` system. + Ptr { + /// An lvalue may have an invalid (integral or undef) pointer, + /// since it might be turned back into a reference + /// before ever being dereferenced. + ptr: PtrAndAlign, + extra: LvalueExtra, + }, + + /// An lvalue referring to a value on the stack. Represented by a stack frame index paired with + /// a Mir local index. + Local { frame: usize, local: mir::Local }, +} + +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum LvalueExtra { + None, + Length(u64), + Vtable(MemoryPointer), + DowncastVariant(usize), +} + +/// Uniquely identifies a specific constant or static. +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +pub struct GlobalId<'tcx> { + /// For a constant or static, the `Instance` of the item itself. + /// For a promoted global, the `Instance` of the function they belong to. + pub instance: ty::Instance<'tcx>, + + /// The index for promoted globals within their function's `Mir`. 
+ pub promoted: Option, +} + +impl<'tcx> Lvalue { + /// Produces an Lvalue that will error if attempted to be read from + pub fn undef() -> Self { + Self::from_primval_ptr(PrimVal::Undef.into()) + } + + pub fn from_primval_ptr(ptr: Pointer) -> Self { + Lvalue::Ptr { + ptr: PtrAndAlign { ptr, aligned: true }, + extra: LvalueExtra::None, + } + } + + pub fn from_ptr(ptr: MemoryPointer) -> Self { + Self::from_primval_ptr(ptr.into()) + } + + pub(super) fn to_ptr_extra_aligned(self) -> (PtrAndAlign, LvalueExtra) { + match self { + Lvalue::Ptr { ptr, extra } => (ptr, extra), + _ => bug!("to_ptr_and_extra: expected Lvalue::Ptr, got {:?}", self), + + } + } + + pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> { + let (ptr, extra) = self.to_ptr_extra_aligned(); + // At this point, we forget about the alignment information -- the lvalue has been turned into a reference, + // and no matter where it came from, it now must be aligned. + assert_eq!(extra, LvalueExtra::None); + ptr.to_ptr() + } + + pub(super) fn elem_ty_and_len(self, ty: Ty<'tcx>) -> (Ty<'tcx>, u64) { + match ty.sty { + ty::TyArray(elem, n) => (elem, n.val.to_const_int().unwrap().to_u64().unwrap() as u64), + + ty::TySlice(elem) => { + match self { + Lvalue::Ptr { extra: LvalueExtra::Length(len), .. } => (elem, len), + _ => { + bug!( + "elem_ty_and_len of a TySlice given non-slice lvalue: {:?}", + self + ) + } + } + } + + _ => bug!("elem_ty_and_len expected array or slice, got {:?}", ty), + } + } +} + +impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { + /// Reads a value from the lvalue without going through the intermediate step of obtaining + /// a `miri::Lvalue` + pub fn try_read_lvalue( + &mut self, + lvalue: &mir::Lvalue<'tcx>, + ) -> EvalResult<'tcx, Option> { + use rustc::mir::Lvalue::*; + match *lvalue { + // Might allow this in the future, right now there's no way to do this from Rust code anyway + Local(mir::RETURN_POINTER) => err!(ReadFromReturnPointer), + // Directly reading a local will always succeed + Local(local) => self.frame().get_local(local).map(Some), + // Directly reading a static will always succeed + Static(ref static_) => { + let instance = ty::Instance::mono(self.tcx, static_.def_id); + let cid = GlobalId { + instance, + promoted: None, + }; + Ok(Some(Value::ByRef( + *self.globals.get(&cid).expect("global not cached"), + ))) + } + Projection(ref proj) => self.try_read_lvalue_projection(proj), + } + } + + fn try_read_lvalue_projection( + &mut self, + proj: &mir::LvalueProjection<'tcx>, + ) -> EvalResult<'tcx, Option> { + use rustc::mir::ProjectionElem::*; + let base = match self.try_read_lvalue(&proj.base)? { + Some(base) => base, + None => return Ok(None), + }; + let base_ty = self.lvalue_ty(&proj.base); + match proj.elem { + Field(field, _) => match (field.index(), base) { + // the only field of a struct + (0, Value::ByVal(val)) => Ok(Some(Value::ByVal(val))), + // split fat pointers, 2 element tuples, ... + (0...1, Value::ByValPair(a, b)) if self.get_field_count(base_ty)? == 2 => { + let val = [a, b][field.index()]; + Ok(Some(Value::ByVal(val))) + }, + // the only field of a struct is a fat pointer + (0, Value::ByValPair(..)) => Ok(Some(base)), + _ => Ok(None), + }, + // The NullablePointer cases should work fine, need to take care for normal enums + Downcast(..) | + Subslice { .. } | + // reading index 0 or index 1 from a ByVal or ByVal pair could be optimized + ConstantIndex { .. 
} | Index(_) | + // No way to optimize this projection any better than the normal lvalue path + Deref => Ok(None), + } + } + + /// Returns a value and (in case of a ByRef) if we are supposed to use aligned accesses. + pub(super) fn eval_and_read_lvalue( + &mut self, + lvalue: &mir::Lvalue<'tcx>, + ) -> EvalResult<'tcx, Value> { + // Shortcut for things like accessing a fat pointer's field, + // which would otherwise (in the `eval_lvalue` path) require moving a `ByValPair` to memory + // and returning an `Lvalue::Ptr` to it + if let Some(val) = self.try_read_lvalue(lvalue)? { + return Ok(val); + } + let lvalue = self.eval_lvalue(lvalue)?; + self.read_lvalue(lvalue) + } + + pub fn read_lvalue(&self, lvalue: Lvalue) -> EvalResult<'tcx, Value> { + match lvalue { + Lvalue::Ptr { ptr, extra } => { + assert_eq!(extra, LvalueExtra::None); + Ok(Value::ByRef(ptr)) + } + Lvalue::Local { frame, local } => self.stack[frame].get_local(local), + } + } + + pub fn eval_lvalue(&mut self, mir_lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Lvalue> { + use rustc::mir::Lvalue::*; + let lvalue = match *mir_lvalue { + Local(mir::RETURN_POINTER) => self.frame().return_lvalue, + Local(local) => Lvalue::Local { + frame: self.cur_frame(), + local, + }, + + Static(ref static_) => { + let instance = ty::Instance::mono(self.tcx, static_.def_id); + let gid = GlobalId { + instance, + promoted: None, + }; + Lvalue::Ptr { + ptr: *self.globals.get(&gid).expect("uncached global"), + extra: LvalueExtra::None, + } + } + + Projection(ref proj) => { + let ty = self.lvalue_ty(&proj.base); + let lvalue = self.eval_lvalue(&proj.base)?; + return self.eval_lvalue_projection(lvalue, ty, &proj.elem); + } + }; + + if log_enabled!(::log::LogLevel::Trace) { + self.dump_local(lvalue); + } + + Ok(lvalue) + } + + pub fn lvalue_field( + &mut self, + base: Lvalue, + field: mir::Field, + base_ty: Ty<'tcx>, + field_ty: Ty<'tcx>, + ) -> EvalResult<'tcx, Lvalue> { + use rustc::ty::layout::Layout::*; + + let base_layout = self.type_layout(base_ty)?; + let field_index = field.index(); + let (offset, packed) = match *base_layout { + Univariant { ref variant, .. } => (variant.offsets[field_index], variant.packed), + + // mir optimizations treat single variant enums as structs + General { ref variants, .. } if variants.len() == 1 => { + (variants[0].offsets[field_index], variants[0].packed) + } + + General { ref variants, .. } => { + let (_, base_extra) = base.to_ptr_extra_aligned(); + if let LvalueExtra::DowncastVariant(variant_idx) = base_extra { + // +1 for the discriminant, which is field 0 + assert!(!variants[variant_idx].packed); + (variants[variant_idx].offsets[field_index + 1], false) + } else { + bug!("field access on enum had no variant index"); + } + } + + RawNullablePointer { .. } => { + assert_eq!(field_index, 0); + return Ok(base); + } + + StructWrappedNullablePointer { ref nonnull, .. } => { + (nonnull.offsets[field_index], nonnull.packed) + } + + UntaggedUnion { .. } => return Ok(base), + + Vector { element, count } => { + let field = field_index as u64; + assert!(field < count); + let elem_size = element.size(&self.tcx.data_layout).bytes(); + (Size::from_bytes(field * elem_size), false) + } + + // We treat arrays + fixed sized indexing like field accesses + Array { .. 
} => { + let field = field_index as u64; + let elem_size = match base_ty.sty { + ty::TyArray(elem_ty, n) => { + assert!(field < n.val.to_const_int().unwrap().to_u64().unwrap() as u64); + self.type_size(elem_ty)?.expect("array elements are sized") as u64 + } + _ => { + bug!( + "lvalue_field: got Array layout but non-array type {:?}", + base_ty + ) + } + }; + (Size::from_bytes(field * elem_size), false) + } + + FatPointer { .. } => { + let bytes = field_index as u64 * self.memory.pointer_size(); + let offset = Size::from_bytes(bytes); + (offset, false) + } + + _ => bug!("field access on non-product type: {:?}", base_layout), + }; + + // Do not allocate in trivial cases + let (base_ptr, base_extra) = match base { + Lvalue::Ptr { ptr, extra } => (ptr, extra), + Lvalue::Local { frame, local } => { + match self.stack[frame].get_local(local)? { + // in case the type has a single field, just return the value + Value::ByVal(_) + if self.get_field_count(base_ty).map(|c| c == 1).unwrap_or( + false, + ) => { + assert_eq!( + offset.bytes(), + 0, + "ByVal can only have 1 non zst field with offset 0" + ); + return Ok(base); + } + Value::ByRef { .. } | + Value::ByValPair(..) | + Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(), + } + } + }; + + let offset = match base_extra { + LvalueExtra::Vtable(tab) => { + let (_, align) = self.size_and_align_of_dst( + base_ty, + base_ptr.ptr.to_value_with_vtable(tab), + )?; + offset + .abi_align(Align::from_bytes(align, align).unwrap()) + .bytes() + } + _ => offset.bytes(), + }; + + let mut ptr = base_ptr.offset(offset, &self)?; + // if we were unaligned, stay unaligned + // no matter what we were, if we are packed, we must not be aligned anymore + ptr.aligned &= !packed; + + let field_ty = self.monomorphize(field_ty, self.substs()); + + let extra = if self.type_is_sized(field_ty) { + LvalueExtra::None + } else { + match base_extra { + LvalueExtra::None => bug!("expected fat pointer"), + LvalueExtra::DowncastVariant(..) => { + bug!("Rust doesn't support unsized fields in enum variants") + } + LvalueExtra::Vtable(_) | + LvalueExtra::Length(_) => {} + } + base_extra + }; + + Ok(Lvalue::Ptr { ptr, extra }) + } + + pub(super) fn val_to_lvalue(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Lvalue> { + Ok(match self.tcx.struct_tail(ty).sty { + ty::TyDynamic(..) => { + let (ptr, vtable) = val.into_ptr_vtable_pair(&self.memory)?; + Lvalue::Ptr { + ptr: PtrAndAlign { ptr, aligned: true }, + extra: LvalueExtra::Vtable(vtable), + } + } + ty::TyStr | ty::TySlice(_) => { + let (ptr, len) = val.into_slice(&self.memory)?; + Lvalue::Ptr { + ptr: PtrAndAlign { ptr, aligned: true }, + extra: LvalueExtra::Length(len), + } + } + _ => Lvalue::from_primval_ptr(val.into_ptr(&self.memory)?), + }) + } + + pub(super) fn lvalue_index( + &mut self, + base: Lvalue, + outer_ty: Ty<'tcx>, + n: u64, + ) -> EvalResult<'tcx, Lvalue> { + // Taking the outer type here may seem odd; it's needed because for array types, the outer type gives away the length. 
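+        // For example, indexing into a local of type `[u8; 16]` takes the length 16 from
+        // that outer array type, whereas for a slice type such as `[u8]` the length comes
+        // from the `LvalueExtra::Length` stored alongside the pointer (see
+        // `elem_ty_and_len` above).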
+ let base = self.force_allocation(base)?; + let (base_ptr, _) = base.to_ptr_extra_aligned(); + + let (elem_ty, len) = base.elem_ty_and_len(outer_ty); + let elem_size = self.type_size(elem_ty)?.expect( + "slice element must be sized", + ); + assert!( + n < len, + "Tried to access element {} of array/slice with length {}", + n, + len + ); + let ptr = base_ptr.offset(n * elem_size, self.memory.layout)?; + Ok(Lvalue::Ptr { + ptr, + extra: LvalueExtra::None, + }) + } + + pub(super) fn eval_lvalue_projection( + &mut self, + base: Lvalue, + base_ty: Ty<'tcx>, + proj_elem: &mir::ProjectionElem<'tcx, mir::Local, Ty<'tcx>>, + ) -> EvalResult<'tcx, Lvalue> { + use rustc::mir::ProjectionElem::*; + let (ptr, extra) = match *proj_elem { + Field(field, field_ty) => { + return self.lvalue_field(base, field, base_ty, field_ty); + } + + Downcast(_, variant) => { + let base_layout = self.type_layout(base_ty)?; + // FIXME(solson) + let base = self.force_allocation(base)?; + let (base_ptr, base_extra) = base.to_ptr_extra_aligned(); + + use rustc::ty::layout::Layout::*; + let extra = match *base_layout { + General { .. } => LvalueExtra::DowncastVariant(variant), + RawNullablePointer { .. } | + StructWrappedNullablePointer { .. } => base_extra, + _ => bug!("variant downcast on non-aggregate: {:?}", base_layout), + }; + (base_ptr, extra) + } + + Deref => { + let val = self.read_lvalue(base)?; + + let pointee_type = match base_ty.sty { + ty::TyRawPtr(ref tam) | + ty::TyRef(_, ref tam) => tam.ty, + ty::TyAdt(def, _) if def.is_box() => base_ty.boxed_ty(), + _ => bug!("can only deref pointer types"), + }; + + trace!("deref to {} on {:?}", pointee_type, val); + + return self.val_to_lvalue(val, pointee_type); + } + + Index(local) => { + let value = self.frame().get_local(local)?; + let ty = self.tcx.types.usize; + let n = self.value_to_primval(ValTy { value, ty })?.to_u64()?; + return self.lvalue_index(base, base_ty, n); + } + + ConstantIndex { + offset, + min_length, + from_end, + } => { + // FIXME(solson) + let base = self.force_allocation(base)?; + let (base_ptr, _) = base.to_ptr_extra_aligned(); + + let (elem_ty, n) = base.elem_ty_and_len(base_ty); + let elem_size = self.type_size(elem_ty)?.expect( + "sequence element must be sized", + ); + assert!(n >= min_length as u64); + + let index = if from_end { + n - u64::from(offset) + } else { + u64::from(offset) + }; + + let ptr = base_ptr.offset(index * elem_size, &self)?; + (ptr, LvalueExtra::None) + } + + Subslice { from, to } => { + // FIXME(solson) + let base = self.force_allocation(base)?; + let (base_ptr, _) = base.to_ptr_extra_aligned(); + + let (elem_ty, n) = base.elem_ty_and_len(base_ty); + let elem_size = self.type_size(elem_ty)?.expect( + "slice element must be sized", + ); + assert!(u64::from(from) <= n - u64::from(to)); + let ptr = base_ptr.offset(u64::from(from) * elem_size, &self)?; + // sublicing arrays produces arrays + let extra = if self.type_is_sized(base_ty) { + LvalueExtra::None + } else { + LvalueExtra::Length(n - u64::from(to) - u64::from(from)) + }; + (ptr, extra) + } + }; + + Ok(Lvalue::Ptr { ptr, extra }) + } + + pub(super) fn lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> { + self.monomorphize( + lvalue.ty(self.mir(), self.tcx).to_ty(self.tcx), + self.substs(), + ) + } +} diff --git a/src/tools/miri/src/librustc_mir/interpret/machine.rs b/src/tools/miri/src/librustc_mir/interpret/machine.rs new file mode 100644 index 0000000000..debb17fc0a --- /dev/null +++ b/src/tools/miri/src/librustc_mir/interpret/machine.rs @@ -0,0 +1,81 @@ 
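+// As a hedged illustration of the `Machine` trait defined below (the names `NoopMachine`
+// and `NoExtraKinds` are invented for this sketch), a machine that adds no behaviour of
+// its own could look roughly like this; every hook it cannot handle bails out via
+// `unimplemented!()`, and `try_ptr_op` defers to the shared integer code by returning
+// `Ok(None)`:
+//
+//     #[derive(Debug, PartialEq, Copy, Clone)]
+//     enum NoExtraKinds {}
+//
+//     struct NoopMachine;
+//
+//     impl<'tcx> Machine<'tcx> for NoopMachine {
+//         type Data = ();
+//         type MemoryData = ();
+//         type MemoryKinds = NoExtraKinds;
+//
+//         fn eval_fn_call<'a>(_ecx: &mut EvalContext<'a, 'tcx, Self>, _instance: ty::Instance<'tcx>,
+//                             _destination: Option<(Lvalue, mir::BasicBlock)>, _args: &[ValTy<'tcx>],
+//                             _span: Span, _sig: ty::FnSig<'tcx>) -> EvalResult<'tcx, bool> {
+//             unimplemented!()
+//         }
+//
+//         fn call_intrinsic<'a>(_ecx: &mut EvalContext<'a, 'tcx, Self>, _instance: ty::Instance<'tcx>,
+//                               _args: &[ValTy<'tcx>], _dest: Lvalue, _dest_ty: ty::Ty<'tcx>,
+//                               _dest_layout: &'tcx ty::layout::Layout, _target: mir::BasicBlock)
+//                               -> EvalResult<'tcx> {
+//             unimplemented!()
+//         }
+//
+//         // `None` tells the caller to fall back to the generic integer handling.
+//         fn try_ptr_op<'a>(_ecx: &EvalContext<'a, 'tcx, Self>, _bin_op: mir::BinOp,
+//                           _left: PrimVal, _left_ty: ty::Ty<'tcx>, _right: PrimVal,
+//                           _right_ty: ty::Ty<'tcx>) -> EvalResult<'tcx, Option<(PrimVal, bool)>> {
+//             Ok(None)
+//         }
+//
+//         // `NoExtraKinds` is uninhabited, so this hook can never actually be invoked.
+//         fn mark_static_initialized(m: Self::MemoryKinds) -> EvalResult<'tcx> {
+//             match m {}
+//         }
+//
+//         fn box_alloc<'a>(_ecx: &mut EvalContext<'a, 'tcx, Self>, _ty: ty::Ty<'tcx>)
+//                          -> EvalResult<'tcx, PrimVal> {
+//             unimplemented!()
+//         }
+//
+//         fn global_item_with_linkage<'a>(_ecx: &mut EvalContext<'a, 'tcx, Self>,
+//                                         _instance: ty::Instance<'tcx>, _mutability: Mutability)
+//                                         -> EvalResult<'tcx> {
+//             unimplemented!()
+//         }
+//     }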
+//! This module contains everything needed to instantiate an interpreter. +//! This separation exists to ensure that no fancy miri features like +//! interpreting common C functions leak into CTFE. + +use super::{EvalResult, EvalContext, Lvalue, PrimVal, ValTy}; + +use rustc::{mir, ty}; +use syntax::codemap::Span; +use syntax::ast::Mutability; + +/// Methods of this trait signifies a point where CTFE evaluation would fail +/// and some use case dependent behaviour can instead be applied +pub trait Machine<'tcx>: Sized { + /// Additional data that can be accessed via the EvalContext + type Data; + + /// Additional data that can be accessed via the Memory + type MemoryData; + + /// Additional memory kinds a machine wishes to distinguish from the builtin ones + type MemoryKinds: ::std::fmt::Debug + PartialEq + Copy + Clone; + + /// Entry point to all function calls. + /// + /// Returns Ok(true) when the function was handled completely + /// e.g. due to missing mir + /// + /// Returns Ok(false) if a new stack frame was pushed + fn eval_fn_call<'a>( + ecx: &mut EvalContext<'a, 'tcx, Self>, + instance: ty::Instance<'tcx>, + destination: Option<(Lvalue, mir::BasicBlock)>, + args: &[ValTy<'tcx>], + span: Span, + sig: ty::FnSig<'tcx>, + ) -> EvalResult<'tcx, bool>; + + /// directly process an intrinsic without pushing a stack frame. + fn call_intrinsic<'a>( + ecx: &mut EvalContext<'a, 'tcx, Self>, + instance: ty::Instance<'tcx>, + args: &[ValTy<'tcx>], + dest: Lvalue, + dest_ty: ty::Ty<'tcx>, + dest_layout: &'tcx ty::layout::Layout, + target: mir::BasicBlock, + ) -> EvalResult<'tcx>; + + /// Called for all binary operations except on float types. + /// + /// Returns `None` if the operation should be handled by the integer + /// op code in order to share more code between machines + /// + /// Returns a (value, overflowed) pair if the operation succeeded + fn try_ptr_op<'a>( + ecx: &EvalContext<'a, 'tcx, Self>, + bin_op: mir::BinOp, + left: PrimVal, + left_ty: ty::Ty<'tcx>, + right: PrimVal, + right_ty: ty::Ty<'tcx>, + ) -> EvalResult<'tcx, Option<(PrimVal, bool)>>; + + /// Called when trying to mark machine defined `MemoryKinds` as static + fn mark_static_initialized(m: Self::MemoryKinds) -> EvalResult<'tcx>; + + /// Heap allocations via the `box` keyword + /// + /// Returns a pointer to the allocated memory + fn box_alloc<'a>( + ecx: &mut EvalContext<'a, 'tcx, Self>, + ty: ty::Ty<'tcx>, + ) -> EvalResult<'tcx, PrimVal>; + + /// Called when trying to access a global declared with a `linkage` attribute + fn global_item_with_linkage<'a>( + ecx: &mut EvalContext<'a, 'tcx, Self>, + instance: ty::Instance<'tcx>, + mutability: Mutability, + ) -> EvalResult<'tcx>; +} diff --git a/src/tools/miri/src/librustc_mir/interpret/memory.rs b/src/tools/miri/src/librustc_mir/interpret/memory.rs new file mode 100644 index 0000000000..bde79294ad --- /dev/null +++ b/src/tools/miri/src/librustc_mir/interpret/memory.rs @@ -0,0 +1,1700 @@ +use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian, BigEndian}; +use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque}; +use std::{fmt, iter, ptr, mem, io}; +use std::cell::Cell; + +use rustc::ty::Instance; +use rustc::ty::layout::{self, TargetDataLayout, HasDataLayout}; +use syntax::ast::Mutability; +use rustc::middle::region; + +use super::{EvalResult, EvalErrorKind, PrimVal, Pointer, EvalContext, DynamicLifetime, Machine, + RangeMap, AbsLvalue}; + +//////////////////////////////////////////////////////////////////////////////// +// Locks 
+//////////////////////////////////////////////////////////////////////////////// + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum AccessKind { + Read, + Write, +} + +/// Information about a lock that is currently held. +#[derive(Clone, Debug)] +struct LockInfo<'tcx> { + /// Stores for which lifetimes (of the original write lock) we got + /// which suspensions. + suspended: HashMap, Vec>, + /// The current state of the lock that's actually effective. + active: Lock, +} + +/// Write locks are identified by a stack frame and an "abstract" (untyped) lvalue. +/// It may be tempting to use the lifetime as identifier, but that does not work +/// for two reasons: +/// * First of all, due to subtyping, the same lock may be referred to with different +/// lifetimes. +/// * Secondly, different write locks may actually have the same lifetime. See `test2` +/// in `run-pass/many_shr_bor.rs`. +/// The Id is "captured" when the lock is first suspended; at that point, the borrow checker +/// considers the path frozen and hence the Id remains stable. +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +struct WriteLockId<'tcx> { + frame: usize, + path: AbsLvalue<'tcx>, +} + +#[derive(Clone, Debug, PartialEq)] +pub enum Lock { + NoLock, + WriteLock(DynamicLifetime), + ReadLock(Vec), // This should never be empty -- that would be a read lock held and nobody there to release it... +} +use self::Lock::*; + +impl<'tcx> Default for LockInfo<'tcx> { + fn default() -> Self { + LockInfo::new(NoLock) + } +} + +impl<'tcx> LockInfo<'tcx> { + fn new(lock: Lock) -> LockInfo<'tcx> { + LockInfo { + suspended: HashMap::new(), + active: lock, + } + } + + fn access_permitted(&self, frame: Option, access: AccessKind) -> bool { + use self::AccessKind::*; + match (&self.active, access) { + (&NoLock, _) => true, + (&ReadLock(ref lfts), Read) => { + assert!(!lfts.is_empty(), "Someone left an empty read lock behind."); + // Read access to read-locked region is okay, no matter who's holding the read lock. + true + } + (&WriteLock(ref lft), _) => { + // All access is okay if we are the ones holding it + Some(lft.frame) == frame + } + _ => false, // Nothing else is okay. + } + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Allocations and pointers +//////////////////////////////////////////////////////////////////////////////// + +#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct AllocId(u64); + +#[derive(Debug)] +pub enum AllocIdKind { + /// We can't ever have more than `usize::max_value` functions at the same time + /// since we never "deallocate" functions + Function(usize), + /// Locals and heap allocations (also statics for now, but those will get their + /// own variant soonish). 
+ Runtime(u64), +} + +impl AllocIdKind { + pub fn into_alloc_id(self) -> AllocId { + match self { + AllocIdKind::Function(n) => AllocId(n as u64), + AllocIdKind::Runtime(n) => AllocId((1 << 63) | n), + } + } +} + +impl AllocId { + /// Currently yields the top bit to discriminate the `AllocIdKind`s + fn discriminant(self) -> u64 { + self.0 >> 63 + } + /// Yields everything but the discriminant bits + pub fn index(self) -> u64 { + self.0 & ((1 << 63) - 1) + } + pub fn into_alloc_id_kind(self) -> AllocIdKind { + match self.discriminant() { + 0 => AllocIdKind::Function(self.index() as usize), + 1 => AllocIdKind::Runtime(self.index()), + n => bug!("got discriminant {} for AllocId", n), + } + } +} + +impl fmt::Display for AllocId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self.into_alloc_id_kind()) + } +} + +impl fmt::Debug for AllocId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self.into_alloc_id_kind()) + } +} + +#[derive(Debug)] +pub struct Allocation<'tcx, M> { + /// The actual bytes of the allocation. + /// Note that the bytes of a pointer represent the offset of the pointer + pub bytes: Vec, + /// Maps from byte addresses to allocations. + /// Only the first byte of a pointer is inserted into the map. + pub relocations: BTreeMap, + /// Denotes undefined memory. Reading from undefined memory is forbidden in miri + pub undef_mask: UndefMask, + /// The alignment of the allocation to detect unaligned reads. + pub align: u64, + /// Whether the allocation may be modified. + pub mutable: Mutability, + /// Use the `mark_static_initalized` method of `Memory` to ensure that an error occurs, if the memory of this + /// allocation is modified or deallocated in the future. + /// Helps guarantee that stack allocations aren't deallocated via `rust_deallocate` + pub kind: MemoryKind, + /// Memory regions that are locked by some function + locks: RangeMap>, +} + +impl<'tcx, M> Allocation<'tcx, M> { + fn check_locks( + &self, + frame: Option, + offset: u64, + len: u64, + access: AccessKind, + ) -> Result<(), LockInfo<'tcx>> { + if len == 0 { + return Ok(()); + } + for lock in self.locks.iter(offset, len) { + // Check if the lock is in conflict with the access. + if !lock.access_permitted(frame, access) { + return Err(lock.clone()); + } + } + Ok(()) + } +} + +#[derive(Debug, PartialEq, Copy, Clone)] +pub enum MemoryKind { + /// Error if deallocated except during a stack pop + Stack, + /// Static in the process of being initialized. 
+ /// The difference is important: An immutable static referring to a + /// mutable initialized static will freeze immutably and would not + /// be able to distinguish already initialized statics from uninitialized ones + UninitializedStatic, + /// May never be deallocated + Static, + /// Additional memory kinds a machine wishes to distinguish from the builtin ones + Machine(T), +} + +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub struct MemoryPointer { + pub alloc_id: AllocId, + pub offset: u64, +} + +impl<'tcx> MemoryPointer { + pub fn new(alloc_id: AllocId, offset: u64) -> Self { + MemoryPointer { alloc_id, offset } + } + + pub(crate) fn wrapping_signed_offset(self, i: i64, cx: C) -> Self { + MemoryPointer::new( + self.alloc_id, + cx.data_layout().wrapping_signed_offset(self.offset, i), + ) + } + + pub fn overflowing_signed_offset(self, i: i128, cx: C) -> (Self, bool) { + let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset, i); + (MemoryPointer::new(self.alloc_id, res), over) + } + + pub(crate) fn signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { + Ok(MemoryPointer::new( + self.alloc_id, + cx.data_layout().signed_offset(self.offset, i)?, + )) + } + + pub fn overflowing_offset(self, i: u64, cx: C) -> (Self, bool) { + let (res, over) = cx.data_layout().overflowing_offset(self.offset, i); + (MemoryPointer::new(self.alloc_id, res), over) + } + + pub fn offset(self, i: u64, cx: C) -> EvalResult<'tcx, Self> { + Ok(MemoryPointer::new( + self.alloc_id, + cx.data_layout().offset(self.offset, i)?, + )) + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Top-level interpreter memory +//////////////////////////////////////////////////////////////////////////////// + +pub struct Memory<'a, 'tcx, M: Machine<'tcx>> { + /// Additional data required by the Machine + pub data: M::MemoryData, + + /// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations). + alloc_map: HashMap>, + + /// The AllocId to assign to the next new regular allocation. Always incremented, never gets smaller. + next_alloc_id: u64, + + /// Number of virtual bytes allocated. + memory_usage: u64, + + /// Maximum number of virtual bytes that may be allocated. + memory_size: u64, + + /// Function "allocations". They exist solely so pointers have something to point to, and + /// we can figure out what they point to. + functions: Vec>, + + /// Inverse map of `functions` so we don't allocate a new pointer every time we need one + function_alloc_cache: HashMap, AllocId>, + + /// Target machine data layout to emulate. + pub layout: &'a TargetDataLayout, + + /// A cache for basic byte allocations keyed by their contents. This is used to deduplicate + /// allocations for string and bytestring literals. + literal_alloc_cache: HashMap, AllocId>, + + /// To avoid having to pass flags to every single memory access, we have some global state saying whether + /// alignment checking is currently enforced for read and/or write accesses. + reads_are_aligned: Cell, + writes_are_aligned: Cell, + + /// The current stack frame. Used to check accesses against locks. 
+ pub(super) cur_frame: usize, +} + +impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> { + pub fn new(layout: &'a TargetDataLayout, max_memory: u64, data: M::MemoryData) -> Self { + Memory { + data, + alloc_map: HashMap::new(), + functions: Vec::new(), + function_alloc_cache: HashMap::new(), + next_alloc_id: 0, + layout, + memory_size: max_memory, + memory_usage: 0, + literal_alloc_cache: HashMap::new(), + reads_are_aligned: Cell::new(true), + writes_are_aligned: Cell::new(true), + cur_frame: usize::max_value(), + } + } + + pub fn allocations<'x>( + &'x self, + ) -> impl Iterator)> { + self.alloc_map.iter().map(|(&id, alloc)| { + (AllocIdKind::Runtime(id).into_alloc_id(), alloc) + }) + } + + pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> MemoryPointer { + if let Some(&alloc_id) = self.function_alloc_cache.get(&instance) { + return MemoryPointer::new(alloc_id, 0); + } + let id = self.functions.len(); + debug!("creating fn ptr: {}", id); + self.functions.push(instance); + let alloc_id = AllocIdKind::Function(id).into_alloc_id(); + self.function_alloc_cache.insert(instance, alloc_id); + MemoryPointer::new(alloc_id, 0) + } + + pub fn allocate_cached(&mut self, bytes: &[u8]) -> EvalResult<'tcx, MemoryPointer> { + if let Some(&alloc_id) = self.literal_alloc_cache.get(bytes) { + return Ok(MemoryPointer::new(alloc_id, 0)); + } + + let ptr = self.allocate( + bytes.len() as u64, + 1, + MemoryKind::UninitializedStatic, + )?; + self.write_bytes(ptr.into(), bytes)?; + self.mark_static_initalized( + ptr.alloc_id, + Mutability::Immutable, + )?; + self.literal_alloc_cache.insert( + bytes.to_vec(), + ptr.alloc_id, + ); + Ok(ptr) + } + + pub fn allocate( + &mut self, + size: u64, + align: u64, + kind: MemoryKind, + ) -> EvalResult<'tcx, MemoryPointer> { + assert_ne!(align, 0); + assert!(align.is_power_of_two()); + + if self.memory_size - self.memory_usage < size { + return err!(OutOfMemory { + allocation_size: size, + memory_size: self.memory_size, + memory_usage: self.memory_usage, + }); + } + self.memory_usage += size; + assert_eq!(size as usize as u64, size); + let alloc = Allocation { + bytes: vec![0; size as usize], + relocations: BTreeMap::new(), + undef_mask: UndefMask::new(size), + align, + kind, + mutable: Mutability::Mutable, + locks: RangeMap::new(), + }; + let id = self.next_alloc_id; + self.next_alloc_id += 1; + self.alloc_map.insert(id, alloc); + Ok(MemoryPointer::new( + AllocIdKind::Runtime(id).into_alloc_id(), + 0, + )) + } + + pub fn reallocate( + &mut self, + ptr: MemoryPointer, + old_size: u64, + old_align: u64, + new_size: u64, + new_align: u64, + kind: MemoryKind, + ) -> EvalResult<'tcx, MemoryPointer> { + use std::cmp::min; + + if ptr.offset != 0 { + return err!(ReallocateNonBasePtr); + } + if let Ok(alloc) = self.get(ptr.alloc_id) { + if alloc.kind != kind { + return err!(ReallocatedWrongMemoryKind( + format!("{:?}", alloc.kind), + format!("{:?}", kind), + )); + } + } + + // For simplicities' sake, we implement reallocate as "alloc, copy, dealloc" + let new_ptr = self.allocate(new_size, new_align, kind)?; + self.copy( + ptr.into(), + new_ptr.into(), + min(old_size, new_size), + min(old_align, new_align), + /*nonoverlapping*/ + true, + )?; + self.deallocate(ptr, Some((old_size, old_align)), kind)?; + + Ok(new_ptr) + } + + pub fn deallocate( + &mut self, + ptr: MemoryPointer, + size_and_align: Option<(u64, u64)>, + kind: MemoryKind, + ) -> EvalResult<'tcx> { + if ptr.offset != 0 { + return err!(DeallocateNonBasePtr); + } + + let alloc_id = match 
ptr.alloc_id.into_alloc_id_kind() { + AllocIdKind::Function(_) => { + return err!(DeallocatedWrongMemoryKind( + "function".to_string(), + format!("{:?}", kind), + )) + } + AllocIdKind::Runtime(id) => id, + }; + + let alloc = match self.alloc_map.remove(&alloc_id) { + Some(alloc) => alloc, + None => return err!(DoubleFree), + }; + + // It is okay for us to still holds locks on deallocation -- for example, we could store data we own + // in a local, and the local could be deallocated (from StorageDead) before the function returns. + // However, we should check *something*. For now, we make sure that there is no conflicting write + // lock by another frame. We *have* to permit deallocation if we hold a read lock. + // TODO: Figure out the exact rules here. + alloc + .check_locks( + Some(self.cur_frame), + 0, + alloc.bytes.len() as u64, + AccessKind::Read, + ) + .map_err(|lock| { + EvalErrorKind::DeallocatedLockedMemory { + ptr, + lock: lock.active, + } + })?; + + if alloc.kind != kind { + return err!(DeallocatedWrongMemoryKind( + format!("{:?}", alloc.kind), + format!("{:?}", kind), + )); + } + if let Some((size, align)) = size_and_align { + if size != alloc.bytes.len() as u64 || align != alloc.align { + return err!(IncorrectAllocationInformation); + } + } + + self.memory_usage -= alloc.bytes.len() as u64; + debug!("deallocated : {}", ptr.alloc_id); + + Ok(()) + } + + pub fn pointer_size(&self) -> u64 { + self.layout.pointer_size.bytes() + } + + pub fn endianess(&self) -> layout::Endian { + self.layout.endian + } + + /// Check that the pointer is aligned AND non-NULL. + pub fn check_align(&self, ptr: Pointer, align: u64, access: Option) -> EvalResult<'tcx> { + // Check non-NULL/Undef, extract offset + let (offset, alloc_align) = match ptr.into_inner_primval() { + PrimVal::Ptr(ptr) => { + let alloc = self.get(ptr.alloc_id)?; + (ptr.offset, alloc.align) + } + PrimVal::Bytes(bytes) => { + let v = ((bytes as u128) % (1 << self.pointer_size())) as u64; + if v == 0 { + return err!(InvalidNullPointerUsage); + } + (v, align) // the base address if the "integer allocation" is 0 and hence always aligned + } + PrimVal::Undef => return err!(ReadUndefBytes), + }; + // See if alignment checking is disabled + let enforce_alignment = match access { + Some(AccessKind::Read) => self.reads_are_aligned.get(), + Some(AccessKind::Write) => self.writes_are_aligned.get(), + None => true, + }; + if !enforce_alignment { + return Ok(()); + } + // Check alignment + if alloc_align < align { + return err!(AlignmentCheckFailed { + has: alloc_align, + required: align, + }); + } + if offset % align == 0 { + Ok(()) + } else { + err!(AlignmentCheckFailed { + has: offset % align, + required: align, + }) + } + } + + pub fn check_bounds(&self, ptr: MemoryPointer, access: bool) -> EvalResult<'tcx> { + let alloc = self.get(ptr.alloc_id)?; + let allocation_size = alloc.bytes.len() as u64; + if ptr.offset > allocation_size { + return err!(PointerOutOfBounds { + ptr, + access, + allocation_size, + }); + } + Ok(()) + } +} + +/// Locking +impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> { + pub(crate) fn check_locks( + &self, + ptr: MemoryPointer, + len: u64, + access: AccessKind, + ) -> EvalResult<'tcx> { + if len == 0 { + return Ok(()); + } + let alloc = self.get(ptr.alloc_id)?; + let frame = self.cur_frame; + alloc + .check_locks(Some(frame), ptr.offset, len, access) + .map_err(|lock| { + EvalErrorKind::MemoryLockViolation { + ptr, + len, + frame, + access, + lock: lock.active, + }.into() + }) + } + + /// Acquire the lock for the 
given lifetime + pub(crate) fn acquire_lock( + &mut self, + ptr: MemoryPointer, + len: u64, + region: Option, + kind: AccessKind, + ) -> EvalResult<'tcx> { + let frame = self.cur_frame; + assert!(len > 0); + trace!( + "Frame {} acquiring {:?} lock at {:?}, size {} for region {:?}", + frame, + kind, + ptr, + len, + region + ); + self.check_bounds(ptr.offset(len, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow) + let alloc = self.get_mut_unchecked(ptr.alloc_id)?; + + // Iterate over our range and acquire the lock. If the range is already split into pieces, + // we have to manipulate all of them. + let lifetime = DynamicLifetime { frame, region }; + for lock in alloc.locks.iter_mut(ptr.offset, len) { + if !lock.access_permitted(None, kind) { + return err!(MemoryAcquireConflict { + ptr, + len, + kind, + lock: lock.active.clone(), + }); + } + // See what we have to do + match (&mut lock.active, kind) { + (active @ &mut NoLock, AccessKind::Write) => { + *active = WriteLock(lifetime); + } + (active @ &mut NoLock, AccessKind::Read) => { + *active = ReadLock(vec![lifetime]); + } + (&mut ReadLock(ref mut lifetimes), AccessKind::Read) => { + lifetimes.push(lifetime); + } + _ => bug!("We already checked that there is no conflicting lock"), + } + } + Ok(()) + } + + /// Release or suspend a write lock of the given lifetime prematurely. + /// When releasing, if there is a read lock or someone else's write lock, that's an error. + /// If no lock is held, that's fine. This can happen when e.g. a local is initialized + /// from a constant, and then suspended. + /// When suspending, the same cases are fine; we just register an additional suspension. + pub(crate) fn suspend_write_lock( + &mut self, + ptr: MemoryPointer, + len: u64, + lock_path: &AbsLvalue<'tcx>, + suspend: Option, + ) -> EvalResult<'tcx> { + assert!(len > 0); + let cur_frame = self.cur_frame; + let alloc = self.get_mut_unchecked(ptr.alloc_id)?; + + 'locks: for lock in alloc.locks.iter_mut(ptr.offset, len) { + let is_our_lock = match lock.active { + WriteLock(lft) => + // Double-check that we are holding the lock. + // (Due to subtyping, checking the region would not make any sense.) + lft.frame == cur_frame, + ReadLock(_) | NoLock => false, + }; + if is_our_lock { + trace!("Releasing {:?}", lock.active); + // Disable the lock + lock.active = NoLock; + } else { + trace!( + "Not touching {:?} as it is not our lock", + lock.active, + ); + } + // Check if we want to register a suspension + if let Some(suspend_region) = suspend { + let lock_id = WriteLockId { + frame: cur_frame, + path: lock_path.clone(), + }; + trace!("Adding suspension to {:?}", lock_id); + let mut new_suspension = false; + lock.suspended + .entry(lock_id) + // Remember whether we added a new suspension or not + .or_insert_with(|| { new_suspension = true; Vec::new() }) + .push(suspend_region); + // If the suspension is new, we should have owned this. + // If there already was a suspension, we should NOT have owned this. + if new_suspension == is_our_lock { + // All is well + continue 'locks; + } + } else { + if !is_our_lock { + // All is well. + continue 'locks; + } + } + // If we get here, releasing this is an error except for NoLock. + if lock.active != NoLock { + return err!(InvalidMemoryLockRelease { + ptr, + len, + frame: cur_frame, + lock: lock.active.clone(), + }); + } + } + + Ok(()) + } + + /// Release a suspension from the write lock. If this is the last suspension or if there is no suspension, acquire the lock. 
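The suspend/recover protocol above keeps one suspension entry per shared borrow that temporarily displaces a write lock, and only re-acquires the write lock once the last suspension for that lock id is gone. A minimal standalone sketch of that bookkeeping, using placeholder LockId and Region types rather than miri's WriteLockId and region::Scope; the recover_write_lock implementation it approximates continues below.

use std::collections::HashMap;

// Hypothetical stand-ins for miri's lock identifiers and regions.
type LockId = usize;
type Region = u32;

#[derive(Default)]
struct Suspensions {
    // One Vec per suspended write lock; its length is the number of shared
    // borrows (possibly with identical lifetimes) currently suspending it.
    map: HashMap<LockId, Vec<Region>>,
}

impl Suspensions {
    fn suspend(&mut self, id: LockId, region: Region) {
        self.map.entry(id).or_insert_with(Vec::new).push(region);
    }

    // Remove one suspension for `region`; returns true when no suspension
    // remains, i.e. the write lock may be re-acquired.
    fn recover(&mut self, id: LockId, region: Region) -> bool {
        let done = match self.map.get_mut(&id) {
            Some(regions) => {
                if let Some(idx) = regions.iter().position(|r| *r == region) {
                    regions.remove(idx);
                }
                regions.is_empty()
            }
            // No suspension recorded: acquiring directly is fine.
            None => true,
        };
        if done {
            self.map.remove(&id);
        }
        done
    }
}

fn main() {
    let mut s = Suspensions::default();
    s.suspend(1, 7);
    s.suspend(1, 7); // two shared borrows with the same lifetime
    assert!(!s.recover(1, 7)); // one suspension still outstanding
    assert!(s.recover(1, 7)); // last one gone, the lock can be taken again
}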
+ pub(crate) fn recover_write_lock( + &mut self, + ptr: MemoryPointer, + len: u64, + lock_path: &AbsLvalue<'tcx>, + lock_region: Option, + suspended_region: region::Scope, + ) -> EvalResult<'tcx> { + assert!(len > 0); + let cur_frame = self.cur_frame; + let lock_id = WriteLockId { + frame: cur_frame, + path: lock_path.clone(), + }; + let alloc = self.get_mut_unchecked(ptr.alloc_id)?; + + for lock in alloc.locks.iter_mut(ptr.offset, len) { + // Check if we have a suspension here + let (got_the_lock, remove_suspension) = match lock.suspended.get_mut(&lock_id) { + None => { + trace!("No suspension around, we can just acquire"); + (true, false) + } + Some(suspensions) => { + trace!("Found suspension of {:?}, removing it", lock_id); + // That's us! Remove suspension (it should be in there). The same suspension can + // occur multiple times (when there are multiple shared borrows of this that have the same + // lifetime); only remove one of them. + let idx = match suspensions.iter().enumerate().find(|&(_, re)| re == &suspended_region) { + None => // TODO: Can the user trigger this? + bug!("We have this lock suspended, but not for the given region."), + Some((idx, _)) => idx + }; + suspensions.remove(idx); + let got_lock = suspensions.is_empty(); + if got_lock { + trace!("All suspensions are gone, we can have the lock again"); + } + (got_lock, got_lock) + } + }; + if remove_suspension { + // with NLL, we could do that up in the match above... + assert!(got_the_lock); + lock.suspended.remove(&lock_id); + } + if got_the_lock { + match lock.active { + ref mut active @ NoLock => { + *active = WriteLock( + DynamicLifetime { + frame: cur_frame, + region: lock_region, + } + ); + } + _ => { + return err!(MemoryAcquireConflict { + ptr, + len, + kind: AccessKind::Write, + lock: lock.active.clone(), + }) + } + } + } + } + + Ok(()) + } + + pub(crate) fn locks_lifetime_ended(&mut self, ending_region: Option) { + let cur_frame = self.cur_frame; + trace!( + "Releasing frame {} locks that expire at {:?}", + cur_frame, + ending_region + ); + let has_ended = |lifetime: &DynamicLifetime| -> bool { + if lifetime.frame != cur_frame { + return false; + } + match ending_region { + None => true, // When a function ends, we end *all* its locks. It's okay for a function to still have lifetime-related locks + // when it returns, that can happen e.g. with NLL when a lifetime can, but does not have to, extend beyond the + // end of a function. Same for a function still having recoveries. + Some(ending_region) => lifetime.region == Some(ending_region), + } + }; + + for alloc in self.alloc_map.values_mut() { + for lock in alloc.locks.iter_mut_all() { + // Delete everything that ends now -- i.e., keep only all the other lifetimes. 
+ let lock_ended = match lock.active { + WriteLock(ref lft) => has_ended(lft), + ReadLock(ref mut lfts) => { + lfts.retain(|lft| !has_ended(lft)); + lfts.is_empty() + } + NoLock => false, + }; + if lock_ended { + lock.active = NoLock; + } + // Also clean up suspended write locks when the function returns + if ending_region.is_none() { + lock.suspended.retain(|id, _suspensions| id.frame != cur_frame); + } + } + // Clean up the map + alloc.locks.retain(|lock| match lock.active { + NoLock => lock.suspended.len() > 0, + _ => true, + }); + } + } +} + +/// Allocation accessors +impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> { + pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation<'tcx, M::MemoryKinds>> { + match id.into_alloc_id_kind() { + AllocIdKind::Function(_) => err!(DerefFunctionPointer), + AllocIdKind::Runtime(id) => { + match self.alloc_map.get(&id) { + Some(alloc) => Ok(alloc), + None => err!(DanglingPointerDeref), + } + } + } + } + + fn get_mut_unchecked( + &mut self, + id: AllocId, + ) -> EvalResult<'tcx, &mut Allocation<'tcx, M::MemoryKinds>> { + match id.into_alloc_id_kind() { + AllocIdKind::Function(_) => err!(DerefFunctionPointer), + AllocIdKind::Runtime(id) => { + match self.alloc_map.get_mut(&id) { + Some(alloc) => Ok(alloc), + None => err!(DanglingPointerDeref), + } + } + } + } + + fn get_mut(&mut self, id: AllocId) -> EvalResult<'tcx, &mut Allocation<'tcx, M::MemoryKinds>> { + let alloc = self.get_mut_unchecked(id)?; + if alloc.mutable == Mutability::Mutable { + Ok(alloc) + } else { + err!(ModifiedConstantMemory) + } + } + + pub fn get_fn(&self, ptr: MemoryPointer) -> EvalResult<'tcx, Instance<'tcx>> { + if ptr.offset != 0 { + return err!(InvalidFunctionPointer); + } + debug!("reading fn ptr: {}", ptr.alloc_id); + match ptr.alloc_id.into_alloc_id_kind() { + AllocIdKind::Function(id) => Ok(self.functions[id]), + AllocIdKind::Runtime(_) => err!(ExecuteMemory), + } + } + + /// For debugging, print an allocation and all allocations it points to, recursively. + pub fn dump_alloc(&self, id: AllocId) { + self.dump_allocs(vec![id]); + } + + /// For debugging, print a list of allocations and all allocations they point to, recursively. 
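The dump helpers introduced just above (their bodies follow below) and the other accessors all funnel through the same dispatch: an allocation id is either a function, which has no backing bytes and so cannot be dereferenced, or a runtime id looked up in the allocation map. A simplified, self-contained sketch of that dispatch, with illustrative types standing in for miri's AllocIdKind, Allocation, and error machinery:

use std::collections::HashMap;

// Illustrative stand-ins; not the in-tree types.
enum AllocIdKind {
    Function(usize),
    Runtime(u64),
}

struct Alloc {
    bytes: Vec<u8>,
}

struct Memory {
    alloc_map: HashMap<u64, Alloc>,
}

impl Memory {
    // Mirrors the shape of `get`: function "allocations" cannot be
    // dereferenced, and a missing runtime allocation is a dangling pointer.
    fn get(&self, id: AllocIdKind) -> Result<&Alloc, &'static str> {
        match id {
            AllocIdKind::Function(_) => Err("DerefFunctionPointer"),
            AllocIdKind::Runtime(id) => {
                self.alloc_map.get(&id).ok_or("DanglingPointerDeref")
            }
        }
    }
}

fn main() {
    let mut alloc_map = HashMap::new();
    alloc_map.insert(0, Alloc { bytes: vec![1, 2, 3] });
    let mem = Memory { alloc_map };
    assert_eq!(mem.get(AllocIdKind::Runtime(0)).unwrap().bytes.len(), 3);
    assert!(mem.get(AllocIdKind::Function(0)).is_err());
    assert!(mem.get(AllocIdKind::Runtime(1)).is_err());
}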
+ pub fn dump_allocs(&self, mut allocs: Vec) { + use std::fmt::Write; + allocs.sort(); + allocs.dedup(); + let mut allocs_to_print = VecDeque::from(allocs); + let mut allocs_seen = HashSet::new(); + + while let Some(id) = allocs_to_print.pop_front() { + let mut msg = format!("Alloc {:<5} ", format!("{}:", id)); + let prefix_len = msg.len(); + let mut relocations = vec![]; + + let alloc = match id.into_alloc_id_kind() { + AllocIdKind::Function(id) => { + trace!("{} {}", msg, self.functions[id]); + continue; + } + AllocIdKind::Runtime(id) => { + match self.alloc_map.get(&id) { + Some(a) => a, + None => { + trace!("{} (deallocated)", msg); + continue; + } + } + } + }; + + for i in 0..(alloc.bytes.len() as u64) { + if let Some(&target_id) = alloc.relocations.get(&i) { + if allocs_seen.insert(target_id) { + allocs_to_print.push_back(target_id); + } + relocations.push((i, target_id)); + } + if alloc.undef_mask.is_range_defined(i, i + 1) { + // this `as usize` is fine, since `i` came from a `usize` + write!(msg, "{:02x} ", alloc.bytes[i as usize]).unwrap(); + } else { + msg.push_str("__ "); + } + } + + let immutable = match (alloc.kind, alloc.mutable) { + (MemoryKind::UninitializedStatic, _) => { + " (static in the process of initialization)".to_owned() + } + (MemoryKind::Static, Mutability::Mutable) => " (static mut)".to_owned(), + (MemoryKind::Static, Mutability::Immutable) => " (immutable)".to_owned(), + (MemoryKind::Machine(m), _) => format!(" ({:?})", m), + (MemoryKind::Stack, _) => " (stack)".to_owned(), + }; + trace!( + "{}({} bytes, alignment {}){}", + msg, + alloc.bytes.len(), + alloc.align, + immutable + ); + + if !relocations.is_empty() { + msg.clear(); + write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces. + let mut pos = 0; + let relocation_width = (self.pointer_size() - 1) * 3; + for (i, target_id) in relocations { + // this `as usize` is fine, since we can't print more chars than `usize::MAX` + write!(msg, "{:1$}", "", ((i - pos) * 3) as usize).unwrap(); + let target = format!("({})", target_id); + // this `as usize` is fine, since we can't print more chars than `usize::MAX` + write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap(); + pos = i + self.pointer_size(); + } + trace!("{}", msg); + } + } + } + + pub fn leak_report(&self) -> usize { + trace!("### LEAK REPORT ###"); + let leaks: Vec<_> = self.alloc_map + .iter() + .filter_map(|(&key, val)| if val.kind != MemoryKind::Static { + Some(AllocIdKind::Runtime(key).into_alloc_id()) + } else { + None + }) + .collect(); + let n = leaks.len(); + self.dump_allocs(leaks); + n + } +} + +/// Byte accessors +impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> { + fn get_bytes_unchecked( + &self, + ptr: MemoryPointer, + size: u64, + align: u64, + ) -> EvalResult<'tcx, &[u8]> { + // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL + self.check_align(ptr.into(), align, Some(AccessKind::Read))?; + if size == 0 { + return Ok(&[]); + } + self.check_locks(ptr, size, AccessKind::Read)?; + self.check_bounds(ptr.offset(size, self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow) + let alloc = self.get(ptr.alloc_id)?; + assert_eq!(ptr.offset as usize as u64, ptr.offset); + assert_eq!(size as usize as u64, size); + let offset = ptr.offset as usize; + Ok(&alloc.bytes[offset..offset + size as usize]) + } + + fn get_bytes_unchecked_mut( + &mut self, + ptr: MemoryPointer, + size: u64, + align: u64, + ) -> EvalResult<'tcx, &mut [u8]> { + // 
Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL + self.check_align(ptr.into(), align, Some(AccessKind::Write))?; + if size == 0 { + return Ok(&mut []); + } + self.check_locks(ptr, size, AccessKind::Write)?; + self.check_bounds(ptr.offset(size, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow) + let alloc = self.get_mut(ptr.alloc_id)?; + assert_eq!(ptr.offset as usize as u64, ptr.offset); + assert_eq!(size as usize as u64, size); + let offset = ptr.offset as usize; + Ok(&mut alloc.bytes[offset..offset + size as usize]) + } + + fn get_bytes(&self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> { + assert_ne!(size, 0); + if self.relocations(ptr, size)?.count() != 0 { + return err!(ReadPointerAsBytes); + } + self.check_defined(ptr, size)?; + self.get_bytes_unchecked(ptr, size, align) + } + + fn get_bytes_mut( + &mut self, + ptr: MemoryPointer, + size: u64, + align: u64, + ) -> EvalResult<'tcx, &mut [u8]> { + assert_ne!(size, 0); + self.clear_relocations(ptr, size)?; + self.mark_definedness(ptr.into(), size, true)?; + self.get_bytes_unchecked_mut(ptr, size, align) + } +} + +/// Reading and writing +impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> { + /// mark an allocation pointed to by a static as static and initialized + fn mark_inner_allocation_initialized( + &mut self, + alloc: AllocId, + mutability: Mutability, + ) -> EvalResult<'tcx> { + // relocations into other statics are not "inner allocations" + if self.get(alloc).ok().map_or(false, |alloc| { + alloc.kind != MemoryKind::UninitializedStatic + }) + { + self.mark_static_initalized(alloc, mutability)?; + } + Ok(()) + } + + /// mark an allocation as static and initialized, either mutable or not + pub fn mark_static_initalized( + &mut self, + alloc_id: AllocId, + mutability: Mutability, + ) -> EvalResult<'tcx> { + trace!( + "mark_static_initalized {:?}, mutability: {:?}", + alloc_id, + mutability + ); + // do not use `self.get_mut(alloc_id)` here, because we might have already marked a + // sub-element or have circular pointers (e.g. `Rc`-cycles) + let alloc_id = match alloc_id.into_alloc_id_kind() { + AllocIdKind::Function(_) => return Ok(()), + AllocIdKind::Runtime(id) => id, + }; + let relocations = match self.alloc_map.get_mut(&alloc_id) { + Some(&mut Allocation { + ref mut relocations, + ref mut kind, + ref mut mutable, + .. + }) => { + match *kind { + // const eval results can refer to "locals". + // E.g. 
`const Foo: &u32 = &1;` refers to the temp local that stores the `1` + MemoryKind::Stack | + // The entire point of this function + MemoryKind::UninitializedStatic => {}, + MemoryKind::Machine(m) => M::mark_static_initialized(m)?, + MemoryKind::Static => { + trace!("mark_static_initalized: skipping already initialized static referred to by static currently being initialized"); + return Ok(()); + }, + } + *kind = MemoryKind::Static; + *mutable = mutability; + // take out the relocations vector to free the borrow on self, so we can call + // mark recursively + mem::replace(relocations, Default::default()) + } + None => return err!(DanglingPointerDeref), + }; + // recurse into inner allocations + for &alloc in relocations.values() { + self.mark_inner_allocation_initialized(alloc, mutability)?; + } + // put back the relocations + self.alloc_map + .get_mut(&alloc_id) + .expect("checked above") + .relocations = relocations; + Ok(()) + } + + pub fn copy( + &mut self, + src: Pointer, + dest: Pointer, + size: u64, + align: u64, + nonoverlapping: bool, + ) -> EvalResult<'tcx> { + // Empty accesses don't need to be valid pointers, but they should still be aligned + self.check_align(src, align, Some(AccessKind::Read))?; + self.check_align(dest, align, Some(AccessKind::Write))?; + if size == 0 { + return Ok(()); + } + let src = src.to_ptr()?; + let dest = dest.to_ptr()?; + self.check_relocation_edges(src, size)?; + + // first copy the relocations to a temporary buffer, because + // `get_bytes_mut` will clear the relocations, which is correct, + // since we don't want to keep any relocations at the target. + + let relocations: Vec<_> = self.relocations(src, size)? + .map(|(&offset, &alloc_id)| { + // Update relocation offsets for the new positions in the destination allocation. + (offset + dest.offset - src.offset, alloc_id) + }) + .collect(); + + let src_bytes = self.get_bytes_unchecked(src, size, align)?.as_ptr(); + let dest_bytes = self.get_bytes_mut(dest, size, align)?.as_mut_ptr(); + + // SAFE: The above indexing would have panicked if there weren't at least `size` bytes + // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and + // `dest` could possibly overlap. 
+ unsafe { + assert_eq!(size as usize as u64, size); + if src.alloc_id == dest.alloc_id { + if nonoverlapping { + if (src.offset <= dest.offset && src.offset + size > dest.offset) || + (dest.offset <= src.offset && dest.offset + size > src.offset) + { + return err!(Intrinsic( + format!("copy_nonoverlapping called on overlapping ranges"), + )); + } + } + ptr::copy(src_bytes, dest_bytes, size as usize); + } else { + ptr::copy_nonoverlapping(src_bytes, dest_bytes, size as usize); + } + } + + self.copy_undef_mask(src, dest, size)?; + // copy back the relocations + self.get_mut(dest.alloc_id)?.relocations.extend(relocations); + + Ok(()) + } + + pub fn read_c_str(&self, ptr: MemoryPointer) -> EvalResult<'tcx, &[u8]> { + let alloc = self.get(ptr.alloc_id)?; + assert_eq!(ptr.offset as usize as u64, ptr.offset); + let offset = ptr.offset as usize; + match alloc.bytes[offset..].iter().position(|&c| c == 0) { + Some(size) => { + if self.relocations(ptr, (size + 1) as u64)?.count() != 0 { + return err!(ReadPointerAsBytes); + } + self.check_defined(ptr, (size + 1) as u64)?; + self.check_locks(ptr, (size + 1) as u64, AccessKind::Read)?; + Ok(&alloc.bytes[offset..offset + size]) + } + None => err!(UnterminatedCString(ptr)), + } + } + + pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> { + // Empty accesses don't need to be valid pointers, but they should still be non-NULL + self.check_align(ptr, 1, Some(AccessKind::Read))?; + if size == 0 { + return Ok(&[]); + } + self.get_bytes(ptr.to_ptr()?, size, 1) + } + + pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<'tcx> { + // Empty accesses don't need to be valid pointers, but they should still be non-NULL + self.check_align(ptr, 1, Some(AccessKind::Write))?; + if src.is_empty() { + return Ok(()); + } + let bytes = self.get_bytes_mut(ptr.to_ptr()?, src.len() as u64, 1)?; + bytes.clone_from_slice(src); + Ok(()) + } + + pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx> { + // Empty accesses don't need to be valid pointers, but they should still be non-NULL + self.check_align(ptr, 1, Some(AccessKind::Write))?; + if count == 0 { + return Ok(()); + } + let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, 1)?; + for b in bytes { + *b = val; + } + Ok(()) + } + + pub fn read_primval(&self, ptr: MemoryPointer, size: u64, signed: bool) -> EvalResult<'tcx, PrimVal> { + self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer + let endianess = self.endianess(); + let bytes = self.get_bytes_unchecked(ptr, size, self.int_align(size))?; + // Undef check happens *after* we established that the alignment is correct. + // We must not return Ok() for unaligned pointers! + if self.check_defined(ptr, size).is_err() { + return Ok(PrimVal::Undef.into()); + } + // Now we do the actual reading + let bytes = if signed { + read_target_int(endianess, bytes).unwrap() as u128 + } else { + read_target_uint(endianess, bytes).unwrap() + }; + // See if we got a pointer + if size != self.pointer_size() { + if self.relocations(ptr, size)?.count() != 0 { + return err!(ReadPointerAsBytes); + } + } else { + let alloc = self.get(ptr.alloc_id)?; + match alloc.relocations.get(&ptr.offset) { + Some(&alloc_id) => return Ok(PrimVal::Ptr(MemoryPointer::new(alloc_id, bytes as u64))), + None => {}, + } + } + // We don't. Just return the bytes. 
+ Ok(PrimVal::Bytes(bytes)) + } + + pub fn read_ptr_sized_unsigned(&self, ptr: MemoryPointer) -> EvalResult<'tcx, PrimVal> { + self.read_primval(ptr, self.pointer_size(), false) + } + + pub fn write_primval(&mut self, ptr: MemoryPointer, val: PrimVal, size: u64, signed: bool) -> EvalResult<'tcx> { + let endianess = self.endianess(); + + let bytes = match val { + PrimVal::Ptr(val) => { + assert_eq!(size, self.pointer_size()); + val.offset as u128 + } + + PrimVal::Bytes(bytes) => { + // We need to mask here, or the byteorder crate can die when given a u64 larger + // than fits in an integer of the requested size. + let mask = match size { + 1 => !0u8 as u128, + 2 => !0u16 as u128, + 4 => !0u32 as u128, + 8 => !0u64 as u128, + 16 => !0, + n => bug!("unexpected PrimVal::Bytes size: {}", n), + }; + bytes & mask + } + + PrimVal::Undef => { + self.mark_definedness(PrimVal::Ptr(ptr).into(), size, false)?; + return Ok(()); + } + }; + + { + let align = self.int_align(size); + let dst = self.get_bytes_mut(ptr, size, align)?; + if signed { + write_target_int(endianess, dst, bytes as i128).unwrap(); + } else { + write_target_uint(endianess, dst, bytes).unwrap(); + } + } + + // See if we have to also write a relocation + match val { + PrimVal::Ptr(val) => { + self.get_mut(ptr.alloc_id)?.relocations.insert( + ptr.offset, + val.alloc_id, + ); + } + _ => {} + } + + Ok(()) + } + + pub fn write_ptr_sized_unsigned(&mut self, ptr: MemoryPointer, val: PrimVal) -> EvalResult<'tcx> { + let ptr_size = self.pointer_size(); + self.write_primval(ptr, val, ptr_size, false) + } + + fn int_align(&self, size: u64) -> u64 { + // We assume pointer-sized integers have the same alignment as pointers. + // We also assume signed and unsigned integers of the same size have the same alignment. + match size { + 1 => self.layout.i8_align.abi(), + 2 => self.layout.i16_align.abi(), + 4 => self.layout.i32_align.abi(), + 8 => self.layout.i64_align.abi(), + 16 => self.layout.i128_align.abi(), + _ => bug!("bad integer size: {}", size), + } + } +} + +/// Relocations +impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> { + fn relocations( + &self, + ptr: MemoryPointer, + size: u64, + ) -> EvalResult<'tcx, btree_map::Range> { + let start = ptr.offset.saturating_sub(self.pointer_size() - 1); + let end = ptr.offset + size; + Ok(self.get(ptr.alloc_id)?.relocations.range(start..end)) + } + + fn clear_relocations(&mut self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> { + // Find all relocations overlapping the given range. + let keys: Vec<_> = self.relocations(ptr, size)?.map(|(&k, _)| k).collect(); + if keys.is_empty() { + return Ok(()); + } + + // Find the start and end of the given range and its outermost relocations. + let start = ptr.offset; + let end = start + size; + let first = *keys.first().unwrap(); + let last = *keys.last().unwrap() + self.pointer_size(); + + let alloc = self.get_mut(ptr.alloc_id)?; + + // Mark parts of the outermost relocations as undefined if they partially fall outside the + // given range. + if first < start { + alloc.undef_mask.set_range(first, start, false); + } + if last > end { + alloc.undef_mask.set_range(end, last, false); + } + + // Forget all the relocations. 
+ for k in keys { + alloc.relocations.remove(&k); + } + + Ok(()) + } + + fn check_relocation_edges(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> { + let overlapping_start = self.relocations(ptr, 0)?.count(); + let overlapping_end = self.relocations(ptr.offset(size, self.layout)?, 0)?.count(); + if overlapping_start + overlapping_end != 0 { + return err!(ReadPointerAsBytes); + } + Ok(()) + } +} + +/// Undefined bytes +impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> { + // FIXME(solson): This is a very naive, slow version. + fn copy_undef_mask( + &mut self, + src: MemoryPointer, + dest: MemoryPointer, + size: u64, + ) -> EvalResult<'tcx> { + // The bits have to be saved locally before writing to dest in case src and dest overlap. + assert_eq!(size as usize as u64, size); + let mut v = Vec::with_capacity(size as usize); + for i in 0..size { + let defined = self.get(src.alloc_id)?.undef_mask.get(src.offset + i); + v.push(defined); + } + for (i, defined) in v.into_iter().enumerate() { + self.get_mut(dest.alloc_id)?.undef_mask.set( + dest.offset + + i as u64, + defined, + ); + } + Ok(()) + } + + fn check_defined(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> { + let alloc = self.get(ptr.alloc_id)?; + if !alloc.undef_mask.is_range_defined( + ptr.offset, + ptr.offset + size, + ) + { + return err!(ReadUndefBytes); + } + Ok(()) + } + + pub fn mark_definedness( + &mut self, + ptr: Pointer, + size: u64, + new_state: bool, + ) -> EvalResult<'tcx> { + if size == 0 { + return Ok(()); + } + let ptr = ptr.to_ptr()?; + let alloc = self.get_mut(ptr.alloc_id)?; + alloc.undef_mask.set_range( + ptr.offset, + ptr.offset + size, + new_state, + ); + Ok(()) + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Methods to access integers in the target endianess +//////////////////////////////////////////////////////////////////////////////// + +fn write_target_uint( + endianess: layout::Endian, + mut target: &mut [u8], + data: u128, +) -> Result<(), io::Error> { + let len = target.len(); + match endianess { + layout::Endian::Little => target.write_uint128::(data, len), + layout::Endian::Big => target.write_uint128::(data, len), + } +} +fn write_target_int( + endianess: layout::Endian, + mut target: &mut [u8], + data: i128, +) -> Result<(), io::Error> { + let len = target.len(); + match endianess { + layout::Endian::Little => target.write_int128::(data, len), + layout::Endian::Big => target.write_int128::(data, len), + } +} + +fn read_target_uint(endianess: layout::Endian, mut source: &[u8]) -> Result { + match endianess { + layout::Endian::Little => source.read_uint128::(source.len()), + layout::Endian::Big => source.read_uint128::(source.len()), + } +} + +fn read_target_int(endianess: layout::Endian, mut source: &[u8]) -> Result { + match endianess { + layout::Endian::Little => source.read_int128::(source.len()), + layout::Endian::Big => source.read_int128::(source.len()), + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Undefined byte tracking +//////////////////////////////////////////////////////////////////////////////// + +type Block = u64; +const BLOCK_SIZE: u64 = 64; + +#[derive(Clone, Debug)] +pub struct UndefMask { + blocks: Vec, + len: u64, +} + +impl UndefMask { + fn new(size: u64) -> Self { + let mut m = UndefMask { + blocks: vec![], + len: 0, + }; + m.grow(size, false); + m + } + + /// Check whether the range `start..end` (end-exclusive) is entirely defined. 
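UndefMask tracks one definedness bit per byte of an allocation, packed into 64-bit blocks, so locating a bit is a divide and a modulo by the block size. A small standalone illustration of that indexing; the block/bit split is implied by the Vec of 64-bit blocks declared above, and the helper name here is just for the example:

// One definedness bit per byte, 64 bits per block.
const BLOCK_SIZE: u64 = 64;

// Standard block/bit split: byte `i` lives in block `i / 64`, bit `i % 64`.
fn bit_index(i: u64) -> (usize, usize) {
    ((i / BLOCK_SIZE) as usize, (i % BLOCK_SIZE) as usize)
}

fn main() {
    let mut blocks = vec![0u64; 2];
    // Mark byte 70 as defined.
    let (block, bit) = bit_index(70);
    blocks[block] |= 1u64 << bit;
    assert_eq!((block, bit), (1, 6));
    assert_ne!(blocks[1] & (1u64 << 6), 0);
}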
+ pub fn is_range_defined(&self, start: u64, end: u64) -> bool { + if end > self.len { + return false; + } + for i in start..end { + if !self.get(i) { + return false; + } + } + true + } + + fn set_range(&mut self, start: u64, end: u64, new_state: bool) { + let len = self.len; + if end > len { + self.grow(end - len, new_state); + } + self.set_range_inbounds(start, end, new_state); + } + + fn set_range_inbounds(&mut self, start: u64, end: u64, new_state: bool) { + for i in start..end { + self.set(i, new_state); + } + } + + fn get(&self, i: u64) -> bool { + let (block, bit) = bit_index(i); + (self.blocks[block] & 1 << bit) != 0 + } + + fn set(&mut self, i: u64, new_state: bool) { + let (block, bit) = bit_index(i); + if new_state { + self.blocks[block] |= 1 << bit; + } else { + self.blocks[block] &= !(1 << bit); + } + } + + fn grow(&mut self, amount: u64, new_state: bool) { + let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len; + if amount > unused_trailing_bits { + let additional_blocks = amount / BLOCK_SIZE + 1; + assert_eq!(additional_blocks as usize as u64, additional_blocks); + self.blocks.extend( + iter::repeat(0).take(additional_blocks as usize), + ); + } + let start = self.len; + self.len += amount; + self.set_range_inbounds(start, start + amount, new_state); + } +} + +fn bit_index(bits: u64) -> (usize, usize) { + let a = bits / BLOCK_SIZE; + let b = bits % BLOCK_SIZE; + assert_eq!(a as usize as u64, a); + assert_eq!(b as usize as u64, b); + (a as usize, b as usize) +} + +//////////////////////////////////////////////////////////////////////////////// +// Unaligned accesses +//////////////////////////////////////////////////////////////////////////////// + +pub trait HasMemory<'a, 'tcx, M: Machine<'tcx>> { + fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M>; + fn memory(&self) -> &Memory<'a, 'tcx, M>; + + // These are not supposed to be overriden. + fn read_maybe_aligned(&self, aligned: bool, f: F) -> EvalResult<'tcx, T> + where + F: FnOnce(&Self) -> EvalResult<'tcx, T>, + { + let old = self.memory().reads_are_aligned.get(); + // Do alignment checking if *all* nested calls say it has to be aligned. + self.memory().reads_are_aligned.set(old && aligned); + let t = f(self); + self.memory().reads_are_aligned.set(old); + t + } + + fn read_maybe_aligned_mut(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T> + where + F: FnOnce(&mut Self) -> EvalResult<'tcx, T>, + { + let old = self.memory().reads_are_aligned.get(); + // Do alignment checking if *all* nested calls say it has to be aligned. + self.memory().reads_are_aligned.set(old && aligned); + let t = f(self); + self.memory().reads_are_aligned.set(old); + t + } + + fn write_maybe_aligned_mut(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T> + where + F: FnOnce(&mut Self) -> EvalResult<'tcx, T>, + { + let old = self.memory().writes_are_aligned.get(); + // Do alignment checking if *all* nested calls say it has to be aligned. 
+ self.memory().writes_are_aligned.set(old && aligned); + let t = f(self); + self.memory().writes_are_aligned.set(old); + t + } +} + +impl<'a, 'tcx, M: Machine<'tcx>> HasMemory<'a, 'tcx, M> for Memory<'a, 'tcx, M> { + #[inline] + fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M> { + self + } + + #[inline] + fn memory(&self) -> &Memory<'a, 'tcx, M> { + self + } +} + +impl<'a, 'tcx, M: Machine<'tcx>> HasMemory<'a, 'tcx, M> for EvalContext<'a, 'tcx, M> { + #[inline] + fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M> { + &mut self.memory + } + + #[inline] + fn memory(&self) -> &Memory<'a, 'tcx, M> { + &self.memory + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Pointer arithmetic +//////////////////////////////////////////////////////////////////////////////// + +pub trait PointerArithmetic: layout::HasDataLayout { + // These are not supposed to be overriden. + + //// Trunace the given value to the pointer size; also return whether there was an overflow + fn truncate_to_ptr(self, val: u128) -> (u64, bool) { + let max_ptr_plus_1 = 1u128 << self.data_layout().pointer_size.bits(); + ((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1) + } + + // Overflow checking only works properly on the range from -u64 to +u64. + fn overflowing_signed_offset(self, val: u64, i: i128) -> (u64, bool) { + // FIXME: is it possible to over/underflow here? + if i < 0 { + // trickery to ensure that i64::min_value() works fine + // this formula only works for true negative values, it panics for zero! + let n = u64::max_value() - (i as u64) + 1; + val.overflowing_sub(n) + } else { + self.overflowing_offset(val, i as u64) + } + } + + fn overflowing_offset(self, val: u64, i: u64) -> (u64, bool) { + let (res, over1) = val.overflowing_add(i); + let (res, over2) = self.truncate_to_ptr(res as u128); + (res, over1 || over2) + } + + fn signed_offset<'tcx>(self, val: u64, i: i64) -> EvalResult<'tcx, u64> { + let (res, over) = self.overflowing_signed_offset(val, i as i128); + if over { err!(OverflowingMath) } else { Ok(res) } + } + + fn offset<'tcx>(self, val: u64, i: u64) -> EvalResult<'tcx, u64> { + let (res, over) = self.overflowing_offset(val, i); + if over { err!(OverflowingMath) } else { Ok(res) } + } + + fn wrapping_signed_offset(self, val: u64, i: i64) -> u64 { + self.overflowing_signed_offset(val, i as i128).0 + } +} + +impl PointerArithmetic for T {} + +impl<'a, 'tcx, M: Machine<'tcx>> layout::HasDataLayout for &'a Memory<'a, 'tcx, M> { + #[inline] + fn data_layout(&self) -> &TargetDataLayout { + self.layout + } +} +impl<'a, 'tcx, M: Machine<'tcx>> layout::HasDataLayout for &'a EvalContext<'a, 'tcx, M> { + #[inline] + fn data_layout(&self) -> &TargetDataLayout { + self.memory().layout + } +} + +impl<'c, 'b, 'a, 'tcx, M: Machine<'tcx>> layout::HasDataLayout + for &'c &'b mut EvalContext<'a, 'tcx, M> { + #[inline] + fn data_layout(&self) -> &TargetDataLayout { + self.memory().layout + } +} diff --git a/src/tools/miri/src/librustc_mir/interpret/mod.rs b/src/tools/miri/src/librustc_mir/interpret/mod.rs new file mode 100644 index 0000000000..08837c4fb6 --- /dev/null +++ b/src/tools/miri/src/librustc_mir/interpret/mod.rs @@ -0,0 +1,42 @@ +//! An interpreter for MIR used in CTFE and by miri + +#[macro_export] +macro_rules! 
err { + ($($tt:tt)*) => { Err($crate::interpret::EvalErrorKind::$($tt)*.into()) }; +} + +mod cast; +mod const_eval; +mod error; +mod eval_context; +mod lvalue; +mod validation; +mod machine; +mod memory; +mod operator; +mod range_map; +mod step; +mod terminator; +mod traits; +mod value; + +pub use self::error::{EvalError, EvalResult, EvalErrorKind}; + +pub use self::eval_context::{EvalContext, Frame, ResourceLimits, StackPopCleanup, DynamicLifetime, + TyAndPacked, PtrAndAlign, ValTy}; + +pub use self::lvalue::{Lvalue, LvalueExtra, GlobalId}; + +pub use self::memory::{AllocId, Memory, MemoryPointer, MemoryKind, HasMemory, AccessKind, AllocIdKind}; + +use self::memory::{PointerArithmetic, Lock}; + +use self::range_map::RangeMap; + +pub use self::value::{PrimVal, PrimValKind, Value, Pointer}; + +pub use self::const_eval::{eval_body_as_integer, eval_body_as_primval}; + +pub use self::machine::Machine; + +pub use self::validation::{ValidationQuery, AbsLvalue}; diff --git a/src/tools/miri/src/librustc_mir/interpret/operator.rs b/src/tools/miri/src/librustc_mir/interpret/operator.rs new file mode 100644 index 0000000000..7fe4691fff --- /dev/null +++ b/src/tools/miri/src/librustc_mir/interpret/operator.rs @@ -0,0 +1,268 @@ +use rustc::mir; +use rustc::ty::Ty; +use rustc_const_math::ConstFloat; +use syntax::ast::FloatTy; +use std::cmp::Ordering; + +use super::{EvalResult, EvalContext, Lvalue, Machine, ValTy}; + +use super::value::{PrimVal, PrimValKind, Value, bytes_to_f32, bytes_to_f64, f32_to_bytes, + f64_to_bytes}; + +impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { + fn binop_with_overflow( + &mut self, + op: mir::BinOp, + left: ValTy<'tcx>, + right: ValTy<'tcx>, + ) -> EvalResult<'tcx, (PrimVal, bool)> { + let left_val = self.value_to_primval(left)?; + let right_val = self.value_to_primval(right)?; + self.binary_op(op, left_val, left.ty, right_val, right.ty) + } + + /// Applies the binary operation `op` to the two operands and writes a tuple of the result + /// and a boolean signifying the potential overflow to the destination. + pub fn intrinsic_with_overflow( + &mut self, + op: mir::BinOp, + left: ValTy<'tcx>, + right: ValTy<'tcx>, + dest: Lvalue, + dest_ty: Ty<'tcx>, + ) -> EvalResult<'tcx> { + let (val, overflowed) = self.binop_with_overflow(op, left, right)?; + let val = Value::ByValPair(val, PrimVal::from_bool(overflowed)); + let valty = ValTy { + value: val, + ty: dest_ty, + }; + self.write_value(valty, dest) + } + + /// Applies the binary operation `op` to the arguments and writes the result to the + /// destination. Returns `true` if the operation overflowed. + pub fn intrinsic_overflowing( + &mut self, + op: mir::BinOp, + left: ValTy<'tcx>, + right: ValTy<'tcx>, + dest: Lvalue, + dest_ty: Ty<'tcx>, + ) -> EvalResult<'tcx, bool> { + let (val, overflowed) = self.binop_with_overflow(op, left, right)?; + self.write_primval(dest, val, dest_ty)?; + Ok(overflowed) + } +} + +macro_rules! overflow { + ($op:ident, $l:expr, $r:expr) => ({ + let (val, overflowed) = $l.$op($r); + let primval = PrimVal::Bytes(val as u128); + Ok((primval, overflowed)) + }) +} + +macro_rules! 
int_arithmetic { + ($kind:expr, $int_op:ident, $l:expr, $r:expr) => ({ + let l = $l; + let r = $r; + use super::PrimValKind::*; + match $kind { + I8 => overflow!($int_op, l as i8, r as i8), + I16 => overflow!($int_op, l as i16, r as i16), + I32 => overflow!($int_op, l as i32, r as i32), + I64 => overflow!($int_op, l as i64, r as i64), + I128 => overflow!($int_op, l as i128, r as i128), + U8 => overflow!($int_op, l as u8, r as u8), + U16 => overflow!($int_op, l as u16, r as u16), + U32 => overflow!($int_op, l as u32, r as u32), + U64 => overflow!($int_op, l as u64, r as u64), + U128 => overflow!($int_op, l as u128, r as u128), + _ => bug!("int_arithmetic should only be called on int primvals"), + } + }) +} + +macro_rules! int_shift { + ($kind:expr, $int_op:ident, $l:expr, $r:expr) => ({ + let l = $l; + let r = $r; + let r_wrapped = r as u32; + match $kind { + I8 => overflow!($int_op, l as i8, r_wrapped), + I16 => overflow!($int_op, l as i16, r_wrapped), + I32 => overflow!($int_op, l as i32, r_wrapped), + I64 => overflow!($int_op, l as i64, r_wrapped), + I128 => overflow!($int_op, l as i128, r_wrapped), + U8 => overflow!($int_op, l as u8, r_wrapped), + U16 => overflow!($int_op, l as u16, r_wrapped), + U32 => overflow!($int_op, l as u32, r_wrapped), + U64 => overflow!($int_op, l as u64, r_wrapped), + U128 => overflow!($int_op, l as u128, r_wrapped), + _ => bug!("int_shift should only be called on int primvals"), + }.map(|(val, over)| (val, over || r != r_wrapped as u128)) + }) +} + +impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { + /// Returns the result of the specified operation and whether it overflowed. + pub fn binary_op( + &self, + bin_op: mir::BinOp, + left: PrimVal, + left_ty: Ty<'tcx>, + right: PrimVal, + right_ty: Ty<'tcx>, + ) -> EvalResult<'tcx, (PrimVal, bool)> { + use rustc::mir::BinOp::*; + use super::PrimValKind::*; + + let left_kind = self.ty_to_primval_kind(left_ty)?; + let right_kind = self.ty_to_primval_kind(right_ty)?; + //trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind); + + // I: Handle operations that support pointers + if !left_kind.is_float() && !right_kind.is_float() { + if let Some(handled) = M::try_ptr_op(self, bin_op, left, left_ty, right, right_ty)? { + return Ok(handled); + } + } + + // II: From now on, everything must be bytes, no pointers + let l = left.to_bytes()?; + let r = right.to_bytes()?; + + // These ops can have an RHS with a different numeric type. 
+ if right_kind.is_int() && (bin_op == Shl || bin_op == Shr) { + return match bin_op { + Shl => int_shift!(left_kind, overflowing_shl, l, r), + Shr => int_shift!(left_kind, overflowing_shr, l, r), + _ => bug!("it has already been checked that this is a shift op"), + }; + } + + if left_kind != right_kind { + let msg = format!( + "unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})", + bin_op, + left, + left_kind, + right, + right_kind + ); + return err!(Unimplemented(msg)); + } + + let float_op = |op, l, r, ty| { + let l = ConstFloat { + bits: l, + ty, + }; + let r = ConstFloat { + bits: r, + ty, + }; + match op { + Eq => PrimVal::from_bool(l.try_cmp(r).unwrap() == Ordering::Equal), + Ne => PrimVal::from_bool(l.try_cmp(r).unwrap() != Ordering::Equal), + Lt => PrimVal::from_bool(l.try_cmp(r).unwrap() == Ordering::Less), + Le => PrimVal::from_bool(l.try_cmp(r).unwrap() != Ordering::Greater), + Gt => PrimVal::from_bool(l.try_cmp(r).unwrap() == Ordering::Greater), + Ge => PrimVal::from_bool(l.try_cmp(r).unwrap() != Ordering::Less), + Add => PrimVal::Bytes((l + r).unwrap().bits), + Sub => PrimVal::Bytes((l - r).unwrap().bits), + Mul => PrimVal::Bytes((l * r).unwrap().bits), + Div => PrimVal::Bytes((l / r).unwrap().bits), + Rem => PrimVal::Bytes((l % r).unwrap().bits), + _ => bug!("invalid float op: `{:?}`", op), + } + }; + + let val = match (bin_op, left_kind) { + (_, F32) => float_op(bin_op, l, r, FloatTy::F32), + (_, F64) => float_op(bin_op, l, r, FloatTy::F64), + + + (Eq, _) => PrimVal::from_bool(l == r), + (Ne, _) => PrimVal::from_bool(l != r), + + (Lt, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) < (r as i128)), + (Lt, _) => PrimVal::from_bool(l < r), + (Le, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) <= (r as i128)), + (Le, _) => PrimVal::from_bool(l <= r), + (Gt, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) > (r as i128)), + (Gt, _) => PrimVal::from_bool(l > r), + (Ge, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) >= (r as i128)), + (Ge, _) => PrimVal::from_bool(l >= r), + + (BitOr, _) => PrimVal::Bytes(l | r), + (BitAnd, _) => PrimVal::Bytes(l & r), + (BitXor, _) => PrimVal::Bytes(l ^ r), + + (Add, k) if k.is_int() => return int_arithmetic!(k, overflowing_add, l, r), + (Sub, k) if k.is_int() => return int_arithmetic!(k, overflowing_sub, l, r), + (Mul, k) if k.is_int() => return int_arithmetic!(k, overflowing_mul, l, r), + (Div, k) if k.is_int() => return int_arithmetic!(k, overflowing_div, l, r), + (Rem, k) if k.is_int() => return int_arithmetic!(k, overflowing_rem, l, r), + + _ => { + let msg = format!( + "unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})", + bin_op, + left, + left_kind, + right, + right_kind + ); + return err!(Unimplemented(msg)); + } + }; + + Ok((val, false)) + } +} + +pub fn unary_op<'tcx>( + un_op: mir::UnOp, + val: PrimVal, + val_kind: PrimValKind, +) -> EvalResult<'tcx, PrimVal> { + use rustc::mir::UnOp::*; + use super::PrimValKind::*; + + let bytes = val.to_bytes()?; + + let result_bytes = match (un_op, val_kind) { + (Not, Bool) => !val.to_bool()? 
as u128, + + (Not, U8) => !(bytes as u8) as u128, + (Not, U16) => !(bytes as u16) as u128, + (Not, U32) => !(bytes as u32) as u128, + (Not, U64) => !(bytes as u64) as u128, + (Not, U128) => !bytes, + + (Not, I8) => !(bytes as i8) as u128, + (Not, I16) => !(bytes as i16) as u128, + (Not, I32) => !(bytes as i32) as u128, + (Not, I64) => !(bytes as i64) as u128, + (Not, I128) => !(bytes as i128) as u128, + + (Neg, I8) => -(bytes as i8) as u128, + (Neg, I16) => -(bytes as i16) as u128, + (Neg, I32) => -(bytes as i32) as u128, + (Neg, I64) => -(bytes as i64) as u128, + (Neg, I128) => -(bytes as i128) as u128, + + (Neg, F32) => f32_to_bytes(-bytes_to_f32(bytes)), + (Neg, F64) => f64_to_bytes(-bytes_to_f64(bytes)), + + _ => { + let msg = format!("unimplemented unary op: {:?}, {:?}", un_op, val); + return err!(Unimplemented(msg)); + } + }; + + Ok(PrimVal::Bytes(result_bytes)) +} diff --git a/src/tools/miri/src/librustc_mir/interpret/range_map.rs b/src/tools/miri/src/librustc_mir/interpret/range_map.rs new file mode 100644 index 0000000000..5cdcbe3512 --- /dev/null +++ b/src/tools/miri/src/librustc_mir/interpret/range_map.rs @@ -0,0 +1,250 @@ +//! Implements a map from integer indices to data. +//! Rather than storing data for every index, internally, this maps entire ranges to the data. +//! To this end, the APIs all work on ranges, not on individual integers. Ranges are split as +//! necessary (e.g. when [0,5) is first associated with X, and then [1,2) is mutated). +//! Users must not depend on whether a range is coalesced or not, even though this is observable +//! via the iteration APIs. +use std::collections::BTreeMap; +use std::ops; + +#[derive(Clone, Debug)] +pub struct RangeMap { + map: BTreeMap, +} + +// The derived `Ord` impl sorts first by the first field, then, if the fields are the same, +// by the second field. +// This is exactly what we need for our purposes, since a range query on a BTReeSet/BTreeMap will give us all +// `MemoryRange`s whose `start` is <= than the one we're looking for, but not > the end of the range we're checking. +// At the same time the `end` is irrelevant for the sorting and range searching, but used for the check. +// This kind of search breaks, if `end < start`, so don't do that! +#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)] +struct Range { + start: u64, + end: u64, // Invariant: end > start +} + +impl Range { + fn range(offset: u64, len: u64) -> ops::Range { + assert!(len > 0); + // We select all elements that are within + // the range given by the offset into the allocation and the length. + // This is sound if all ranges that intersect with the argument range, are in the + // resulting range of ranges. + let left = Range { + // lowest range to include `offset` + start: 0, + end: offset + 1, + }; + let right = Range { + // lowest (valid) range not to include `offset+len` + start: offset + len, + end: offset + len + 1, + }; + left..right + } + + /// Tests if all of [offset, offset+len) are contained in this range. 
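The Ord-based lookup described above lets a BTreeMap answer "which stored ranges might intersect [offset, offset+len)" with a single key-range query, followed by a precise overlap filter. A self-contained sketch of that query, using a textbook half-open intersection test and illustrative types rather than the in-tree RangeMap:

use std::collections::BTreeMap;

#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct Range {
    start: u64,
    end: u64, // invariant: end > start
}

fn main() {
    let mut map = BTreeMap::new();
    map.insert(Range { start: 0, end: 5 }, "a");
    map.insert(Range { start: 5, end: 9 }, "b");
    map.insert(Range { start: 20, end: 25 }, "c");

    // Query [4, 7): candidate keys run from the lowest range that could
    // still contain `offset` up to (excluding) ranges starting at the end.
    let (offset, len) = (4u64, 3u64);
    let lo = Range { start: 0, end: offset + 1 };
    let hi = Range { start: offset + len, end: offset + len + 1 };
    let hits: Vec<_> = map
        .range(lo..hi)
        .filter(|(r, _)| offset < r.end && offset + len > r.start)
        .map(|(_, v)| *v)
        .collect();
    assert_eq!(hits, vec!["a", "b"]);
}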
+ fn overlaps(&self, offset: u64, len: u64) -> bool { + assert!(len > 0); + offset < self.end && offset + len >= self.start + } +} + +impl RangeMap { + pub fn new() -> RangeMap { + RangeMap { map: BTreeMap::new() } + } + + fn iter_with_range<'a>( + &'a self, + offset: u64, + len: u64, + ) -> impl Iterator + 'a { + assert!(len > 0); + self.map.range(Range::range(offset, len)).filter_map( + move |(range, + data)| { + if range.overlaps(offset, len) { + Some((range, data)) + } else { + None + } + }, + ) + } + + pub fn iter<'a>(&'a self, offset: u64, len: u64) -> impl Iterator + 'a { + self.iter_with_range(offset, len).map(|(_, data)| data) + } + + fn split_entry_at(&mut self, offset: u64) + where + T: Clone, + { + let range = match self.iter_with_range(offset, 1).next() { + Some((&range, _)) => range, + None => return, + }; + assert!( + range.start <= offset && range.end > offset, + "We got a range that doesn't even contain what we asked for." + ); + // There is an entry overlapping this position, see if we have to split it + if range.start < offset { + let data = self.map.remove(&range).unwrap(); + let old = self.map.insert( + Range { + start: range.start, + end: offset, + }, + data.clone(), + ); + assert!(old.is_none()); + let old = self.map.insert( + Range { + start: offset, + end: range.end, + }, + data, + ); + assert!(old.is_none()); + } + } + + pub fn iter_mut_all<'a>(&'a mut self) -> impl Iterator + 'a { + self.map.values_mut() + } + + /// Provide mutable iteration over everything in the given range. As a side-effect, + /// this will split entries in the map that are only partially hit by the given range, + /// to make sure that when they are mutated, the effect is constrained to the given range. + pub fn iter_mut_with_gaps<'a>( + &'a mut self, + offset: u64, + len: u64, + ) -> impl Iterator + 'a + where + T: Clone, + { + assert!(len > 0); + // Preparation: Split first and last entry as needed. + self.split_entry_at(offset); + self.split_entry_at(offset + len); + // Now we can provide a mutable iterator + self.map.range_mut(Range::range(offset, len)).filter_map( + move |(&range, data)| { + if range.overlaps(offset, len) { + assert!( + offset <= range.start && offset + len >= range.end, + "The splitting went wrong" + ); + Some(data) + } else { + // Skip this one + None + } + }, + ) + } + + /// Provide a mutable iterator over everything in the given range, with the same side-effects as + /// iter_mut_with_gaps. Furthermore, if there are gaps between ranges, fill them with the given default. + /// This is also how you insert. 
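Splitting a partially-hit entry, as iter_mut_with_gaps does before handing out mutable references, amounts to cutting a half-open range at the query boundary so the untouched part keeps its old data. A minimal sketch of one such cut, with plain tuples standing in for the map entries; the real code performs this at both edges of the query and clones the entry's data into each half:

// Split a half-open range at `offset` if `offset` falls strictly inside it.
fn split_at(range: (u64, u64), offset: u64) -> ((u64, u64), Option<(u64, u64)>) {
    let (start, end) = range;
    if start < offset && offset < end {
        ((start, offset), Some((offset, end)))
    } else {
        (range, None)
    }
}

fn main() {
    assert_eq!(split_at((0, 10), 4), ((0, 4), Some((4, 10))));
    assert_eq!(split_at((4, 10), 4), ((4, 10), None)); // boundary: no split needed
}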
+ pub fn iter_mut<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator + 'a + where + T: Clone + Default, + { + // Do a first iteration to collect the gaps + let mut gaps = Vec::new(); + let mut last_end = offset; + for (range, _) in self.iter_with_range(offset, len) { + if last_end < range.start { + gaps.push(Range { + start: last_end, + end: range.start, + }); + } + last_end = range.end; + } + if last_end < offset + len { + gaps.push(Range { + start: last_end, + end: offset + len, + }); + } + + // Add default for all gaps + for gap in gaps { + let old = self.map.insert(gap, Default::default()); + assert!(old.is_none()); + } + + // Now provide mutable iteration + self.iter_mut_with_gaps(offset, len) + } + + pub fn retain(&mut self, mut f: F) + where + F: FnMut(&T) -> bool, + { + let mut remove = Vec::new(); + for (range, data) in self.map.iter() { + if !f(data) { + remove.push(*range); + } + } + + for range in remove { + self.map.remove(&range); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + /// Query the map at every offset in the range and collect the results. + fn to_vec(map: &RangeMap, offset: u64, len: u64) -> Vec { + (offset..offset + len) + .into_iter() + .map(|i| *map.iter(i, 1).next().unwrap()) + .collect() + } + + #[test] + fn basic_insert() { + let mut map = RangeMap::::new(); + // Insert + for x in map.iter_mut(10, 1) { + *x = 42; + } + // Check + assert_eq!(to_vec(&map, 10, 1), vec![42]); + } + + #[test] + fn gaps() { + let mut map = RangeMap::::new(); + for x in map.iter_mut(11, 1) { + *x = 42; + } + for x in map.iter_mut(15, 1) { + *x = 42; + } + + // Now request a range that needs three gaps filled + for x in map.iter_mut(10, 10) { + if *x != 42 { + *x = 23; + } + } + + assert_eq!( + to_vec(&map, 10, 10), + vec![23, 42, 23, 23, 23, 42, 23, 23, 23, 23] + ); + assert_eq!(to_vec(&map, 13, 5), vec![23, 23, 42, 23, 23]); + } +} diff --git a/src/tools/miri/src/librustc_mir/interpret/step.rs b/src/tools/miri/src/librustc_mir/interpret/step.rs new file mode 100644 index 0000000000..1f53870752 --- /dev/null +++ b/src/tools/miri/src/librustc_mir/interpret/step.rs @@ -0,0 +1,397 @@ +//! This module contains the `EvalContext` methods for executing a single step of the interpreter. +//! +//! The main entry point is the `step` method. + +use rustc::hir::def_id::DefId; +use rustc::hir; +use rustc::mir::visit::{Visitor, LvalueContext}; +use rustc::mir; +use rustc::traits::Reveal; +use rustc::ty; +use rustc::ty::layout::Layout; +use rustc::ty::subst::Substs; +use rustc::middle::const_val::ConstVal; + +use super::{EvalResult, EvalContext, StackPopCleanup, PtrAndAlign, GlobalId, Lvalue, + MemoryKind, Machine, PrimVal}; + +use syntax::codemap::Span; +use syntax::ast::Mutability; + +impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { + pub fn inc_step_counter_and_check_limit(&mut self, n: u64) -> EvalResult<'tcx> { + self.steps_remaining = self.steps_remaining.saturating_sub(n); + if self.steps_remaining > 0 { + Ok(()) + } else { + err!(ExecutionTimeLimitReached) + } + } + + /// Returns true as long as there are more things to do. 
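The step contract documented above (Ok(true) while there is more work, Ok(false) once the stack is empty, Err on the first failure or when the step budget is exhausted) is what a driver loops on. A hypothetical, heavily simplified driver with stand-in types, not the real EvalContext API:

struct Interp {
    steps_remaining: u64,
    work: u64,
}

impl Interp {
    // Same shape as the `step` below: consume budget, do one unit of work,
    // report whether anything is left to execute.
    fn step(&mut self) -> Result<bool, &'static str> {
        if self.steps_remaining == 0 {
            return Err("ExecutionTimeLimitReached");
        }
        self.steps_remaining -= 1;
        self.work -= 1;
        Ok(self.work > 0)
    }
}

fn main() {
    let mut interp = Interp { steps_remaining: 100, work: 3 };
    while interp.step().expect("evaluation failed") {}
    assert_eq!(interp.work, 0);
}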
+ pub fn step(&mut self) -> EvalResult<'tcx, bool> { + self.inc_step_counter_and_check_limit(1)?; + if self.stack.is_empty() { + return Ok(false); + } + + let block = self.frame().block; + let stmt_id = self.frame().stmt; + let mir = self.mir(); + let basic_block = &mir.basic_blocks()[block]; + + if let Some(stmt) = basic_block.statements.get(stmt_id) { + let mut new = Ok(0); + ConstantExtractor { + span: stmt.source_info.span, + instance: self.frame().instance, + ecx: self, + mir, + new_constants: &mut new, + }.visit_statement( + block, + stmt, + mir::Location { + block, + statement_index: stmt_id, + }, + ); + // if ConstantExtractor added new frames, we don't execute anything here + // but await the next call to step + if new? == 0 { + self.statement(stmt)?; + } + return Ok(true); + } + + let terminator = basic_block.terminator(); + let mut new = Ok(0); + ConstantExtractor { + span: terminator.source_info.span, + instance: self.frame().instance, + ecx: self, + mir, + new_constants: &mut new, + }.visit_terminator( + block, + terminator, + mir::Location { + block, + statement_index: stmt_id, + }, + ); + // if ConstantExtractor added new frames, we don't execute anything here + // but await the next call to step + if new? == 0 { + self.terminator(terminator)?; + } + Ok(true) + } + + fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> { + trace!("{:?}", stmt); + + use rustc::mir::StatementKind::*; + match stmt.kind { + Assign(ref lvalue, ref rvalue) => self.eval_rvalue_into_lvalue(rvalue, lvalue)?, + + SetDiscriminant { + ref lvalue, + variant_index, + } => { + let dest = self.eval_lvalue(lvalue)?; + let dest_ty = self.lvalue_ty(lvalue); + let dest_layout = self.type_layout(dest_ty)?; + + match *dest_layout { + Layout::General { discr, .. } => { + let discr_size = discr.size().bytes(); + let dest_ptr = self.force_allocation(dest)?.to_ptr()?; + self.memory.write_primval( + dest_ptr, + PrimVal::Bytes(variant_index as u128), + discr_size, + false + )? + } + + Layout::RawNullablePointer { nndiscr, .. } => { + if variant_index as u64 != nndiscr { + self.write_null(dest, dest_ty)?; + } + } + + Layout::StructWrappedNullablePointer { + nndiscr, + ref discrfield_source, + .. + } => { + if variant_index as u64 != nndiscr { + self.write_struct_wrapped_null_pointer( + dest_ty, + nndiscr, + discrfield_source, + dest, + )?; + } + } + + _ => { + bug!( + "SetDiscriminant on {} represented as {:#?}", + dest_ty, + dest_layout + ) + } + } + } + + // Mark locals as alive + StorageLive(local) => { + let old_val = self.frame_mut().storage_live(local)?; + self.deallocate_local(old_val)?; + } + + // Mark locals as dead + StorageDead(local) => { + let old_val = self.frame_mut().storage_dead(local)?; + self.deallocate_local(old_val)?; + } + + // Validity checks. + Validate(op, ref lvalues) => { + for operand in lvalues { + self.validation_op(op, operand)?; + } + } + EndRegion(ce) => { + self.end_region(Some(ce))?; + } + + // Defined to do nothing. These are added by optimization passes, to avoid changing the + // size of MIR constantly. + Nop => {} + + InlineAsm { .. 
} => return err!(InlineAsm), + } + + self.frame_mut().stmt += 1; + Ok(()) + } + + fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> EvalResult<'tcx> { + trace!("{:?}", terminator.kind); + self.eval_terminator(terminator)?; + if !self.stack.is_empty() { + trace!("// {:?}", self.frame().block); + } + Ok(()) + } + + /// returns `true` if a stackframe was pushed + fn global_item( + &mut self, + def_id: DefId, + substs: &'tcx Substs<'tcx>, + span: Span, + mutability: Mutability, + ) -> EvalResult<'tcx, bool> { + let instance = self.resolve_associated_const(def_id, substs); + let cid = GlobalId { + instance, + promoted: None, + }; + if self.globals.contains_key(&cid) { + return Ok(false); + } + if self.tcx.has_attr(def_id, "linkage") { + M::global_item_with_linkage(self, cid.instance, mutability)?; + return Ok(false); + } + let mir = self.load_mir(instance.def)?; + let size = self.type_size_with_substs(mir.return_ty, substs)?.expect( + "unsized global", + ); + let align = self.type_align_with_substs(mir.return_ty, substs)?; + let ptr = self.memory.allocate( + size, + align, + MemoryKind::UninitializedStatic, + )?; + let aligned = !self.is_packed(mir.return_ty)?; + self.globals.insert( + cid, + PtrAndAlign { + ptr: ptr.into(), + aligned, + }, + ); + let internally_mutable = !mir.return_ty.is_freeze( + self.tcx, + ty::ParamEnv::empty(Reveal::All), + span, + ); + let mutability = if mutability == Mutability::Mutable || internally_mutable { + Mutability::Mutable + } else { + Mutability::Immutable + }; + let cleanup = StackPopCleanup::MarkStatic(mutability); + let name = ty::tls::with(|tcx| tcx.item_path_str(def_id)); + trace!("pushing stack frame for global: {}", name); + self.push_stack_frame( + instance, + span, + mir, + Lvalue::from_ptr(ptr), + cleanup, + )?; + Ok(true) + } +} + +// WARNING: This code pushes new stack frames. Make sure that any methods implemented on this +// type don't ever access ecx.stack[ecx.cur_frame()], as that will change. This includes, e.g., +// using the current stack frame's substitution. +// Basically don't call anything other than `load_mir`, `alloc_ptr`, `push_stack_frame`. +struct ConstantExtractor<'a, 'b: 'a, 'tcx: 'b, M: Machine<'tcx> + 'a> { + span: Span, + ecx: &'a mut EvalContext<'b, 'tcx, M>, + mir: &'tcx mir::Mir<'tcx>, + instance: ty::Instance<'tcx>, + new_constants: &'a mut EvalResult<'tcx, u64>, +} + +impl<'a, 'b, 'tcx, M: Machine<'tcx>> ConstantExtractor<'a, 'b, 'tcx, M> { + fn try EvalResult<'tcx, bool>>(&mut self, f: F) { + // previous constant errored + let n = match *self.new_constants { + Ok(n) => n, + Err(_) => return, + }; + match f(self) { + // everything ok + a new stackframe + Ok(true) => *self.new_constants = Ok(n + 1), + // constant correctly evaluated, but no new stackframe + Ok(false) => {} + // constant eval errored + Err(err) => *self.new_constants = Err(err), + } + } +} + +impl<'a, 'b, 'tcx, M: Machine<'tcx>> Visitor<'tcx> for ConstantExtractor<'a, 'b, 'tcx, M> { + fn visit_constant(&mut self, constant: &mir::Constant<'tcx>, location: mir::Location) { + self.super_constant(constant, location); + match constant.literal { + // already computed by rustc + mir::Literal::Value { value: &ty::Const { val: ConstVal::Unevaluated(def_id, substs), .. } } => { + self.try(|this| { + this.ecx.global_item( + def_id, + substs, + constant.span, + Mutability::Immutable, + ) + }); + } + mir::Literal::Value { .. 
} => {} + mir::Literal::Promoted { index } => { + let cid = GlobalId { + instance: self.instance, + promoted: Some(index), + }; + if self.ecx.globals.contains_key(&cid) { + return; + } + let mir = &self.mir.promoted[index]; + self.try(|this| { + let size = this.ecx + .type_size_with_substs(mir.return_ty, this.instance.substs)? + .expect("unsized global"); + let align = this.ecx.type_align_with_substs( + mir.return_ty, + this.instance.substs, + )?; + let ptr = this.ecx.memory.allocate( + size, + align, + MemoryKind::UninitializedStatic, + )?; + let aligned = !this.ecx.is_packed(mir.return_ty)?; + this.ecx.globals.insert( + cid, + PtrAndAlign { + ptr: ptr.into(), + aligned, + }, + ); + trace!("pushing stack frame for {:?}", index); + this.ecx.push_stack_frame( + this.instance, + constant.span, + mir, + Lvalue::from_ptr(ptr), + StackPopCleanup::MarkStatic(Mutability::Immutable), + )?; + Ok(true) + }); + } + } + } + + fn visit_lvalue( + &mut self, + lvalue: &mir::Lvalue<'tcx>, + context: LvalueContext<'tcx>, + location: mir::Location, + ) { + self.super_lvalue(lvalue, context, location); + if let mir::Lvalue::Static(ref static_) = *lvalue { + let def_id = static_.def_id; + let substs = self.ecx.tcx.intern_substs(&[]); + let span = self.span; + if let Some(node_item) = self.ecx.tcx.hir.get_if_local(def_id) { + if let hir::map::Node::NodeItem(&hir::Item { ref node, .. }) = node_item { + if let hir::ItemStatic(_, m, _) = *node { + self.try(|this| { + this.ecx.global_item( + def_id, + substs, + span, + if m == hir::MutMutable { + Mutability::Mutable + } else { + Mutability::Immutable + }, + ) + }); + return; + } else { + bug!("static def id doesn't point to static"); + } + } else { + bug!("static def id doesn't point to item"); + } + } else { + let def = self.ecx.tcx.describe_def(def_id).expect("static not found"); + if let hir::def::Def::Static(_, mutable) = def { + self.try(|this| { + this.ecx.global_item( + def_id, + substs, + span, + if mutable { + Mutability::Mutable + } else { + Mutability::Immutable + }, + ) + }); + } else { + bug!("static found but isn't a static: {:?}", def); + } + } + } + } +} diff --git a/src/tools/miri/src/librustc_mir/interpret/terminator/drop.rs b/src/tools/miri/src/librustc_mir/interpret/terminator/drop.rs new file mode 100644 index 0000000000..6596cf951f --- /dev/null +++ b/src/tools/miri/src/librustc_mir/interpret/terminator/drop.rs @@ -0,0 +1,83 @@ +use rustc::mir::BasicBlock; +use rustc::ty::{self, Ty}; +use syntax::codemap::Span; + +use interpret::{EvalResult, EvalContext, Lvalue, LvalueExtra, PrimVal, Value, + Machine, ValTy}; + +impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { + pub(crate) fn drop_lvalue( + &mut self, + lval: Lvalue, + instance: ty::Instance<'tcx>, + ty: Ty<'tcx>, + span: Span, + target: BasicBlock, + ) -> EvalResult<'tcx> { + trace!("drop_lvalue: {:#?}", lval); + // We take the address of the object. This may well be unaligned, which is fine for us here. + // However, unaligned accesses will probably make the actual drop implementation fail -- a problem shared + // by rustc. + let val = match self.force_allocation(lval)? 
{ + Lvalue::Ptr { + ptr, + extra: LvalueExtra::Vtable(vtable), + } => ptr.ptr.to_value_with_vtable(vtable), + Lvalue::Ptr { + ptr, + extra: LvalueExtra::Length(len), + } => ptr.ptr.to_value_with_len(len), + Lvalue::Ptr { + ptr, + extra: LvalueExtra::None, + } => ptr.ptr.to_value(), + _ => bug!("force_allocation broken"), + }; + self.drop(val, instance, ty, span, target) + } + + fn drop( + &mut self, + arg: Value, + instance: ty::Instance<'tcx>, + ty: Ty<'tcx>, + span: Span, + target: BasicBlock, + ) -> EvalResult<'tcx> { + trace!("drop: {:#?}, {:?}, {:?}", arg, ty.sty, instance.def); + + let instance = match ty.sty { + ty::TyDynamic(..) => { + let vtable = match arg { + Value::ByValPair(_, PrimVal::Ptr(vtable)) => vtable, + _ => bug!("expected fat ptr, got {:?}", arg), + }; + match self.read_drop_type_from_vtable(vtable)? { + Some(func) => func, + // no drop fn -> bail out + None => { + self.goto_block(target); + return Ok(()) + }, + } + } + _ => instance, + }; + + // the drop function expects a reference to the value + let valty = ValTy { + value: arg, + ty: self.tcx.mk_mut_ptr(ty), + }; + + let fn_sig = self.tcx.fn_sig(instance.def_id()).skip_binder().clone(); + + self.eval_fn_call( + instance, + Some((Lvalue::undef(), target)), + &vec![valty], + span, + fn_sig, + ) + } +} diff --git a/src/tools/miri/src/librustc_mir/interpret/terminator/mod.rs b/src/tools/miri/src/librustc_mir/interpret/terminator/mod.rs new file mode 100644 index 0000000000..bee0fe23f7 --- /dev/null +++ b/src/tools/miri/src/librustc_mir/interpret/terminator/mod.rs @@ -0,0 +1,414 @@ +use rustc::mir; +use rustc::ty::{self, TypeVariants}; +use rustc::ty::layout::Layout; +use syntax::codemap::Span; +use syntax::abi::Abi; + +use super::{EvalResult, EvalContext, eval_context, + PtrAndAlign, Lvalue, PrimVal, Value, Machine, ValTy}; + +use rustc_data_structures::indexed_vec::Idx; + +mod drop; + +impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { + pub fn goto_block(&mut self, target: mir::BasicBlock) { + self.frame_mut().block = target; + self.frame_mut().stmt = 0; + } + + pub(super) fn eval_terminator( + &mut self, + terminator: &mir::Terminator<'tcx>, + ) -> EvalResult<'tcx> { + use rustc::mir::TerminatorKind::*; + match terminator.kind { + Return => { + self.dump_local(self.frame().return_lvalue); + self.pop_stack_frame()? + } + + Goto { target } => self.goto_block(target), + + SwitchInt { + ref discr, + ref values, + ref targets, + .. + } => { + // FIXME(CTFE): forbid branching + let discr_val = self.eval_operand(discr)?; + let discr_prim = self.value_to_primval(discr_val)?; + + // Branch to the `otherwise` case by default, if no match is found. + let mut target_block = targets[targets.len() - 1]; + + for (index, const_int) in values.iter().enumerate() { + let prim = PrimVal::Bytes(const_int.to_u128_unchecked()); + if discr_prim.to_bytes()? == prim.to_bytes()? { + target_block = targets[index]; + break; + } + } + + self.goto_block(target_block); + } + + Call { + ref func, + ref args, + ref destination, + .. + } => { + let destination = match *destination { + Some((ref lv, target)) => Some((self.eval_lvalue(lv)?, target)), + None => None, + }; + + let func_ty = self.operand_ty(func); + let (fn_def, sig) = match func_ty.sty { + ty::TyFnPtr(sig) => { + let fn_ptr = self.eval_operand_to_primval(func)?.to_ptr()?; + let instance = self.memory.get_fn(fn_ptr)?; + let instance_ty = instance.def.def_ty(self.tcx); + let instance_ty = self.monomorphize(instance_ty, instance.substs); + match instance_ty.sty { + ty::TyFnDef(..) 
=> { + let real_sig = instance_ty.fn_sig(self.tcx); + let sig = self.tcx.erase_late_bound_regions_and_normalize(&sig); + let real_sig = self.tcx.erase_late_bound_regions_and_normalize(&real_sig); + if !self.check_sig_compat(sig, real_sig)? { + return err!(FunctionPointerTyMismatch(real_sig, sig)); + } + } + ref other => bug!("instance def ty: {:?}", other), + } + (instance, sig) + } + ty::TyFnDef(def_id, substs) => ( + eval_context::resolve(self.tcx, def_id, substs), + func_ty.fn_sig(self.tcx), + ), + _ => { + let msg = format!("can't handle callee of type {:?}", func_ty); + return err!(Unimplemented(msg)); + } + }; + let args = self.operands_to_args(args)?; + let sig = self.tcx.erase_late_bound_regions_and_normalize(&sig); + self.eval_fn_call( + fn_def, + destination, + &args, + terminator.source_info.span, + sig, + )?; + } + + Drop { + ref location, + target, + .. + } => { + // FIXME(CTFE): forbid drop in const eval + let lval = self.eval_lvalue(location)?; + let ty = self.lvalue_ty(location); + let ty = eval_context::apply_param_substs(self.tcx, self.substs(), &ty); + trace!("TerminatorKind::drop: {:?}, type {}", location, ty); + + let instance = eval_context::resolve_drop_in_place(self.tcx, ty); + self.drop_lvalue( + lval, + instance, + ty, + terminator.source_info.span, + target, + )?; + } + + Assert { + ref cond, + expected, + ref msg, + target, + .. + } => { + let cond_val = self.eval_operand_to_primval(cond)?.to_bool()?; + if expected == cond_val { + self.goto_block(target); + } else { + use rustc::mir::AssertMessage::*; + return match *msg { + BoundsCheck { ref len, ref index } => { + let span = terminator.source_info.span; + let len = self.eval_operand_to_primval(len) + .expect("can't eval len") + .to_u64()?; + let index = self.eval_operand_to_primval(index) + .expect("can't eval index") + .to_u64()?; + err!(ArrayIndexOutOfBounds(span, len, index)) + } + Math(ref err) => { + err!(Math(terminator.source_info.span, err.clone())) + } + GeneratorResumedAfterReturn | + GeneratorResumedAfterPanic => unimplemented!(), + }; + } + } + + Yield { .. } => unimplemented!("{:#?}", terminator.kind), + GeneratorDrop => unimplemented!(), + DropAndReplace { .. } => unimplemented!(), + Resume => unimplemented!(), + Unreachable => return err!(Unreachable), + } + + Ok(()) + } + + /// Decides whether it is okay to call the method with signature `real_sig` using signature `sig`. + /// FIXME: This should take into account the platform-dependent ABI description. + fn check_sig_compat( + &mut self, + sig: ty::FnSig<'tcx>, + real_sig: ty::FnSig<'tcx>, + ) -> EvalResult<'tcx, bool> { + fn check_ty_compat<'tcx>(ty: ty::Ty<'tcx>, real_ty: ty::Ty<'tcx>) -> bool { + if ty == real_ty { + return true; + } // This is actually a fast pointer comparison + return match (&ty.sty, &real_ty.sty) { + // Permit changing the pointer type of raw pointers and references as well as + // mutability of raw pointers. + // TODO: Should not be allowed when fat pointers are involved. + (&TypeVariants::TyRawPtr(_), &TypeVariants::TyRawPtr(_)) => true, + (&TypeVariants::TyRef(_, _), &TypeVariants::TyRef(_, _)) => { + ty.is_mutable_pointer() == real_ty.is_mutable_pointer() + } + // rule out everything else + _ => false, + }; + } + + if sig.abi == real_sig.abi && sig.variadic == real_sig.variadic && + sig.inputs_and_output.len() == real_sig.inputs_and_output.len() && + sig.inputs_and_output + .iter() + .zip(real_sig.inputs_and_output) + .all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) + { + // Definitely good. 
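+ // (identical ABI and variadicity, matching arity, and pairwise-compatible argument/return types)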
+ return Ok(true); + } + + if sig.variadic || real_sig.variadic { + // We're not touching this + return Ok(false); + } + + // We need to allow what comes up when a non-capturing closure is cast to a fn(). + match (sig.abi, real_sig.abi) { + (Abi::Rust, Abi::RustCall) // check the ABIs. This makes the test here non-symmetric. + if check_ty_compat(sig.output(), real_sig.output()) && real_sig.inputs_and_output.len() == 3 => { + // First argument of real_sig must be a ZST + let fst_ty = real_sig.inputs_and_output[0]; + let layout = self.type_layout(fst_ty)?; + let size = layout.size(&self.tcx.data_layout).bytes(); + if size == 0 { + // Second argument must be a tuple matching the argument list of sig + let snd_ty = real_sig.inputs_and_output[1]; + match snd_ty.sty { + TypeVariants::TyTuple(tys, _) if sig.inputs().len() == tys.len() => + if sig.inputs().iter().zip(tys).all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) { + return Ok(true) + }, + _ => {} + } + } + } + _ => {} + }; + + // Nope, this doesn't work. + return Ok(false); + } + + fn eval_fn_call( + &mut self, + instance: ty::Instance<'tcx>, + destination: Option<(Lvalue, mir::BasicBlock)>, + args: &[ValTy<'tcx>], + span: Span, + sig: ty::FnSig<'tcx>, + ) -> EvalResult<'tcx> { + trace!("eval_fn_call: {:#?}", instance); + match instance.def { + ty::InstanceDef::Intrinsic(..) => { + let (ret, target) = match destination { + Some(dest) => dest, + _ => return err!(Unreachable), + }; + let ty = sig.output(); + if !eval_context::is_inhabited(self.tcx, ty) { + return err!(Unreachable); + } + let layout = self.type_layout(ty)?; + M::call_intrinsic(self, instance, args, ret, ty, layout, target)?; + self.dump_local(ret); + Ok(()) + } + // FIXME: figure out why we can't just go through the shim + ty::InstanceDef::ClosureOnceShim { .. } => { + if M::eval_fn_call(self, instance, destination, args, span, sig)? { + return Ok(()); + } + let mut arg_locals = self.frame().mir.args_iter(); + match sig.abi { + // closure as closure once + Abi::RustCall => { + for (arg_local, &valty) in arg_locals.zip(args) { + let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; + self.write_value(valty, dest)?; + } + } + // non capture closure as fn ptr + // need to inject zst ptr for closure object (aka do nothing) + // and need to pack arguments + Abi::Rust => { + trace!( + "arg_locals: {:?}", + self.frame().mir.args_iter().collect::>() + ); + trace!("args: {:?}", args); + let local = arg_locals.nth(1).unwrap(); + for (i, &valty) in args.into_iter().enumerate() { + let dest = self.eval_lvalue(&mir::Lvalue::Local(local).field( + mir::Field::new(i), + valty.ty, + ))?; + self.write_value(valty, dest)?; + } + } + _ => bug!("bad ABI for ClosureOnceShim: {:?}", sig.abi), + } + Ok(()) + } + ty::InstanceDef::FnPtrShim(..) | + ty::InstanceDef::DropGlue(..) | + ty::InstanceDef::CloneShim(..) | + ty::InstanceDef::Item(_) => { + // Push the stack frame, and potentially be entirely done if the call got hooked + if M::eval_fn_call(self, instance, destination, args, span, sig)? 
{ + return Ok(()); + } + + // Pass the arguments + let mut arg_locals = self.frame().mir.args_iter(); + trace!("ABI: {:?}", sig.abi); + trace!( + "arg_locals: {:?}", + self.frame().mir.args_iter().collect::>() + ); + trace!("args: {:?}", args); + match sig.abi { + Abi::RustCall => { + assert_eq!(args.len(), 2); + + { + // write first argument + let first_local = arg_locals.next().unwrap(); + let dest = self.eval_lvalue(&mir::Lvalue::Local(first_local))?; + self.write_value(args[0], dest)?; + } + + // unpack and write all other args + let layout = self.type_layout(args[1].ty)?; + if let (&ty::TyTuple(fields, _), + &Layout::Univariant { ref variant, .. }) = (&args[1].ty.sty, layout) + { + trace!("fields: {:?}", fields); + if self.frame().mir.args_iter().count() == fields.len() + 1 { + let offsets = variant.offsets.iter().map(|s| s.bytes()); + match args[1].value { + Value::ByRef(PtrAndAlign { ptr, aligned }) => { + assert!( + aligned, + "Unaligned ByRef-values cannot occur as function arguments" + ); + for ((offset, ty), arg_local) in + offsets.zip(fields).zip(arg_locals) + { + let arg = Value::by_ref(ptr.offset(offset, &self)?); + let dest = + self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; + trace!( + "writing arg {:?} to {:?} (type: {})", + arg, + dest, + ty + ); + let valty = ValTy { + value: arg, + ty, + }; + self.write_value(valty, dest)?; + } + } + Value::ByVal(PrimVal::Undef) => {} + other => { + assert_eq!(fields.len(), 1); + let dest = self.eval_lvalue(&mir::Lvalue::Local( + arg_locals.next().unwrap(), + ))?; + let valty = ValTy { + value: other, + ty: fields[0], + }; + self.write_value(valty, dest)?; + } + } + } else { + trace!("manual impl of rust-call ABI"); + // called a manual impl of a rust-call function + let dest = self.eval_lvalue( + &mir::Lvalue::Local(arg_locals.next().unwrap()), + )?; + self.write_value(args[1], dest)?; + } + } else { + bug!( + "rust-call ABI tuple argument was {:#?}, {:#?}", + args[1].ty, + layout + ); + } + } + _ => { + for (arg_local, &valty) in arg_locals.zip(args) { + let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; + self.write_value(valty, dest)?; + } + } + } + Ok(()) + } + // cannot use the shim here, because that will only result in infinite recursion + ty::InstanceDef::Virtual(_, idx) => { + let ptr_size = self.memory.pointer_size(); + let (ptr, vtable) = args[0].into_ptr_vtable_pair(&self.memory)?; + let fn_ptr = self.memory.read_ptr_sized_unsigned( + vtable.offset(ptr_size * (idx as u64 + 3), &self)? 
+ )?.to_ptr()?; + let instance = self.memory.get_fn(fn_ptr)?; + let mut args = args.to_vec(); + let ty = self.get_field_ty(args[0].ty, 0)?.ty; // TODO: packed flag is ignored + args[0].ty = ty; + args[0].value = ptr.to_value(); + // recurse with concrete function + self.eval_fn_call(instance, destination, &args, span, sig) + } + } + } +} diff --git a/src/tools/miri/src/librustc_mir/interpret/traits.rs b/src/tools/miri/src/librustc_mir/interpret/traits.rs new file mode 100644 index 0000000000..3f7e10a9ea --- /dev/null +++ b/src/tools/miri/src/librustc_mir/interpret/traits.rs @@ -0,0 +1,137 @@ +use rustc::traits::{self, Reveal}; +use rustc::hir::def_id::DefId; +use rustc::ty::subst::Substs; +use rustc::ty::{self, Ty}; +use syntax::codemap::DUMMY_SP; +use syntax::ast::{self, Mutability}; + +use super::{EvalResult, EvalContext, eval_context, MemoryPointer, MemoryKind, Value, PrimVal, + Machine}; + +impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { + pub(crate) fn fulfill_obligation( + &self, + trait_ref: ty::PolyTraitRef<'tcx>, + ) -> traits::Vtable<'tcx, ()> { + // Do the initial selection for the obligation. This yields the shallow result we are + // looking for -- that is, what specific impl. + self.tcx.infer_ctxt().enter(|infcx| { + let mut selcx = traits::SelectionContext::new(&infcx); + + let obligation = traits::Obligation::new( + traits::ObligationCause::misc(DUMMY_SP, ast::DUMMY_NODE_ID), + ty::ParamEnv::empty(Reveal::All), + trait_ref.to_poly_trait_predicate(), + ); + let selection = selcx.select(&obligation).unwrap().unwrap(); + + // Currently, we use a fulfillment context to completely resolve all nested obligations. + // This is because they can inform the inference of the impl's type parameters. + let mut fulfill_cx = traits::FulfillmentContext::new(); + let vtable = selection.map(|predicate| { + fulfill_cx.register_predicate_obligation(&infcx, predicate); + }); + infcx.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &vtable) + }) + } + + /// Creates a dynamic vtable for the given type and vtable origin. This is used only for + /// objects. + /// + /// The `trait_ref` encodes the erased self type. Hence if we are + /// making an object `Foo` from a value of type `Foo`, then + /// `trait_ref` would map `T:Trait`. 
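+ /// The resulting vtable is laid out as `[drop_in_place fn ptr, size, align, method fn ptrs...]`, one pointer-sized slot each.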
+ pub fn get_vtable( + &mut self, + ty: Ty<'tcx>, + trait_ref: ty::PolyTraitRef<'tcx>, + ) -> EvalResult<'tcx, MemoryPointer> { + debug!("get_vtable(trait_ref={:?})", trait_ref); + + let size = self.type_size(trait_ref.self_ty())?.expect( + "can't create a vtable for an unsized type", + ); + let align = self.type_align(trait_ref.self_ty())?; + + let ptr_size = self.memory.pointer_size(); + let methods = ::rustc::traits::get_vtable_methods(self.tcx, trait_ref); + let vtable = self.memory.allocate( + ptr_size * (3 + methods.count() as u64), + ptr_size, + MemoryKind::UninitializedStatic, + )?; + + let drop = eval_context::resolve_drop_in_place(self.tcx, ty); + let drop = self.memory.create_fn_alloc(drop); + self.memory.write_ptr_sized_unsigned(vtable, PrimVal::Ptr(drop))?; + + let size_ptr = vtable.offset(ptr_size, &self)?; + self.memory.write_ptr_sized_unsigned(size_ptr, PrimVal::Bytes(size as u128))?; + let align_ptr = vtable.offset(ptr_size * 2, &self)?; + self.memory.write_ptr_sized_unsigned(align_ptr, PrimVal::Bytes(align as u128))?; + + for (i, method) in ::rustc::traits::get_vtable_methods(self.tcx, trait_ref).enumerate() { + if let Some((def_id, substs)) = method { + let instance = eval_context::resolve(self.tcx, def_id, substs); + let fn_ptr = self.memory.create_fn_alloc(instance); + let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?; + self.memory.write_ptr_sized_unsigned(method_ptr, PrimVal::Ptr(fn_ptr))?; + } + } + + self.memory.mark_static_initalized( + vtable.alloc_id, + Mutability::Mutable, + )?; + + Ok(vtable) + } + + pub fn read_drop_type_from_vtable( + &self, + vtable: MemoryPointer, + ) -> EvalResult<'tcx, Option<ty::Instance<'tcx>>> { + // we don't care about the pointee type, we just want a pointer + match self.read_ptr(vtable, self.tcx.mk_nil_ptr())? { + // some values don't need to call a drop impl, so the value is null + Value::ByVal(PrimVal::Bytes(0)) => Ok(None), + Value::ByVal(PrimVal::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some), + _ => err!(ReadBytesAsPointer), + } + } + + pub fn read_size_and_align_from_vtable( + &self, + vtable: MemoryPointer, + ) -> EvalResult<'tcx, (u64, u64)> { + let pointer_size = self.memory.pointer_size(); + let size = self.memory.read_ptr_sized_unsigned(vtable.offset(pointer_size, self)?)?.to_bytes()? as u64; + let align = self.memory.read_ptr_sized_unsigned( + vtable.offset(pointer_size * 2, self)? + )?.to_bytes()? 
as u64; + Ok((size, align)) + } + + pub(crate) fn resolve_associated_const( + &self, + def_id: DefId, + substs: &'tcx Substs<'tcx>, + ) -> ty::Instance<'tcx> { + if let Some(trait_id) = self.tcx.trait_of_item(def_id) { + let trait_ref = ty::Binder(ty::TraitRef::new(trait_id, substs)); + let vtable = self.fulfill_obligation(trait_ref); + if let traits::VtableImpl(vtable_impl) = vtable { + let name = self.tcx.item_name(def_id); + let assoc_const_opt = self.tcx.associated_items(vtable_impl.impl_def_id).find( + |item| { + item.kind == ty::AssociatedKind::Const && item.name == name + }, + ); + if let Some(assoc_const) = assoc_const_opt { + return ty::Instance::new(assoc_const.def_id, vtable_impl.substs); + } + } + } + ty::Instance::new(def_id, substs) + } +} diff --git a/src/tools/miri/src/librustc_mir/interpret/validation.rs b/src/tools/miri/src/librustc_mir/interpret/validation.rs new file mode 100644 index 0000000000..9be9341ee2 --- /dev/null +++ b/src/tools/miri/src/librustc_mir/interpret/validation.rs @@ -0,0 +1,727 @@ +use rustc::hir::{self, Mutability}; +use rustc::hir::Mutability::*; +use rustc::mir::{self, ValidationOp, ValidationOperand}; +use rustc::ty::{self, Ty, TypeFoldable, TyCtxt}; +use rustc::ty::subst::{Substs, Subst}; +use rustc::traits; +use rustc::infer::InferCtxt; +use rustc::traits::Reveal; +use rustc::middle::region; +use rustc_data_structures::indexed_vec::Idx; + +use super::{EvalError, EvalResult, EvalErrorKind, EvalContext, DynamicLifetime, AccessKind, Value, + Lvalue, LvalueExtra, Machine, ValTy}; + +pub type ValidationQuery<'tcx> = ValidationOperand<'tcx, (AbsLvalue<'tcx>, Lvalue)>; + +#[derive(Copy, Clone, Debug, PartialEq)] +enum ValidationMode { + Acquire, + /// Recover because the given region ended + Recover(region::Scope), + ReleaseUntil(Option), +} + +impl ValidationMode { + fn acquiring(self) -> bool { + use self::ValidationMode::*; + match self { + Acquire | Recover(_) => true, + ReleaseUntil(_) => false, + } + } +} + +// Abstract lvalues +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub enum AbsLvalue<'tcx> { + Local(mir::Local), + Static(hir::def_id::DefId), + Projection(Box>), +} + +type AbsLvalueProjection<'tcx> = mir::Projection<'tcx, AbsLvalue<'tcx>, u64, ()>; +type AbsLvalueElem<'tcx> = mir::ProjectionElem<'tcx, u64, ()>; + +impl<'tcx> AbsLvalue<'tcx> { + pub fn field(self, f: mir::Field) -> AbsLvalue<'tcx> { + self.elem(mir::ProjectionElem::Field(f, ())) + } + + pub fn deref(self) -> AbsLvalue<'tcx> { + self.elem(mir::ProjectionElem::Deref) + } + + pub fn downcast(self, adt_def: &'tcx ty::AdtDef, variant_index: usize) -> AbsLvalue<'tcx> { + self.elem(mir::ProjectionElem::Downcast(adt_def, variant_index)) + } + + pub fn index(self, index: u64) -> AbsLvalue<'tcx> { + self.elem(mir::ProjectionElem::Index(index)) + } + + fn elem(self, elem: AbsLvalueElem<'tcx>) -> AbsLvalue<'tcx> { + AbsLvalue::Projection(Box::new(AbsLvalueProjection { + base: self, + elem, + })) + } +} + +impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { + fn abstract_lvalue_projection(&self, proj: &mir::LvalueProjection<'tcx>) -> EvalResult<'tcx, AbsLvalueProjection<'tcx>> { + use self::mir::ProjectionElem::*; + + let elem = match proj.elem { + Deref => Deref, + Field(f, _) => Field(f, ()), + Index(v) => { + let value = self.frame().get_local(v)?; + let ty = self.tcx.types.usize; + let n = self.value_to_primval(ValTy { value, ty })?.to_u64()?; + Index(n) + }, + ConstantIndex { offset, min_length, from_end } => + ConstantIndex { offset, min_length, from_end }, + Subslice { 
from, to } => + Subslice { from, to }, + Downcast(adt, sz) => Downcast(adt, sz), + }; + Ok(AbsLvalueProjection { + base: self.abstract_lvalue(&proj.base)?, + elem + }) + } + + fn abstract_lvalue(&self, lval: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, AbsLvalue<'tcx>> { + Ok(match lval { + &mir::Lvalue::Local(l) => AbsLvalue::Local(l), + &mir::Lvalue::Static(ref s) => AbsLvalue::Static(s.def_id), + &mir::Lvalue::Projection(ref p) => + AbsLvalue::Projection(Box::new(self.abstract_lvalue_projection(&*p)?)), + }) + } + + // Validity checks + pub(crate) fn validation_op( + &mut self, + op: ValidationOp, + operand: &ValidationOperand<'tcx, mir::Lvalue<'tcx>>, + ) -> EvalResult<'tcx> { + // If mir-emit-validate is set to 0 (i.e., disabled), we may still see validation commands + // because other crates may have been compiled with mir-emit-validate > 0. Ignore those + // commands. This makes mir-emit-validate also a flag to control whether miri will do + // validation or not. + if self.tcx.sess.opts.debugging_opts.mir_emit_validate == 0 { + return Ok(()); + } + debug_assert!(self.memory.cur_frame == self.cur_frame()); + + // HACK: Determine if this method is whitelisted and hence we do not perform any validation. + // We currently insta-UB on anything passing around uninitialized memory, so we have to whitelist + // the places that are allowed to do that. + // The second group is stuff libstd does that is forbidden even under relaxed validation. + { + // The regexp we use for filtering + use regex::Regex; + lazy_static! { + static ref RE: Regex = Regex::new("^(\ + (std|alloc::heap::__core)::mem::(uninitialized|forget)::|\ + <(std|alloc)::heap::Heap as (std::heap|alloc::allocator)::Alloc>::|\ + <(std|alloc::heap::__core)::mem::ManuallyDrop><.*>::new$|\ + <(std|alloc::heap::__core)::mem::ManuallyDrop as std::ops::DerefMut><.*>::deref_mut$|\ + (std|alloc::heap::__core)::ptr::read::|\ + \ + ><.*>::inner$|\ + ><.*>::drop_slow$|\ + (std::heap|alloc::allocator)::Layout::for_value::|\ + (std|alloc::heap::__core)::mem::(size|align)_of_val::\ + )").unwrap(); + } + // Now test + let name = self.stack[self.cur_frame()].instance.to_string(); + if RE.is_match(&name) { + return Ok(()); + } + } + + // We need to monomorphize ty *without* erasing lifetimes + let ty = operand.ty.subst(self.tcx, self.substs()); + let lval = self.eval_lvalue(&operand.lval)?; + let abs_lval = self.abstract_lvalue(&operand.lval)?; + let query = ValidationQuery { + lval: (abs_lval, lval), + ty, + re: operand.re, + mutbl: operand.mutbl, + }; + + // Check the mode, and also perform mode-specific operations + let mode = match op { + ValidationOp::Acquire => ValidationMode::Acquire, + ValidationOp::Release => ValidationMode::ReleaseUntil(None), + ValidationOp::Suspend(scope) => { + if query.mutbl == MutMutable { + let lft = DynamicLifetime { + frame: self.cur_frame(), + region: Some(scope), // Notably, we only ever suspend things for given regions. + // Suspending for the entire function does not make any sense. + }; + trace!("Suspending {:?} until {:?}", query, scope); + self.suspended.entry(lft).or_insert_with(Vec::new).push( + query.clone(), + ); + } + ValidationMode::ReleaseUntil(Some(scope)) + } + }; + self.validate(query, mode) + } + + /// Release locks and executes suspensions of the given region (or the entire fn, in case of None). 
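+ /// Any write locks that were suspended for this region are recovered (re-acquired) here.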
+ pub(crate) fn end_region(&mut self, scope: Option) -> EvalResult<'tcx> { + debug_assert!(self.memory.cur_frame == self.cur_frame()); + self.memory.locks_lifetime_ended(scope); + match scope { + Some(scope) => { + // Recover suspended lvals + let lft = DynamicLifetime { + frame: self.cur_frame(), + region: Some(scope), + }; + if let Some(queries) = self.suspended.remove(&lft) { + for query in queries { + trace!("Recovering {:?} from suspension", query); + self.validate(query, ValidationMode::Recover(scope))?; + } + } + } + None => { + // Clean suspension table of current frame + let cur_frame = self.cur_frame(); + self.suspended.retain(|lft, _| { + lft.frame != cur_frame // keep only what is in the other (lower) frames + }); + } + } + Ok(()) + } + + fn normalize_type_unerased(&self, ty: Ty<'tcx>) -> Ty<'tcx> { + return normalize_associated_type(self.tcx, &ty); + + use syntax::codemap::{Span, DUMMY_SP}; + + // We copy a bunch of stuff from rustc/infer/mod.rs to be able to tweak its behavior + fn normalize_projections_in<'a, 'gcx, 'tcx, T>( + self_: &InferCtxt<'a, 'gcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + value: &T, + ) -> T::Lifted + where + T: TypeFoldable<'tcx> + ty::Lift<'gcx>, + { + let mut selcx = traits::SelectionContext::new(self_); + let cause = traits::ObligationCause::dummy(); + let traits::Normalized { + value: result, + obligations, + } = traits::normalize(&mut selcx, param_env, cause, value); + + let mut fulfill_cx = traits::FulfillmentContext::new(); + + for obligation in obligations { + fulfill_cx.register_predicate_obligation(self_, obligation); + } + + drain_fulfillment_cx_or_panic(self_, DUMMY_SP, &mut fulfill_cx, &result) + } + + fn drain_fulfillment_cx_or_panic<'a, 'gcx, 'tcx, T>( + self_: &InferCtxt<'a, 'gcx, 'tcx>, + span: Span, + fulfill_cx: &mut traits::FulfillmentContext<'tcx>, + result: &T, + ) -> T::Lifted + where + T: TypeFoldable<'tcx> + ty::Lift<'gcx>, + { + // In principle, we only need to do this so long as `result` + // contains unbound type parameters. It could be a slight + // optimization to stop iterating early. + match fulfill_cx.select_all_or_error(self_) { + Ok(()) => { } + Err(errors) => { + span_bug!( + span, + "Encountered errors `{:?}` resolving bounds after type-checking", + errors + ); + } + } + + let result = self_.resolve_type_vars_if_possible(result); + let result = self_.tcx.fold_regions( + &result, + &mut false, + |r, _| match *r { + ty::ReVar(_) => self_.tcx.types.re_erased, + _ => r, + }, + ); + + match self_.tcx.lift_to_global(&result) { + Some(result) => result, + None => { + span_bug!(span, "Uninferred types/regions in `{:?}`", result); + } + } + } + + trait MyTransNormalize<'gcx>: TypeFoldable<'gcx> { + fn my_trans_normalize<'a, 'tcx>( + &self, + infcx: &InferCtxt<'a, 'gcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + ) -> Self; + } + + macro_rules! items { ($($item:item)+) => ($($item)+) } + macro_rules! 
impl_trans_normalize { + ($lt_gcx:tt, $($ty:ty),+) => { + items!($(impl<$lt_gcx> MyTransNormalize<$lt_gcx> for $ty { + fn my_trans_normalize<'a, 'tcx>(&self, + infcx: &InferCtxt<'a, $lt_gcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>) + -> Self { + normalize_projections_in(infcx, param_env, self) + } + })+); + } + } + + impl_trans_normalize!('gcx, + Ty<'gcx>, + &'gcx Substs<'gcx>, + ty::FnSig<'gcx>, + ty::PolyFnSig<'gcx>, + ty::ClosureSubsts<'gcx>, + ty::PolyTraitRef<'gcx>, + ty::ExistentialTraitRef<'gcx> + ); + + fn normalize_associated_type<'a, 'tcx, T>(self_: TyCtxt<'a, 'tcx, 'tcx>, value: &T) -> T + where + T: MyTransNormalize<'tcx>, + { + let param_env = ty::ParamEnv::empty(Reveal::All); + + if !value.has_projections() { + return value.clone(); + } + + self_.infer_ctxt().enter(|infcx| { + value.my_trans_normalize(&infcx, param_env) + }) + } + } + + fn validate_variant( + &mut self, + query: ValidationQuery<'tcx>, + variant: &ty::VariantDef, + subst: &ty::subst::Substs<'tcx>, + mode: ValidationMode, + ) -> EvalResult<'tcx> { + // TODO: Maybe take visibility/privacy into account. + for (idx, field_def) in variant.fields.iter().enumerate() { + let field_ty = field_def.ty(self.tcx, subst); + let field = mir::Field::new(idx); + let field_lvalue = self.lvalue_field(query.lval.1, field, query.ty, field_ty)?; + self.validate( + ValidationQuery { + lval: (query.lval.0.clone().field(field), field_lvalue), + ty: field_ty, + ..query + }, + mode, + )?; + } + Ok(()) + } + + fn validate_ptr( + &mut self, + val: Value, + abs_lval: AbsLvalue<'tcx>, + pointee_ty: Ty<'tcx>, + re: Option, + mutbl: Mutability, + mode: ValidationMode, + ) -> EvalResult<'tcx> { + // Check alignment and non-NULLness + let (_, align) = self.size_and_align_of_dst(pointee_ty, val)?; + let ptr = val.into_ptr(&self.memory)?; + self.memory.check_align(ptr, align, None)?; + + // Recurse + let pointee_lvalue = self.val_to_lvalue(val, pointee_ty)?; + self.validate( + ValidationQuery { + lval: (abs_lval.deref(), pointee_lvalue), + ty: pointee_ty, + re, + mutbl, + }, + mode, + ) + } + + /// Validate the lvalue at the given type. If `acquire` is false, just do a release of all write locks + fn validate( + &mut self, + mut query: ValidationQuery<'tcx>, + mode: ValidationMode, + ) -> EvalResult<'tcx> { + use rustc::ty::TypeVariants::*; + use rustc::ty::RegionKind::*; + use rustc::ty::AdtKind; + + // No point releasing shared stuff. + if !mode.acquiring() && query.mutbl == MutImmutable { + return Ok(()); + } + // When we recover, we may see data whose validity *just* ended. Do not acquire it. + if let ValidationMode::Recover(ending_ce) = mode { + if query.re == Some(ending_ce) { + return Ok(()); + } + } + + query.ty = self.normalize_type_unerased(&query.ty); + trace!("{:?} on {:?}", mode, query); + + // Decide whether this type *owns* the memory it covers (like integers), or whether it + // just assembles pieces (that each own their memory) together to a larger whole. + // TODO: Currently, we don't acquire locks for padding and discriminants. We should. + let is_owning = match query.ty.sty { + TyInt(_) | TyUint(_) | TyRawPtr(_) | TyBool | TyFloat(_) | TyChar | TyStr | + TyRef(..) | TyFnPtr(..) | TyFnDef(..) | TyNever => true, + TyAdt(adt, _) if adt.is_box() => true, + TySlice(_) | TyAdt(_, _) | TyTuple(..) | TyClosure(..) | TyArray(..) | + TyDynamic(..) | TyGenerator(..) => false, + TyParam(_) | TyInfer(_) | TyProjection(_) | TyAnon(..) 
| TyError => { + bug!("I got an incomplete/unnormalized type for validation") + } + }; + if is_owning { + // We need to lock. So we need memory. So we have to force_acquire. + // Tracking the same state for locals not backed by memory would just duplicate too + // much machinery. + // FIXME: We ignore alignment. + let (ptr, extra) = self.force_allocation(query.lval.1)?.to_ptr_extra_aligned(); + // Determine the size + // FIXME: Can we reuse size_and_align_of_dst for Lvalues? + let len = match self.type_size(query.ty)? { + Some(size) => { + assert_eq!(extra, LvalueExtra::None, "Got a fat ptr to a sized type"); + size + } + None => { + // The only unsized typ we concider "owning" is TyStr. + assert_eq!( + query.ty.sty, + TyStr, + "Found a surprising unsized owning type" + ); + // The extra must be the length, in bytes. + match extra { + LvalueExtra::Length(len) => len, + _ => bug!("TyStr must have a length as extra"), + } + } + }; + // Handle locking + if len > 0 { + let ptr = ptr.to_ptr()?; + match query.mutbl { + MutImmutable => { + if mode.acquiring() { + self.memory.acquire_lock( + ptr, + len, + query.re, + AccessKind::Read, + )?; + } + } + // No releasing of read locks, ever. + MutMutable => { + match mode { + ValidationMode::Acquire => { + self.memory.acquire_lock( + ptr, + len, + query.re, + AccessKind::Write, + )? + } + ValidationMode::Recover(ending_ce) => { + self.memory.recover_write_lock( + ptr, + len, + &query.lval.0, + query.re, + ending_ce, + )? + } + ValidationMode::ReleaseUntil(suspended_ce) => { + self.memory.suspend_write_lock( + ptr, + len, + &query.lval.0, + suspended_ce, + )? + } + } + } + } + } + } + + let res = do catch { + match query.ty.sty { + TyInt(_) | TyUint(_) | TyRawPtr(_) => { + if mode.acquiring() { + // Make sure we can read this. + let val = self.read_lvalue(query.lval.1)?; + self.follow_by_ref_value(val, query.ty)?; + // FIXME: It would be great to rule out Undef here, but that doesn't actually work. + // Passing around undef data is a thing that e.g. Vec::extend_with does. + } + Ok(()) + } + TyBool | TyFloat(_) | TyChar => { + if mode.acquiring() { + let val = self.read_lvalue(query.lval.1)?; + let val = self.value_to_primval(ValTy { value: val, ty: query.ty })?; + val.to_bytes()?; + // TODO: Check if these are valid bool/float/codepoint/UTF-8 + } + Ok(()) + } + TyNever => err!(ValidationFailure(format!("The empty type is never valid."))), + TyRef(region, + ty::TypeAndMut { + ty: pointee_ty, + mutbl, + }) => { + let val = self.read_lvalue(query.lval.1)?; + // Sharing restricts our context + if mutbl == MutImmutable { + query.mutbl = MutImmutable; + } + // Inner lifetimes *outlive* outer ones, so only if we have no lifetime restriction yet, + // we record the region of this borrow to the context. + if query.re == None { + match *region { + ReScope(scope) => query.re = Some(scope), + // It is possible for us to encounter erased lifetimes here because the lifetimes in + // this functions' Subst will be erased. + _ => {} + } + } + self.validate_ptr(val, query.lval.0, pointee_ty, query.re, query.mutbl, mode) + } + TyAdt(adt, _) if adt.is_box() => { + let val = self.read_lvalue(query.lval.1)?; + self.validate_ptr(val, query.lval.0, query.ty.boxed_ty(), query.re, query.mutbl, mode) + } + TyFnPtr(_sig) => { + let ptr = self.read_lvalue(query.lval.1)? + .into_ptr(&self.memory)? + .to_ptr()?; + self.memory.get_fn(ptr)?; + // TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?). + Ok(()) + } + TyFnDef(..) 
=> { + // This is a zero-sized type with all relevant data sitting in the type. + // There is nothing to validate. + Ok(()) + } + + // Compound types + TyStr => { + // TODO: Validate strings + Ok(()) + } + TySlice(elem_ty) => { + let len = match query.lval.1 { + Lvalue::Ptr { extra: LvalueExtra::Length(len), .. } => len, + _ => { + bug!( + "acquire_valid of a TySlice given non-slice lvalue: {:?}", + query.lval + ) + } + }; + for i in 0..len { + let inner_lvalue = self.lvalue_index(query.lval.1, query.ty, i)?; + self.validate( + ValidationQuery { + lval: (query.lval.0.clone().index(i), inner_lvalue), + ty: elem_ty, + ..query + }, + mode, + )?; + } + Ok(()) + } + TyArray(elem_ty, len) => { + let len = len.val.to_const_int().unwrap().to_u64().unwrap(); + for i in 0..len { + let inner_lvalue = self.lvalue_index(query.lval.1, query.ty, i as u64)?; + self.validate( + ValidationQuery { + lval: (query.lval.0.clone().index(i as u64), inner_lvalue), + ty: elem_ty, + ..query + }, + mode, + )?; + } + Ok(()) + } + TyDynamic(_data, _region) => { + // Check that this is a valid vtable + let vtable = match query.lval.1 { + Lvalue::Ptr { extra: LvalueExtra::Vtable(vtable), .. } => vtable, + _ => { + bug!( + "acquire_valid of a TyDynamic given non-trait-object lvalue: {:?}", + query.lval + ) + } + }; + self.read_size_and_align_from_vtable(vtable)?; + // TODO: Check that the vtable contains all the function pointers we expect it to have. + // Trait objects cannot have any operations performed + // on them directly. We cannot, in general, even acquire any locks as the trait object *could* + // contain an UnsafeCell. If we call functions to get access to data, we will validate + // their return values. So, it doesn't seem like there's anything else to do. + Ok(()) + } + TyAdt(adt, subst) => { + if Some(adt.did) == self.tcx.lang_items().unsafe_cell_type() && + query.mutbl == MutImmutable + { + // No locks for shared unsafe cells. Also no other validation, the only field is private anyway. + return Ok(()); + } + + match adt.adt_kind() { + AdtKind::Enum => { + // TODO: Can we get the discriminant without forcing an allocation? + let ptr = self.force_allocation(query.lval.1)?.to_ptr()?; + let discr = self.read_discriminant_value(ptr, query.ty)?; + + // Get variant index for discriminant + let variant_idx = adt.discriminants(self.tcx).position(|variant_discr| { + variant_discr.to_u128_unchecked() == discr + }); + let variant_idx = match variant_idx { + Some(val) => val, + None => return err!(InvalidDiscriminant), + }; + let variant = &adt.variants[variant_idx]; + + if variant.fields.len() > 0 { + // Downcast to this variant, if needed + let lval = if adt.variants.len() > 1 { + ( + query.lval.0.downcast(adt, variant_idx), + self.eval_lvalue_projection( + query.lval.1, + query.ty, + &mir::ProjectionElem::Downcast(adt, variant_idx), + )?, + ) + } else { + query.lval + }; + + // Recursively validate the fields + self.validate_variant( + ValidationQuery { lval, ..query }, + variant, + subst, + mode, + ) + } else { + // No fields, nothing left to check. Downcasting may fail, e.g. in case of a CEnum. + Ok(()) + } + } + AdtKind::Struct => { + self.validate_variant(query, adt.struct_variant(), subst, mode) + } + AdtKind::Union => { + // No guarantees are provided for union types. + // TODO: Make sure that all access to union fields is unsafe; otherwise, we may have some checking to do (but what exactly?) 
+ Ok(()) + } + } + } + TyTuple(ref types, _) => { + for (idx, field_ty) in types.iter().enumerate() { + let field = mir::Field::new(idx); + let field_lvalue = self.lvalue_field(query.lval.1, field, query.ty, field_ty)?; + self.validate( + ValidationQuery { + lval: (query.lval.0.clone().field(field), field_lvalue), + ty: field_ty, + ..query + }, + mode, + )?; + } + Ok(()) + } + TyClosure(def_id, ref closure_substs) => { + for (idx, field_ty) in closure_substs.upvar_tys(def_id, self.tcx).enumerate() { + let field = mir::Field::new(idx); + let field_lvalue = self.lvalue_field(query.lval.1, field, query.ty, field_ty)?; + self.validate( + ValidationQuery { + lval: (query.lval.0.clone().field(field), field_lvalue), + ty: field_ty, + ..query + }, + mode, + )?; + } + // TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?). + // Is there other things we can/should check? Like vtable pointers? + Ok(()) + } + // FIXME: generators aren't validated right now + TyGenerator(..) => Ok(()), + _ => bug!("We already established that this is a type we support. ({})", query.ty), + } + }; + match res { + // ReleaseUntil(None) of an uninitalized variable is a NOP. This is needed because + // we have to release the return value of a function; due to destination-passing-style + // the callee may directly write there. + // TODO: Ideally we would know whether the destination is already initialized, and only + // release if it is. But of course that can't even always be statically determined. + Err(EvalError { kind: EvalErrorKind::ReadUndefBytes, .. }) + if mode == ValidationMode::ReleaseUntil(None) => { + return Ok(()); + } + res => res, + } + } +} diff --git a/src/tools/miri/src/librustc_mir/interpret/value.rs b/src/tools/miri/src/librustc_mir/interpret/value.rs new file mode 100644 index 0000000000..e052ec1e39 --- /dev/null +++ b/src/tools/miri/src/librustc_mir/interpret/value.rs @@ -0,0 +1,405 @@ +#![allow(unknown_lints)] + +use rustc::ty::layout::HasDataLayout; + +use super::{EvalResult, Memory, MemoryPointer, HasMemory, PointerArithmetic, Machine, PtrAndAlign}; + +pub(super) fn bytes_to_f32(bytes: u128) -> f32 { + f32::from_bits(bytes as u32) +} + +pub(super) fn bytes_to_f64(bytes: u128) -> f64 { + f64::from_bits(bytes as u64) +} + +pub(super) fn f32_to_bytes(f: f32) -> u128 { + f.to_bits() as u128 +} + +pub(super) fn f64_to_bytes(f: f64) -> u128 { + f.to_bits() as u128 +} + +/// A `Value` represents a single self-contained Rust value. +/// +/// A `Value` can either refer to a block of memory inside an allocation (`ByRef`) or to a primitve +/// value held directly, outside of any allocation (`ByVal`). For `ByRef`-values, we remember +/// whether the pointer is supposed to be aligned or not (also see Lvalue). +/// +/// For optimization of a few very common cases, there is also a representation for a pair of +/// primitive values (`ByValPair`). It allows Miri to avoid making allocations for checked binary +/// operations and fat pointers. This idea was taken from rustc's trans. +#[derive(Clone, Copy, Debug)] +pub enum Value { + ByRef(PtrAndAlign), + ByVal(PrimVal), + ByValPair(PrimVal, PrimVal), +} + +/// A wrapper type around `PrimVal` that cannot be turned back into a `PrimVal` accidentally. +/// This type clears up a few APIs where having a `PrimVal` argument for something that is +/// potentially an integer pointer or a pointer to an allocation was unclear. 
+/// +/// I (@oli-obk) believe it is less easy to mix up generic primvals and primvals that are just +/// the representation of pointers. Also all the sites that convert between primvals and pointers +/// are explicit now (and rare!) +#[derive(Clone, Copy, Debug)] +pub struct Pointer { + primval: PrimVal, +} + +impl<'tcx> Pointer { + pub fn null() -> Self { + PrimVal::Bytes(0).into() + } + pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> { + self.primval.to_ptr() + } + pub fn into_inner_primval(self) -> PrimVal { + self.primval + } + + pub fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { + let layout = cx.data_layout(); + match self.primval { + PrimVal::Bytes(b) => { + assert_eq!(b as u64 as u128, b); + Ok(Pointer::from( + PrimVal::Bytes(layout.signed_offset(b as u64, i)? as u128), + )) + } + PrimVal::Ptr(ptr) => ptr.signed_offset(i, layout).map(Pointer::from), + PrimVal::Undef => err!(ReadUndefBytes), + } + } + + pub fn offset<C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> { + let layout = cx.data_layout(); + match self.primval { + PrimVal::Bytes(b) => { + assert_eq!(b as u64 as u128, b); + Ok(Pointer::from( + PrimVal::Bytes(layout.offset(b as u64, i)? as u128), + )) + } + PrimVal::Ptr(ptr) => ptr.offset(i, layout).map(Pointer::from), + PrimVal::Undef => err!(ReadUndefBytes), + } + } + + pub fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { + let layout = cx.data_layout(); + match self.primval { + PrimVal::Bytes(b) => { + assert_eq!(b as u64 as u128, b); + Ok(Pointer::from(PrimVal::Bytes( + layout.wrapping_signed_offset(b as u64, i) as u128, + ))) + } + PrimVal::Ptr(ptr) => Ok(Pointer::from(ptr.wrapping_signed_offset(i, layout))), + PrimVal::Undef => err!(ReadUndefBytes), + } + } + + pub fn is_null(self) -> EvalResult<'tcx, bool> { + match self.primval { + PrimVal::Bytes(b) => Ok(b == 0), + PrimVal::Ptr(_) => Ok(false), + PrimVal::Undef => err!(ReadUndefBytes), + } + } + + pub fn to_value_with_len(self, len: u64) -> Value { + Value::ByValPair(self.primval, PrimVal::from_u128(len as u128)) + } + + pub fn to_value_with_vtable(self, vtable: MemoryPointer) -> Value { + Value::ByValPair(self.primval, PrimVal::Ptr(vtable)) + } + + pub fn to_value(self) -> Value { + Value::ByVal(self.primval) + } +} + +impl ::std::convert::From<PrimVal> for Pointer { + fn from(primval: PrimVal) -> Self { + Pointer { primval } + } +} + +impl ::std::convert::From<MemoryPointer> for Pointer { + fn from(ptr: MemoryPointer) -> Self { + PrimVal::Ptr(ptr).into() + } +} + +/// A `PrimVal` represents an immediate, primitive value existing outside of a +/// `memory::Allocation`. It is in many ways like a small chunk of an `Allocation`, up to 8 bytes in +/// size. Like a range of bytes in an `Allocation`, a `PrimVal` can either represent the raw bytes +/// of a simple value, a pointer into another `Allocation`, or be undefined. +#[derive(Clone, Copy, Debug)] +pub enum PrimVal { + /// The raw bytes of a simple value. + Bytes(u128), + + /// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of + /// relocations, but a `PrimVal` is only large enough to contain one, so we just represent the + /// relocation and its associated offset together as a `MemoryPointer` here. + Ptr(MemoryPointer), + + /// An undefined `PrimVal`, for representing values that aren't safe to examine, but are safe + /// to copy around, just like undefined bytes in an `Allocation`. 
+ Undef, +} + +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum PrimValKind { + I8, I16, I32, I64, I128, + U8, U16, U32, U64, U128, + F32, F64, + Ptr, FnPtr, + Bool, + Char, +} + +impl<'a, 'tcx: 'a> Value { + #[inline] + pub fn by_ref(ptr: Pointer) -> Self { + Value::ByRef(PtrAndAlign { ptr, aligned: true }) + } + + /// Convert the value into a pointer (or a pointer-sized integer). If the value is a ByRef, + /// this may have to perform a load. + pub fn into_ptr<M: Machine<'tcx>>( + &self, + mem: &Memory<'a, 'tcx, M>, + ) -> EvalResult<'tcx, Pointer> { + use self::Value::*; + Ok(match *self { + ByRef(PtrAndAlign { ptr, aligned }) => { + mem.read_maybe_aligned(aligned, |mem| mem.read_ptr_sized_unsigned(ptr.to_ptr()?))? + } + ByVal(ptr) | + ByValPair(ptr, _) => ptr, + }.into()) + } + + pub(super) fn into_ptr_vtable_pair<M: Machine<'tcx>>( + &self, + mem: &Memory<'a, 'tcx, M>, + ) -> EvalResult<'tcx, (Pointer, MemoryPointer)> { + use self::Value::*; + match *self { + ByRef(PtrAndAlign { + ptr: ref_ptr, + aligned, + }) => { + mem.read_maybe_aligned(aligned, |mem| { + let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?)?.into(); + let vtable = mem.read_ptr_sized_unsigned( + ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?, + )?.to_ptr()?; + Ok((ptr, vtable)) + }) + } + + ByValPair(ptr, vtable) => Ok((ptr.into(), vtable.to_ptr()?)), + + ByVal(PrimVal::Undef) => err!(ReadUndefBytes), + _ => bug!("expected ptr and vtable, got {:?}", self), + } + } + + pub(super) fn into_slice<M: Machine<'tcx>>( + &self, + mem: &Memory<'a, 'tcx, M>, + ) -> EvalResult<'tcx, (Pointer, u64)> { + use self::Value::*; + match *self { + ByRef(PtrAndAlign { + ptr: ref_ptr, + aligned, + }) => { + mem.read_maybe_aligned(aligned, |mem| { + let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?)?.into(); + let len = mem.read_ptr_sized_unsigned( + ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?, + )?.to_bytes()? 
as u64; + Ok((ptr, len)) + }) + } + ByValPair(ptr, val) => { + let len = val.to_u128()?; + assert_eq!(len as u64 as u128, len); + Ok((ptr.into(), len as u64)) + } + ByVal(PrimVal::Undef) => err!(ReadUndefBytes), + ByVal(_) => bug!("expected ptr and length, got {:?}", self), + } + } +} + +impl<'tcx> PrimVal { + pub fn from_u128(n: u128) -> Self { + PrimVal::Bytes(n) + } + + pub fn from_i128(n: i128) -> Self { + PrimVal::Bytes(n as u128) + } + + pub fn from_f32(f: f32) -> Self { + PrimVal::Bytes(f32_to_bytes(f)) + } + + pub fn from_f64(f: f64) -> Self { + PrimVal::Bytes(f64_to_bytes(f)) + } + + pub fn from_bool(b: bool) -> Self { + PrimVal::Bytes(b as u128) + } + + pub fn from_char(c: char) -> Self { + PrimVal::Bytes(c as u128) + } + + pub fn to_bytes(self) -> EvalResult<'tcx, u128> { + match self { + PrimVal::Bytes(b) => Ok(b), + PrimVal::Ptr(_) => err!(ReadPointerAsBytes), + PrimVal::Undef => err!(ReadUndefBytes), + } + } + + pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> { + match self { + PrimVal::Bytes(_) => err!(ReadBytesAsPointer), + PrimVal::Ptr(p) => Ok(p), + PrimVal::Undef => err!(ReadUndefBytes), + } + } + + pub fn is_bytes(self) -> bool { + match self { + PrimVal::Bytes(_) => true, + _ => false, + } + } + + pub fn is_ptr(self) -> bool { + match self { + PrimVal::Ptr(_) => true, + _ => false, + } + } + + pub fn is_undef(self) -> bool { + match self { + PrimVal::Undef => true, + _ => false, + } + } + + pub fn to_u128(self) -> EvalResult<'tcx, u128> { + self.to_bytes() + } + + pub fn to_u64(self) -> EvalResult<'tcx, u64> { + self.to_bytes().map(|b| { + assert_eq!(b as u64 as u128, b); + b as u64 + }) + } + + pub fn to_i32(self) -> EvalResult<'tcx, i32> { + self.to_bytes().map(|b| { + assert_eq!(b as i32 as u128, b); + b as i32 + }) + } + + pub fn to_i128(self) -> EvalResult<'tcx, i128> { + self.to_bytes().map(|b| b as i128) + } + + pub fn to_i64(self) -> EvalResult<'tcx, i64> { + self.to_bytes().map(|b| { + assert_eq!(b as i64 as u128, b); + b as i64 + }) + } + + pub fn to_f32(self) -> EvalResult<'tcx, f32> { + self.to_bytes().map(bytes_to_f32) + } + + pub fn to_f64(self) -> EvalResult<'tcx, f64> { + self.to_bytes().map(bytes_to_f64) + } + + pub fn to_bool(self) -> EvalResult<'tcx, bool> { + match self.to_bytes()? 
{ + 0 => Ok(false), + 1 => Ok(true), + _ => err!(InvalidBool), + } + } +} + +impl PrimValKind { + pub fn is_int(self) -> bool { + use self::PrimValKind::*; + match self { + I8 | I16 | I32 | I64 | I128 | U8 | U16 | U32 | U64 | U128 => true, + _ => false, + } + } + + pub fn is_signed_int(self) -> bool { + use self::PrimValKind::*; + match self { + I8 | I16 | I32 | I64 | I128 => true, + _ => false, + } + } + + pub fn is_float(self) -> bool { + use self::PrimValKind::*; + match self { + F32 | F64 => true, + _ => false, + } + } + + pub fn from_uint_size(size: u64) -> Self { + match size { + 1 => PrimValKind::U8, + 2 => PrimValKind::U16, + 4 => PrimValKind::U32, + 8 => PrimValKind::U64, + 16 => PrimValKind::U128, + _ => bug!("can't make uint with size {}", size), + } + } + + pub fn from_int_size(size: u64) -> Self { + match size { + 1 => PrimValKind::I8, + 2 => PrimValKind::I16, + 4 => PrimValKind::I32, + 8 => PrimValKind::I64, + 16 => PrimValKind::I128, + _ => bug!("can't make int with size {}", size), + } + } + + pub fn is_ptr(self) -> bool { + use self::PrimValKind::*; + match self { + Ptr | FnPtr => true, + _ => false, + } + } +} diff --git a/src/tools/miri/src/librustc_mir/lib.rs b/src/tools/miri/src/librustc_mir/lib.rs new file mode 100644 index 0000000000..c640932e50 --- /dev/null +++ b/src/tools/miri/src/librustc_mir/lib.rs @@ -0,0 +1,26 @@ +#![feature( + i128_type, + rustc_private, + conservative_impl_trait, + never_type, + catch_expr, +)] + +// From rustc. +#[macro_use] +extern crate log; +extern crate log_settings; +#[macro_use] +extern crate rustc; +extern crate rustc_const_math; +extern crate rustc_data_structures; +extern crate syntax; + +// From crates.io. +extern crate byteorder; +#[macro_use] +extern crate lazy_static; +extern crate regex; +extern crate backtrace; + +pub mod interpret; diff --git a/src/tools/miri/tests/compile-fail-fullmir/undefined_byte_read.rs b/src/tools/miri/tests/compile-fail-fullmir/undefined_byte_read.rs new file mode 100644 index 0000000000..99404b7d5f --- /dev/null +++ b/src/tools/miri/tests/compile-fail-fullmir/undefined_byte_read.rs @@ -0,0 +1,9 @@ +// This should fail even without validation +// compile-flags: -Zmir-emit-validate=0 + +fn main() { + let v: Vec<u8> = Vec::with_capacity(10); + let undef = unsafe { *v.get_unchecked(5) }; + let x = undef + 1; //~ ERROR: attempted to read undefined bytes + panic!("this should never print: {}", x); +} diff --git a/src/tools/miri/tests/compile-fail/alignment.rs b/src/tools/miri/tests/compile-fail/alignment.rs new file mode 100644 index 0000000000..4faaa359df --- /dev/null +++ b/src/tools/miri/tests/compile-fail/alignment.rs @@ -0,0 +1,11 @@ +fn main() { + // miri always gives allocations the worst possible alignment, so a `u8` array is guaranteed + // to be at the virtual location 1 (so one byte offset from the ultimate alignment location 0) + let mut x = [0u8; 20]; + let x_ptr: *mut u8 = &mut x[0]; + let y_ptr = x_ptr as *mut u64; + unsafe { + *y_ptr = 42; //~ ERROR tried to access memory with alignment 1, but alignment + } + panic!("unreachable in miri"); +} diff --git a/src/tools/miri/tests/compile-fail/assume.rs b/src/tools/miri/tests/compile-fail/assume.rs new file mode 100644 index 0000000000..69758a5d7f --- /dev/null +++ b/src/tools/miri/tests/compile-fail/assume.rs @@ -0,0 +1,10 @@ +#![feature(core_intrinsics)] + +fn main() { + let x = 5; + unsafe { + std::intrinsics::assume(x < 10); + std::intrinsics::assume(x > 1); + std::intrinsics::assume(x > 42); //~ ERROR: `assume` argument was false + } +} diff 
--git a/src/tools/miri/tests/compile-fail/bitop-beyond-alignment.rs b/src/tools/miri/tests/compile-fail/bitop-beyond-alignment.rs new file mode 100644 index 0000000000..a30c054ab5 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/bitop-beyond-alignment.rs @@ -0,0 +1,37 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(dead_code)] + +use std::mem; + +enum Tag<A> { + Tag2(A) +} + +struct Rec { + c8: u8, + t: Tag<u64> +} + +fn mk_rec() -> Rec { + return Rec { c8:0, t:Tag::Tag2(0) }; +} + +fn is_u64_aligned(u: &Tag<u64>) -> bool { + let p: usize = unsafe { mem::transmute(u) }; + let u64_align = std::mem::align_of::<u64>(); + return (p & (u64_align + 1)) == 0; //~ ERROR a raw memory access tried to access part of a pointer value as raw bytes +} + +pub fn main() { + let x = mk_rec(); + assert!(is_u64_aligned(&x.t)); +} diff --git a/src/tools/miri/tests/compile-fail/cast_box_int_to_fn_ptr.rs b/src/tools/miri/tests/compile-fail/cast_box_int_to_fn_ptr.rs new file mode 100644 index 0000000000..912b1bd7d9 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/cast_box_int_to_fn_ptr.rs @@ -0,0 +1,11 @@ +// Validation makes this fail in the wrong place +// compile-flags: -Zmir-emit-validate=0 + +fn main() { + let b = Box::new(42); + let g = unsafe { + std::mem::transmute::<&usize, &fn(i32)>(&b) + }; + + (*g)(42) //~ ERROR a memory access tried to interpret some bytes as a pointer +} diff --git a/src/tools/miri/tests/compile-fail/cast_fn_ptr.rs b/src/tools/miri/tests/compile-fail/cast_fn_ptr.rs new file mode 100644 index 0000000000..7509ae6ed7 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/cast_fn_ptr.rs @@ -0,0 +1,9 @@ +fn main() { + fn f() {} + + let g = unsafe { + std::mem::transmute::<fn(), fn(i32)>(f) + }; + + g(42) //~ ERROR tried to call a function with sig fn() through a function pointer of type fn(i32) +} diff --git a/src/tools/miri/tests/compile-fail/cast_fn_ptr2.rs b/src/tools/miri/tests/compile-fail/cast_fn_ptr2.rs new file mode 100644 index 0000000000..5d902e1f9a --- /dev/null +++ b/src/tools/miri/tests/compile-fail/cast_fn_ptr2.rs @@ -0,0 +1,9 @@ +fn main() { + fn f(_ : (i32,i32)) {} + + let g = unsafe { + std::mem::transmute::<fn((i32,i32)), fn(i32)>(f) + }; + + g(42) //~ ERROR tried to call a function with sig fn((i32, i32)) through a function pointer of type fn(i32) +} diff --git a/src/tools/miri/tests/compile-fail/cast_fn_ptr_unsafe.rs b/src/tools/miri/tests/compile-fail/cast_fn_ptr_unsafe.rs new file mode 100644 index 0000000000..568681da3c --- /dev/null +++ b/src/tools/miri/tests/compile-fail/cast_fn_ptr_unsafe.rs @@ -0,0 +1,10 @@ +// just making sure that fn -> unsafe fn casts are handled by rustc so miri doesn't have to +fn main() { + fn f() {} + + let g = f as fn() as unsafe fn(i32); //~ERROR: non-primitive cast: `fn()` as `unsafe fn(i32)` + + unsafe { + g(42); + } +} diff --git a/src/tools/miri/tests/compile-fail/cast_fn_ptr_unsafe2.rs b/src/tools/miri/tests/compile-fail/cast_fn_ptr_unsafe2.rs new file mode 100644 index 0000000000..314365939f --- /dev/null +++ b/src/tools/miri/tests/compile-fail/cast_fn_ptr_unsafe2.rs @@ -0,0 +1,10 @@ +// just making sure that fn -> unsafe fn casts are handled by rustc so miri doesn't have to +fn main() { + fn f() {} + + let g = f as fn() as fn(i32) as unsafe fn(i32); 
//~ERROR: non-primitive cast: `fn()` as `fn(i32)` + + unsafe { + g(42); + } +} diff --git a/src/tools/miri/tests/compile-fail/cast_int_to_fn_ptr.rs b/src/tools/miri/tests/compile-fail/cast_int_to_fn_ptr.rs new file mode 100644 index 0000000000..23f85dbaf3 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/cast_int_to_fn_ptr.rs @@ -0,0 +1,10 @@ +// Validation makes this fail in the wrong place +// compile-flags: -Zmir-emit-validate=0 + +fn main() { + let g = unsafe { + std::mem::transmute::<usize, fn(i32)>(42) + }; + + g(42) //~ ERROR a memory access tried to interpret some bytes as a pointer +} diff --git a/src/tools/miri/tests/compile-fail/copy_nonoverlapping.rs b/src/tools/miri/tests/compile-fail/copy_nonoverlapping.rs new file mode 100644 index 0000000000..f4acbadfd5 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/copy_nonoverlapping.rs @@ -0,0 +1,24 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(core_intrinsics)] + +use std::intrinsics::*; + +//error-pattern: copy_nonoverlapping called on overlapping ranges + +fn main() { + let mut data = [0u8; 16]; + unsafe { + let a = &data[0] as *const _; + let b = &mut data[1] as *mut _; + std::ptr::copy_nonoverlapping(a, b, 2); + } +} diff --git a/src/tools/miri/tests/compile-fail/ctlz_nonzero.rs b/src/tools/miri/tests/compile-fail/ctlz_nonzero.rs new file mode 100644 index 0000000000..704c4d4b7d --- /dev/null +++ b/src/tools/miri/tests/compile-fail/ctlz_nonzero.rs @@ -0,0 +1,15 @@ +#![feature(intrinsics)] + +mod rusti { + extern "rust-intrinsic" { + pub fn ctlz_nonzero<T>(x: T) -> T; + } +} + +pub fn main() { + unsafe { + use rusti::*; + + ctlz_nonzero(0u8); //~ ERROR: ctlz_nonzero called on 0 + } +} diff --git a/src/tools/miri/tests/compile-fail/cttz_nonzero.rs b/src/tools/miri/tests/compile-fail/cttz_nonzero.rs new file mode 100644 index 0000000000..eda25c6615 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/cttz_nonzero.rs @@ -0,0 +1,15 @@ +#![feature(intrinsics)] + +mod rusti { + extern "rust-intrinsic" { + pub fn cttz_nonzero<T>(x: T) -> T; + } +} + +pub fn main() { + unsafe { + use rusti::*; + + cttz_nonzero(0u8); //~ ERROR: cttz_nonzero called on 0 + } +} diff --git a/src/tools/miri/tests/compile-fail/dangling_pointer_deref.rs b/src/tools/miri/tests/compile-fail/dangling_pointer_deref.rs new file mode 100644 index 0000000000..0ede7c96f0 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/dangling_pointer_deref.rs @@ -0,0 +1,8 @@ +fn main() { + let p = { + let b = Box::new(42); + &*b as *const i32 + }; + let x = unsafe { *p }; //~ ERROR: dangling pointer was dereferenced + panic!("this should never print: {}", x); +} diff --git a/src/tools/miri/tests/compile-fail/deallocate-bad-alignment.rs b/src/tools/miri/tests/compile-fail/deallocate-bad-alignment.rs new file mode 100644 index 0000000000..c1ae7477c8 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/deallocate-bad-alignment.rs @@ -0,0 +1,15 @@ +#![feature(alloc, allocator_api)] + +extern crate alloc; + +use alloc::heap::Heap; +use alloc::allocator::*; + +// error-pattern: tried to deallocate or reallocate using incorrect alignment or size + +fn main() { + unsafe { + let x = Heap.alloc(Layout::from_size_align_unchecked(1, 1)).unwrap(); + Heap.dealloc(x,
Layout::from_size_align_unchecked(1, 2)); + } +} diff --git a/src/tools/miri/tests/compile-fail/deallocate-bad-size.rs b/src/tools/miri/tests/compile-fail/deallocate-bad-size.rs new file mode 100644 index 0000000000..5577f10736 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/deallocate-bad-size.rs @@ -0,0 +1,15 @@ +#![feature(alloc, allocator_api)] + +extern crate alloc; + +use alloc::heap::Heap; +use alloc::allocator::*; + +// error-pattern: tried to deallocate or reallocate using incorrect alignment or size + +fn main() { + unsafe { + let x = Heap.alloc(Layout::from_size_align_unchecked(1, 1)).unwrap(); + Heap.dealloc(x, Layout::from_size_align_unchecked(2, 1)); + } +} diff --git a/src/tools/miri/tests/compile-fail/deallocate-twice.rs b/src/tools/miri/tests/compile-fail/deallocate-twice.rs new file mode 100644 index 0000000000..e11df0eb41 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/deallocate-twice.rs @@ -0,0 +1,16 @@ +#![feature(alloc, allocator_api)] + +extern crate alloc; + +use alloc::heap::Heap; +use alloc::allocator::*; + +// error-pattern: tried to deallocate dangling pointer + +fn main() { + unsafe { + let x = Heap.alloc(Layout::from_size_align_unchecked(1, 1)).unwrap(); + Heap.dealloc(x, Layout::from_size_align_unchecked(1, 1)); + Heap.dealloc(x, Layout::from_size_align_unchecked(1, 1)); + } +} diff --git a/src/tools/miri/tests/compile-fail/deref_fn_ptr.rs b/src/tools/miri/tests/compile-fail/deref_fn_ptr.rs new file mode 100644 index 0000000000..c1eaf7eaa6 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/deref_fn_ptr.rs @@ -0,0 +1,8 @@ +fn f() {} + +fn main() { + let x: i32 = unsafe { + *std::mem::transmute::(f) //~ ERROR: tried to dereference a function pointer + }; + panic!("this should never print: {}", x); +} diff --git a/src/libstd/os/nacl/mod.rs b/src/tools/miri/tests/compile-fail/div-by-zero-2.rs similarity index 81% rename from src/libstd/os/nacl/mod.rs rename to src/tools/miri/tests/compile-fail/div-by-zero-2.rs index 7dfa2eabe3..3e869ad4a5 100644 --- a/src/libstd/os/nacl/mod.rs +++ b/src/tools/miri/tests/compile-fail/div-by-zero-2.rs @@ -8,9 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Nacl-specific definitions +#![allow(const_err)] -#![stable(feature = "raw_ext", since = "1.1.0")] - -pub mod raw; -pub mod fs; +fn main() { + let _n = 1 / 0; //~ ERROR: DivisionByZero +} diff --git a/src/tools/miri/tests/compile-fail/div-by-zero.rs b/src/tools/miri/tests/compile-fail/div-by-zero.rs new file mode 100644 index 0000000000..4ac6214d88 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/div-by-zero.rs @@ -0,0 +1,21 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![feature(core_intrinsics)] + +use std::intrinsics::*; + +//error-pattern: Division by 0 in unchecked_div + +fn main() { + unsafe { + let _n = unchecked_div(1i64, 0); + } +} diff --git a/src/tools/miri/tests/compile-fail/execute_memory.rs b/src/tools/miri/tests/compile-fail/execute_memory.rs new file mode 100644 index 0000000000..87d975e1f9 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/execute_memory.rs @@ -0,0 +1,12 @@ +// Validation makes this fail in the wrong place +// compile-flags: -Zmir-emit-validate=0 + +#![feature(box_syntax)] + +fn main() { + let x = box 42; + unsafe { + let f = std::mem::transmute::<Box<i32>, fn()>(x); + f() //~ ERROR: tried to treat a memory pointer as a function pointer + } +} diff --git a/src/tools/miri/tests/compile-fail/fn_ptr_offset.rs b/src/tools/miri/tests/compile-fail/fn_ptr_offset.rs new file mode 100644 index 0000000000..45e32142a8 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/fn_ptr_offset.rs @@ -0,0 +1,14 @@ +// Validation makes this fail in the wrong place +// compile-flags: -Zmir-emit-validate=0 + +use std::mem; + +fn f() {} + +fn main() { + let x : fn() = f; + let y : *mut u8 = unsafe { mem::transmute(x) }; + let y = y.wrapping_offset(1); + let x : fn() = unsafe { mem::transmute(y) }; + x(); //~ ERROR: tried to use a function pointer after offsetting it +} diff --git a/src/tools/miri/tests/compile-fail/invalid_bool.rs b/src/tools/miri/tests/compile-fail/invalid_bool.rs new file mode 100644 index 0000000000..c30c9b439a --- /dev/null +++ b/src/tools/miri/tests/compile-fail/invalid_bool.rs @@ -0,0 +1,4 @@ +fn main() { + let b = unsafe { std::mem::transmute::<u8, bool>(2) }; //~ ERROR: invalid boolean value read + if b { unreachable!() } else { unreachable!() } +} diff --git a/src/tools/miri/tests/compile-fail/invalid_enum_discriminant.rs b/src/tools/miri/tests/compile-fail/invalid_enum_discriminant.rs new file mode 100644 index 0000000000..9ce6d44ca4 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/invalid_enum_discriminant.rs @@ -0,0 +1,17 @@ +// Validation makes this fail in the wrong place
// compile-flags: -Zmir-emit-validate=0 + +#[repr(C)] +pub enum Foo { + A, B, C, D +} + +fn main() { + let f = unsafe { std::mem::transmute::<i32, Foo>(42) }; + match f { + Foo::A => {}, //~ ERROR invalid enum discriminant value read + Foo::B => {}, + Foo::C => {}, + Foo::D => {}, + } +} diff --git a/src/tools/miri/tests/compile-fail/match_char.rs b/src/tools/miri/tests/compile-fail/match_char.rs new file mode 100644 index 0000000000..4fee6e692b --- /dev/null +++ b/src/tools/miri/tests/compile-fail/match_char.rs @@ -0,0 +1,8 @@ +fn main() { + assert!(std::char::from_u32(-1_i32 as u32).is_none()); + match unsafe { std::mem::transmute::<i32, char>(-1) } { //~ERROR tried to interpret an invalid 32-bit value as a char: 4294967295 + 'a' => {}, + 'b' => {}, + _ => {}, + } +} diff --git a/src/tools/miri/tests/compile-fail/memleak.rs b/src/tools/miri/tests/compile-fail/memleak.rs new file mode 100644 index 0000000000..71b4e2f442 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/memleak.rs @@ -0,0 +1,5 @@ +//error-pattern: the evaluated program leaked memory + +fn main() { + std::mem::forget(Box::new(42)); +} diff --git a/src/tools/miri/tests/compile-fail/memleak_rc.rs b/src/tools/miri/tests/compile-fail/memleak_rc.rs new file mode 100644 index 0000000000..b2bc6722af --- /dev/null +++ b/src/tools/miri/tests/compile-fail/memleak_rc.rs @@ -0,0 +1,12 @@ +//error-pattern: the evaluated program leaked memory + +use std::rc::Rc; +use std::cell::RefCell; + +struct Dummy(Rc<RefCell<Option<Dummy>>>); + +fn
main() { + let x = Dummy(Rc::new(RefCell::new(None))); + let y = Dummy(x.0.clone()); + *x.0.borrow_mut() = Some(y); +} diff --git a/src/tools/miri/tests/compile-fail/modifying_constants.rs b/src/tools/miri/tests/compile-fail/modifying_constants.rs new file mode 100644 index 0000000000..cb2e7217d5 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/modifying_constants.rs @@ -0,0 +1,6 @@ +fn main() { + let x = &1; // the `&1` is promoted to a constant, but it used to be that only the pointer is marked static, not the pointee + let y = unsafe { &mut *(x as *const i32 as *mut i32) }; + *y = 42; //~ ERROR tried to modify constant memory + assert_eq!(*x, 42); +} diff --git a/src/tools/miri/tests/compile-fail/never_say_never.rs b/src/tools/miri/tests/compile-fail/never_say_never.rs new file mode 100644 index 0000000000..3e80cb20b3 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/never_say_never.rs @@ -0,0 +1,12 @@ +#![feature(never_type)] +#![allow(unreachable_code)] + +fn main() { + let y = &5; + let x: ! = unsafe { + *(y as *const _ as *const !) //~ ERROR tried to access a dead local variable + }; + f(x) +} + +fn f(x: !) -> ! { x } diff --git a/src/tools/miri/tests/compile-fail/never_transmute_humans.rs b/src/tools/miri/tests/compile-fail/never_transmute_humans.rs new file mode 100644 index 0000000000..38406eeb3f --- /dev/null +++ b/src/tools/miri/tests/compile-fail/never_transmute_humans.rs @@ -0,0 +1,14 @@ +#![feature(never_type)] +#![allow(unreachable_code)] +#![allow(unused_variables)] + +struct Human; + +fn main() { + let x: ! = unsafe { + std::mem::transmute::(Human) //~ ERROR entered unreachable code + }; + f(x) +} + +fn f(x: !) -> ! { x } diff --git a/src/tools/miri/tests/compile-fail/never_transmute_void.rs b/src/tools/miri/tests/compile-fail/never_transmute_void.rs new file mode 100644 index 0000000000..3fffacc55e --- /dev/null +++ b/src/tools/miri/tests/compile-fail/never_transmute_void.rs @@ -0,0 +1,16 @@ +#![feature(never_type)] +#![allow(unreachable_code)] +#![allow(unused_variables)] + +enum Void {} + +fn f(v: Void) -> ! { + match v {} +} + +fn main() { + let v: Void = unsafe { + std::mem::transmute::<(), Void>(()) //~ ERROR entered unreachable code + }; + f(v); +} diff --git a/src/tools/miri/tests/compile-fail/null_pointer_deref.rs b/src/tools/miri/tests/compile-fail/null_pointer_deref.rs new file mode 100644 index 0000000000..5a26856eba --- /dev/null +++ b/src/tools/miri/tests/compile-fail/null_pointer_deref.rs @@ -0,0 +1,4 @@ +fn main() { + let x: i32 = unsafe { *std::ptr::null() }; //~ ERROR: invalid use of NULL pointer + panic!("this should never print: {}", x); +} diff --git a/src/tools/miri/tests/compile-fail/oom.rs b/src/tools/miri/tests/compile-fail/oom.rs new file mode 100644 index 0000000000..d4aebb912e --- /dev/null +++ b/src/tools/miri/tests/compile-fail/oom.rs @@ -0,0 +1,7 @@ +#![feature(custom_attribute, attr_literals)] +#![miri(memory_size=4095)] + +fn main() { + let _x = [42; 1024]; + //~^ERROR tried to allocate 4096 more bytes, but only +} diff --git a/src/tools/miri/tests/compile-fail/oom2.rs b/src/tools/miri/tests/compile-fail/oom2.rs new file mode 100644 index 0000000000..f439ac8c13 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/oom2.rs @@ -0,0 +1,10 @@ +// Validation forces more allocation; disable it. 
+// compile-flags: -Zmir-emit-validate=0 +#![feature(box_syntax, custom_attribute, attr_literals)] +#![miri(memory_size=2048)] + +fn main() { + loop { + ::std::mem::forget(box 42); //~ ERROR tried to allocate 4 more bytes + } +} diff --git a/src/tools/miri/tests/compile-fail/out_of_bounds_ptr_1.rs b/src/tools/miri/tests/compile-fail/out_of_bounds_ptr_1.rs new file mode 100644 index 0000000000..8dce7e5786 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/out_of_bounds_ptr_1.rs @@ -0,0 +1,8 @@ +// error-pattern: pointer computed at offset 5, outside bounds of allocation +fn main() { + let v = [0i8; 4]; + let x = &v as *const i8; + // The error is inside another function, so we cannot match it by line + let x = unsafe { x.offset(5) }; + panic!("this should never print: {:?}", x); +} diff --git a/src/tools/miri/tests/compile-fail/out_of_bounds_ptr_2.rs b/src/tools/miri/tests/compile-fail/out_of_bounds_ptr_2.rs new file mode 100644 index 0000000000..f754649457 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/out_of_bounds_ptr_2.rs @@ -0,0 +1,7 @@ +// error-pattern: overflowing math +fn main() { + let v = [0i8; 4]; + let x = &v as *const i8; + let x = unsafe { x.offset(-1) }; + panic!("this should never print: {:?}", x); +} diff --git a/src/tools/miri/tests/compile-fail/out_of_bounds_read.rs b/src/tools/miri/tests/compile-fail/out_of_bounds_read.rs new file mode 100644 index 0000000000..8c56b14bdf --- /dev/null +++ b/src/tools/miri/tests/compile-fail/out_of_bounds_read.rs @@ -0,0 +1,5 @@ +fn main() { + let v: Vec<u8> = vec![1, 2]; + let x = unsafe { *v.as_ptr().wrapping_offset(5) }; //~ ERROR: which has size 2 + panic!("this should never print: {}", x); +} diff --git a/src/tools/miri/tests/compile-fail/out_of_bounds_read2.rs b/src/tools/miri/tests/compile-fail/out_of_bounds_read2.rs new file mode 100644 index 0000000000..d29b22ffb2 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/out_of_bounds_read2.rs @@ -0,0 +1,5 @@ +fn main() { + let v: Vec<u8> = vec![1, 2]; + let x = unsafe { *v.as_ptr().wrapping_offset(5) }; //~ ERROR: memory access at offset 6, outside bounds of allocation + panic!("this should never print: {}", x); +} diff --git a/src/tools/miri/tests/compile-fail/overflowing-lsh-neg.rs b/src/tools/miri/tests/compile-fail/overflowing-lsh-neg.rs new file mode 100644 index 0000000000..3a889be741 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/overflowing-lsh-neg.rs @@ -0,0 +1,16 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(exceeding_bitshifts)] +#![allow(const_err)] + +fn main() { + let _n = 2i64 << -1; //~ Overflow(Shl) +} diff --git a/src/tools/miri/tests/compile-fail/overflowing-rsh-2.rs b/src/tools/miri/tests/compile-fail/overflowing-rsh-2.rs new file mode 100644 index 0000000000..ac09a1740c --- /dev/null +++ b/src/tools/miri/tests/compile-fail/overflowing-rsh-2.rs @@ -0,0 +1,16 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms.
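+// Editorial note (not in the original test, added for clarity): the shift amount
+// used below is u32::max_value() as i64 + 1 = 4_294_967_296 = 2^32, whose low 32
+// bits are all zero. An implementation that truncated the right-hand side to u32
+// before checking would therefore compute 1i64 >> 0 and miss the overflow; the
+// untruncated amount is >= 64, which the test expects to be reported as
+// Overflow(Shr).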
+ +#![allow(exceeding_bitshifts, const_err)] + +fn main() { + // Make sure we catch overflows that would be hidden by first casting the RHS to u32 + let _n = 1i64 >> (u32::max_value() as i64 + 1); //~ Overflow(Shr) +} diff --git a/src/tools/miri/tests/compile-fail/overflowing-rsh.rs b/src/tools/miri/tests/compile-fail/overflowing-rsh.rs new file mode 100644 index 0000000000..a7ac9d1d50 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/overflowing-rsh.rs @@ -0,0 +1,15 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(exceeding_bitshifts)] + +fn main() { + let _n = 1i64 >> 64; //~ Overflow(Shr) +} diff --git a/src/tools/miri/tests/compile-fail/overflowing-unchecked-rsh.rs b/src/tools/miri/tests/compile-fail/overflowing-unchecked-rsh.rs new file mode 100644 index 0000000000..b8291e1300 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/overflowing-unchecked-rsh.rs @@ -0,0 +1,21 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(core_intrinsics)] + +use std::intrinsics::*; + +//error-pattern: Overflowing shift by 64 in unchecked_shr + +fn main() { + unsafe { + let _n = unchecked_shr(1i64, 64); + } +} diff --git a/src/tools/miri/tests/compile-fail/overwriting_part_of_relocation_makes_the_rest_undefined.rs b/src/tools/miri/tests/compile-fail/overwriting_part_of_relocation_makes_the_rest_undefined.rs new file mode 100644 index 0000000000..50f51d0ba9 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/overwriting_part_of_relocation_makes_the_rest_undefined.rs @@ -0,0 +1,11 @@ +fn main() { + let mut p = &42; + unsafe { + let ptr: *mut _ = &mut p; + *(ptr as *mut u8) = 123; // if we ever support 8 bit pointers, this is gonna cause + // "attempted to interpret some raw bytes as a pointer address" instead of + // "attempted to read undefined bytes" + } + let x = *p; //~ ERROR: attempted to read undefined bytes + panic!("this should never print: {}", x); +} diff --git a/src/tools/miri/tests/compile-fail/panic.rs b/src/tools/miri/tests/compile-fail/panic.rs new file mode 100644 index 0000000000..80149eeffa --- /dev/null +++ b/src/tools/miri/tests/compile-fail/panic.rs @@ -0,0 +1,7 @@ +// FIXME: Something in panic handling fails validation with full-MIR +// compile-flags: -Zmir-emit-validate=0 +//error-pattern: the evaluated program panicked + +fn main() { + assert_eq!(5, 6); +} diff --git a/src/tools/miri/tests/compile-fail/pointer_byte_read_1.rs b/src/tools/miri/tests/compile-fail/pointer_byte_read_1.rs new file mode 100644 index 0000000000..342eb28a97 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/pointer_byte_read_1.rs @@ -0,0 +1,7 @@ +fn main() { + let x = 13; + let y = &x; + let z = &y as *const &i32 as *const usize; + let ptr_bytes = unsafe { *z }; // the actual deref is fine, because we read the entire pointer at once + let _ = ptr_bytes % 432; //~ ERROR: tried to access part of a pointer value as raw bytes +} diff --git 
a/src/tools/miri/tests/compile-fail/pointer_byte_read_2.rs b/src/tools/miri/tests/compile-fail/pointer_byte_read_2.rs new file mode 100644 index 0000000000..b0f619332e --- /dev/null +++ b/src/tools/miri/tests/compile-fail/pointer_byte_read_2.rs @@ -0,0 +1,7 @@ +fn main() { + let x = 13; + let y = &x; + let z = &y as *const &i32 as *const u8; + // the deref fails, because we are reading only a part of the pointer + let _ = unsafe { *z }; //~ ERROR: tried to access part of a pointer value as raw bytes +} diff --git a/src/tools/miri/tests/compile-fail/pointers_to_different_allocations_are_unorderable.rs b/src/tools/miri/tests/compile-fail/pointers_to_different_allocations_are_unorderable.rs new file mode 100644 index 0000000000..245b7527c5 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/pointers_to_different_allocations_are_unorderable.rs @@ -0,0 +1,7 @@ +fn main() { + let x: *const u8 = &1; + let y: *const u8 = &2; + if x < y { //~ ERROR: attempted to do invalid arithmetic on pointers + unreachable!() + } +} diff --git a/src/tools/miri/tests/compile-fail/ptr_bitops.rs b/src/tools/miri/tests/compile-fail/ptr_bitops.rs new file mode 100644 index 0000000000..78fd8e912b --- /dev/null +++ b/src/tools/miri/tests/compile-fail/ptr_bitops.rs @@ -0,0 +1,7 @@ +fn main() { + let bytes = [0i8, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + let one = bytes.as_ptr().wrapping_offset(1); + let three = bytes.as_ptr().wrapping_offset(3); + let res = (one as usize) | (three as usize); //~ ERROR a raw memory access tried to access part of a pointer value as raw bytes + println!("{}", res); +} diff --git a/src/tools/miri/tests/compile-fail/ptr_int_cast.rs b/src/tools/miri/tests/compile-fail/ptr_int_cast.rs new file mode 100644 index 0000000000..396c71ebb0 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/ptr_int_cast.rs @@ -0,0 +1,8 @@ +fn main() { + let x = &1; + // Casting down to u8 and back up to a pointer loses too much precision; this must not work. + let x = x as *const i32; + let x = x as u8; //~ ERROR: a raw memory access tried to access part of a pointer value as raw bytes + let x = x as *const i32; + let _ = unsafe { *x }; +} diff --git a/src/tools/miri/tests/compile-fail/ptr_offset_overflow.rs b/src/tools/miri/tests/compile-fail/ptr_offset_overflow.rs new file mode 100644 index 0000000000..578468c339 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/ptr_offset_overflow.rs @@ -0,0 +1,6 @@ +//error-pattern: overflowing math +fn main() { + let v = [1i8, 2]; + let x = &v[1] as *const i8; + let _ = unsafe { x.offset(isize::min_value()) }; +} diff --git a/src/tools/miri/tests/compile-fail/reading_half_a_pointer.rs b/src/tools/miri/tests/compile-fail/reading_half_a_pointer.rs new file mode 100644 index 0000000000..cc41b52f33 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/reading_half_a_pointer.rs @@ -0,0 +1,29 @@ +#![allow(dead_code)] + +// We use packed structs to get around alignment restrictions +#[repr(packed)] +struct Data { + pad: u8, + ptr: &'static i32, +} + +// But we need to gurantee some alignment +struct Wrapper { + align: u64, + data: Data, +} + +static G : i32 = 0; + +fn main() { + let mut w = Wrapper { align: 0, data: Data { pad: 0, ptr: &G } }; + + // Get a pointer to the beginning of the Data struct (one u8 byte, then the pointer bytes). + // Thanks to the wrapper, we know this is aligned-enough to perform a load at ptr size. + // We load at pointer type, so having a relocation is okay -- but here, the relocation + // starts 1 byte to the right, so using it would actually be wrong! 
+ let d_alias = &mut w.data as *mut _ as *mut *const u8; + unsafe { + let _x = *d_alias; //~ ERROR: tried to access part of a pointer value as raw bytes + } +} diff --git a/src/tools/miri/tests/compile-fail/reallocate-bad-alignment-2.rs b/src/tools/miri/tests/compile-fail/reallocate-bad-alignment-2.rs new file mode 100644 index 0000000000..cd6214440f --- /dev/null +++ b/src/tools/miri/tests/compile-fail/reallocate-bad-alignment-2.rs @@ -0,0 +1,16 @@ +#![feature(alloc, allocator_api)] + +extern crate alloc; + +use alloc::heap::Heap; +use alloc::allocator::*; + +// error-pattern: tried to deallocate or reallocate using incorrect alignment or size + +fn main() { + unsafe { + let x = Heap.alloc(Layout::from_size_align_unchecked(1, 1)).unwrap(); + // Try realloc with a too big alignment. + let _y = Heap.realloc(x, Layout::from_size_align_unchecked(1, 2), Layout::from_size_align_unchecked(1, 1)).unwrap(); + } +} diff --git a/src/tools/miri/tests/compile-fail/reallocate-bad-alignment.rs b/src/tools/miri/tests/compile-fail/reallocate-bad-alignment.rs new file mode 100644 index 0000000000..da5fe1d819 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/reallocate-bad-alignment.rs @@ -0,0 +1,16 @@ +#![feature(alloc, allocator_api)] + +extern crate alloc; + +use alloc::heap::Heap; +use alloc::allocator::*; + +// error-pattern: tried to deallocate or reallocate using incorrect alignment or size + +fn main() { + unsafe { + let x = Heap.alloc(Layout::from_size_align_unchecked(1, 2)).unwrap(); + // Try realloc with a too small alignment. + let _y = Heap.realloc(x, Layout::from_size_align_unchecked(1, 1), Layout::from_size_align_unchecked(1, 2)).unwrap(); + } +} diff --git a/src/tools/miri/tests/compile-fail/reallocate-bad-size.rs b/src/tools/miri/tests/compile-fail/reallocate-bad-size.rs new file mode 100644 index 0000000000..953178742c --- /dev/null +++ b/src/tools/miri/tests/compile-fail/reallocate-bad-size.rs @@ -0,0 +1,15 @@ +#![feature(alloc, allocator_api)] + +extern crate alloc; + +use alloc::heap::Heap; +use alloc::allocator::*; + +// error-pattern: tried to deallocate or reallocate using incorrect alignment or size + +fn main() { + unsafe { + let x = Heap.alloc(Layout::from_size_align_unchecked(1, 1)).unwrap(); + let _y = Heap.realloc(x, Layout::from_size_align_unchecked(2, 1), Layout::from_size_align_unchecked(1, 1)).unwrap(); + } +} diff --git a/src/tools/miri/tests/compile-fail/reallocate-change-alloc.rs b/src/tools/miri/tests/compile-fail/reallocate-change-alloc.rs new file mode 100644 index 0000000000..290c966a2b --- /dev/null +++ b/src/tools/miri/tests/compile-fail/reallocate-change-alloc.rs @@ -0,0 +1,14 @@ +#![feature(alloc, allocator_api)] + +extern crate alloc; + +use alloc::heap::Heap; +use alloc::allocator::*; + +fn main() { + unsafe { + let x = Heap.alloc(Layout::from_size_align_unchecked(1, 1)).unwrap(); + let _y = Heap.realloc(x, Layout::from_size_align_unchecked(1, 1), Layout::from_size_align_unchecked(1, 1)).unwrap(); + let _z = *x; //~ ERROR: dangling pointer was dereferenced + } +} diff --git a/src/tools/miri/tests/compile-fail/reallocate-dangling.rs b/src/tools/miri/tests/compile-fail/reallocate-dangling.rs new file mode 100644 index 0000000000..6225879a5a --- /dev/null +++ b/src/tools/miri/tests/compile-fail/reallocate-dangling.rs @@ -0,0 +1,16 @@ +#![feature(alloc, allocator_api)] + +extern crate alloc; + +use alloc::heap::Heap; +use alloc::allocator::*; + +// error-pattern: dangling pointer was dereferenced + +fn main() { + unsafe { + let x = 
Heap.alloc(Layout::from_size_align_unchecked(1, 1)).unwrap(); + Heap.dealloc(x, Layout::from_size_align_unchecked(1, 1)); + Heap.realloc(x, Layout::from_size_align_unchecked(1, 1), Layout::from_size_align_unchecked(1, 1)); + } +} diff --git a/src/tools/miri/tests/compile-fail/reference_to_packed.rs b/src/tools/miri/tests/compile-fail/reference_to_packed.rs new file mode 100644 index 0000000000..cc927f8795 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/reference_to_packed.rs @@ -0,0 +1,19 @@ +// This should fail even without validation +// compile-flags: -Zmir-emit-validate=0 + +#![allow(dead_code, unused_variables)] + +#[repr(packed)] +struct Foo { + x: i32, + y: i32, +} + +fn main() { + let foo = Foo { + x: 42, + y: 99, + }; + let p = &foo.x; + let i = *p; //~ ERROR tried to access memory with alignment 1, but alignment 4 is required +} diff --git a/src/tools/miri/tests/compile-fail/repeat.rs b/src/tools/miri/tests/compile-fail/repeat.rs new file mode 100644 index 0000000000..abe89e233e --- /dev/null +++ b/src/tools/miri/tests/compile-fail/repeat.rs @@ -0,0 +1,5 @@ +fn main() { + let data: [u8; std::usize::MAX] = [42; std::usize::MAX]; + //~^ ERROR: rustc layout computation failed: SizeOverflow([u8; + assert_eq!(data.len(), 1024); +} diff --git a/src/tools/miri/tests/compile-fail/repeat2.rs b/src/tools/miri/tests/compile-fail/repeat2.rs new file mode 100644 index 0000000000..d489342b85 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/repeat2.rs @@ -0,0 +1,5 @@ +fn main() { + let data: [u8; 1024*1024*1024] = [42; 1024*1024*1024]; + //~^ ERROR: reached the configured maximum execution time + assert_eq!(data.len(), 1024*1024*1024); +} diff --git a/src/tools/miri/tests/compile-fail/stack_free.rs b/src/tools/miri/tests/compile-fail/stack_free.rs new file mode 100644 index 0000000000..96006c884e --- /dev/null +++ b/src/tools/miri/tests/compile-fail/stack_free.rs @@ -0,0 +1,7 @@ +// error-pattern: tried to deallocate Stack memory but gave Machine(Rust) as the kind + +fn main() { + let x = 42; + let bad_box = unsafe { std::mem::transmute::<&i32, Box>(&x) }; + drop(bad_box); +} diff --git a/src/tools/miri/tests/compile-fail/stack_limit.rs b/src/tools/miri/tests/compile-fail/stack_limit.rs new file mode 100644 index 0000000000..c6aaf80e6a --- /dev/null +++ b/src/tools/miri/tests/compile-fail/stack_limit.rs @@ -0,0 +1,20 @@ +#![feature(custom_attribute, attr_literals)] +#![miri(stack_limit=16)] + +//error-pattern: reached the configured maximum number of stack frames + +fn bar() { + foo(); +} + +fn foo() { + cake(); +} + +fn cake() { + bar(); +} + +fn main() { + bar(); +} diff --git a/src/tools/miri/tests/compile-fail/static_memory_modification.rs b/src/tools/miri/tests/compile-fail/static_memory_modification.rs new file mode 100644 index 0000000000..11961becb2 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/static_memory_modification.rs @@ -0,0 +1,9 @@ +static X: usize = 5; + +#[allow(mutable_transmutes)] +fn main() { + unsafe { + *std::mem::transmute::<&usize, &mut usize>(&X) = 6; //~ ERROR: tried to modify constant memory + assert_eq!(X, 6); + } +} diff --git a/src/tools/miri/tests/compile-fail/static_memory_modification2.rs b/src/tools/miri/tests/compile-fail/static_memory_modification2.rs new file mode 100644 index 0000000000..f030a9c281 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/static_memory_modification2.rs @@ -0,0 +1,12 @@ +// Validation detects that we are casting & to &mut and so it changes why we fail +// compile-flags: -Zmir-emit-validate=0 + +use 
std::mem::transmute; + +#[allow(mutable_transmutes)] +fn main() { + unsafe { + let s = "this is a test"; + transmute::<&[u8], &mut [u8]>(s.as_bytes())[4] = 42; //~ ERROR: tried to modify constant memory + } +} diff --git a/src/tools/miri/tests/compile-fail/static_memory_modification3.rs b/src/tools/miri/tests/compile-fail/static_memory_modification3.rs new file mode 100644 index 0000000000..743fbe60ef --- /dev/null +++ b/src/tools/miri/tests/compile-fail/static_memory_modification3.rs @@ -0,0 +1,9 @@ +use std::mem::transmute; + +#[allow(mutable_transmutes)] +fn main() { + unsafe { + let bs = b"this is a test"; + transmute::<&[u8], &mut [u8]>(bs)[4] = 42; //~ ERROR: tried to modify constant memory + } +} diff --git a/src/tools/miri/tests/compile-fail/timeout.rs b/src/tools/miri/tests/compile-fail/timeout.rs new file mode 100644 index 0000000000..edd4c31866 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/timeout.rs @@ -0,0 +1,9 @@ +//error-pattern: reached the configured maximum execution time +#![feature(custom_attribute, attr_literals)] +#![miri(step_limit=1000)] + +fn main() { + for i in 0..1000000 { + assert!(i < 1000); + } +} diff --git a/src/tools/miri/tests/compile-fail/transmute-pair-undef.rs b/src/tools/miri/tests/compile-fail/transmute-pair-undef.rs new file mode 100644 index 0000000000..acc6098af7 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/transmute-pair-undef.rs @@ -0,0 +1,20 @@ +#![feature(core_intrinsics)] + +use std::mem; + +fn main() { + let x: Option> = unsafe { + let z = std::intrinsics::add_with_overflow(0usize, 0usize); + std::mem::transmute::<(usize, bool), Option>>(z) + }; + let y = &x; + // Now read this bytewise. There should be (ptr_size+1) def bytes followed by (ptr_size-1) undef bytes (the padding after the bool) in there. 
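+ // Editorial note (not in the original test, added for clarity): on a 64-bit
+ // target first_undef is size_of::<usize>() + 1 = 9, so offsets 0..=8 hold
+ // defined (zero) bytes (the pointer-sized field plus the bool), while offset 9
+ // is the first padding byte. The loop below checks the defined bytes; the byte
+ // at offset first_undef is undefined, so using it triggers the
+ // "attempted to read undefined bytes" error.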
+ let z : *const u8 = y as *const _ as *const _; + let first_undef = mem::size_of::() as isize + 1; + for i in 0..first_undef { + let byte = unsafe { *z.offset(i) }; + assert_eq!(byte, 0); + } + let v = unsafe { *z.offset(first_undef) }; + if v == 0 {} //~ ERROR attempted to read undefined bytes +} diff --git a/src/tools/miri/tests/compile-fail/transmute_fat.rs b/src/tools/miri/tests/compile-fail/transmute_fat.rs new file mode 100644 index 0000000000..7d5d95a1dc --- /dev/null +++ b/src/tools/miri/tests/compile-fail/transmute_fat.rs @@ -0,0 +1,15 @@ +// This should fail even without validation +// compile-flags: -Zmir-emit-validate=0 +#![feature(i128_type)] + +fn main() { + #[cfg(target_pointer_width="64")] + let bad = unsafe { + std::mem::transmute::<&[u8], u128>(&[1u8]) + }; + #[cfg(target_pointer_width="32")] + let bad = unsafe { + std::mem::transmute::<&[u8], u64>(&[1u8]) + }; + bad + 1; //~ ERROR a raw memory access tried to access part of a pointer value as raw bytes +} diff --git a/src/tools/miri/tests/compile-fail/transmute_fat2.rs b/src/tools/miri/tests/compile-fail/transmute_fat2.rs new file mode 100644 index 0000000000..028ed613ee --- /dev/null +++ b/src/tools/miri/tests/compile-fail/transmute_fat2.rs @@ -0,0 +1,13 @@ +#![feature(i128_type)] + +fn main() { + #[cfg(target_pointer_width="64")] + let bad = unsafe { + std::mem::transmute::(42) + }; + #[cfg(target_pointer_width="32")] + let bad = unsafe { + std::mem::transmute::(42) + }; + bad[0]; //~ ERROR index out of bounds: the len is 0 but the index is 0 +} diff --git a/src/tools/miri/tests/compile-fail/unaligned_ptr_cast.rs b/src/tools/miri/tests/compile-fail/unaligned_ptr_cast.rs new file mode 100644 index 0000000000..8ad1b32325 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/unaligned_ptr_cast.rs @@ -0,0 +1,6 @@ +fn main() { + let x = &2u16; + let x = x as *const _ as *const u32; + // This must fail because alignment is violated + let _x = unsafe { *x }; //~ ERROR: tried to access memory with alignment 2, but alignment 4 is required +} diff --git a/src/tools/miri/tests/compile-fail/unaligned_ptr_cast2.rs b/src/tools/miri/tests/compile-fail/unaligned_ptr_cast2.rs new file mode 100644 index 0000000000..15fb7dd313 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/unaligned_ptr_cast2.rs @@ -0,0 +1,7 @@ +fn main() { + let x = &2u16; + let x = x as *const _ as *const *const u8; + // This must fail because alignment is violated. Test specifically for loading pointers, which have special code + // in miri's memory. + let _x = unsafe { *x }; //~ ERROR: tried to access memory with alignment 2, but alignment +} diff --git a/src/tools/miri/tests/compile-fail/unaligned_ptr_cast_zst.rs b/src/tools/miri/tests/compile-fail/unaligned_ptr_cast_zst.rs new file mode 100644 index 0000000000..fc60384068 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/unaligned_ptr_cast_zst.rs @@ -0,0 +1,6 @@ +fn main() { + let x = &2u16; + let x = x as *const _ as *const [u32; 0]; + // This must fail because alignment is violated. Test specifically for loading ZST. 
+ let _x = unsafe { *x }; //~ ERROR: tried to access memory with alignment 2, but alignment 4 is required +} diff --git a/src/tools/miri/tests/compile-fail/validation_aliasing_mut1.rs b/src/tools/miri/tests/compile-fail/validation_aliasing_mut1.rs new file mode 100644 index 0000000000..86aa57447f --- /dev/null +++ b/src/tools/miri/tests/compile-fail/validation_aliasing_mut1.rs @@ -0,0 +1,10 @@ +#![allow(unused_variables)] + +mod safe { + pub fn safe(x: &mut i32, y: &mut i32) {} //~ ERROR: in conflict with lock WriteLock +} + +fn main() { + let x = &mut 0 as *mut _; + unsafe { safe::safe(&mut *x, &mut *x) }; +} diff --git a/src/tools/miri/tests/compile-fail/validation_aliasing_mut2.rs b/src/tools/miri/tests/compile-fail/validation_aliasing_mut2.rs new file mode 100644 index 0000000000..ed7497e5e5 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/validation_aliasing_mut2.rs @@ -0,0 +1,10 @@ +#![allow(unused_variables)] + +mod safe { + pub fn safe(x: &i32, y: &mut i32) {} //~ ERROR: in conflict with lock ReadLock +} + +fn main() { + let x = &mut 0 as *mut _; + unsafe { safe::safe(&*x, &mut *x) }; +} diff --git a/src/tools/miri/tests/compile-fail/validation_aliasing_mut3.rs b/src/tools/miri/tests/compile-fail/validation_aliasing_mut3.rs new file mode 100644 index 0000000000..69fbbc167c --- /dev/null +++ b/src/tools/miri/tests/compile-fail/validation_aliasing_mut3.rs @@ -0,0 +1,10 @@ +#![allow(unused_variables)] + +mod safe { + pub fn safe(x: &mut i32, y: &i32) {} //~ ERROR: in conflict with lock WriteLock +} + +fn main() { + let x = &mut 0 as *mut _; + unsafe { safe::safe(&mut *x, &*x) }; +} diff --git a/src/tools/miri/tests/compile-fail/validation_aliasing_mut4.rs b/src/tools/miri/tests/compile-fail/validation_aliasing_mut4.rs new file mode 100644 index 0000000000..3dac55aeaa --- /dev/null +++ b/src/tools/miri/tests/compile-fail/validation_aliasing_mut4.rs @@ -0,0 +1,13 @@ +#![allow(unused_variables)] + +mod safe { + use std::cell::Cell; + + // Make sure &mut UnsafeCell also has a lock to it + pub fn safe(x: &mut Cell, y: &i32) {} //~ ERROR: in conflict with lock WriteLock +} + +fn main() { + let x = &mut 0 as *mut _; + unsafe { safe::safe(&mut *(x as *mut _), &*x) }; +} diff --git a/src/tools/miri/tests/compile-fail/validation_buggy_as_mut_slice.rs b/src/tools/miri/tests/compile-fail/validation_buggy_as_mut_slice.rs new file mode 100644 index 0000000000..98eca8d360 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/validation_buggy_as_mut_slice.rs @@ -0,0 +1,20 @@ +#![allow(unused_variables)] + +// For some reason, the error location is different when using fullmir +// error-pattern: in conflict with lock WriteLock + +mod safe { + use std::slice::from_raw_parts_mut; + + pub fn as_mut_slice(self_: &Vec) -> &mut [T] { + unsafe { + from_raw_parts_mut(self_.as_ptr() as *mut T, self_.len()) + } + } +} + +fn main() { + let v = vec![0,1,2]; + let v1_ = safe::as_mut_slice(&v); + let v2_ = safe::as_mut_slice(&v); +} diff --git a/src/tools/miri/tests/compile-fail/validation_buggy_split_at_mut.rs b/src/tools/miri/tests/compile-fail/validation_buggy_split_at_mut.rs new file mode 100644 index 0000000000..9e67b2a4ab --- /dev/null +++ b/src/tools/miri/tests/compile-fail/validation_buggy_split_at_mut.rs @@ -0,0 +1,22 @@ +#![allow(unused_variables)] + +mod safe { + use std::slice::from_raw_parts_mut; + + pub fn split_at_mut(self_: &mut [T], mid: usize) -> (&mut [T], &mut [T]) { + let len = self_.len(); + let ptr = self_.as_mut_ptr(); + + unsafe { + assert!(mid <= len); + + (from_raw_parts_mut(ptr, len 
- mid), // BUG: should be "mid" instead of "len - mid" + from_raw_parts_mut(ptr.offset(mid as isize), len - mid)) + } + } +} + +fn main() { + let mut array = [1,2,3,4]; + let _x = safe::split_at_mut(&mut array, 0); //~ ERROR: in conflict with lock WriteLock +} diff --git a/src/tools/miri/tests/compile-fail/validation_illegal_write.rs b/src/tools/miri/tests/compile-fail/validation_illegal_write.rs new file mode 100644 index 0000000000..1432f4cc9f --- /dev/null +++ b/src/tools/miri/tests/compile-fail/validation_illegal_write.rs @@ -0,0 +1,15 @@ +#![allow(unused_variables)] + +mod safe { + pub(crate) fn safe(x: &u32) { + let x : &mut u32 = unsafe { &mut *(x as *const _ as *mut _) }; + *x = 42; //~ ERROR: in conflict with lock ReadLock + } +} + +fn main() { + let target = &mut 42; + let target_ref = ⌖ + // do a reborrow, but we keep the lock + safe::safe(&*target); +} diff --git a/src/tools/miri/tests/compile-fail/validation_lock_confusion.rs b/src/tools/miri/tests/compile-fail/validation_lock_confusion.rs new file mode 100644 index 0000000000..b352346114 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/validation_lock_confusion.rs @@ -0,0 +1,24 @@ +// Make sure validation can handle many overlapping shared borrows for different parts of a data structure +#![allow(unused_variables)] +use std::cell::RefCell; + +fn evil(x: *mut i32) { + unsafe { *x = 0; } //~ ERROR: in conflict with lock WriteLock +} + +fn test(r: &mut RefCell) { + let x = &*r; // releasing write lock, first suspension recorded + let mut x_ref = x.borrow_mut(); + let x_inner : &mut i32 = &mut *x_ref; // new inner write lock, with same lifetime as outer lock + { + let x_inner_shr = &*x_inner; // releasing inner write lock, recording suspension + let y = &*r; // second suspension for the outer write lock + let x_inner_shr2 = &*x_inner; // 2nd suspension for inner write lock + } + // If the two locks are mixed up, here we should have a write lock, but we do not. 
+ evil(x_inner as *mut _); +} + +fn main() { + test(&mut RefCell::new(0)); +} diff --git a/src/tools/miri/tests/compile-fail/validation_pointer_smuggling.rs b/src/tools/miri/tests/compile-fail/validation_pointer_smuggling.rs new file mode 100644 index 0000000000..3320d2a89d --- /dev/null +++ b/src/tools/miri/tests/compile-fail/validation_pointer_smuggling.rs @@ -0,0 +1,20 @@ +#![allow(unused_variables)] + +static mut PTR: *mut u8 = 0 as *mut _; + +fn fun1(x: &mut u8) { + unsafe { + PTR = x; + } +} + +fn fun2() { + // Now we use a pointer we are not allowed to use + let _x = unsafe { *PTR }; //~ ERROR: in conflict with lock WriteLock +} + +fn main() { + let mut val = 0; + fun1(&mut val); + fun2(); +} diff --git a/src/tools/miri/tests/compile-fail/validation_recover1.rs b/src/tools/miri/tests/compile-fail/validation_recover1.rs new file mode 100644 index 0000000000..55c38a694c --- /dev/null +++ b/src/tools/miri/tests/compile-fail/validation_recover1.rs @@ -0,0 +1,16 @@ +#![allow(unused_variables)] + +#[repr(u32)] +enum Bool { True } + +mod safe { + pub(crate) fn safe(x: &mut super::Bool) { + let x = x as *mut _ as *mut u32; + unsafe { *x = 44; } // out-of-bounds enum discriminant + } +} + +fn main() { + let mut x = Bool::True; + safe::safe(&mut x); //~ ERROR: invalid enum discriminant +} diff --git a/src/tools/miri/tests/compile-fail/validation_recover2.rs b/src/tools/miri/tests/compile-fail/validation_recover2.rs new file mode 100644 index 0000000000..756be9fde6 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/validation_recover2.rs @@ -0,0 +1,14 @@ +#![allow(unused_variables)] + +mod safe { + // This makes a ref that was passed to us via &mut alias with things it should not alias with + pub(crate) fn safe(x: &mut &u32, target: &mut u32) { + unsafe { *x = &mut *(target as *mut _); } + } +} + +fn main() { + let target = &mut 42; + let mut target_alias = &42; // initial dummy value + safe::safe(&mut target_alias, target); //~ ERROR: in conflict with lock ReadLock +} diff --git a/src/tools/miri/tests/compile-fail/validation_recover3.rs b/src/tools/miri/tests/compile-fail/validation_recover3.rs new file mode 100644 index 0000000000..afe6fe7c0b --- /dev/null +++ b/src/tools/miri/tests/compile-fail/validation_recover3.rs @@ -0,0 +1,15 @@ +#![allow(unused_variables)] + +mod safe { + pub(crate) fn safe(x: *mut u32) { + unsafe { *x = 42; } //~ ERROR: in conflict with lock WriteLock + } +} + +fn main() { + let target = &mut 42u32; + let target2 = target as *mut _; + drop(&mut *target); // reborrow + // Now make sure we still got the lock + safe::safe(target2); +} diff --git a/src/tools/miri/tests/compile-fail/validation_undef.rs b/src/tools/miri/tests/compile-fail/validation_undef.rs new file mode 100644 index 0000000000..b889b1ea53 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/validation_undef.rs @@ -0,0 +1,14 @@ +#![allow(unused_variables)] +// error-pattern: attempted to read undefined bytes + +mod safe { + use std::mem; + + pub(crate) fn make_float() -> f32 { + unsafe { mem::uninitialized() } + } +} + +fn main() { + let _x = safe::make_float(); +} diff --git a/src/tools/miri/tests/compile-fail/wild_pointer_deref.rs b/src/tools/miri/tests/compile-fail/wild_pointer_deref.rs new file mode 100644 index 0000000000..57da8dfc01 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/wild_pointer_deref.rs @@ -0,0 +1,5 @@ +fn main() { + let p = 44 as *const i32; + let x = unsafe { *p }; //~ ERROR: a memory access tried to interpret some bytes as a pointer + panic!("this should never print: {}", 
x); +} diff --git a/src/tools/miri/tests/compile-fail/zst.rs b/src/tools/miri/tests/compile-fail/zst.rs new file mode 100644 index 0000000000..3439824047 --- /dev/null +++ b/src/tools/miri/tests/compile-fail/zst.rs @@ -0,0 +1,4 @@ +fn main() { + let x = &() as *const () as *const i32; + let _ = unsafe { *x }; //~ ERROR: tried to access memory with alignment 1, but alignment 4 is required +} diff --git a/src/tools/miri/tests/compiletest.rs b/src/tools/miri/tests/compiletest.rs new file mode 100644 index 0000000000..b1ea3fc8b0 --- /dev/null +++ b/src/tools/miri/tests/compiletest.rs @@ -0,0 +1,213 @@ +#![feature(slice_concat_ext)] + +extern crate compiletest_rs as compiletest; + +use std::slice::SliceConcatExt; +use std::path::{PathBuf, Path}; +use std::io::Write; +use std::env; + +macro_rules! eprintln { + ($($arg:tt)*) => { + let stderr = std::io::stderr(); + writeln!(stderr.lock(), $($arg)*).unwrap(); + } +} + +fn miri_path() -> PathBuf { + if rustc_test_suite().is_some() { + PathBuf::from(option_env!("MIRI_PATH").unwrap()) + } else { + PathBuf::from(concat!("target/", env!("PROFILE"), "/miri")) + } +} + +fn rustc_test_suite() -> Option { + option_env!("RUSTC_TEST_SUITE").map(PathBuf::from) +} + +fn rustc_lib_path() -> PathBuf { + option_env!("RUSTC_LIB_PATH").unwrap().into() +} + +fn compile_fail(sysroot: &Path, path: &str, target: &str, host: &str, fullmir: bool) { + eprintln!( + "## Running compile-fail tests in {} against miri for target {}", + path, + target + ); + let mut config = compiletest::Config::default().tempdir(); + config.mode = "compile-fail".parse().expect("Invalid mode"); + config.rustc_path = miri_path(); + let mut flags = Vec::new(); + if rustc_test_suite().is_some() { + config.run_lib_path = rustc_lib_path(); + config.compile_lib_path = rustc_lib_path(); + } + // if we are building as part of the rustc test suite, we already have fullmir for everything + if fullmir && rustc_test_suite().is_none() { + if host != target { + // skip fullmir on nonhost + return; + } + let sysroot = std::env::home_dir().unwrap() + .join(".xargo") + .join("HOST"); + config.target_rustcflags = Some(format!("--sysroot {}", sysroot.to_str().unwrap())); + config.src_base = PathBuf::from(path.to_string()); + } else { + config.target_rustcflags = Some(format!("--sysroot {}", sysroot.to_str().unwrap())); + config.src_base = PathBuf::from(path.to_string()); + } + flags.push("-Zmir-emit-validate=1".to_owned()); + config.target_rustcflags = Some(flags.join(" ")); + config.target = target.to_owned(); + compiletest::run_tests(&config); +} + +fn run_pass(path: &str) { + eprintln!("## Running run-pass tests in {} against rustc", path); + let mut config = compiletest::Config::default().tempdir(); + config.mode = "run-pass".parse().expect("Invalid mode"); + config.src_base = PathBuf::from(path); + if let Some(rustc_path) = rustc_test_suite() { + config.rustc_path = rustc_path; + config.run_lib_path = rustc_lib_path(); + config.compile_lib_path = rustc_lib_path(); + config.target_rustcflags = Some(format!("-Dwarnings --sysroot {}", get_sysroot().display())); + } else { + config.target_rustcflags = Some("-Dwarnings".to_owned()); + } + config.host_rustcflags = Some("-Dwarnings".to_string()); + compiletest::run_tests(&config); +} + +fn miri_pass(path: &str, target: &str, host: &str, fullmir: bool, opt: bool) { + let opt_str = if opt { " with optimizations" } else { "" }; + eprintln!( + "## Running run-pass tests in {} against miri for target {}{}", + path, + target, + opt_str + ); + let mut config = 
compiletest::Config::default().tempdir(); + config.mode = "ui".parse().expect("Invalid mode"); + config.src_base = PathBuf::from(path); + config.target = target.to_owned(); + config.host = host.to_owned(); + config.rustc_path = miri_path(); + if rustc_test_suite().is_some() { + config.run_lib_path = rustc_lib_path(); + config.compile_lib_path = rustc_lib_path(); + } + let mut flags = Vec::new(); + // Control miri logging. This is okay despite concurrent test execution as all tests + // will set this env var to the same value. + env::set_var("MIRI_LOG", "warn"); + // if we are building as part of the rustc test suite, we already have fullmir for everything + if fullmir && rustc_test_suite().is_none() { + if host != target { + // skip fullmir on nonhost + return; + } + let sysroot = std::env::home_dir().unwrap() + .join(".xargo") + .join("HOST"); + + flags.push(format!("--sysroot {}", sysroot.to_str().unwrap())); + } + if opt { + flags.push("-Zmir-opt-level=3".to_owned()); + } else { + flags.push("-Zmir-opt-level=0".to_owned()); + // For now, only validate without optimizations. Inlining breaks validation. + flags.push("-Zmir-emit-validate=1".to_owned()); + } + config.target_rustcflags = Some(flags.join(" ")); + compiletest::run_tests(&config); +} + +fn is_target_dir<P: Into<PathBuf>>(path: P) -> bool { + let mut path = path.into(); + path.push("lib"); + path.metadata().map(|m| m.is_dir()).unwrap_or(false) +} + +fn for_all_targets<F: FnMut(String)>(sysroot: &Path, mut f: F) { + let target_dir = sysroot.join("lib").join("rustlib"); + for entry in std::fs::read_dir(target_dir).expect("invalid sysroot") { + let entry = entry.unwrap(); + if !is_target_dir(entry.path()) { + continue; + } + let target = entry.file_name().into_string().unwrap(); + f(target); + } +} + +fn get_sysroot() -> PathBuf { + let sysroot = std::env::var("MIRI_SYSROOT").unwrap_or_else(|_| { + let sysroot = std::process::Command::new("rustc") + .arg("--print") + .arg("sysroot") + .output() + .expect("rustc not found") + .stdout; + String::from_utf8(sysroot).expect("sysroot is not utf8") + }); + PathBuf::from(sysroot.trim()) +} + +fn get_host() -> String { + let rustc = rustc_test_suite().unwrap_or(PathBuf::from("rustc")); + println!("using rustc at {}", rustc.display()); + let host = std::process::Command::new(rustc) + .arg("-vV") + .output() + .expect("rustc not found for -vV") + .stdout; + let host = std::str::from_utf8(&host).expect("sysroot is not utf8"); + let host = host.split("\nhost: ").nth(1).expect( + "no host: part in rustc -vV", + ); + let host = host.split('\n').next().expect("no \n after host"); + String::from(host) +} + +fn run_pass_miri(opt: bool) { + let sysroot = get_sysroot(); + let host = get_host(); + + for_all_targets(&sysroot, |target| { + miri_pass("tests/run-pass", &target, &host, false, opt); + }); + miri_pass("tests/run-pass-fullmir", &host, &host, true, opt); +} + +#[test] +fn run_pass_miri_noopt() { + run_pass_miri(false); +} + +#[test] +#[ignore] // FIXME: Disabled for now, as the optimizer is pretty broken and crashes...
+fn run_pass_miri_opt() { + run_pass_miri(true); +} + +#[test] +fn run_pass_rustc() { + run_pass("tests/run-pass"); + run_pass("tests/run-pass-fullmir"); +} + +#[test] +fn compile_fail_miri() { + let sysroot = get_sysroot(); + let host = get_host(); + + for_all_targets(&sysroot, |target| { + compile_fail(&sysroot, "tests/compile-fail", &target, &host, false); + }); + compile_fail(&sysroot, "tests/compile-fail-fullmir", &host, &host, true); +} diff --git a/src/tools/miri/tests/run-pass-fullmir/catch.rs b/src/tools/miri/tests/run-pass-fullmir/catch.rs new file mode 100644 index 0000000000..490f17d4cf --- /dev/null +++ b/src/tools/miri/tests/run-pass-fullmir/catch.rs @@ -0,0 +1,8 @@ +//ignore-msvc +use std::panic::{catch_unwind, AssertUnwindSafe}; + +fn main() { + let mut i = 3; + let _ = catch_unwind(AssertUnwindSafe(|| {i -= 2;} )); + println!("{}", i); +} diff --git a/src/tools/miri/tests/run-pass-fullmir/catch.stdout b/src/tools/miri/tests/run-pass-fullmir/catch.stdout new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/src/tools/miri/tests/run-pass-fullmir/catch.stdout @@ -0,0 +1 @@ +1 diff --git a/src/tools/miri/tests/run-pass-fullmir/foreign-fn-linkname.rs b/src/tools/miri/tests/run-pass-fullmir/foreign-fn-linkname.rs new file mode 100644 index 0000000000..20cb713590 --- /dev/null +++ b/src/tools/miri/tests/run-pass-fullmir/foreign-fn-linkname.rs @@ -0,0 +1,37 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//ignore-msvc +#![feature(libc)] + +extern crate libc; +use std::ffi::CString; + +mod mlibc { + use libc::{c_char, size_t}; + + extern { + #[link_name = "strlen"] + pub fn my_strlen(str: *const c_char) -> size_t; + } +} + +fn strlen(str: String) -> usize { + // C string is terminated with a zero + let s = CString::new(str).unwrap(); + unsafe { + mlibc::my_strlen(s.as_ptr()) as usize + } +} + +pub fn main() { + let len = strlen("Rust".to_string()); + assert_eq!(len, 4); +} diff --git a/src/tools/miri/tests/run-pass-fullmir/format.rs b/src/tools/miri/tests/run-pass-fullmir/format.rs new file mode 100644 index 0000000000..a14d7054e7 --- /dev/null +++ b/src/tools/miri/tests/run-pass-fullmir/format.rs @@ -0,0 +1,4 @@ +//ignore-msvc +fn main() { + println!("Hello {}", 13); +} diff --git a/src/tools/miri/tests/run-pass-fullmir/format.stdout b/src/tools/miri/tests/run-pass-fullmir/format.stdout new file mode 100644 index 0000000000..e193b8ae89 --- /dev/null +++ b/src/tools/miri/tests/run-pass-fullmir/format.stdout @@ -0,0 +1 @@ +Hello 13 diff --git a/src/tools/miri/tests/run-pass-fullmir/from_utf8.rs b/src/tools/miri/tests/run-pass-fullmir/from_utf8.rs new file mode 100644 index 0000000000..c5d4abcfda --- /dev/null +++ b/src/tools/miri/tests/run-pass-fullmir/from_utf8.rs @@ -0,0 +1,4 @@ +//ignore-msvc +fn main() { + let _ = ::std::str::from_utf8(b"a"); +} diff --git a/src/tools/miri/tests/run-pass-fullmir/hashmap.rs b/src/tools/miri/tests/run-pass-fullmir/hashmap.rs new file mode 100644 index 0000000000..99f05e2598 --- /dev/null +++ b/src/tools/miri/tests/run-pass-fullmir/hashmap.rs @@ -0,0 +1,26 @@ +//ignore-msvc +use std::collections::{self, HashMap}; +use std::hash::BuildHasherDefault; + +fn main() { + let mut map : HashMap> = Default::default(); + 
map.insert(0, 0); + assert_eq!(map.values().fold(0, |x, y| x+y), 0); + + let table_base = map.get(&0).unwrap() as *const _; + + let num = 22; // large enough to trigger a resize + for i in 1..num { + map.insert(i, i); + } + assert!(table_base != map.get(&0).unwrap() as *const _); // make sure relocation happened + assert_eq!(map.values().fold(0, |x, y| x+y), num*(num-1)/2); // check the right things are in the table now + + // Inserting again replaces the existing entries + for i in 0..num { + map.insert(i, num-1-i); + } + assert_eq!(map.values().fold(0, |x, y| x+y), num*(num-1)/2); + + // TODO: Test Entry API +} diff --git a/src/tools/miri/tests/run-pass-fullmir/heap.rs b/src/tools/miri/tests/run-pass-fullmir/heap.rs new file mode 100644 index 0000000000..917d51d0e4 --- /dev/null +++ b/src/tools/miri/tests/run-pass-fullmir/heap.rs @@ -0,0 +1,35 @@ +//ignore-msvc +#![feature(box_syntax)] + +fn make_box() -> Box<(i16, i16)> { + Box::new((1, 2)) +} + +fn make_box_syntax() -> Box<(i16, i16)> { + box (1, 2) +} + +fn allocate_reallocate() { + let mut s = String::new(); + + // 6 byte heap alloc (__rust_allocate) + s.push_str("foobar"); + assert_eq!(s.len(), 6); + assert_eq!(s.capacity(), 6); + + // heap size doubled to 12 (__rust_reallocate) + s.push_str("baz"); + assert_eq!(s.len(), 9); + assert_eq!(s.capacity(), 12); + + // heap size reduced to 9 (__rust_reallocate) + s.shrink_to_fit(); + assert_eq!(s.len(), 9); + assert_eq!(s.capacity(), 9); +} + +fn main() { + assert_eq!(*make_box(), (1, 2)); + assert_eq!(*make_box_syntax(), (1, 2)); + allocate_reallocate(); +} diff --git a/src/tools/miri/tests/run-pass-fullmir/hello.rs b/src/tools/miri/tests/run-pass-fullmir/hello.rs new file mode 100644 index 0000000000..986efcaf90 --- /dev/null +++ b/src/tools/miri/tests/run-pass-fullmir/hello.rs @@ -0,0 +1,4 @@ +//ignore-msvc +fn main() { + println!("Hello, world!"); +} diff --git a/src/tools/miri/tests/run-pass-fullmir/hello.stdout b/src/tools/miri/tests/run-pass-fullmir/hello.stdout new file mode 100644 index 0000000000..af5626b4a1 --- /dev/null +++ b/src/tools/miri/tests/run-pass-fullmir/hello.stdout @@ -0,0 +1 @@ +Hello, world! diff --git a/src/tools/miri/tests/run-pass-fullmir/integer-ops.rs b/src/tools/miri/tests/run-pass-fullmir/integer-ops.rs new file mode 100644 index 0000000000..97c694fd56 --- /dev/null +++ b/src/tools/miri/tests/run-pass-fullmir/integer-ops.rs @@ -0,0 +1,175 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// FIXME: remove -Zmir-opt-level once https://github.com/rust-lang/rust/issues/43359 is fixed +// compile-flags: -Zmir-opt-level=0 + +//ignore-msvc +use std::i32; + +pub fn main() { + // This tests that do (not) do sign extension properly when loading integers + assert_eq!(u32::max_value() as i64, 4294967295); + assert_eq!(i32::min_value() as i64, -2147483648); + + assert_eq!(i8::min_value(), -128); + + assert_eq!(i8::max_value(), 127); + + assert_eq!(i32::from_str_radix("A", 16), Ok(10)); + + let n = -0b1000_0000i8; + assert_eq!(n.count_ones(), 1); + + let n = -0b1000_0000i8; + assert_eq!(n.count_zeros(), 7); + + let n = -1i16; + assert_eq!(n.leading_zeros(), 0); + + let n = -4i8; + assert_eq!(n.trailing_zeros(), 2); + + let n = 0x0123456789ABCDEFi64; + let m = -0x76543210FEDCBA99i64; + assert_eq!(n.rotate_left(32), m); + + let n = 0x0123456789ABCDEFi64; + let m = -0xFEDCBA987654322i64; + assert_eq!(n.rotate_right(4), m); + + let n = 0x0123456789ABCDEFi64; + let m = -0x1032547698BADCFFi64; + assert_eq!(n.swap_bytes(), m); + + let n = 0x0123456789ABCDEFi64; + if cfg!(target_endian = "big") { + assert_eq!(i64::from_be(n), n) + } else { + assert_eq!(i64::from_be(n), n.swap_bytes()) + } + + let n = 0x0123456789ABCDEFi64; + if cfg!(target_endian = "little") { + assert_eq!(i64::from_le(n), n) + } else { + assert_eq!(i64::from_le(n), n.swap_bytes()) + } + + let n = 0x0123456789ABCDEFi64; + if cfg!(target_endian = "big") { + assert_eq!(n.to_be(), n) + } else { + assert_eq!(n.to_be(), n.swap_bytes()) + } + + let n = 0x0123456789ABCDEFi64; + if cfg!(target_endian = "little") { + assert_eq!(n.to_le(), n) + } else { + assert_eq!(n.to_le(), n.swap_bytes()) + } + + assert_eq!(7i16.checked_add(32760), Some(32767)); + assert_eq!(8i16.checked_add(32760), None); + + assert_eq!((-127i8).checked_sub(1), Some(-128)); + assert_eq!((-128i8).checked_sub(1), None); + + assert_eq!(6i8.checked_mul(21), Some(126)); + assert_eq!(6i8.checked_mul(22), None); + + assert_eq!((-127i8).checked_div(-1), Some(127)); + assert_eq!((-128i8).checked_div(-1), None); + assert_eq!((1i8).checked_div(0), None); + + assert_eq!(5i32.checked_rem(2), Some(1)); + assert_eq!(5i32.checked_rem(0), None); + assert_eq!(i32::MIN.checked_rem(-1), None); + + assert_eq!(5i32.checked_neg(), Some(-5)); + assert_eq!(i32::MIN.checked_neg(), None); + + assert_eq!(0x10i32.checked_shl(4), Some(0x100)); + assert_eq!(0x10i32.checked_shl(33), None); + + assert_eq!(0x10i32.checked_shr(4), Some(0x1)); + assert_eq!(0x10i32.checked_shr(33), None); + + assert_eq!((-5i32).checked_abs(), Some(5)); + assert_eq!(i32::MIN.checked_abs(), None); + + assert_eq!(100i8.saturating_add(1), 101); + assert_eq!(100i8.saturating_add(127), 127); + + assert_eq!(100i8.saturating_sub(127), -27); + assert_eq!((-100i8).saturating_sub(127), -128); + + assert_eq!(100i32.saturating_mul(127), 12700); + assert_eq!((1i32 << 23).saturating_mul(1 << 23), i32::MAX); + assert_eq!((-1i32 << 23).saturating_mul(1 << 23), i32::MIN); + + assert_eq!(100i8.wrapping_add(27), 127); + assert_eq!(100i8.wrapping_add(127), -29); + + assert_eq!(0i8.wrapping_sub(127), -127); + assert_eq!((-2i8).wrapping_sub(127), 127); + + assert_eq!(10i8.wrapping_mul(12), 120); + assert_eq!(11i8.wrapping_mul(12), -124); + + assert_eq!(100u8.wrapping_div(10), 10); + assert_eq!((-128i8).wrapping_div(-1), -128); + + assert_eq!(100i8.wrapping_rem(10), 0); + assert_eq!((-128i8).wrapping_rem(-1), 0); + + assert_eq!(100i8.wrapping_neg(), -100); + assert_eq!((-128i8).wrapping_neg(), -128); + + assert_eq!((-1i8).wrapping_shl(7), 
-128); + assert_eq!((-1i8).wrapping_shl(8), -1); + + assert_eq!((-128i8).wrapping_shr(7), -1); + assert_eq!((-128i8).wrapping_shr(8), -128); + + assert_eq!(100i8.wrapping_abs(), 100); + assert_eq!((-100i8).wrapping_abs(), 100); + assert_eq!((-128i8).wrapping_abs(), -128); + assert_eq!((-128i8).wrapping_abs() as u8, 128); + + assert_eq!(5i32.overflowing_add(2), (7, false)); + assert_eq!(i32::MAX.overflowing_add(1), (i32::MIN, true)); + + assert_eq!(5i32.overflowing_sub(2), (3, false)); + assert_eq!(i32::MIN.overflowing_sub(1), (i32::MAX, true)); + + assert_eq!(5i32.overflowing_mul(2), (10, false)); + assert_eq!(1_000_000_000i32.overflowing_mul(10), (1410065408, true)); + + assert_eq!(5i32.overflowing_div(2), (2, false)); + assert_eq!(i32::MIN.overflowing_div(-1), (i32::MIN, true)); + + assert_eq!(5i32.overflowing_rem(2), (1, false)); + assert_eq!(i32::MIN.overflowing_rem(-1), (0, true)); + + assert_eq!(2i32.overflowing_neg(), (-2, false)); + assert_eq!(i32::MIN.overflowing_neg(), (i32::MIN, true)); + + assert_eq!(0x10i32.overflowing_shl(4), (0x100, false)); + assert_eq!(0x10i32.overflowing_shl(36), (0x100, true)); + + assert_eq!(0x10i32.overflowing_shr(4), (0x1, false)); + assert_eq!(0x10i32.overflowing_shr(36), (0x1, true)); + + assert_eq!(10i8.overflowing_abs(), (10,false)); + assert_eq!((-10i8).overflowing_abs(), (10,false)); + assert_eq!((-128i8).overflowing_abs(), (-128,true)); +} diff --git a/src/tools/miri/tests/run-pass-fullmir/issue-15080.rs b/src/tools/miri/tests/run-pass-fullmir/issue-15080.rs new file mode 100644 index 0000000000..4a84f2bc5d --- /dev/null +++ b/src/tools/miri/tests/run-pass-fullmir/issue-15080.rs @@ -0,0 +1,34 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//ignore-msvc + +#![feature(slice_patterns)] + +fn main() { + let mut x: &[_] = &[1, 2, 3, 4]; + + let mut result = vec!(); + loop { + x = match *x { + [1, n, 3, ref rest..] => { + result.push(n); + rest + } + [n, ref rest..] => { + result.push(n); + rest + } + [] => + break + } + } + assert_eq!(result, [2, 4]); +} diff --git a/src/tools/miri/tests/run-pass-fullmir/issue-3794.rs b/src/tools/miri/tests/run-pass-fullmir/issue-3794.rs new file mode 100644 index 0000000000..8d55af58ee --- /dev/null +++ b/src/tools/miri/tests/run-pass-fullmir/issue-3794.rs @@ -0,0 +1,42 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +//ignore-msvc +#![feature(box_syntax)] + +trait T { + fn print(&self); +} + +#[derive(Debug)] +struct S { + s: isize, +} + +impl T for S { + fn print(&self) { + println!("{:?}", self); + } +} + +fn print_t(t: &T) { + t.print(); +} + +fn print_s(s: &S) { + s.print(); +} + +pub fn main() { + let s: Box = box S { s: 5 }; + print_s(&*s); + let t: Box = s as Box; + print_t(&*t); +} diff --git a/src/tools/miri/tests/run-pass-fullmir/issue-3794.stdout b/src/tools/miri/tests/run-pass-fullmir/issue-3794.stdout new file mode 100644 index 0000000000..e4afe6fa55 --- /dev/null +++ b/src/tools/miri/tests/run-pass-fullmir/issue-3794.stdout @@ -0,0 +1,2 @@ +S { s: 5 } +S { s: 5 } diff --git a/src/tools/miri/tests/run-pass-fullmir/loop-break-value.rs b/src/tools/miri/tests/run-pass-fullmir/loop-break-value.rs new file mode 100644 index 0000000000..8a0ea113c5 --- /dev/null +++ b/src/tools/miri/tests/run-pass-fullmir/loop-break-value.rs @@ -0,0 +1,143 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//ignore-msvc + +#![feature(never_type)] +#![allow(unreachable_code)] + +#[allow(unused)] +fn never_returns() { + loop { + break loop {}; + } +} + +pub fn main() { + let value = 'outer: loop { + if 1 == 1 { + break 13; + } else { + let _never: ! = loop { + break loop { + break 'outer panic!(); + } + }; + } + }; + assert_eq!(value, 13); + + let x = [1, 3u32, 5]; + let y = [17]; + let z = []; + let coerced: &[_] = loop { + match 2 { + 1 => break &x, + 2 => break &y, + 3 => break &z, + _ => (), + } + }; + assert_eq!(coerced, &[17u32]); + + let trait_unified = loop { + break if true { + break Default::default() + } else { + break [13, 14] + }; + }; + assert_eq!(trait_unified, [0, 0]); + + let trait_unified_2 = loop { + if false { + break [String::from("Hello")] + } else { + break Default::default() + }; + }; + assert_eq!(trait_unified_2, [""]); + + let trait_unified_3 = loop { + break if false { + break [String::from("Hello")] + } else { + ["Yes".into()] + }; + }; + assert_eq!(trait_unified_3, ["Yes"]); + + let regular_break = loop { + if true { + break; + } else { + break break Default::default(); + } + }; + assert_eq!(regular_break, ()); + + let regular_break_2 = loop { + if true { + break Default::default(); + } else { + break; + } + }; + assert_eq!(regular_break_2, ()); + + let regular_break_3 = loop { + break if true { + Default::default() + } else { + break; + } + }; + assert_eq!(regular_break_3, ()); + + let regular_break_4 = loop { + break (); + break; + }; + assert_eq!(regular_break_4, ()); + + let regular_break_5 = loop { + break; + break (); + }; + assert_eq!(regular_break_5, ()); + + let nested_break_value = 'outer2: loop { + let _a: u32 = 'inner: loop { + if true { + break 'outer2 "hello"; + } else { + break 'inner 17; + } + }; + panic!(); + }; + assert_eq!(nested_break_value, "hello"); + + let break_from_while_cond = loop { + 'inner_loop: while break 'inner_loop { + panic!(); + } + break 123; + }; + assert_eq!(break_from_while_cond, 123); + + let break_from_while_to_outer = 'outer_loop: loop { + while break 'outer_loop 567 { + panic!("from_inner"); + } + panic!("from outer"); + }; + assert_eq!(break_from_while_to_outer, 567); +} diff --git 
a/src/tools/miri/tests/run-pass-fullmir/move-arg-2-unique.rs b/src/tools/miri/tests/run-pass-fullmir/move-arg-2-unique.rs
new file mode 100644
index 0000000000..f3c6566237
--- /dev/null
+++ b/src/tools/miri/tests/run-pass-fullmir/move-arg-2-unique.rs
@@ -0,0 +1,22 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//ignore-msvc
+
+#![allow(unused_features, unused_variables)]
+#![feature(box_syntax)]
+
+fn test(foo: Box<Vec<isize>> ) { assert_eq!((*foo)[0], 10); }
+
+pub fn main() {
+    let x = box vec![10];
+    // Test forgetting a local by move-in
+    test(x);
+}
diff --git a/src/tools/miri/tests/run-pass-fullmir/regions-mock-trans.rs b/src/tools/miri/tests/run-pass-fullmir/regions-mock-trans.rs
new file mode 100644
index 0000000000..cef62e47a5
--- /dev/null
+++ b/src/tools/miri/tests/run-pass-fullmir/regions-mock-trans.rs
@@ -0,0 +1,66 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// FIXME: We handle uninitialized storage here, which currently makes validation fail.
+// compile-flags: -Zmir-emit-validate=0
+
+//ignore-msvc
+
+#![feature(libc)]
+
+#![allow(dead_code)]
+
+extern crate libc;
+use std::mem;
+
+struct Arena(());
+
+struct Bcx<'a> {
+    fcx: &'a Fcx<'a>
+}
+
+struct Fcx<'a> {
+    arena: &'a Arena,
+    ccx: &'a Ccx
+}
+
+struct Ccx {
+    x: isize
+}
+
+fn alloc<'a>(_bcx : &'a Arena) -> &'a Bcx<'a> {
+    unsafe {
+        mem::transmute(libc::malloc(mem::size_of::<Bcx<'a>>()
+            as libc::size_t))
+    }
+}
+
+fn h<'a>(bcx : &'a Bcx<'a>) -> &'a Bcx<'a> {
+    return alloc(bcx.fcx.arena);
+}
+
+fn g(fcx : &Fcx) {
+    let bcx = Bcx { fcx: fcx };
+    let bcx2 = h(&bcx);
+    unsafe {
+        libc::free(mem::transmute(bcx2));
+    }
+}
+
+fn f(ccx : &Ccx) {
+    let a = Arena(());
+    let fcx = Fcx { arena: &a, ccx: ccx };
+    return g(&fcx);
+}
+
+pub fn main() {
+    let ccx = Ccx { x: 0 };
+    f(&ccx);
+}
diff --git a/src/tools/miri/tests/run-pass-fullmir/u128.rs b/src/tools/miri/tests/run-pass-fullmir/u128.rs
new file mode 100644
index 0000000000..5b2efdd205
--- /dev/null
+++ b/src/tools/miri/tests/run-pass-fullmir/u128.rs
@@ -0,0 +1,79 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+ +//ignore-msvc + +#![feature(i128_type)] + +fn b(t: T) -> T { t } + +fn main() { + let x: u128 = 0xFFFF_FFFF_FFFF_FFFF__FFFF_FFFF_FFFF_FFFF; + assert_eq!(0, !x); + assert_eq!(0, !x); + let y: u128 = 0xFFFF_FFFF_FFFF_FFFF__FFFF_FFFF_FFFF_FFFE; + assert_eq!(!1, y); + assert_eq!(x, y | 1); + assert_eq!(0xFAFF_0000_FF8F_0000__FFFF_0000_FFFF_FFFE, + y & + 0xFAFF_0000_FF8F_0000__FFFF_0000_FFFF_FFFF); + let z: u128 = 0xABCD_EF; + assert_eq!(z * z, 0x734C_C2F2_A521); + assert_eq!(z * z * z * z, 0x33EE_0E2A_54E2_59DA_A0E7_8E41); + assert_eq!(z + z + z + z, 0x2AF3_7BC); + let k: u128 = 0x1234_5678_9ABC_DEFF_EDCB_A987_6543_210; + assert_eq!(k + k, 0x2468_ACF1_3579_BDFF_DB97_530E_CA86_420); + assert_eq!(0, k - k); + assert_eq!(0x1234_5678_9ABC_DEFF_EDCB_A987_5A86_421, k - z); + assert_eq!(0x1000_0000_0000_0000_0000_0000_0000_000, + k - 0x234_5678_9ABC_DEFF_EDCB_A987_6543_210); + assert_eq!(0x6EF5_DE4C_D3BC_2AAA_3BB4_CC5D_D6EE_8, k / 42); + assert_eq!(0, k % 42); + assert_eq!(15, z % 42); + assert_eq!(0x169D_A8020_CEC18, k % 0x3ACB_FE49_FF24_AC); + assert_eq!(0x91A2_B3C4_D5E6_F7, k >> 65); + assert_eq!(0xFDB9_7530_ECA8_6420_0000_0000_0000_0000, k << 65); + assert!(k > z); + assert!(y > k); + assert!(y < x); + assert_eq!(x as u64, !0); + assert_eq!(z as u64, 0xABCD_EF); + assert_eq!(k as u64, 0xFEDC_BA98_7654_3210); + assert_eq!(k as i128, 0x1234_5678_9ABC_DEFF_EDCB_A987_6543_210); + assert_eq!((z as f64) as u128, z); + assert_eq!((z as f32) as u128, z); + assert_eq!((z as f64 * 16.0) as u128, z * 16); + assert_eq!((z as f32 * 16.0) as u128, z * 16); + let l :u128 = 432 << 100; + assert_eq!((l as f32) as u128, l); + assert_eq!((l as f64) as u128, l); + // formatting + let j: u128 = 1 << 67; + assert_eq!("147573952589676412928", format!("{}", j)); + assert_eq!("80000000000000000", format!("{:x}", j)); + assert_eq!("20000000000000000000000", format!("{:o}", j)); + assert_eq!("10000000000000000000000000000000000000000000000000000000000000000000", + format!("{:b}", j)); + assert_eq!("340282366920938463463374607431768211455", + format!("{}", u128::max_value())); + assert_eq!("147573952589676412928", format!("{:?}", j)); + // common traits + assert_eq!(x, b(x.clone())); + // overflow checks + assert_eq!((z).checked_mul(z), Some(0x734C_C2F2_A521)); + assert_eq!((k).checked_mul(k), None); + let l: u128 = b(u128::max_value() - 10); + let o: u128 = b(17); + assert_eq!(l.checked_add(b(11)), None); + assert_eq!(l.checked_sub(l), Some(0)); + assert_eq!(o.checked_sub(b(18)), None); + assert_eq!(b(1u128).checked_shl(b(127)), Some(1 << 127)); + assert_eq!(o.checked_shl(b(128)), None); +} diff --git a/src/tools/miri/tests/run-pass-fullmir/unsized-tuple-impls.rs b/src/tools/miri/tests/run-pass-fullmir/unsized-tuple-impls.rs new file mode 100644 index 0000000000..828e5c2692 --- /dev/null +++ b/src/tools/miri/tests/run-pass-fullmir/unsized-tuple-impls.rs @@ -0,0 +1,25 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +//ignore-msvc + +#![feature(unsized_tuple_coercion)] +use std::mem; + +fn main() { + let x : &(i32, i32, [i32]) = &(0, 1, [2, 3]); + let y : &(i32, i32, [i32]) = &(0, 1, [2, 3, 4]); + let mut a = [y, x]; + a.sort(); + assert_eq!(a, [x, y]); + + assert_eq!(&format!("{:?}", a), "[(0, 1, [2, 3]), (0, 1, [2, 3, 4])]"); + assert_eq!(mem::size_of_val(x), 16); +} diff --git a/src/tools/miri/tests/run-pass-fullmir/vecs.rs b/src/tools/miri/tests/run-pass-fullmir/vecs.rs new file mode 100644 index 0000000000..9a8912a6b9 --- /dev/null +++ b/src/tools/miri/tests/run-pass-fullmir/vecs.rs @@ -0,0 +1,52 @@ +//ignore-msvc + +fn make_vec() -> Vec { + let mut v = Vec::with_capacity(4); + v.push(1); + v.push(2); + v +} + +fn make_vec_macro() -> Vec { + vec![1, 2] +} + +fn make_vec_macro_repeat() -> Vec { + vec![42; 5] +} + +fn make_vec_macro_repeat_zeroed() -> Vec { + vec![0; 7] +} + +fn vec_into_iter() -> u8 { + vec![1, 2, 3, 4] + .into_iter() + .map(|x| x * x) + .fold(0, |x, y| x + y) +} + +fn vec_into_iter_zst() -> usize { + vec![[0u64; 0], [0u64; 0]] + .into_iter() + .map(|x| x.len()) + .sum() +} + +fn vec_reallocate() -> Vec { + let mut v = vec![1, 2]; + v.push(3); + v.push(4); + v.push(5); + v +} + +fn main() { + assert_eq!(vec_reallocate().len(), 5); + assert_eq!(vec_into_iter(), 30); + assert_eq!(vec_into_iter_zst(), 0); + assert_eq!(make_vec().capacity(), 4); + assert_eq!(make_vec_macro(), [1, 2]); + assert_eq!(make_vec_macro_repeat(), [42; 5]); + assert_eq!(make_vec_macro_repeat_zeroed(), [0; 7]); +} diff --git a/src/tools/miri/tests/run-pass/arrays.rs b/src/tools/miri/tests/run-pass/arrays.rs new file mode 100644 index 0000000000..469dde3091 --- /dev/null +++ b/src/tools/miri/tests/run-pass/arrays.rs @@ -0,0 +1,45 @@ +fn empty_array() -> [u16; 0] { + [] +} + +fn mini_array() -> [u16; 1] { + [42] +} + +fn big_array() -> [u16; 5] { + [5, 4, 3, 2, 1] +} + +fn array_array() -> [[u8; 2]; 3] { + [[5, 4], [3, 2], [1, 0]] +} + +fn index_unsafe() -> i32 { + let a = [0, 10, 20, 30]; + unsafe { *a.get_unchecked(2) } +} + +fn index() -> i32 { + let a = [0, 10, 20, 30]; + a[2] +} + +fn array_repeat() -> [u8; 8] { + [42; 8] +} + +fn slice_index() -> u8 { + let arr: &[_] = &[101, 102, 103, 104, 105, 106]; + arr[5] +} + +fn main() { + assert_eq!(empty_array(), []); + assert_eq!(index_unsafe(), 20); + assert_eq!(index(), 20); + assert_eq!(slice_index(), 106); + assert_eq!(big_array(), [5, 4, 3, 2, 1]); + assert_eq!(array_array(), [[5, 4], [3, 2], [1, 0]]); + assert_eq!(array_repeat(), [42; 8]); + assert_eq!(mini_array(), [42]); +} diff --git a/src/tools/miri/tests/run-pass/associated-const.rs b/src/tools/miri/tests/run-pass/associated-const.rs new file mode 100644 index 0000000000..fe5da49f80 --- /dev/null +++ b/src/tools/miri/tests/run-pass/associated-const.rs @@ -0,0 +1,21 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+
+trait Foo {
+    const ID: i32;
+}
+
+impl Foo for i32 {
+    const ID: i32 = 1;
+}
+
+fn main() {
+    assert_eq!(1, <i32 as Foo>::ID);
+}
diff --git a/src/tools/miri/tests/run-pass/assume_bug.rs b/src/tools/miri/tests/run-pass/assume_bug.rs
new file mode 100644
index 0000000000..e14f875c02
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/assume_bug.rs
@@ -0,0 +1,3 @@
+fn main() {
+    vec![()].into_iter();
+}
diff --git a/src/tools/miri/tests/run-pass/atomic-access-bool.rs b/src/tools/miri/tests/run-pass/atomic-access-bool.rs
new file mode 100644
index 0000000000..ada5847054
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/atomic-access-bool.rs
@@ -0,0 +1,30 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT};
+use std::sync::atomic::Ordering::*;
+
+static mut ATOMIC: AtomicBool = ATOMIC_BOOL_INIT;
+
+fn main() {
+    unsafe {
+        assert_eq!(*ATOMIC.get_mut(), false);
+        ATOMIC.store(true, SeqCst);
+        assert_eq!(*ATOMIC.get_mut(), true);
+        ATOMIC.fetch_or(false, SeqCst);
+        assert_eq!(*ATOMIC.get_mut(), true);
+        ATOMIC.fetch_and(false, SeqCst);
+        assert_eq!(*ATOMIC.get_mut(), false);
+        ATOMIC.fetch_nand(true, SeqCst);
+        assert_eq!(*ATOMIC.get_mut(), true);
+        ATOMIC.fetch_xor(true, SeqCst);
+        assert_eq!(*ATOMIC.get_mut(), false);
+    }
+}
diff --git a/src/tools/miri/tests/run-pass/atomic-compare_exchange.rs b/src/tools/miri/tests/run-pass/atomic-compare_exchange.rs
new file mode 100644
index 0000000000..61e9a96588
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/atomic-compare_exchange.rs
@@ -0,0 +1,36 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::sync::atomic::{AtomicIsize, ATOMIC_ISIZE_INIT};
+use std::sync::atomic::Ordering::*;
+
+static ATOMIC: AtomicIsize = ATOMIC_ISIZE_INIT;
+
+fn main() {
+    // Make sure trans can emit all the intrinsics correctly
+    ATOMIC.compare_exchange(0, 1, Relaxed, Relaxed).ok();
+    ATOMIC.compare_exchange(0, 1, Acquire, Relaxed).ok();
+    ATOMIC.compare_exchange(0, 1, Release, Relaxed).ok();
+    ATOMIC.compare_exchange(0, 1, AcqRel, Relaxed).ok();
+    ATOMIC.compare_exchange(0, 1, SeqCst, Relaxed).ok();
+    ATOMIC.compare_exchange(0, 1, Acquire, Acquire).ok();
+    ATOMIC.compare_exchange(0, 1, AcqRel, Acquire).ok();
+    ATOMIC.compare_exchange(0, 1, SeqCst, Acquire).ok();
+    ATOMIC.compare_exchange(0, 1, SeqCst, SeqCst).ok();
+    ATOMIC.compare_exchange_weak(0, 1, Relaxed, Relaxed).ok();
+    ATOMIC.compare_exchange_weak(0, 1, Acquire, Relaxed).ok();
+    ATOMIC.compare_exchange_weak(0, 1, Release, Relaxed).ok();
+    ATOMIC.compare_exchange_weak(0, 1, AcqRel, Relaxed).ok();
+    ATOMIC.compare_exchange_weak(0, 1, SeqCst, Relaxed).ok();
+    ATOMIC.compare_exchange_weak(0, 1, Acquire, Acquire).ok();
+    ATOMIC.compare_exchange_weak(0, 1, AcqRel, Acquire).ok();
+    ATOMIC.compare_exchange_weak(0, 1, SeqCst, Acquire).ok();
+    ATOMIC.compare_exchange_weak(0, 1, SeqCst, SeqCst).ok();
+}
diff --git a/src/tools/miri/tests/run-pass/aux_test.rs b/src/tools/miri/tests/run-pass/aux_test.rs
new file mode 100644
index 0000000000..beed82e058
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/aux_test.rs
@@ -0,0 +1,9 @@
+// aux-build:dep.rs
+
+// ignore-cross-compile
+
+extern crate dep;
+
+fn main() {
+    dep::foo();
+}
diff --git a/src/tools/miri/tests/run-pass/auxiliary/dep.rs b/src/tools/miri/tests/run-pass/auxiliary/dep.rs
new file mode 100644
index 0000000000..b76b4321d6
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/auxiliary/dep.rs
@@ -0,0 +1 @@
+pub fn foo() {}
diff --git a/src/tools/miri/tests/run-pass/bad_substs.rs b/src/tools/miri/tests/run-pass/bad_substs.rs
new file mode 100644
index 0000000000..d8da2de5d6
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/bad_substs.rs
@@ -0,0 +1,4 @@
+fn main() {
+    let f: fn(i32) -> Option<i32> = Some::<i32>;
+    f(42);
+}
diff --git a/src/tools/miri/tests/run-pass/binops.rs b/src/tools/miri/tests/run-pass/binops.rs
new file mode 100644
index 0000000000..a03b96fa49
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/binops.rs
@@ -0,0 +1,91 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+ +// Binop corner cases + +fn test_nil() { + assert_eq!((), ()); + assert!((!(() != ()))); + assert!((!(() < ()))); + assert!((() <= ())); + assert!((!(() > ()))); + assert!((() >= ())); +} + +fn test_bool() { + assert!((!(true < false))); + assert!((!(true <= false))); + assert!((true > false)); + assert!((true >= false)); + + assert!((false < true)); + assert!((false <= true)); + assert!((!(false > true))); + assert!((!(false >= true))); + + // Bools support bitwise binops + assert_eq!(false & false, false); + assert_eq!(true & false, false); + assert_eq!(true & true, true); + assert_eq!(false | false, false); + assert_eq!(true | false, true); + assert_eq!(true | true, true); + assert_eq!(false ^ false, false); + assert_eq!(true ^ false, true); + assert_eq!(true ^ true, false); +} + +fn test_ptr() { + unsafe { + let p1: *const u8 = ::std::mem::transmute(0_usize); + let p2: *const u8 = ::std::mem::transmute(0_usize); + let p3: *const u8 = ::std::mem::transmute(1_usize); + + assert_eq!(p1, p2); + assert!(p1 != p3); + assert!(p1 < p3); + assert!(p1 <= p3); + assert!(p3 > p1); + assert!(p3 >= p3); + assert!(p1 <= p2); + assert!(p1 >= p2); + } +} + +#[derive(PartialEq, Debug)] +struct P { + x: isize, + y: isize, +} + +fn p(x: isize, y: isize) -> P { + P { + x: x, + y: y + } +} + +fn test_class() { + let q = p(1, 2); + let mut r = p(1, 2); + + assert_eq!(q, r); + r.y = 17; + assert!((r.y != q.y)); + assert_eq!(r.y, 17); + assert!((q != r)); +} + +pub fn main() { + test_nil(); + test_bool(); + test_ptr(); + test_class(); +} diff --git a/src/tools/miri/tests/run-pass/bools.rs b/src/tools/miri/tests/run-pass/bools.rs new file mode 100644 index 0000000000..103d7eac27 --- /dev/null +++ b/src/tools/miri/tests/run-pass/bools.rs @@ -0,0 +1,28 @@ +fn boolean() -> bool { + true +} + +fn if_false() -> i64 { + let c = false; + if c { 1 } else { 0 } +} + +fn if_true() -> i64 { + let c = true; + if c { 1 } else { 0 } +} + +fn match_bool() -> i16 { + let b = true; + match b { + true => 1, + _ => 0, + } +} + +fn main() { + assert!(boolean()); + assert_eq!(if_false(), 0); + assert_eq!(if_true(), 1); + assert_eq!(match_bool(), 1); +} diff --git a/src/tools/miri/tests/run-pass/box_box_trait.rs b/src/tools/miri/tests/run-pass/box_box_trait.rs new file mode 100644 index 0000000000..57eef52d57 --- /dev/null +++ b/src/tools/miri/tests/run-pass/box_box_trait.rs @@ -0,0 +1,29 @@ +#![feature(box_syntax)] + +struct DroppableStruct; + +static mut DROPPED: bool = false; + +impl Drop for DroppableStruct { + fn drop(&mut self) { + unsafe { DROPPED = true; } + } +} + +trait MyTrait { fn dummy(&self) { } } +impl MyTrait for Box {} + +struct Whatever { w: Box } +impl Whatever { + fn new(w: Box) -> Whatever { + Whatever { w: w } + } +} + +fn main() { + { + let f: Box<_> = box DroppableStruct; + let _a = Whatever::new(box f as Box); + } + assert!(unsafe { DROPPED }); +} diff --git a/src/tools/miri/tests/run-pass/btreemap.rs b/src/tools/miri/tests/run-pass/btreemap.rs new file mode 100644 index 0000000000..0fd28d6f1e --- /dev/null +++ b/src/tools/miri/tests/run-pass/btreemap.rs @@ -0,0 +1,17 @@ +// mir validation can't cope with `mem::uninitialized()`, so this test fails with validation & full-MIR. 
+// compile-flags: -Zmir-emit-validate=0 + +#[derive(PartialEq, Eq, PartialOrd, Ord)] +pub enum Foo { + A(&'static str), + _B, + _C, +} + +pub fn main() { + let mut b = std::collections::BTreeSet::new(); + b.insert(Foo::A("\'")); + b.insert(Foo::A("/=")); + b.insert(Foo::A("#")); + b.insert(Foo::A("0o")); +} diff --git a/src/tools/miri/tests/run-pass/c_enums.rs b/src/tools/miri/tests/run-pass/c_enums.rs new file mode 100644 index 0000000000..11897b73eb --- /dev/null +++ b/src/tools/miri/tests/run-pass/c_enums.rs @@ -0,0 +1,32 @@ +enum Foo { + Bar = 42, + Baz, + Quux = 100, +} + +enum Signed { + Bar = -42, + Baz, + Quux = 100, +} + +fn foo() -> [u8; 3] { + [Foo::Bar as u8, Foo::Baz as u8, Foo::Quux as u8] +} + +fn signed() -> [i8; 3] { + [Signed::Bar as i8, Signed::Baz as i8, Signed::Quux as i8] +} + +fn unsafe_match() -> bool { + match unsafe { std::mem::transmute::(43) } { + Foo::Baz => true, + _ => false, + } +} + +fn main() { + assert_eq!(foo(), [42, 43, 100]); + assert_eq!(signed(), [-42, -41, 100]); + assert!(unsafe_match()); +} diff --git a/src/tools/miri/tests/run-pass/call_drop_on_array_elements.rs b/src/tools/miri/tests/run-pass/call_drop_on_array_elements.rs new file mode 100644 index 0000000000..c9b59f635e --- /dev/null +++ b/src/tools/miri/tests/run-pass/call_drop_on_array_elements.rs @@ -0,0 +1,22 @@ +struct Bar(u16); // ZSTs are tested separately + +static mut DROP_COUNT: usize = 0; + +impl Drop for Bar { + fn drop(&mut self) { + assert_eq!(self.0 as usize, unsafe { DROP_COUNT }); // tests whether we are called at a valid address + unsafe { DROP_COUNT += 1; } + } +} + +fn main() { + let b = [Bar(0), Bar(1), Bar(2), Bar(3)]; + assert_eq!(unsafe { DROP_COUNT }, 0); + drop(b); + assert_eq!(unsafe { DROP_COUNT }, 4); + + // check empty case + let b : [Bar; 0] = []; + drop(b); + assert_eq!(unsafe { DROP_COUNT }, 4); +} diff --git a/src/tools/miri/tests/run-pass/call_drop_on_fat_ptr_array_elements.rs b/src/tools/miri/tests/run-pass/call_drop_on_fat_ptr_array_elements.rs new file mode 100644 index 0000000000..a1ab5c45e3 --- /dev/null +++ b/src/tools/miri/tests/run-pass/call_drop_on_fat_ptr_array_elements.rs @@ -0,0 +1,20 @@ +trait Foo {} + +struct Bar; + +impl Foo for Bar {} + +static mut DROP_COUNT: usize = 0; + +impl Drop for Bar { + fn drop(&mut self) { + unsafe { DROP_COUNT += 1; } + } +} + +fn main() { + let b: [Box; 4] = [Box::new(Bar), Box::new(Bar), Box::new(Bar), Box::new(Bar)]; + assert_eq!(unsafe { DROP_COUNT }, 0); + drop(b); + assert_eq!(unsafe { DROP_COUNT }, 4); +} diff --git a/src/tools/miri/tests/run-pass/call_drop_on_zst_array_elements.rs b/src/tools/miri/tests/run-pass/call_drop_on_zst_array_elements.rs new file mode 100644 index 0000000000..1887130fdd --- /dev/null +++ b/src/tools/miri/tests/run-pass/call_drop_on_zst_array_elements.rs @@ -0,0 +1,21 @@ +struct Bar; + +static mut DROP_COUNT: usize = 0; + +impl Drop for Bar { + fn drop(&mut self) { + unsafe { DROP_COUNT += 1; } + } +} + +fn main() { + let b = [Bar, Bar, Bar, Bar]; + assert_eq!(unsafe { DROP_COUNT }, 0); + drop(b); + assert_eq!(unsafe { DROP_COUNT }, 4); + + // check empty case + let b : [Bar; 0] = []; + drop(b); + assert_eq!(unsafe { DROP_COUNT }, 4); +} diff --git a/src/tools/miri/tests/run-pass/call_drop_through_owned_slice.rs b/src/tools/miri/tests/run-pass/call_drop_through_owned_slice.rs new file mode 100644 index 0000000000..3ec6be65ed --- /dev/null +++ b/src/tools/miri/tests/run-pass/call_drop_through_owned_slice.rs @@ -0,0 +1,16 @@ +struct Bar; + +static mut DROP_COUNT: usize = 0; + +impl 
Drop for Bar {
+    fn drop(&mut self) {
+        unsafe { DROP_COUNT += 1; }
+    }
+}
+
+fn main() {
+    let b: Box<[Bar]> = vec![Bar, Bar, Bar, Bar].into_boxed_slice();
+    assert_eq!(unsafe { DROP_COUNT }, 0);
+    drop(b);
+    assert_eq!(unsafe { DROP_COUNT }, 4);
+}
diff --git a/src/tools/miri/tests/run-pass/call_drop_through_trait_object.rs b/src/tools/miri/tests/run-pass/call_drop_through_trait_object.rs
new file mode 100644
index 0000000000..9b6acf0b14
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/call_drop_through_trait_object.rs
@@ -0,0 +1,20 @@
+trait Foo {}
+
+struct Bar;
+
+static mut DROP_CALLED: bool = false;
+
+impl Drop for Bar {
+    fn drop(&mut self) {
+        unsafe { DROP_CALLED = true; }
+    }
+}
+
+impl Foo for Bar {}
+
+fn main() {
+    let b: Box<Foo> = Box::new(Bar);
+    assert!(unsafe { !DROP_CALLED });
+    drop(b);
+    assert!(unsafe { DROP_CALLED });
+}
diff --git a/src/tools/miri/tests/run-pass/call_drop_through_trait_object_rc.rs b/src/tools/miri/tests/run-pass/call_drop_through_trait_object_rc.rs
new file mode 100644
index 0000000000..ce56ca6a1c
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/call_drop_through_trait_object_rc.rs
@@ -0,0 +1,22 @@
+trait Foo {}
+
+struct Bar;
+
+static mut DROP_CALLED: bool = false;
+
+impl Drop for Bar {
+    fn drop(&mut self) {
+        unsafe { DROP_CALLED = true; }
+    }
+}
+
+impl Foo for Bar {}
+
+use std::rc::Rc;
+
+fn main() {
+    let b: Rc<Foo> = Rc::new(Bar);
+    assert!(unsafe { !DROP_CALLED });
+    drop(b);
+    assert!(unsafe { DROP_CALLED });
+}
diff --git a/src/tools/miri/tests/run-pass/calls.rs b/src/tools/miri/tests/run-pass/calls.rs
new file mode 100644
index 0000000000..c4ba4a9b70
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/calls.rs
@@ -0,0 +1,45 @@
+#![feature(const_fn)]
+
+fn call() -> i32 {
+    fn increment(x: i32) -> i32 {
+        x + 1
+    }
+    increment(1)
+}
+
+fn factorial_recursive() -> i64 {
+    fn fact(n: i64) -> i64 {
+        if n == 0 {
+            1
+        } else {
+            n * fact(n - 1)
+        }
+    }
+    fact(10)
+}
+
+fn call_generic() -> (i16, bool) {
+    fn id<T>(t: T) -> T { t }
+    (id(42), id(true))
+}
+
+// Test calling a very simple function from the standard library.
+fn cross_crate_fn_call() -> i64 {
+    if 1i32.is_positive() { 1 } else { 0 }
+}
+
+const fn foo(i: i64) -> i64 { *&i + 1 }
+
+fn const_fn_call() -> i64 {
+    let x = 5 + foo(5);
+    assert_eq!(x, 11);
+    x
+}
+
+fn main() {
+    assert_eq!(call(), 2);
+    assert_eq!(factorial_recursive(), 3628800);
+    assert_eq!(call_generic(), (42, true));
+    assert_eq!(cross_crate_fn_call(), 1);
+    assert_eq!(const_fn_call(), 11);
+}
diff --git a/src/tools/miri/tests/run-pass/cast-rfc0401-vtable-kinds.rs b/src/tools/miri/tests/run-pass/cast-rfc0401-vtable-kinds.rs
new file mode 100644
index 0000000000..afbd4760a3
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/cast-rfc0401-vtable-kinds.rs
@@ -0,0 +1,59 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+// FIXME: remove the next line when https://github.com/rust-lang/rust/issues/43358 is resolved
+// compile-flags: -Zmir-opt-level=0
+
+// Check that you can cast between different pointers to trait objects
+// whose vtables have the same kind (both lengths, or both trait pointers).
+ +trait Foo { + fn foo(&self, _: T) -> u32 { 42 } +} + +trait Bar { + fn bar(&self) { println!("Bar!"); } +} + +impl Foo for () {} +impl Foo for u32 { fn foo(&self, _: u32) -> u32 { self+43 } } +impl Bar for () {} + +unsafe fn round_trip_and_call<'a>(t: *const (Foo+'a)) -> u32 { + let foo_e : *const Foo = t as *const _; + let r_1 = foo_e as *mut Foo; + + (&*r_1).foo(0) +} + +#[repr(C)] +struct FooS(T); +#[repr(C)] +struct BarS(T); + +fn foo_to_bar(u: *const FooS) -> *const BarS { + u as *const BarS +} + +fn main() { + let x = 4u32; + let y : &Foo = &x; + let fl = unsafe { round_trip_and_call(y as *const Foo) }; + assert_eq!(fl, (43+4)); + + let s = FooS([0,1,2]); + let u: &FooS<[u32]> = &s; + let u: *const FooS<[u32]> = u; + let bar_ref : *const BarS<[u32]> = foo_to_bar(u); + let z : &BarS<[u32]> = unsafe{&*bar_ref}; + assert_eq!(&z.0, &[0,1,2]); + // If validation fails here, that's likely because an immutable suspension is recovered mutably. +} diff --git a/src/tools/miri/tests/run-pass/cast_fn_ptr.rs b/src/tools/miri/tests/run-pass/cast_fn_ptr.rs new file mode 100644 index 0000000000..109e8dfc2a --- /dev/null +++ b/src/tools/miri/tests/run-pass/cast_fn_ptr.rs @@ -0,0 +1,9 @@ +fn main() { + fn f(_: *const u8) {} + + let g = unsafe { + std::mem::transmute::(f) + }; + + g(&42 as *const _); +} diff --git a/src/tools/miri/tests/run-pass/cast_fn_ptr_unsafe.rs b/src/tools/miri/tests/run-pass/cast_fn_ptr_unsafe.rs new file mode 100644 index 0000000000..0cabb369bf --- /dev/null +++ b/src/tools/miri/tests/run-pass/cast_fn_ptr_unsafe.rs @@ -0,0 +1,8 @@ +fn main() { + fn f() {} + + let g = f as fn() as unsafe fn(); + unsafe { + g(); + } +} diff --git a/src/tools/miri/tests/run-pass/char.rs b/src/tools/miri/tests/run-pass/char.rs new file mode 100644 index 0000000000..505c09b0ad --- /dev/null +++ b/src/tools/miri/tests/run-pass/char.rs @@ -0,0 +1,9 @@ +fn main() { + let c = 'x'; + assert_eq!(c, 'x'); + assert!('a' < 'z'); + assert!('1' < '9'); + assert_eq!(std::char::from_u32('x' as u32).unwrap(), 'x'); + // FIXME: + // assert_eq!(std::char::from_u32('x' as u32), Some('x')); +} diff --git a/src/tools/miri/tests/run-pass/closure-drop.rs b/src/tools/miri/tests/run-pass/closure-drop.rs new file mode 100644 index 0000000000..f1bdafaeb1 --- /dev/null +++ b/src/tools/miri/tests/run-pass/closure-drop.rs @@ -0,0 +1,25 @@ +struct Foo<'a>(&'a mut bool); + +impl<'a> Drop for Foo<'a> { + fn drop(&mut self) { + *self.0 = true; + } +} + +fn f(t: T) { + t() +} + +fn main() { + let mut ran_drop = false; + { + let x = Foo(&mut ran_drop); + // this closure never by val uses its captures + // so it's basically a fn(&self) + // the shim used to not drop the `x` + let x = move || { let _ = x; }; + f(x); + } + assert!(ran_drop); +} + diff --git a/src/tools/miri/tests/run-pass/closure-field-ty.rs b/src/tools/miri/tests/run-pass/closure-field-ty.rs new file mode 100644 index 0000000000..0d27728d22 --- /dev/null +++ b/src/tools/miri/tests/run-pass/closure-field-ty.rs @@ -0,0 +1,10 @@ +// miri issue #304 +fn main() { + let mut y = 0; + { + let mut box_maybe_closure = Box::new(None); + *box_maybe_closure = Some(|| { y += 1; }); + (box_maybe_closure.unwrap())(); + } + assert_eq!(y, 1); +} diff --git a/src/tools/miri/tests/run-pass/closures.rs b/src/tools/miri/tests/run-pass/closures.rs new file mode 100644 index 0000000000..9b379051eb --- /dev/null +++ b/src/tools/miri/tests/run-pass/closures.rs @@ -0,0 +1,48 @@ +fn simple() -> i32 { + let y = 10; + let f = |x| x + y; + f(2) +} + +fn crazy_closure() -> (i32, i32, i32) { + fn 
inner(t: T) -> (i32, T, T) { + struct NonCopy; + let x = NonCopy; + + let a = 2; + let b = 40; + let f = move |y, z, asdf| { + drop(x); + (a + b + y + z, asdf, t) + }; + f(a, b, t) + } + + inner(10) +} + +fn closure_arg_adjustment_problem() -> i64 { + fn once(f: F) { f(2); } + let mut y = 1; + { + let f = |x| y += x; + once(f); + } + y +} + +fn fn_once_closure_with_multiple_args() -> i64 { + fn once i64>(f: F) -> i64 { f(2, 3) } + let y = 1; + { + let f = |x, z| x + y + z; + once(f) + } +} + +fn main() { + assert_eq!(simple(), 12); + assert_eq!(crazy_closure(), (84, 10, 10)); + assert_eq!(closure_arg_adjustment_problem(), 3); + assert_eq!(fn_once_closure_with_multiple_args(), 6); +} diff --git a/src/tools/miri/tests/run-pass/const-vec-of-fns.rs b/src/tools/miri/tests/run-pass/const-vec-of-fns.rs new file mode 100644 index 0000000000..0338a766e2 --- /dev/null +++ b/src/tools/miri/tests/run-pass/const-vec-of-fns.rs @@ -0,0 +1,29 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// pretty-expanded FIXME #23616 + +/*! + * Try to double-check that static fns have the right size (with or + * without dummy env ptr, as appropriate) by iterating a size-2 array. + * If the static size differs from the runtime size, the second element + * should be read as a null or otherwise wrong pointer and crash. + */ + +fn f() { } +static mut CLOSURES: &'static mut [fn()] = &mut [f as fn(), f as fn()]; + +pub fn main() { + unsafe { + for closure in &mut *CLOSURES { + (*closure)() + } + } +} diff --git a/src/tools/miri/tests/run-pass/constants.rs b/src/tools/miri/tests/run-pass/constants.rs new file mode 100644 index 0000000000..718c852601 --- /dev/null +++ b/src/tools/miri/tests/run-pass/constants.rs @@ -0,0 +1,9 @@ +const A: usize = *&5; + +fn foo() -> usize { + A +} + +fn main() { + assert_eq!(foo(), A); +} diff --git a/src/tools/miri/tests/run-pass/deriving-associated-types.rs b/src/tools/miri/tests/run-pass/deriving-associated-types.rs new file mode 100644 index 0000000000..b67ef85acf --- /dev/null +++ b/src/tools/miri/tests/run-pass/deriving-associated-types.rs @@ -0,0 +1,208 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub trait DeclaredTrait { + type Type; +} + +impl DeclaredTrait for i32 { + type Type = i32; +} + +pub trait WhereTrait { + type Type; +} + +impl WhereTrait for i32 { + type Type = i32; +} + +// Make sure we don't add a bound that just shares a name with an associated +// type. 
+pub mod module { + pub type Type = i32; +} + +#[derive(PartialEq, Debug)] +struct PrivateStruct(T); + +#[derive(PartialEq, Debug)] +struct TupleStruct( + module::Type, + Option, + A, + PrivateStruct, + B, + B::Type, + Option, + ::Type, + Option<::Type>, + C, + C::Type, + Option, + ::Type, + Option<::Type>, + ::Type, +) where C: WhereTrait; + +#[derive(PartialEq, Debug)] +pub struct Struct where C: WhereTrait { + m1: module::Type, + m2: Option, + a1: A, + a2: PrivateStruct, + b: B, + b1: B::Type, + b2: Option, + b3: ::Type, + b4: Option<::Type>, + c: C, + c1: C::Type, + c2: Option, + c3: ::Type, + c4: Option<::Type>, + d: ::Type, +} + +#[derive(PartialEq, Debug)] +enum Enum where C: WhereTrait { + Unit, + Seq( + module::Type, + Option, + A, + PrivateStruct, + B, + B::Type, + Option, + ::Type, + Option<::Type>, + C, + C::Type, + Option, + ::Type, + Option<::Type>, + ::Type, + ), + Map { + m1: module::Type, + m2: Option, + a1: A, + a2: PrivateStruct, + b: B, + b1: B::Type, + b2: Option, + b3: ::Type, + b4: Option<::Type>, + c: C, + c1: C::Type, + c2: Option, + c3: ::Type, + c4: Option<::Type>, + d: ::Type, + }, +} + +fn main() { + + let e: Enum< + i32, + i32, + i32, + > = Enum::Seq( + 0, + None, + 0, + PrivateStruct(0), + 0, + 0, + None, + 0, + None, + 0, + 0, + None, + 0, + None, + 0, + ); + assert_eq!(e, e); + + let e: Enum< + i32, + i32, + i32, + > = Enum::Map { + m1: 0, + m2: None, + a1: 0, + a2: PrivateStruct(0), + b: 0, + b1: 0, + b2: None, + b3: 0, + b4: None, + c: 0, + c1: 0, + c2: None, + c3: 0, + c4: None, + d: 0, + }; + assert_eq!(e, e); + let e: TupleStruct< + i32, + i32, + i32, + > = TupleStruct( + 0, + None, + 0, + PrivateStruct(0), + 0, + 0, + None, + 0, + None, + 0, + 0, + None, + 0, + None, + 0, + ); + assert_eq!(e, e); + + let e: Struct< + i32, + i32, + i32, + > = Struct { + m1: 0, + m2: None, + a1: 0, + a2: PrivateStruct(0), + b: 0, + b1: 0, + b2: None, + b3: 0, + b4: None, + c: 0, + c1: 0, + c2: None, + c3: 0, + c4: None, + d: 0, + }; + assert_eq!(e, e); + + let e = Enum::Unit::; + assert_eq!(e, e); +} diff --git a/src/tools/miri/tests/run-pass/drop_empty_slice.rs b/src/tools/miri/tests/run-pass/drop_empty_slice.rs new file mode 100644 index 0000000000..b21c8a612c --- /dev/null +++ b/src/tools/miri/tests/run-pass/drop_empty_slice.rs @@ -0,0 +1,7 @@ +#![feature(box_syntax)] + +fn main() { + // With the nested Vec, this is calling Offset(Unique::empty(), 0) on drop. + let args : Vec> = Vec::new(); + let _ = box args; +} diff --git a/src/tools/miri/tests/run-pass/dst-field-align.rs b/src/tools/miri/tests/run-pass/dst-field-align.rs new file mode 100644 index 0000000000..5631b65ed9 --- /dev/null +++ b/src/tools/miri/tests/run-pass/dst-field-align.rs @@ -0,0 +1,77 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![allow(dead_code)] + +struct Foo { + a: u16, + b: T +} + +trait Bar { + fn get(&self) -> usize; +} + +impl Bar for usize { + fn get(&self) -> usize { *self } +} + +struct Baz { + a: T +} + +struct HasDrop { + ptr: Box, + data: T +} + +fn main() { + // Test that zero-offset works properly + let b : Baz = Baz { a: 7 }; + assert_eq!(b.a.get(), 7); + let b : &Baz = &b; + assert_eq!(b.a.get(), 7); + + // Test that the field is aligned properly + let f : Foo = Foo { a: 0, b: 11 }; + assert_eq!(f.b.get(), 11); + let ptr1 : *const u8 = &f.b as *const _ as *const u8; + + let f : &Foo = &f; + let ptr2 : *const u8 = &f.b as *const _ as *const u8; + assert_eq!(f.b.get(), 11); + + // The pointers should be the same + assert_eq!(ptr1, ptr2); + + // Test that nested DSTs work properly + let f : Foo> = Foo { a: 0, b: Foo { a: 1, b: 17 }}; + assert_eq!(f.b.b.get(), 17); + let f : &Foo> = &f; + assert_eq!(f.b.b.get(), 17); + + // Test that get the pointer via destructuring works + + let f : Foo = Foo { a: 0, b: 11 }; + let f : &Foo = &f; + let &Foo { a: _, b: ref bar } = f; + assert_eq!(bar.get(), 11); + + // Make sure that drop flags don't screw things up + + let d : HasDrop> = HasDrop { + ptr: Box::new(0), + data: Baz { a: [1,2,3,4] } + }; + assert_eq!([1,2,3,4], d.data.a); + + let d : &HasDrop> = &d; + assert_eq!(&[1,2,3,4], &d.data.a); +} diff --git a/src/tools/miri/tests/run-pass/dst-irrefutable-bind.rs b/src/tools/miri/tests/run-pass/dst-irrefutable-bind.rs new file mode 100644 index 0000000000..9f8067f372 --- /dev/null +++ b/src/tools/miri/tests/run-pass/dst-irrefutable-bind.rs @@ -0,0 +1,24 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +struct Test(T); + +fn main() { + let x = Test([1,2,3]); + let x : &Test<[i32]> = &x; + + let & ref _y = x; + + // Make sure binding to a fat pointer behind a reference + // still works + let slice = &[1,2,3]; + let x = Test(&slice); + let Test(&_slice) = x; +} diff --git a/src/tools/miri/tests/run-pass/dst-raw.rs b/src/tools/miri/tests/run-pass/dst-raw.rs new file mode 100644 index 0000000000..3a74626b02 --- /dev/null +++ b/src/tools/miri/tests/run-pass/dst-raw.rs @@ -0,0 +1,113 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// Test DST raw pointers + + +trait Trait { + fn foo(&self) -> isize; +} + +struct A { + f: isize +} +impl Trait for A { + fn foo(&self) -> isize { + self.f + } +} + +struct Foo { + f: T +} + +pub fn main() { + // raw trait object + let x = A { f: 42 }; + let z: *const Trait = &x; + let r = unsafe { + (&*z).foo() + }; + assert_eq!(r, 42); + + // raw DST struct + let p = Foo {f: A { f: 42 }}; + let o: *const Foo = &p; + let r = unsafe { + (&*o).f.foo() + }; + assert_eq!(r, 42); + + // raw slice + let a: *const [_] = &[1, 2, 3]; + unsafe { + let b = (*a)[2]; + assert_eq!(b, 3); + let len = (*a).len(); + assert_eq!(len, 3); + } + + // raw slice with explicit cast + let a = &[1, 2, 3] as *const [i32]; + unsafe { + let b = (*a)[2]; + assert_eq!(b, 3); + let len = (*a).len(); + assert_eq!(len, 3); + } + + // raw DST struct with slice + let c: *const Foo<[_]> = &Foo {f: [1, 2, 3]}; + unsafe { + let b = (&*c).f[0]; + assert_eq!(b, 1); + let len = (&*c).f.len(); + assert_eq!(len, 3); + } + + // all of the above with *mut + let mut x = A { f: 42 }; + let z: *mut Trait = &mut x; + let r = unsafe { + (&*z).foo() + }; + assert_eq!(r, 42); + + let mut p = Foo {f: A { f: 42 }}; + let o: *mut Foo = &mut p; + let r = unsafe { + (&*o).f.foo() + }; + assert_eq!(r, 42); + + let a: *mut [_] = &mut [1, 2, 3]; + unsafe { + let b = (*a)[2]; + assert_eq!(b, 3); + let len = (*a).len(); + assert_eq!(len, 3); + } + + let a = &mut [1, 2, 3] as *mut [i32]; + unsafe { + let b = (*a)[2]; + assert_eq!(b, 3); + let len = (*a).len(); + assert_eq!(len, 3); + } + + let c: *mut Foo<[_]> = &mut Foo {f: [1, 2, 3]}; + unsafe { + let b = (&*c).f[0]; + assert_eq!(b, 1); + let len = (&*c).f.len(); + assert_eq!(len, 3); + } +} diff --git a/src/tools/miri/tests/run-pass/dst-struct-sole.rs b/src/tools/miri/tests/run-pass/dst-struct-sole.rs new file mode 100644 index 0000000000..58d7b35a52 --- /dev/null +++ b/src/tools/miri/tests/run-pass/dst-struct-sole.rs @@ -0,0 +1,85 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// As dst-struct.rs, but the unsized field is the only field in the struct. + + +struct Fat { + ptr: T +} + +// x is a fat pointer +fn foo(x: &Fat<[isize]>) { + let y = &x.ptr; + assert_eq!(x.ptr.len(), 3); + assert_eq!(y[0], 1); + assert_eq!(x.ptr[1], 2); +} + +fn foo2(x: &Fat<[T]>) { + let y = &x.ptr; + let bar = Bar; + assert_eq!(x.ptr.len(), 3); + assert_eq!(y[0].to_bar(), bar); + assert_eq!(x.ptr[1].to_bar(), bar); +} + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +struct Bar; + +trait ToBar { + fn to_bar(&self) -> Bar; +} + +impl ToBar for Bar { + fn to_bar(&self) -> Bar { + *self + } +} + +pub fn main() { + // With a vec of ints. + let f1 = Fat { ptr: [1, 2, 3] }; + foo(&f1); + let f2 = &f1; + foo(f2); + let f3: &Fat<[isize]> = f2; + foo(f3); + let f4: &Fat<[isize]> = &f1; + foo(f4); + let f5: &Fat<[isize]> = &Fat { ptr: [1, 2, 3] }; + foo(f5); + + // With a vec of Bars. + let bar = Bar; + let f1 = Fat { ptr: [bar, bar, bar] }; + foo2(&f1); + let f2 = &f1; + foo2(f2); + let f3: &Fat<[Bar]> = f2; + foo2(f3); + let f4: &Fat<[Bar]> = &f1; + foo2(f4); + let f5: &Fat<[Bar]> = &Fat { ptr: [bar, bar, bar] }; + foo2(f5); + + // Assignment. 
+ let f5: &mut Fat<[isize]> = &mut Fat { ptr: [1, 2, 3] }; + f5.ptr[1] = 34; + assert_eq!(f5.ptr[0], 1); + assert_eq!(f5.ptr[1], 34); + assert_eq!(f5.ptr[2], 3); + + // Zero size vec. + let f5: &Fat<[isize]> = &Fat { ptr: [] }; + assert!(f5.ptr.is_empty()); + let f5: &Fat<[Bar]> = &Fat { ptr: [] }; + assert!(f5.ptr.is_empty()); +} diff --git a/src/tools/miri/tests/run-pass/dst-struct.rs b/src/tools/miri/tests/run-pass/dst-struct.rs new file mode 100644 index 0000000000..932b571ecc --- /dev/null +++ b/src/tools/miri/tests/run-pass/dst-struct.rs @@ -0,0 +1,134 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + + +#![allow(unused_features)] +#![feature(box_syntax)] + +struct Fat { + f1: isize, + f2: &'static str, + ptr: T +} + +// x is a fat pointer +fn foo(x: &Fat<[isize]>) { + let y = &x.ptr; + assert_eq!(x.ptr.len(), 3); + assert_eq!(y[0], 1); + assert_eq!(x.ptr[1], 2); + assert_eq!(x.f1, 5); + assert_eq!(x.f2, "some str"); +} + +fn foo2(x: &Fat<[T]>) { + let y = &x.ptr; + let bar = Bar; + assert_eq!(x.ptr.len(), 3); + assert_eq!(y[0].to_bar(), bar); + assert_eq!(x.ptr[1].to_bar(), bar); + assert_eq!(x.f1, 5); + assert_eq!(x.f2, "some str"); +} + +fn foo3(x: &Fat>) { + let y = &x.ptr.ptr; + assert_eq!(x.f1, 5); + assert_eq!(x.f2, "some str"); + assert_eq!(x.ptr.f1, 8); + assert_eq!(x.ptr.f2, "deep str"); + assert_eq!(x.ptr.ptr.len(), 3); + assert_eq!(y[0], 1); + assert_eq!(x.ptr.ptr[1], 2); +} + + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +struct Bar; + +trait ToBar { + fn to_bar(&self) -> Bar; +} + +impl ToBar for Bar { + fn to_bar(&self) -> Bar { + *self + } +} + +pub fn main() { + // With a vec of ints. + let f1 = Fat { f1: 5, f2: "some str", ptr: [1, 2, 3] }; + foo(&f1); + let f2 = &f1; + foo(f2); + let f3: &Fat<[isize]> = f2; + foo(f3); + let f4: &Fat<[isize]> = &f1; + foo(f4); + let f5: &Fat<[isize]> = &Fat { f1: 5, f2: "some str", ptr: [1, 2, 3] }; + foo(f5); + + // With a vec of Bars. + let bar = Bar; + let f1 = Fat { f1: 5, f2: "some str", ptr: [bar, bar, bar] }; + foo2(&f1); + let f2 = &f1; + foo2(f2); + let f3: &Fat<[Bar]> = f2; + foo2(f3); + let f4: &Fat<[Bar]> = &f1; + foo2(f4); + let f5: &Fat<[Bar]> = &Fat { f1: 5, f2: "some str", ptr: [bar, bar, bar] }; + foo2(f5); + + // Assignment. + let f5: &mut Fat<[isize]> = &mut Fat { f1: 5, f2: "some str", ptr: [1, 2, 3] }; + f5.ptr[1] = 34; + assert_eq!(f5.ptr[0], 1); + assert_eq!(f5.ptr[1], 34); + assert_eq!(f5.ptr[2], 3); + + // Zero size vec. + let f5: &Fat<[isize]> = &Fat { f1: 5, f2: "some str", ptr: [] }; + assert!(f5.ptr.is_empty()); + let f5: &Fat<[Bar]> = &Fat { f1: 5, f2: "some str", ptr: [] }; + assert!(f5.ptr.is_empty()); + + // Deeply nested. + let f1 = Fat { f1: 5, f2: "some str", ptr: Fat { f1: 8, f2: "deep str", ptr: [1, 2, 3]} }; + foo3(&f1); + let f2 = &f1; + foo3(f2); + let f3: &Fat> = f2; + foo3(f3); + let f4: &Fat> = &f1; + foo3(f4); + let f5: &Fat> = + &Fat { f1: 5, f2: "some str", ptr: Fat { f1: 8, f2: "deep str", ptr: [1, 2, 3]} }; + foo3(f5); + + // Box. + let f1 = Box::new([1, 2, 3]); + assert_eq!((*f1)[1], 2); + let f2: Box<[isize]> = f1; + assert_eq!((*f2)[1], 2); + + // Nested Box. 
+ let f1 : Box> = box Fat { f1: 5, f2: "some str", ptr: [1, 2, 3] }; + foo(&*f1); + let f2 : Box> = f1; + foo(&*f2); + + // FIXME (#22405): Replace `Box::new` with `box` here when/if possible. + let f3 : Box> = + Box::>::new(Fat { f1: 5, f2: "some str", ptr: [1, 2, 3] }); + foo(&*f3); +} diff --git a/src/tools/miri/tests/run-pass/enum-nullable-const-null-with-fields.rs b/src/tools/miri/tests/run-pass/enum-nullable-const-null-with-fields.rs new file mode 100644 index 0000000000..1342c4e104 --- /dev/null +++ b/src/tools/miri/tests/run-pass/enum-nullable-const-null-with-fields.rs @@ -0,0 +1,22 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + + +use std::result::Result; +use std::result::Result::Ok; + +static C: Result<(), Box> = Ok(()); + +// This is because of yet another bad assertion (ICE) about the null side of a nullable enum. +// So we won't actually compile if the bug is present, but we check the value in main anyway. + +pub fn main() { + assert!(C.is_ok()); +} diff --git a/src/tools/miri/tests/run-pass/enums.rs b/src/tools/miri/tests/run-pass/enums.rs new file mode 100644 index 0000000000..1f27292904 --- /dev/null +++ b/src/tools/miri/tests/run-pass/enums.rs @@ -0,0 +1,34 @@ +enum MyEnum { + MyEmptyVariant, + MyNewtypeVariant(i32), + MyTupleVariant(i32, i32), + MyStructVariant { + my_first_field: i32, + my_second_field: i32, + } +} + +fn test(me: MyEnum) { + match me { + MyEnum::MyEmptyVariant => {}, + MyEnum::MyNewtypeVariant(ref val) => assert_eq!(val, &42), + MyEnum::MyTupleVariant(ref a, ref b) => { + assert_eq!(a, &43); + assert_eq!(b, &44); + }, + MyEnum::MyStructVariant { ref my_first_field, ref my_second_field } => { + assert_eq!(my_first_field, &45); + assert_eq!(my_second_field, &46); + }, + } +} + +fn main() { + test(MyEnum::MyEmptyVariant); + test(MyEnum::MyNewtypeVariant(42)); + test(MyEnum::MyTupleVariant(43, 44)); + test(MyEnum::MyStructVariant{ + my_first_field: 45, + my_second_field: 46, + }); +} diff --git a/src/tools/miri/tests/run-pass/float_fast_math.rs b/src/tools/miri/tests/run-pass/float_fast_math.rs new file mode 100644 index 0000000000..c1b4b55bd3 --- /dev/null +++ b/src/tools/miri/tests/run-pass/float_fast_math.rs @@ -0,0 +1,30 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![feature(core_intrinsics)] + +use std::intrinsics::{fadd_fast, fsub_fast, fmul_fast, fdiv_fast, frem_fast}; + +#[inline(never)] +pub fn test_operations(a: f64, b: f64) { + // make sure they all map to the correct operation + unsafe { + assert_eq!(fadd_fast(a, b), a + b); + assert_eq!(fsub_fast(a, b), a - b); + assert_eq!(fmul_fast(a, b), a * b); + assert_eq!(fdiv_fast(a, b), a / b); + assert_eq!(frem_fast(a, b), a % b); + } +} + +fn main() { + test_operations(1., 2.); + test_operations(10., 5.); +} diff --git a/src/tools/miri/tests/run-pass/floats.rs b/src/tools/miri/tests/run-pass/floats.rs new file mode 100644 index 0000000000..9c4d0594d1 --- /dev/null +++ b/src/tools/miri/tests/run-pass/floats.rs @@ -0,0 +1,11 @@ + +fn main() { + assert_eq!(6.0_f32*6.0_f32, 36.0_f32); + assert_eq!(6.0_f64*6.0_f64, 36.0_f64); + assert_eq!(-{5.0_f32}, -5.0_f32); + assert!((5.0_f32/0.0).is_infinite()); + assert!((-5.0_f32).sqrt().is_nan()); + let x: u64 = unsafe { std::mem::transmute(42.0_f64) }; + let y: f64 = unsafe { std::mem::transmute(x) }; + assert_eq!(y, 42.0_f64); +} diff --git a/src/tools/miri/tests/run-pass/fn_item_as_closure_trait_object.rs b/src/tools/miri/tests/run-pass/fn_item_as_closure_trait_object.rs new file mode 100644 index 0000000000..799f97a4f6 --- /dev/null +++ b/src/tools/miri/tests/run-pass/fn_item_as_closure_trait_object.rs @@ -0,0 +1,6 @@ +fn foo() {} + +fn main() { + let f: &Fn() = &foo; + f(); +} diff --git a/src/tools/miri/tests/run-pass/fn_item_with_args_as_closure_trait_object.rs b/src/tools/miri/tests/run-pass/fn_item_with_args_as_closure_trait_object.rs new file mode 100644 index 0000000000..79ece75c77 --- /dev/null +++ b/src/tools/miri/tests/run-pass/fn_item_with_args_as_closure_trait_object.rs @@ -0,0 +1,8 @@ +fn foo(i: i32) { + assert_eq!(i, 42); +} + +fn main() { + let f: &Fn(i32) = &foo; + f(42); +} diff --git a/src/tools/miri/tests/run-pass/fn_item_with_multiple_args_as_closure_trait_object.rs b/src/tools/miri/tests/run-pass/fn_item_with_multiple_args_as_closure_trait_object.rs new file mode 100644 index 0000000000..f4b5b449aa --- /dev/null +++ b/src/tools/miri/tests/run-pass/fn_item_with_multiple_args_as_closure_trait_object.rs @@ -0,0 +1,18 @@ +fn foo(i: i32, j: i32) { + assert_eq!(i, 42); + assert_eq!(j, 55); +} + +fn bar(i: i32, j: i32, k: f32) { + assert_eq!(i, 42); + assert_eq!(j, 55); + assert_eq!(k, 3.14159) +} + + +fn main() { + let f: &Fn(i32, i32) = &foo; + f(42, 55); + let f: &Fn(i32, i32, f32) = &bar; + f(42, 55, 3.14159); +} diff --git a/src/tools/miri/tests/run-pass/fn_ptr_as_closure_trait_object.rs b/src/tools/miri/tests/run-pass/fn_ptr_as_closure_trait_object.rs new file mode 100644 index 0000000000..24ae1f35bb --- /dev/null +++ b/src/tools/miri/tests/run-pass/fn_ptr_as_closure_trait_object.rs @@ -0,0 +1,15 @@ +fn foo() {} +fn bar(u: u32) { assert_eq!(u, 42); } +fn baa(u: u32, f: f32) { + assert_eq!(u, 42); + assert_eq!(f, 3.141); +} + +fn main() { + let f: &Fn() = &(foo as fn()); + f(); + let f: &Fn(u32) = &(bar as fn(u32)); + f(42); + let f: &Fn(u32, f32) = &(baa as fn(u32, f32)); + f(42, 3.141); +} diff --git a/src/tools/miri/tests/run-pass/function_pointers.rs b/src/tools/miri/tests/run-pass/function_pointers.rs new file mode 100644 index 0000000000..4f597d4a2e --- /dev/null +++ b/src/tools/miri/tests/run-pass/function_pointers.rs @@ -0,0 +1,46 @@ +fn f() -> i32 { + 42 +} + +fn g(i: i32) -> i32 { + i*42 +} + +fn h(i: i32, j: i32) -> i32 { + j * i * 7 +} + +fn return_fn_ptr() -> fn() -> i32 { + f +} + +fn call_fn_ptr() -> i32 { + 
return_fn_ptr()() } + +fn indirect<F: Fn() -> i32>(f: F) -> i32 { f() } +fn indirect_mut<F: FnMut() -> i32>(mut f: F) -> i32 { f() } +fn indirect_once<F: FnOnce() -> i32>(f: F) -> i32 { f() } + +fn indirect2<F: Fn(i32) -> i32>(f: F) -> i32 { f(10) } +fn indirect_mut2<F: FnMut(i32) -> i32>(mut f: F) -> i32 { f(10) } +fn indirect_once2<F: FnOnce(i32) -> i32>(f: F) -> i32 { f(10) } + +fn indirect3<F: Fn(i32, i32) -> i32>(f: F) -> i32 { f(10, 3) } +fn indirect_mut3<F: FnMut(i32, i32) -> i32>(mut f: F) -> i32 { f(10, 3) } +fn indirect_once3<F: FnOnce(i32, i32) -> i32>(f: F) -> i32 { f(10, 3) } + +fn main() { + assert_eq!(call_fn_ptr(), 42); + assert_eq!(indirect(f), 42); + assert_eq!(indirect_mut(f), 42); + assert_eq!(indirect_once(f), 42); + assert_eq!(indirect2(g), 420); + assert_eq!(indirect_mut2(g), 420); + assert_eq!(indirect_once2(g), 420); + assert_eq!(indirect3(h), 210); + assert_eq!(indirect_mut3(h), 210); + assert_eq!(indirect_once3(h), 210); + assert!(return_fn_ptr() == f); + assert!(return_fn_ptr() as unsafe fn() -> i32 == f as fn() -> i32 as unsafe fn() -> i32); +} diff --git a/src/tools/miri/tests/run-pass/generator_control_flow.rs b/src/tools/miri/tests/run-pass/generator_control_flow.rs new file mode 100644 index 0000000000..f15c7db9c2 --- /dev/null +++ b/src/tools/miri/tests/run-pass/generator_control_flow.rs @@ -0,0 +1,65 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(generators, generator_trait)] + +use std::ops::{GeneratorState, Generator}; + +fn finish<T>(mut amt: usize, mut t: T) -> T::Return + where T: Generator<Yield = ()> +{ + loop { + match t.resume() { + GeneratorState::Yielded(()) => amt -= 1, + GeneratorState::Complete(ret) => { + assert_eq!(amt, 0); + return ret + } + } + } + +} + +fn main() { + finish(1, || yield); + finish(3, || { + let mut x = 0; + yield; + x += 1; + yield; + x += 1; + yield; + assert_eq!(x, 2); + }); + finish(8, || { + for _ in 0..8 { + yield; + } + }); + finish(1, || { + if true { + yield; + } else { + } + }); + finish(1, || { + if false { + } else { + yield; + } + }); + finish(2, || { + if { yield; false } { + yield; + panic!() + } + yield + }); +} diff --git a/src/tools/miri/tests/run-pass/intrinsics-integer.rs b/src/tools/miri/tests/run-pass/intrinsics-integer.rs new file mode 100644 index 0000000000..4896f02da2 --- /dev/null +++ b/src/tools/miri/tests/run-pass/intrinsics-integer.rs @@ -0,0 +1,142 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms.
+ +#![feature(intrinsics)] + +mod rusti { + extern "rust-intrinsic" { + pub fn ctpop(x: T) -> T; + pub fn ctlz(x: T) -> T; + pub fn ctlz_nonzero(x: T) -> T; + pub fn cttz(x: T) -> T; + pub fn cttz_nonzero(x: T) -> T; + pub fn bswap(x: T) -> T; + } +} + +pub fn main() { + unsafe { + use rusti::*; + + assert_eq!(ctpop(0u8), 0); assert_eq!(ctpop(0i8), 0); + assert_eq!(ctpop(0u16), 0); assert_eq!(ctpop(0i16), 0); + assert_eq!(ctpop(0u32), 0); assert_eq!(ctpop(0i32), 0); + assert_eq!(ctpop(0u64), 0); assert_eq!(ctpop(0i64), 0); + + assert_eq!(ctpop(1u8), 1); assert_eq!(ctpop(1i8), 1); + assert_eq!(ctpop(1u16), 1); assert_eq!(ctpop(1i16), 1); + assert_eq!(ctpop(1u32), 1); assert_eq!(ctpop(1i32), 1); + assert_eq!(ctpop(1u64), 1); assert_eq!(ctpop(1i64), 1); + + assert_eq!(ctpop(10u8), 2); assert_eq!(ctpop(10i8), 2); + assert_eq!(ctpop(10u16), 2); assert_eq!(ctpop(10i16), 2); + assert_eq!(ctpop(10u32), 2); assert_eq!(ctpop(10i32), 2); + assert_eq!(ctpop(10u64), 2); assert_eq!(ctpop(10i64), 2); + + assert_eq!(ctpop(100u8), 3); assert_eq!(ctpop(100i8), 3); + assert_eq!(ctpop(100u16), 3); assert_eq!(ctpop(100i16), 3); + assert_eq!(ctpop(100u32), 3); assert_eq!(ctpop(100i32), 3); + assert_eq!(ctpop(100u64), 3); assert_eq!(ctpop(100i64), 3); + + assert_eq!(ctpop(-1i8 as u8), 8); assert_eq!(ctpop(-1i8), 8); + assert_eq!(ctpop(-1i16 as u16), 16); assert_eq!(ctpop(-1i16), 16); + assert_eq!(ctpop(-1i32 as u32), 32); assert_eq!(ctpop(-1i32), 32); + assert_eq!(ctpop(-1i64 as u64), 64); assert_eq!(ctpop(-1i64), 64); + + assert_eq!(ctlz(0u8), 8); assert_eq!(ctlz(0i8), 8); + assert_eq!(ctlz(0u16), 16); assert_eq!(ctlz(0i16), 16); + assert_eq!(ctlz(0u32), 32); assert_eq!(ctlz(0i32), 32); + assert_eq!(ctlz(0u64), 64); assert_eq!(ctlz(0i64), 64); + + assert_eq!(ctlz(1u8), 7); assert_eq!(ctlz(1i8), 7); + assert_eq!(ctlz(1u16), 15); assert_eq!(ctlz(1i16), 15); + assert_eq!(ctlz(1u32), 31); assert_eq!(ctlz(1i32), 31); + assert_eq!(ctlz(1u64), 63); assert_eq!(ctlz(1i64), 63); + + assert_eq!(ctlz(10u8), 4); assert_eq!(ctlz(10i8), 4); + assert_eq!(ctlz(10u16), 12); assert_eq!(ctlz(10i16), 12); + assert_eq!(ctlz(10u32), 28); assert_eq!(ctlz(10i32), 28); + assert_eq!(ctlz(10u64), 60); assert_eq!(ctlz(10i64), 60); + + assert_eq!(ctlz(100u8), 1); assert_eq!(ctlz(100i8), 1); + assert_eq!(ctlz(100u16), 9); assert_eq!(ctlz(100i16), 9); + assert_eq!(ctlz(100u32), 25); assert_eq!(ctlz(100i32), 25); + assert_eq!(ctlz(100u64), 57); assert_eq!(ctlz(100i64), 57); + + assert_eq!(ctlz_nonzero(1u8), 7); assert_eq!(ctlz_nonzero(1i8), 7); + assert_eq!(ctlz_nonzero(1u16), 15); assert_eq!(ctlz_nonzero(1i16), 15); + assert_eq!(ctlz_nonzero(1u32), 31); assert_eq!(ctlz_nonzero(1i32), 31); + assert_eq!(ctlz_nonzero(1u64), 63); assert_eq!(ctlz_nonzero(1i64), 63); + + assert_eq!(ctlz_nonzero(10u8), 4); assert_eq!(ctlz_nonzero(10i8), 4); + assert_eq!(ctlz_nonzero(10u16), 12); assert_eq!(ctlz_nonzero(10i16), 12); + assert_eq!(ctlz_nonzero(10u32), 28); assert_eq!(ctlz_nonzero(10i32), 28); + assert_eq!(ctlz_nonzero(10u64), 60); assert_eq!(ctlz_nonzero(10i64), 60); + + assert_eq!(ctlz_nonzero(100u8), 1); assert_eq!(ctlz_nonzero(100i8), 1); + assert_eq!(ctlz_nonzero(100u16), 9); assert_eq!(ctlz_nonzero(100i16), 9); + assert_eq!(ctlz_nonzero(100u32), 25); assert_eq!(ctlz_nonzero(100i32), 25); + assert_eq!(ctlz_nonzero(100u64), 57); assert_eq!(ctlz_nonzero(100i64), 57); + + assert_eq!(cttz(-1i8 as u8), 0); assert_eq!(cttz(-1i8), 0); + assert_eq!(cttz(-1i16 as u16), 0); assert_eq!(cttz(-1i16), 0); + assert_eq!(cttz(-1i32 as u32), 0); 
assert_eq!(cttz(-1i32), 0); + assert_eq!(cttz(-1i64 as u64), 0); assert_eq!(cttz(-1i64), 0); + + assert_eq!(cttz(0u8), 8); assert_eq!(cttz(0i8), 8); + assert_eq!(cttz(0u16), 16); assert_eq!(cttz(0i16), 16); + assert_eq!(cttz(0u32), 32); assert_eq!(cttz(0i32), 32); + assert_eq!(cttz(0u64), 64); assert_eq!(cttz(0i64), 64); + + assert_eq!(cttz(1u8), 0); assert_eq!(cttz(1i8), 0); + assert_eq!(cttz(1u16), 0); assert_eq!(cttz(1i16), 0); + assert_eq!(cttz(1u32), 0); assert_eq!(cttz(1i32), 0); + assert_eq!(cttz(1u64), 0); assert_eq!(cttz(1i64), 0); + + assert_eq!(cttz(10u8), 1); assert_eq!(cttz(10i8), 1); + assert_eq!(cttz(10u16), 1); assert_eq!(cttz(10i16), 1); + assert_eq!(cttz(10u32), 1); assert_eq!(cttz(10i32), 1); + assert_eq!(cttz(10u64), 1); assert_eq!(cttz(10i64), 1); + + assert_eq!(cttz(100u8), 2); assert_eq!(cttz(100i8), 2); + assert_eq!(cttz(100u16), 2); assert_eq!(cttz(100i16), 2); + assert_eq!(cttz(100u32), 2); assert_eq!(cttz(100i32), 2); + assert_eq!(cttz(100u64), 2); assert_eq!(cttz(100i64), 2); + + assert_eq!(cttz_nonzero(-1i8 as u8), 0); assert_eq!(cttz_nonzero(-1i8), 0); + assert_eq!(cttz_nonzero(-1i16 as u16), 0); assert_eq!(cttz_nonzero(-1i16), 0); + assert_eq!(cttz_nonzero(-1i32 as u32), 0); assert_eq!(cttz_nonzero(-1i32), 0); + assert_eq!(cttz_nonzero(-1i64 as u64), 0); assert_eq!(cttz_nonzero(-1i64), 0); + + assert_eq!(cttz_nonzero(1u8), 0); assert_eq!(cttz_nonzero(1i8), 0); + assert_eq!(cttz_nonzero(1u16), 0); assert_eq!(cttz_nonzero(1i16), 0); + assert_eq!(cttz_nonzero(1u32), 0); assert_eq!(cttz_nonzero(1i32), 0); + assert_eq!(cttz_nonzero(1u64), 0); assert_eq!(cttz_nonzero(1i64), 0); + + assert_eq!(cttz_nonzero(10u8), 1); assert_eq!(cttz_nonzero(10i8), 1); + assert_eq!(cttz_nonzero(10u16), 1); assert_eq!(cttz_nonzero(10i16), 1); + assert_eq!(cttz_nonzero(10u32), 1); assert_eq!(cttz_nonzero(10i32), 1); + assert_eq!(cttz_nonzero(10u64), 1); assert_eq!(cttz_nonzero(10i64), 1); + + assert_eq!(cttz_nonzero(100u8), 2); assert_eq!(cttz_nonzero(100i8), 2); + assert_eq!(cttz_nonzero(100u16), 2); assert_eq!(cttz_nonzero(100i16), 2); + assert_eq!(cttz_nonzero(100u32), 2); assert_eq!(cttz_nonzero(100i32), 2); + assert_eq!(cttz_nonzero(100u64), 2); assert_eq!(cttz_nonzero(100i64), 2); + + assert_eq!(bswap(0x0Au8), 0x0A); // no-op + assert_eq!(bswap(0x0Ai8), 0x0A); // no-op + assert_eq!(bswap(0x0A0Bu16), 0x0B0A); + assert_eq!(bswap(0x0A0Bi16), 0x0B0A); + assert_eq!(bswap(0x0ABBCC0Du32), 0x0DCCBB0A); + assert_eq!(bswap(0x0ABBCC0Di32), 0x0DCCBB0A); + assert_eq!(bswap(0x0122334455667708u64), 0x0877665544332201); + assert_eq!(bswap(0x0122334455667708i64), 0x0877665544332201); + } +} diff --git a/src/tools/miri/tests/run-pass/intrinsics-math.rs b/src/tools/miri/tests/run-pass/intrinsics-math.rs new file mode 100644 index 0000000000..a2c5563474 --- /dev/null +++ b/src/tools/miri/tests/run-pass/intrinsics-math.rs @@ -0,0 +1,67 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +macro_rules! 
assert_approx_eq { + ($a:expr, $b:expr) => ({ + let (a, b) = (&$a, &$b); + assert!((*a - *b).abs() < 1.0e-6, + "{} is not approximately equal to {}", *a, *b); + }) +} + +pub fn main() { + use std::f32; + use std::f64; + + assert_approx_eq!(64f32.sqrt(), 8f32); + assert_approx_eq!(64f64.sqrt(), 8f64); + + assert_approx_eq!(25f32.powi(-2), 0.0016f32); + assert_approx_eq!(23.2f64.powi(2), 538.24f64); + + assert_approx_eq!(0f32.sin(), 0f32); + assert_approx_eq!((f64::consts::PI / 2f64).sin(), 1f64); + + assert_approx_eq!(0f32.cos(), 1f32); + assert_approx_eq!((f64::consts::PI * 2f64).cos(), 1f64); + + assert_approx_eq!(25f32.powf(-2f32), 0.0016f32); + assert_approx_eq!(400f64.powf(0.5f64), 20f64); + + assert_approx_eq!((1f32.exp() - f32::consts::E).abs(), 0f32); + assert_approx_eq!(1f64.exp(), f64::consts::E); + + assert_approx_eq!(10f32.exp2(), 1024f32); + assert_approx_eq!(50f64.exp2(), 1125899906842624f64); + + assert_approx_eq!((f32::consts::E.ln() - 1f32).abs(), 0f32); + assert_approx_eq!(1f64.ln(), 0f64); + + assert_approx_eq!(10f32.log10(), 1f32); + assert_approx_eq!(f64::consts::E.log10(), f64::consts::LOG10_E); + + assert_approx_eq!(8f32.log2(), 3f32); + assert_approx_eq!(f64::consts::E.log2(), f64::consts::LOG2_E); + + assert_approx_eq!(1.0f32.mul_add(2.0f32, 5.0f32), 7.0f32); + assert_approx_eq!(0.0f64.mul_add(-2.0f64, f64::consts::E), f64::consts::E); + + assert_approx_eq!((-1.0f32).abs(), 1.0f32); + assert_approx_eq!(34.2f64.abs(), 34.2f64); + + assert_approx_eq!(3.8f32.floor(), 3.0f32); + assert_approx_eq!((-1.1f64).floor(), -2.0f64); + + assert_approx_eq!((-2.3f32).ceil(), -2.0f32); + assert_approx_eq!(3.8f64.ceil(), 4.0f64); + + assert_approx_eq!(0.1f32.trunc(), 0.0f32); + assert_approx_eq!((-0.1f64).trunc(), 0.0f64); +} diff --git a/src/tools/miri/tests/run-pass/intrinsics.rs b/src/tools/miri/tests/run-pass/intrinsics.rs new file mode 100755 index 0000000000..3152737a60 --- /dev/null +++ b/src/tools/miri/tests/run-pass/intrinsics.rs @@ -0,0 +1,10 @@ +use std::mem::{size_of, size_of_val}; + +fn main() { + assert_eq!(size_of::>(), 8); + assert_eq!(size_of_val(&()), 0); + assert_eq!(size_of_val(&42), 4); + assert_eq!(size_of_val(&[] as &[i32]), 0); + assert_eq!(size_of_val(&[1, 2, 3] as &[i32]), 12); + assert_eq!(size_of_val("foobar"), 6); +} diff --git a/src/tools/miri/tests/run-pass/ints.rs b/src/tools/miri/tests/run-pass/ints.rs new file mode 100644 index 0000000000..4f23b5ec9c --- /dev/null +++ b/src/tools/miri/tests/run-pass/ints.rs @@ -0,0 +1,58 @@ +fn ret() -> i64 { + 1 +} + +fn neg() -> i64 { + -1 +} + +fn add() -> i64 { + 1 + 2 +} + +fn indirect_add() -> i64 { + let x = 1; + let y = 2; + x + y +} + +fn arith() -> i32 { + 3*3 + 4*4 +} + +fn match_int() -> i16 { + let n = 2; + match n { + 0 => 0, + 1 => 10, + 2 => 20, + 3 => 30, + _ => 100, + } +} + +fn match_int_range() -> i64 { + let n = 42; + match n { + 0...9 => 0, + 10...19 => 1, + 20...29 => 2, + 30...39 => 3, + 40...49 => 4, + _ => 5, + } +} + +fn main() { + assert_eq!(ret(), 1); + assert_eq!(neg(), -1); + assert_eq!(add(), 3); + assert_eq!(indirect_add(), 3); + assert_eq!(arith(), 5*5); + assert_eq!(match_int(), 20); + assert_eq!(match_int_range(), 4); + assert_eq!(i64::min_value().overflowing_mul(-1), (i64::min_value(), true)); + assert_eq!(i32::min_value().overflowing_mul(-1), (i32::min_value(), true)); + assert_eq!(i16::min_value().overflowing_mul(-1), (i16::min_value(), true)); + assert_eq!(i8::min_value().overflowing_mul(-1), (i8::min_value(), true)); +} diff --git 
a/src/tools/miri/tests/run-pass/issue-15063.rs b/src/tools/miri/tests/run-pass/issue-15063.rs new file mode 100644 index 0000000000..726aee283e --- /dev/null +++ b/src/tools/miri/tests/run-pass/issue-15063.rs @@ -0,0 +1,20 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(dead_code)] + +enum Two { A, B } +impl Drop for Two { + fn drop(&mut self) { + } +} +fn main() { + let _k = Two::A; +} diff --git a/src/tools/miri/tests/run-pass/issue-15523-big.rs b/src/tools/miri/tests/run-pass/issue-15523-big.rs new file mode 100644 index 0000000000..33c81cab38 --- /dev/null +++ b/src/tools/miri/tests/run-pass/issue-15523-big.rs @@ -0,0 +1,48 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Issue 15523: derive(PartialOrd) should use the provided +// discriminant values for the derived ordering. +// +// This test is checking corner cases that arise when you have +// 64-bit values in the variants. + +#[derive(PartialEq, PartialOrd)] +#[repr(u64)] +enum Eu64 { + Pos2 = 2, + PosMax = !0, + Pos1 = 1, +} + +#[derive(PartialEq, PartialOrd)] +#[repr(i64)] +enum Ei64 { + Pos2 = 2, + Neg1 = -1, + NegMin = 1 << 63, + PosMax = !(1 << 63), + Pos1 = 1, +} + +fn main() { + assert!(Eu64::Pos2 > Eu64::Pos1); + assert!(Eu64::Pos2 < Eu64::PosMax); + assert!(Eu64::Pos1 < Eu64::PosMax); + + + assert!(Ei64::Pos2 > Ei64::Pos1); + assert!(Ei64::Pos2 > Ei64::Neg1); + assert!(Ei64::Pos1 > Ei64::Neg1); + assert!(Ei64::Pos2 > Ei64::NegMin); + assert!(Ei64::Pos1 > Ei64::NegMin); + assert!(Ei64::Pos2 < Ei64::PosMax); + assert!(Ei64::Pos1 < Ei64::PosMax); +} diff --git a/src/tools/miri/tests/run-pass/issue-17877.rs b/src/tools/miri/tests/run-pass/issue-17877.rs new file mode 100644 index 0000000000..b4b74b9905 --- /dev/null +++ b/src/tools/miri/tests/run-pass/issue-17877.rs @@ -0,0 +1,25 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//ignore-msvc + +#![feature(slice_patterns)] + +fn main() { + assert_eq!(match [0u8; 1024] { + _ => 42_usize, + }, 42_usize); + + assert_eq!(match [0u8; 1024] { + [1, _..] => 0_usize, + [0, _..] => 1_usize, + _ => 2_usize + }, 1_usize); +} diff --git a/src/tools/miri/tests/run-pass/issue-20575.rs b/src/tools/miri/tests/run-pass/issue-20575.rs new file mode 100644 index 0000000000..7db7e3b28e --- /dev/null +++ b/src/tools/miri/tests/run-pass/issue-20575.rs @@ -0,0 +1,19 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +// Test that overloaded calls work with zero arity closures + +// pretty-expanded FIXME #23616 + +fn main() { + let functions: [Box Option<()>>; 1] = [Box::new(|| None)]; + + let _: Option> = functions.iter().map(|f| (*f)()).collect(); +} diff --git a/src/tools/miri/tests/run-pass/issue-23261.rs b/src/tools/miri/tests/run-pass/issue-23261.rs new file mode 100644 index 0000000000..fc806f5429 --- /dev/null +++ b/src/tools/miri/tests/run-pass/issue-23261.rs @@ -0,0 +1,70 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Matching on a DST struct should not trigger an LLVM assertion. + +struct Foo { + a: i32, + inner: T +} + +trait Get { + fn get(&self) -> i32; +} + +impl Get for i32 { + fn get(&self) -> i32 { + *self + } +} + +fn check_val(val: &Foo<[u8]>) { + match *val { + Foo { a, .. } => { + assert_eq!(a, 32); + } + } +} + +fn check_dst_val(val: &Foo<[u8]>) { + match *val { + Foo { ref inner, .. } => { + assert_eq!(inner, [1, 2, 3]); + } + } +} + +fn check_both(val: &Foo<[u8]>) { + match *val { + Foo { a, ref inner } => { + assert_eq!(a, 32); + assert_eq!(inner, [1, 2, 3]); + } + } +} + +fn check_trait_obj(val: &Foo) { + match *val { + Foo { a, ref inner } => { + assert_eq!(a, 32); + assert_eq!(inner.get(), 32); + } + } +} + +fn main() { + let foo: &Foo<[u8]> = &Foo { a: 32, inner: [1, 2, 3] }; + check_val(foo); + check_dst_val(foo); + check_both(foo); + + let foo: &Foo = &Foo { a: 32, inner: 32 }; + check_trait_obj(foo); +} diff --git a/src/tools/miri/tests/run-pass/issue-26709.rs b/src/tools/miri/tests/run-pass/issue-26709.rs new file mode 100644 index 0000000000..62626d7586 --- /dev/null +++ b/src/tools/miri/tests/run-pass/issue-26709.rs @@ -0,0 +1,26 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +struct Wrapper<'a, T: ?Sized>(&'a mut i32, T); + +impl<'a, T: ?Sized> Drop for Wrapper<'a, T> { + fn drop(&mut self) { + *self.0 = 432; + } +} + +fn main() { + let mut x = 0; + { + let wrapper = Box::new(Wrapper(&mut x, 123)); + let _: Box> = wrapper; + } + assert_eq!(432, x) +} diff --git a/src/tools/miri/tests/run-pass/issue-27901.rs b/src/tools/miri/tests/run-pass/issue-27901.rs new file mode 100644 index 0000000000..b7a9daaf8a --- /dev/null +++ b/src/tools/miri/tests/run-pass/issue-27901.rs @@ -0,0 +1,20 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +trait Stream { type Item; } +impl<'a> Stream for &'a str { type Item = u8; } +fn f<'s>(s: &'s str) -> (&'s str, <&'s str as Stream>::Item) { + (s, 42) +} + +fn main() { + let fx = f as for<'t> fn(&'t str) -> (&'t str, <&'t str as Stream>::Item); + assert_eq!(fx("hi"), ("hi", 42)); +} diff --git a/src/tools/miri/tests/run-pass/issue-29746.rs b/src/tools/miri/tests/run-pass/issue-29746.rs new file mode 100644 index 0000000000..61c601ac6a --- /dev/null +++ b/src/tools/miri/tests/run-pass/issue-29746.rs @@ -0,0 +1,45 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// zip!(a1,a2,a3,a4) is equivalent to: +// a1.zip(a2).zip(a3).zip(a4).map(|(((x1,x2),x3),x4)| (x1,x2,x3,x4)) +macro_rules! zip { + // Entry point + ([$a:expr, $b:expr, $($rest:expr),*]) => { + zip!([$($rest),*], $a.zip($b), (x,y), [x,y]) + }; + + // Intermediate steps to build the zipped expression, the match pattern, and + // and the output tuple of the closure, using macro hygene to repeatedly + // introduce new variables named 'x'. + ([$a:expr, $($rest:expr),*], $zip:expr, $pat:pat, [$($flat:expr),*]) => { + zip!([$($rest),*], $zip.zip($a), ($pat,x), [$($flat),*, x]) + }; + + // Final step + ([], $zip:expr, $pat:pat, [$($flat:expr),+]) => { + $zip.map(|$pat| ($($flat),+)) + }; + + // Comma + ([$a:expr], $zip:expr, $pat:pat, [$($flat:expr),*]) => { + zip!([$a,], $zip, $pat, [$($flat),*]) + }; +} + +fn main() { + let p1 = vec![1i32, 2].into_iter(); + let p2 = vec!["10", "20"].into_iter(); + let p3 = vec![100u16, 200].into_iter(); + let p4 = vec![1000i64, 2000].into_iter(); + + let e = zip!([p1,p2,p3,p4]).collect::>(); + assert_eq!(e[0], (1i32,"10",100u16,1000i64)); +} diff --git a/src/tools/miri/tests/run-pass/issue-30530.rs b/src/tools/miri/tests/run-pass/issue-30530.rs new file mode 100644 index 0000000000..d5139c908b --- /dev/null +++ b/src/tools/miri/tests/run-pass/issue-30530.rs @@ -0,0 +1,35 @@ +// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Regression test for Issue #30530: alloca's created for storing +// intermediate scratch values during brace-less match arms need to be +// initialized with their drop-flag set to "dropped" (or else we end +// up running the destructors on garbage data at the end of the +// function). + +pub enum Handler { + Default, + #[allow(dead_code)] + Custom(*mut Box), +} + +fn main() { + take(Handler::Default, Box::new(main)); +} + +#[inline(never)] +pub fn take(h: Handler, f: Box) -> Box { + unsafe { + match h { + Handler::Custom(ptr) => *Box::from_raw(ptr), + Handler::Default => f, + } + } +} diff --git a/src/tools/miri/tests/run-pass/issue-31267-additional.rs b/src/tools/miri/tests/run-pass/issue-31267-additional.rs new file mode 100644 index 0000000000..14e38f43c5 --- /dev/null +++ b/src/tools/miri/tests/run-pass/issue-31267-additional.rs @@ -0,0 +1,29 @@ +// Copyright 2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(unused_variables)] + +#[derive(Clone, Copy, Debug)] +struct Bar; + +const BAZ: Bar = Bar; + +#[derive(Debug)] +struct Foo([Bar; 1]); + +struct Biz; + +impl Biz { + const BAZ: Foo = Foo([BAZ; 1]); +} + +fn main() { + let foo = Biz::BAZ; +} diff --git a/src/tools/miri/tests/run-pass/issue-33387.rs b/src/tools/miri/tests/run-pass/issue-33387.rs new file mode 100644 index 0000000000..edbf2b81ce --- /dev/null +++ b/src/tools/miri/tests/run-pass/issue-33387.rs @@ -0,0 +1,19 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::sync::Arc; + +trait Foo {} + +impl Foo for [u8; 2] {} + +fn main() { + let _: Arc<Foo> = Arc::new([3, 4]); +} diff --git a/src/tools/miri/tests/run-pass/issue-34571.rs b/src/tools/miri/tests/run-pass/issue-34571.rs new file mode 100644 index 0000000000..7d80415657 --- /dev/null +++ b/src/tools/miri/tests/run-pass/issue-34571.rs @@ -0,0 +1,20 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#[repr(u8)] +enum Foo { + Foo(u8), +} + +fn main() { + match Foo::Foo(1) { + _ => () + } +} diff --git a/src/tools/miri/tests/run-pass/issue-35815.rs b/src/tools/miri/tests/run-pass/issue-35815.rs new file mode 100644 index 0000000000..216e06c073 --- /dev/null +++ b/src/tools/miri/tests/run-pass/issue-35815.rs @@ -0,0 +1,25 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(dead_code)] + +use std::mem; + +struct Foo<T: ?Sized> { + a: i64, + b: bool, + c: T, +} + +fn main() { + let foo: &Foo<i32> = &Foo { a: 1, b: false, c: 2i32 }; + let foo_unsized: &Foo<Send> = foo; + assert_eq!(mem::size_of_val(foo), mem::size_of_val(foo_unsized)); +} diff --git a/src/tools/miri/tests/run-pass/issue-36278-prefix-nesting.rs b/src/tools/miri/tests/run-pass/issue-36278-prefix-nesting.rs new file mode 100644 index 0000000000..95269d0569 --- /dev/null +++ b/src/tools/miri/tests/run-pass/issue-36278-prefix-nesting.rs @@ -0,0 +1,28 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms.
+ +// Issue 36278: On an unsized struct with >1 level of nontrivial +// nesting, ensure we are computing dynamic size of prefix correctly. + +use std::mem; + +const SZ: usize = 100; +struct P<T: ?Sized>([u8; SZ], T); + +type Ack<T> = P<P<T>>; + +fn main() { + let size_of_sized; let size_of_unsized; + let x: Box<Ack<[u8; 0]>> = Box::new(P([0; SZ], P([0; SZ], [0; 0]))); + size_of_sized = mem::size_of_val::<Ack<[u8; 0]>>(&x); + let y: Box<Ack<[u8]>> = x; + size_of_unsized = mem::size_of_val::<Ack<[u8]>>(&y); + assert_eq!(size_of_sized, size_of_unsized); +} diff --git a/src/tools/miri/tests/run-pass/issue-5917.rs b/src/tools/miri/tests/run-pass/issue-5917.rs new file mode 100644 index 0000000000..69b95f2cd7 --- /dev/null +++ b/src/tools/miri/tests/run-pass/issue-5917.rs @@ -0,0 +1,17 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + + +struct T (&'static [isize]); +static STATIC : T = T (&[5, 4, 3]); +pub fn main () { + let T(ref v) = STATIC; + assert_eq!(v[0], 5); +} diff --git a/src/tools/miri/tests/run-pass/issue-miri-184.rs b/src/tools/miri/tests/run-pass/issue-miri-184.rs new file mode 100644 index 0000000000..24775fe8a2 --- /dev/null +++ b/src/tools/miri/tests/run-pass/issue-miri-184.rs @@ -0,0 +1,4 @@ +pub fn main() { + let bytes: [u8; 8] = unsafe { ::std::mem::transmute(0u64) }; + let _: &[u8] = &bytes; +} diff --git a/src/tools/miri/tests/run-pass/iter_slice.rs b/src/tools/miri/tests/run-pass/iter_slice.rs new file mode 100644 index 0000000000..fd7229c345 --- /dev/null +++ b/src/tools/miri/tests/run-pass/iter_slice.rs @@ -0,0 +1,12 @@ +fn main() { + for _ in Vec::<i32>::new().iter() { // this iterates over a Unique::empty() + panic!("We should never be here."); + } + + // Iterate over a ZST (uses arith_offset internally) + let mut count = 0; + for _ in &[(), (), ()] { + count += 1; + } + assert_eq!(count, 3); +} diff --git a/src/tools/miri/tests/run-pass/last-use-in-cap-clause.rs b/src/tools/miri/tests/run-pass/last-use-in-cap-clause.rs new file mode 100644 index 0000000000..de2d815ca5 --- /dev/null +++ b/src/tools/miri/tests/run-pass/last-use-in-cap-clause.rs @@ -0,0 +1,25 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms.
+ +// Make sure #1399 stays fixed + +#[allow(dead_code)] +struct A { a: Box } + +fn foo() -> Box isize + 'static> { + let k: Box<_> = Box::new(22); + let _u = A {a: k.clone()}; + let result = || 22; + Box::new(result) +} + +pub fn main() { + assert_eq!(foo()(), 22); +} diff --git a/src/tools/miri/tests/run-pass/loops.rs b/src/tools/miri/tests/run-pass/loops.rs new file mode 100644 index 0000000000..222287cbe0 --- /dev/null +++ b/src/tools/miri/tests/run-pass/loops.rs @@ -0,0 +1,35 @@ +fn factorial_loop() -> i64 { + let mut product = 1; + let mut i = 1; + + while i <= 10 { + product *= i; + i += 1; + } + + product +} + +fn index_for_loop() -> usize { + let mut sum = 0; + let a = [0, 10, 20, 30]; + for i in 0..a.len() { + sum += a[i]; + } + sum +} + +fn for_loop() -> usize { + let mut sum = 0; + let a = [0, 10, 20, 30]; + for &n in &a { + sum += n; + } + sum +} + +fn main() { + assert_eq!(factorial_loop(), 3628800); + assert_eq!(index_for_loop(), 60); + assert_eq!(for_loop(), 60); +} diff --git a/src/tools/miri/tests/run-pass/main_fn.rs b/src/tools/miri/tests/run-pass/main_fn.rs new file mode 100644 index 0000000000..91d183ee6a --- /dev/null +++ b/src/tools/miri/tests/run-pass/main_fn.rs @@ -0,0 +1,5 @@ +#![feature(main)] + +#[main] +fn foo() { +} diff --git a/src/tools/miri/tests/run-pass/many_shr_bor.rs b/src/tools/miri/tests/run-pass/many_shr_bor.rs new file mode 100644 index 0000000000..393bafebfe --- /dev/null +++ b/src/tools/miri/tests/run-pass/many_shr_bor.rs @@ -0,0 +1,36 @@ +// Make sure validation can handle many overlapping shared borrows for different parts of a data structure +#![allow(unused_variables)] +use std::cell::RefCell; + +struct Test { + a: u32, + b: u32, +} + +fn test1() { + let t = &mut Test { a: 0, b: 0 }; + { + let x; + { + let y = &t.a; + x = &t; + let _y = *y; + } + let _x = x.a; + } + t.b = 42; +} + +fn test2(r: &mut RefCell) { + let x = &*r; // releasing write lock, first suspension recorded + let mut x_ref = x.borrow_mut(); + let x_inner : &mut i32 = &mut *x_ref; // new inner write lock, with same lifetime as outer lock + let x_inner_shr = &*x_inner; // releasing inner write lock, recording suspension + let y = &*r; // second suspension for the outer write lock + let x_inner_shr2 = &*x_inner; // 2nd suspension for inner write lock +} + +fn main() { + test1(); + test2(&mut RefCell::new(0)); +} diff --git a/src/tools/miri/tests/run-pass/match_slice.rs b/src/tools/miri/tests/run-pass/match_slice.rs new file mode 100644 index 0000000000..568a1a1c88 --- /dev/null +++ b/src/tools/miri/tests/run-pass/match_slice.rs @@ -0,0 +1,8 @@ +fn main() { + let x = "hello"; + match x { + "foo" => {}, + "bar" => {}, + _ => {}, + } +} diff --git a/src/tools/miri/tests/run-pass/mir_coercions.rs b/src/tools/miri/tests/run-pass/mir_coercions.rs new file mode 100644 index 0000000000..36155297e3 --- /dev/null +++ b/src/tools/miri/tests/run-pass/mir_coercions.rs @@ -0,0 +1,80 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![feature(coerce_unsized, unsize)] + +use std::ops::CoerceUnsized; +use std::marker::Unsize; + +fn identity_coercion(x: &(Fn(u32)->u32 + Send)) -> &Fn(u32)->u32 { + x +} +fn fn_coercions(f: &fn(u32) -> u32) -> + (unsafe fn(u32) -> u32, + &(Fn(u32) -> u32+Send)) +{ + (*f, f) +} + +fn simple_array_coercion(x: &[u8; 3]) -> &[u8] { x } + +fn square(a: u32) -> u32 { a * a } + +#[derive(PartialEq,Eq)] +struct PtrWrapper<'a, T: 'a+?Sized>(u32, u32, (), &'a T); +impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> + CoerceUnsized<PtrWrapper<'a, U>> for PtrWrapper<'a, T> {} + +struct TrivPtrWrapper<'a, T: 'a+?Sized>(&'a T); +impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> + CoerceUnsized<TrivPtrWrapper<'a, U>> for TrivPtrWrapper<'a, T> {} + +fn coerce_ptr_wrapper(p: PtrWrapper<[u8; 3]>) -> PtrWrapper<[u8]> { + p +} + +fn coerce_triv_ptr_wrapper(p: TrivPtrWrapper<[u8; 3]>) -> TrivPtrWrapper<[u8]> { + p +} + +fn coerce_fat_ptr_wrapper(p: PtrWrapper<Fn(u32) -> u32+Send>) + -> PtrWrapper<Fn(u32) -> u32> { + p +} + +fn coerce_ptr_wrapper_poly<'a, T, Trait: ?Sized>(p: PtrWrapper<'a, T>) + -> PtrWrapper<'a, Trait> + where PtrWrapper<'a, T>: CoerceUnsized<PtrWrapper<'a, Trait>> +{ + p +} + +fn main() { + let a = [0,1,2]; + let square_local : fn(u32) -> u32 = square; + let (f,g) = fn_coercions(&square_local); + assert_eq!(f as *const (), square as *const()); + assert_eq!(g(4), 16); + assert_eq!(identity_coercion(g)(5), 25); + + assert_eq!(simple_array_coercion(&a), &a); + let w = coerce_ptr_wrapper(PtrWrapper(2,3,(),&a)); + assert!(w == PtrWrapper(2,3,(),&a) as PtrWrapper<[u8]>); + + let w = coerce_triv_ptr_wrapper(TrivPtrWrapper(&a)); + assert_eq!(&w.0, &a); + + let z = coerce_fat_ptr_wrapper(PtrWrapper(2,3,(),&square_local)); + assert_eq!((z.3)(6), 36); + + let z: PtrWrapper<Fn(u32) -> u32> = + coerce_ptr_wrapper_poly(PtrWrapper(2,3,(),&square_local)); + assert_eq!((z.3)(6), 36); +} diff --git a/src/tools/miri/tests/run-pass/mir_fat_ptr.rs b/src/tools/miri/tests/run-pass/mir_fat_ptr.rs new file mode 100644 index 0000000000..e5c9e3577d --- /dev/null +++ b/src/tools/miri/tests/run-pass/mir_fat_ptr.rs @@ -0,0 +1,61 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// test that ordinary fat pointer operations work.
+ +struct Wrapper<T: ?Sized>(u32, T); + +struct FatPtrContainer<'a> { + ptr: &'a [u8] +} + +fn fat_ptr_project(a: &Wrapper<[u8]>) -> &[u8] { + &a.1 +} + +fn fat_ptr_simple(a: &[u8]) -> &[u8] { + a +} + +fn fat_ptr_via_local(a: &[u8]) -> &[u8] { + let x = a; + x +} + +fn fat_ptr_from_struct(s: FatPtrContainer) -> &[u8] { + s.ptr +} + +fn fat_ptr_to_struct(a: &[u8]) -> FatPtrContainer { + FatPtrContainer { ptr: a } +} + +fn fat_ptr_store_to<'a>(a: &'a [u8], b: &mut &'a [u8]) { + *b = a; +} + +fn fat_ptr_constant() -> &'static str { + "HELLO" +} + +fn main() { + let a = Wrapper(4, [7,6,5]); + + let p = fat_ptr_project(&a); + let p = fat_ptr_simple(p); + let p = fat_ptr_via_local(p); + let p = fat_ptr_from_struct(fat_ptr_to_struct(p)); + + let mut target : &[u8] = &[42]; + fat_ptr_store_to(p, &mut target); + assert_eq!(target, &a.1); + + assert_eq!(fat_ptr_constant(), "HELLO"); +} diff --git a/src/tools/miri/tests/run-pass/miri-issue-133.rs b/src/tools/miri/tests/run-pass/miri-issue-133.rs new file mode 100644 index 0000000000..406b5e102c --- /dev/null +++ b/src/tools/miri/tests/run-pass/miri-issue-133.rs @@ -0,0 +1,30 @@ +use std::mem::size_of; + +struct S<U, V> { + _u: U, + size_of_u: usize, + _v: V, + size_of_v: usize +} + +impl<U, V> S<U, V> { + fn new(u: U, v: V) -> Self { + S { + _u: u, + size_of_u: size_of::<U>(), + _v: v, + size_of_v: size_of::<V>() + } + } +} + +impl<U, V> Drop for S<U, V> { + fn drop(&mut self) { + assert_eq!(size_of::<U>(), self.size_of_u); + assert_eq!(size_of::<V>(), self.size_of_v); + } +} + +fn main() { + S::new(0u8, 1u16); +} diff --git a/src/tools/miri/tests/run-pass/move-arg-3-unique.rs b/src/tools/miri/tests/run-pass/move-arg-3-unique.rs new file mode 100644 index 0000000000..2e6320eb80 --- /dev/null +++ b/src/tools/miri/tests/run-pass/move-arg-3-unique.rs @@ -0,0 +1,18 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms.
+ +#![allow(unused_features, unused_variables)] +#![feature(box_syntax)] + +pub fn main() { + let x = box 10; + let y = x; + assert_eq!(*y, 10); +} diff --git a/src/tools/miri/tests/run-pass/move-undef-primval.rs b/src/tools/miri/tests/run-pass/move-undef-primval.rs new file mode 100644 index 0000000000..2c18c2d368 --- /dev/null +++ b/src/tools/miri/tests/run-pass/move-undef-primval.rs @@ -0,0 +1,15 @@ +// Moving around undef is not allowed by validation +// compile-flags: -Zmir-emit-validate=0 + +struct Foo { + _inner: i32, +} + +fn main() { + unsafe { + let foo = Foo { + _inner: std::mem::uninitialized(), + }; + let _bar = foo; + } +} diff --git a/src/tools/miri/tests/run-pass/multi_arg_closure.rs b/src/tools/miri/tests/run-pass/multi_arg_closure.rs new file mode 100644 index 0000000000..30cfb5b685 --- /dev/null +++ b/src/tools/miri/tests/run-pass/multi_arg_closure.rs @@ -0,0 +1,8 @@ +fn foo(f: &mut FnMut(isize, isize) -> isize) -> isize { + f(1, 2) +} + +fn main() { + let z = foo(&mut |x, y| x * 10 + y); + assert_eq!(z, 12); +} diff --git a/src/tools/miri/tests/run-pass/negative_discriminant.rs b/src/tools/miri/tests/run-pass/negative_discriminant.rs new file mode 100644 index 0000000000..16f175e7df --- /dev/null +++ b/src/tools/miri/tests/run-pass/negative_discriminant.rs @@ -0,0 +1,13 @@ +enum AB { A = -1, B = 1 } + +fn main() { + match AB::A { + AB::A => (), + AB::B => panic!(), + } + + match AB::B { + AB::A => panic!(), + AB::B => (), + } +} diff --git a/src/tools/miri/tests/run-pass/non_capture_closure_to_fn_ptr.rs b/src/tools/miri/tests/run-pass/non_capture_closure_to_fn_ptr.rs new file mode 100644 index 0000000000..c9daff9c9f --- /dev/null +++ b/src/tools/miri/tests/run-pass/non_capture_closure_to_fn_ptr.rs @@ -0,0 +1,14 @@ +// allow(const_err) to work around a bug in warnings +#[allow(const_err)] +static FOO: fn() = || { assert_ne!(42, 43) }; +#[allow(const_err)] +static BAR: fn(i32, i32) = |a, b| { assert_ne!(a, b) }; + +fn main() { + FOO(); + BAR(44, 45); + let bar: unsafe fn(i32, i32) = BAR; + unsafe { bar(46, 47) }; + let boo: &Fn(i32, i32) = &BAR; + boo(48, 49); +} diff --git a/src/tools/miri/tests/run-pass/observed_local_mut.rs b/src/tools/miri/tests/run-pass/observed_local_mut.rs new file mode 100644 index 0000000000..a4ecf1e635 --- /dev/null +++ b/src/tools/miri/tests/run-pass/observed_local_mut.rs @@ -0,0 +1,21 @@ +// This test is intended to guard against the problem described in commit +// 39bb1254d1eaf74f45a4e741097e33fc942168d5. +// +// As written, it might be considered UB in compiled Rust, but of course Miri gives it a safe, +// deterministic behaviour (one that might not correspond with how an eventual Rust spec would +// defined this). +// +// An alternative way to write the test without `unsafe` would be to use `Cell`, but it would +// only surface the bug described by the above commit if `Cell` on the stack got represented +// as a primitive `PrimVal::I32` which is not yet the case. + +fn main() { + let mut x = 0; + let y: *const i32 = &x; + x = 1; + + // When the described bug is in place, this results in `0`, not observing the `x = 1` line. + assert_eq!(unsafe { *y }, 1); + + assert_eq!(x, 1); +} diff --git a/src/tools/miri/tests/run-pass/option_box_transmute_ptr.rs b/src/tools/miri/tests/run-pass/option_box_transmute_ptr.rs new file mode 100644 index 0000000000..0786db1ef8 --- /dev/null +++ b/src/tools/miri/tests/run-pass/option_box_transmute_ptr.rs @@ -0,0 +1,15 @@ +// This tests that the size of Option> is the same as *const i32. 
+fn option_box_deref() -> i32 { + let val = Some(Box::new(42)); + unsafe { + let ptr: *const i32 = std::mem::transmute::>, *const i32>(val); + let ret = *ptr; + // unleak memory + std::mem::transmute::<*const i32, Option>>(ptr); + ret + } +} + +fn main() { + assert_eq!(option_box_deref(), 42); +} diff --git a/src/tools/miri/tests/run-pass/option_eq.rs b/src/tools/miri/tests/run-pass/option_eq.rs new file mode 100644 index 0000000000..e698f87677 --- /dev/null +++ b/src/tools/miri/tests/run-pass/option_eq.rs @@ -0,0 +1,3 @@ +fn main() { + assert_eq!(std::char::from_u32('x' as u32), Some('x')); +} diff --git a/src/tools/miri/tests/run-pass/overloaded-calls-simple.rs b/src/tools/miri/tests/run-pass/overloaded-calls-simple.rs new file mode 100644 index 0000000000..1eeda12ca0 --- /dev/null +++ b/src/tools/miri/tests/run-pass/overloaded-calls-simple.rs @@ -0,0 +1,33 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + + +#![feature(lang_items, unboxed_closures, fn_traits)] + +struct S3 { + x: i32, + y: i32, +} + +impl FnOnce<(i32,i32)> for S3 { + type Output = i32; + extern "rust-call" fn call_once(self, (z,zz): (i32,i32)) -> i32 { + self.x * self.y * z * zz + } +} + +fn main() { + let s = S3 { + x: 3, + y: 3, + }; + let ans = s(3, 1); + assert_eq!(ans, 27); +} diff --git a/src/tools/miri/tests/run-pass/packed_static.rs b/src/tools/miri/tests/run-pass/packed_static.rs new file mode 100644 index 0000000000..1fa3a36967 --- /dev/null +++ b/src/tools/miri/tests/run-pass/packed_static.rs @@ -0,0 +1,10 @@ +#[repr(packed)] +struct Foo { + i: i32 +} + +fn main() { + assert_eq!({FOO.i}, 42); +} + +static FOO: Foo = Foo { i: 42 }; diff --git a/src/tools/miri/tests/run-pass/packed_struct.rs b/src/tools/miri/tests/run-pass/packed_struct.rs new file mode 100644 index 0000000000..e0387a5f40 --- /dev/null +++ b/src/tools/miri/tests/run-pass/packed_struct.rs @@ -0,0 +1,69 @@ +// FIXME: We have to disable this, force_allocation fails. +// TODO: I think this can be triggered even without validation. 
+// compile-flags: -Zmir-emit-validate=0 +#![allow(dead_code)] +#![feature(unsize, coerce_unsized)] + +#[repr(packed)] +struct S { + a: i32, + b: i64, +} + +#[repr(packed)] +struct Test1<'a> { + x: u8, + other: &'a u32, +} + +#[repr(packed)] +struct Test2<'a> { + x: u8, + other: &'a Test1<'a>, +} + +fn test(t: Test2) { + let x = *t.other.other; + assert_eq!(x, 42); +} + +fn test_unsizing() { + #[repr(packed)] + struct UnalignedPtr<'a, T: ?Sized> + where T: 'a, + { + data: &'a T, + } + + impl<'a, T, U> std::ops::CoerceUnsized> for UnalignedPtr<'a, T> + where + T: std::marker::Unsize + ?Sized, + U: ?Sized, + { } + + let arr = [1, 2, 3]; + let arr_unaligned: UnalignedPtr<[i32; 3]> = UnalignedPtr { data: &arr }; + let arr_unaligned: UnalignedPtr<[i32]> = arr_unaligned; + let _unused = &arr_unaligned; // forcing an allocation, which could also yield "unaligned write"-errors +} + +fn main() { + let mut x = S { + a: 42, + b: 99, + }; + let a = x.a; + let b = x.b; + assert_eq!(a, 42); + assert_eq!(b, 99); + // can't do `assert_eq!(x.a, 42)`, because `assert_eq!` takes a reference + assert_eq!({x.a}, 42); + assert_eq!({x.b}, 99); + + x.b = 77; + assert_eq!({x.b}, 77); + + test(Test2 { x: 0, other: &Test1 { x: 0, other: &42 }}); + + test_unsizing(); +} diff --git a/src/tools/miri/tests/run-pass/pointers.rs b/src/tools/miri/tests/run-pass/pointers.rs new file mode 100644 index 0000000000..f3ae3ab913 --- /dev/null +++ b/src/tools/miri/tests/run-pass/pointers.rs @@ -0,0 +1,60 @@ +fn one_line_ref() -> i16 { + *&1 +} + +fn basic_ref() -> i16 { + let x = &1; + *x +} + +fn basic_ref_mut() -> i16 { + let x = &mut 1; + *x += 2; + *x +} + +fn basic_ref_mut_var() -> i16 { + let mut a = 1; + { + let x = &mut a; + *x += 2; + } + a +} + +fn tuple_ref_mut() -> (i8, i8) { + let mut t = (10, 20); + { + let x = &mut t.1; + *x += 2; + } + t +} + +fn match_ref_mut() -> i8 { + let mut t = (20, 22); + { + let opt = Some(&mut t); + match opt { + Some(&mut (ref mut x, ref mut y)) => *x += *y, + None => {}, + } + } + t.0 +} + +fn dangling_pointer() -> *const i32 { + let b = Box::new(42); + &*b as *const i32 +} + +fn main() { + assert_eq!(one_line_ref(), 1); + assert_eq!(basic_ref(), 1); + assert_eq!(basic_ref_mut(), 3); + assert_eq!(basic_ref_mut_var(), 3); + assert_eq!(tuple_ref_mut(), (10, 22)); + assert_eq!(match_ref_mut(), 42); + // FIXME: improve this test... how? 
+ assert!(dangling_pointer() != std::ptr::null()); +} diff --git a/src/tools/miri/tests/run-pass/products.rs b/src/tools/miri/tests/run-pass/products.rs new file mode 100644 index 0000000000..86bb71a0be --- /dev/null +++ b/src/tools/miri/tests/run-pass/products.rs @@ -0,0 +1,32 @@ +fn tuple() -> (i16,) { + (1,) +} + +fn tuple_2() -> (i16, i16) { + (1, 2) +} + +fn tuple_5() -> (i16, i16, i16, i16, i16) { + (1, 2, 3, 4, 5) +} + +#[derive(Debug, PartialEq)] +struct Pair { x: i8, y: i8 } + +fn pair() -> Pair { + Pair { x: 10, y: 20 } +} + +fn field_access() -> (i8, i8) { + let mut p = Pair { x: 10, y: 20 }; + p.x += 5; + (p.x, p.y) +} + +fn main() { + assert_eq!(tuple(), (1,)); + assert_eq!(tuple_2(), (1, 2)); + assert_eq!(tuple_5(), (1, 2, 3, 4, 5)); + assert_eq!(pair(), Pair { x: 10, y: 20} ); + assert_eq!(field_access(), (15, 20)); +} diff --git a/src/tools/miri/tests/run-pass/ptr_arith_offset.rs b/src/tools/miri/tests/run-pass/ptr_arith_offset.rs new file mode 100644 index 0000000000..7912da9fd4 --- /dev/null +++ b/src/tools/miri/tests/run-pass/ptr_arith_offset.rs @@ -0,0 +1,6 @@ +fn main() { + let v = [1i16, 2]; + let x = &v as *const i16; + let x = x.wrapping_offset(1); + assert_eq!(unsafe { *x }, 2); +} diff --git a/src/tools/miri/tests/run-pass/ptr_arith_offset_overflow.rs b/src/tools/miri/tests/run-pass/ptr_arith_offset_overflow.rs new file mode 100644 index 0000000000..3383c3b801 --- /dev/null +++ b/src/tools/miri/tests/run-pass/ptr_arith_offset_overflow.rs @@ -0,0 +1,9 @@ +fn main() { + let v = [1i16, 2]; + let x = &v[1] as *const i16; + // Adding 2*isize::max and then 1 is like substracting 1 + let x = x.wrapping_offset(isize::max_value()); + let x = x.wrapping_offset(isize::max_value()); + let x = x.wrapping_offset(1); + assert_eq!(unsafe { *x }, 1); +} diff --git a/src/tools/miri/tests/run-pass/ptr_int_casts.rs b/src/tools/miri/tests/run-pass/ptr_int_casts.rs new file mode 100644 index 0000000000..b1b0626305 --- /dev/null +++ b/src/tools/miri/tests/run-pass/ptr_int_casts.rs @@ -0,0 +1,35 @@ +use std::mem; + +fn eq_ref(x: &T, y: &T) -> bool { + x as *const _ == y as *const _ +} + +fn f() -> i32 { 42 } + +fn main() { + // int-ptr-int + assert_eq!(1 as *const i32 as usize, 1); + assert_eq!((1 as *const i32).wrapping_offset(4) as usize, 1 + 4*4); + + { // ptr-int-ptr + let x = 13; + let mut y = &x as &_ as *const _ as usize; + y += 13; + y -= 13; + let y = y as *const _; + assert!(eq_ref(&x, unsafe { &*y })); + } + + { // fnptr-int-fnptr + let x : fn() -> i32 = f; + let y : *mut u8 = unsafe { mem::transmute(x as fn() -> i32) }; + let mut y = y as usize; + y += 13; + y -= 13; + let x : fn() -> i32 = unsafe { mem::transmute(y as *mut u8) }; + assert_eq!(x(), 42); + } + + // involving types other than usize + assert_eq!((-1i32) as usize as *const i32 as usize, (-1i32) as usize); +} diff --git a/src/tools/miri/tests/run-pass/ptr_offset.rs b/src/tools/miri/tests/run-pass/ptr_offset.rs new file mode 100644 index 0000000000..6add5212db --- /dev/null +++ b/src/tools/miri/tests/run-pass/ptr_offset.rs @@ -0,0 +1,6 @@ +fn main() { + let v = [1i16, 2]; + let x = &v as *const i16; + let x = unsafe { x.offset(1) }; + assert_eq!(unsafe { *x }, 2); +} diff --git a/src/tools/miri/tests/run-pass/rc.rs b/src/tools/miri/tests/run-pass/rc.rs new file mode 100644 index 0000000000..0bf7075031 --- /dev/null +++ b/src/tools/miri/tests/run-pass/rc.rs @@ -0,0 +1,39 @@ +use std::cell::RefCell; +use std::rc::Rc; + +fn rc_refcell() { + let r = Rc::new(RefCell::new(42)); + *r.borrow_mut() += 10; + let x = 
*r.borrow(); + assert_eq!(x, 52); +} + +fn rc_raw() { + let r = Rc::new(0); + let r2 = Rc::into_raw(r.clone()); + let r2 = unsafe { Rc::from_raw(r2) }; + assert!(Rc::ptr_eq(&r, &r2)); + drop(r); + assert!(Rc::try_unwrap(r2).is_ok()); +} + +// Make sure this Rc doesn't fall apart when touched +fn check_unique_rc(mut r: Rc) { + let r2 = r.clone(); + assert!(Rc::get_mut(&mut r).is_none()); + drop(r2); + assert!(Rc::get_mut(&mut r).is_some()); +} + +fn rc_from() { + check_unique_rc::<[_]>(Rc::from(&[1,2,3] as &[_])); + check_unique_rc::<[_]>(Rc::from(vec![1,2,3])); + check_unique_rc::<[_]>(Rc::from(Box::new([1,2,3]) as Box<[_]>)); + check_unique_rc::(Rc::from("Hello, World!")); +} + +fn main() { + rc_refcell(); + rc_raw(); + rc_from(); +} diff --git a/src/tools/miri/tests/run-pass/recursive_static.rs b/src/tools/miri/tests/run-pass/recursive_static.rs new file mode 100644 index 0000000000..d259ca6361 --- /dev/null +++ b/src/tools/miri/tests/run-pass/recursive_static.rs @@ -0,0 +1,12 @@ +// FIXME: Disable validation until we figure out how to handle recursive statics. +// compile-flags: -Zmir-emit-validate=0 + +struct S(&'static S); +static S1: S = S(&S2); +static S2: S = S(&S1); + +fn main() { + let p: *const S = S2.0; + let q: *const S = &S1; + assert_eq!(p, q); +} diff --git a/src/tools/miri/tests/run-pass/ref-invalid-ptr.rs b/src/tools/miri/tests/run-pass/ref-invalid-ptr.rs new file mode 100644 index 0000000000..ebbbb77748 --- /dev/null +++ b/src/tools/miri/tests/run-pass/ref-invalid-ptr.rs @@ -0,0 +1,7 @@ +fn main() { + let x = 2usize as *const u32; + let _y = unsafe { &*x as *const u32 }; + + let x = 0usize as *const u32; + let _y = unsafe { &*x as *const u32 }; +} diff --git a/src/tools/miri/tests/run-pass/regions-lifetime-nonfree-late-bound.rs b/src/tools/miri/tests/run-pass/regions-lifetime-nonfree-late-bound.rs new file mode 100644 index 0000000000..1aef95d8a3 --- /dev/null +++ b/src/tools/miri/tests/run-pass/regions-lifetime-nonfree-late-bound.rs @@ -0,0 +1,45 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// This is a regression test for the ICE from issue #10846. +// +// The original issue causing the ICE: the LUB-computations during +// type inference were encountering late-bound lifetimes, and +// asserting that such lifetimes should have already been substituted +// with a concrete lifetime. +// +// However, those encounters were occurring within the lexical scope +// of the binding for the late-bound lifetime; that is, the late-bound +// lifetimes were perfectly valid. The core problem was that the type +// folding code was over-zealously passing back all lifetimes when +// doing region-folding, when really all clients of the region-folding +// case only want to see FREE lifetime variables, not bound ones. + +// pretty-expanded FIXME #23616 + +#![allow(unused_features)] +#![feature(box_syntax)] + +pub fn main() { + fn explicit() { + fn test(_x: Option>) where F: FnMut(Box FnMut(&'a isize)>) {} + test(Some(box |_f: Box FnMut(&'a isize)>| {})); + } + + // The code below is shorthand for the code above (and more likely + // to represent what one encounters in practice). 
+ fn implicit() { + fn test(_x: Option>) where F: FnMut(Box< FnMut(& isize)>) {} + test(Some(box |_f: Box< FnMut(& isize)>| {})); + } + + explicit(); + implicit(); +} diff --git a/src/tools/miri/tests/run-pass/rfc1623.rs b/src/tools/miri/tests/run-pass/rfc1623.rs new file mode 100644 index 0000000000..0ee523a5be --- /dev/null +++ b/src/tools/miri/tests/run-pass/rfc1623.rs @@ -0,0 +1,81 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(dead_code)] + +// very simple test for a 'static static with default lifetime +static STATIC_STR: &str = "&'static str"; +const CONST_STR: &str = "&'static str"; + +// this should be the same as without default: +static EXPLICIT_STATIC_STR: &'static str = "&'static str"; +const EXPLICIT_CONST_STR: &'static str = "&'static str"; + +// a function that elides to an unbound lifetime for both in- and output +fn id_u8_slice(arg: &[u8]) -> &[u8] { + arg +} + +// one with a function, argument elided +static STATIC_SIMPLE_FN: &fn(&[u8]) -> &[u8] = &(id_u8_slice as fn(&[u8]) -> &[u8]); +const CONST_SIMPLE_FN: &fn(&[u8]) -> &[u8] = &(id_u8_slice as fn(&[u8]) -> &[u8]); + +// this should be the same as without elision +static STATIC_NON_ELIDED_FN: &for<'a> fn(&'a [u8]) -> &'a [u8] = + &(id_u8_slice as for<'a> fn(&'a [u8]) -> &'a [u8]); +const CONST_NON_ELIDED_FN: &for<'a> fn(&'a [u8]) -> &'a [u8] = + &(id_u8_slice as for<'a> fn(&'a [u8]) -> &'a [u8]); + +// another function that elides, each to a different unbound lifetime +fn multi_args(_a: &u8, _b: &u8, _c: &u8) {} + +static STATIC_MULTI_FN: &fn(&u8, &u8, &u8) = &(multi_args as fn(&u8, &u8, &u8)); +const CONST_MULTI_FN: &fn(&u8, &u8, &u8) = &(multi_args as fn(&u8, &u8, &u8)); + +struct Foo<'a> { + bools: &'a [bool], +} + +static STATIC_FOO: Foo = Foo { bools: &[true, false] }; +const CONST_FOO: Foo = Foo { bools: &[true, false] }; + +type Bar<'a> = Foo<'a>; + +static STATIC_BAR: Bar = Bar { bools: &[true, false] }; +const CONST_BAR: Bar = Bar { bools: &[true, false] }; + +type Baz<'a> = fn(&'a [u8]) -> Option; + +fn baz(e: &[u8]) -> Option { + e.first().map(|x| *x) +} + +static STATIC_BAZ: &Baz = &(baz as Baz); +const CONST_BAZ: &Baz = &(baz as Baz); + +static BYTES: &[u8] = &[1, 2, 3]; + +fn main() { + // make sure that the lifetime is actually elided (and not defaulted) + let x = &[1u8, 2, 3]; + STATIC_SIMPLE_FN(x); + CONST_SIMPLE_FN(x); + + STATIC_BAZ(BYTES); // neees static lifetime + CONST_BAZ(BYTES); + + // make sure this works with different lifetimes + let a = &1; + { + let b = &2; + let c = &3; + CONST_MULTI_FN(a, b, c); + } +} diff --git a/src/tools/miri/tests/run-pass/rust-lang-org.rs b/src/tools/miri/tests/run-pass/rust-lang-org.rs new file mode 100644 index 0000000000..7ba68e6b23 --- /dev/null +++ b/src/tools/miri/tests/run-pass/rust-lang-org.rs @@ -0,0 +1,21 @@ +// This code is editable and runnable! 
+fn main() {
+    // A simple integer calculator:
+    // `+` or `-` means add or subtract by 1
+    // `*` or `/` means multiply or divide by 2
+
+    let program = "+ + * - /";
+    let mut accumulator = 0;
+
+    for token in program.chars() {
+        match token {
+            '+' => accumulator += 1,
+            '-' => accumulator -= 1,
+            '*' => accumulator *= 2,
+            '/' => accumulator /= 2,
+            _ => { /* ignore everything else */ }
+        }
+    }
+
+    assert_eq!(accumulator, 1);
+}
diff --git a/src/tools/miri/tests/run-pass/send-is-not-static-par-for.rs b/src/tools/miri/tests/run-pass/send-is-not-static-par-for.rs
new file mode 100644
index 0000000000..4ac1b5436f
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/send-is-not-static-par-for.rs
@@ -0,0 +1,43 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//ignore-windows
+
+use std::sync::Mutex;
+
+fn par_for<I, F>(iter: I, f: F)
+    where I: Iterator,
+          I::Item: Send,
+          F: Fn(I::Item) + Sync
+{
+    for item in iter {
+        f(item)
+    }
+}
+
+fn sum(x: &[i32]) {
+    let sum_lengths = Mutex::new(0);
+    par_for(x.windows(4), |x| {
+        *sum_lengths.lock().unwrap() += x.len()
+    });
+
+    assert_eq!(*sum_lengths.lock().unwrap(), (x.len() - 3) * 4);
+}
+
+fn main() {
+    let mut elements = [0; 20];
+
+    // iterators over references into this stack frame
+    par_for(elements.iter_mut().enumerate(), |(i, x)| {
+        *x = i as i32
+    });
+
+    sum(&elements)
+}
diff --git a/src/tools/miri/tests/run-pass/sendable-class.rs b/src/tools/miri/tests/run-pass/sendable-class.rs
new file mode 100644
index 0000000000..b3e07d00f0
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/sendable-class.rs
@@ -0,0 +1,34 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that a class with only sendable fields can be sent
+
+// pretty-expanded FIXME #23616
+
+use std::sync::mpsc::channel;
+
+#[allow(dead_code)]
+struct Foo {
+    i: isize,
+    j: char,
+}
+
+fn foo(i:isize, j: char) -> Foo {
+    Foo {
+        i: i,
+        j: j
+    }
+}
+
+pub fn main() {
+    let (tx, rx) = channel();
+    let _ = tx.send(foo(42, 'c'));
+    let _ = rx;
+}
diff --git a/src/tools/miri/tests/run-pass/simd-intrinsic-generic-elements.rs b/src/tools/miri/tests/run-pass/simd-intrinsic-generic-elements.rs
new file mode 100644
index 0000000000..36567f4c03
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/simd-intrinsic-generic-elements.rs
@@ -0,0 +1,42 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(repr_simd, platform_intrinsics)]
+
+#[repr(simd)]
+#[derive(Copy, Clone, Debug, PartialEq)]
+#[allow(non_camel_case_types)]
+struct i32x2(i32, i32);
+#[repr(simd)]
+#[derive(Copy, Clone, Debug, PartialEq)]
+#[allow(non_camel_case_types)]
+struct i32x3(i32, i32, i32);
+#[repr(simd)]
+#[derive(Copy, Clone, Debug, PartialEq)]
+#[allow(non_camel_case_types)]
+struct i32x4(i32, i32, i32, i32);
+#[repr(simd)]
+#[derive(Copy, Clone, Debug, PartialEq)]
+#[allow(non_camel_case_types)]
+struct i32x8(i32, i32, i32, i32,
+             i32, i32, i32, i32);
+
+fn main() {
+    let _x2 = i32x2(20, 21);
+    let _x3 = i32x3(30, 31, 32);
+    let _x4 = i32x4(40, 41, 42, 43);
+    let _x8 = i32x8(80, 81, 82, 83, 84, 85, 86, 87);
+
+    let _y2 = i32x2(120, 121);
+    let _y3 = i32x3(130, 131, 132);
+    let _y4 = i32x4(140, 141, 142, 143);
+    let _y8 = i32x8(180, 181, 182, 183, 184, 185, 186, 187);
+
+}
diff --git a/src/tools/miri/tests/run-pass/slice-of-zero-size-elements.rs b/src/tools/miri/tests/run-pass/slice-of-zero-size-elements.rs
new file mode 100644
index 0000000000..dbe8ec9add
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/slice-of-zero-size-elements.rs
@@ -0,0 +1,58 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C debug-assertions
+
+use std::slice;
+
+fn foo<T>(v: &[T]) -> Option<&[T]> {
+    let mut it = v.iter();
+    for _ in 0..5 {
+        let _ = it.next();
+    }
+    Some(it.as_slice())
+}
+
+fn foo_mut<T>(v: &mut [T]) -> Option<&mut [T]> {
+    let mut it = v.iter_mut();
+    for _ in 0..5 {
+        let _ = it.next();
+    }
+    Some(it.into_slice())
+}
+
+pub fn main() {
+    // In a slice of zero-size elements the pointer is meaningless.
+    // Ensure iteration still works even if the pointer is at the end of the address space.
+    let slice: &[()] = unsafe { slice::from_raw_parts(-5isize as *const (), 10) };
+    assert_eq!(slice.len(), 10);
+    assert_eq!(slice.iter().count(), 10);
+
+    // .nth() on the iterator should also behave correctly
+    let mut it = slice.iter();
+    assert!(it.nth(5).is_some());
+    assert_eq!(it.count(), 4);
+
+    // Converting Iter to a slice should never have a null pointer
+    assert!(foo(slice).is_some());
+
+    // Test mutable iterators as well
+    let slice: &mut [()] = unsafe { slice::from_raw_parts_mut(-5isize as *mut (), 10) };
+    assert_eq!(slice.len(), 10);
+    assert_eq!(slice.iter_mut().count(), 10);
+
+    {
+        let mut it = slice.iter_mut();
+        assert!(it.nth(5).is_some());
+        assert_eq!(it.count(), 4);
+    }
+
+    assert!(foo_mut(slice).is_some())
+}
diff --git a/src/tools/miri/tests/run-pass/small_enum_size_bug.rs b/src/tools/miri/tests/run-pass/small_enum_size_bug.rs
new file mode 100644
index 0000000000..7576a97e36
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/small_enum_size_bug.rs
@@ -0,0 +1,14 @@
+#![allow(dead_code)]
+
+enum E {
+    A = 1,
+    B = 2,
+    C = 3,
+}
+
+fn main() {
+    let enone = None::<E>;
+    if let Some(..) = enone {
+        panic!();
+    }
+}
diff --git a/src/tools/miri/tests/run-pass/specialization.rs b/src/tools/miri/tests/run-pass/specialization.rs
new file mode 100644
index 0000000000..13894926d3
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/specialization.rs
@@ -0,0 +1,21 @@
+#![feature(specialization)]
+
+trait IsUnit {
+    fn is_unit() -> bool;
+}
+
+impl<T> IsUnit for T {
+    default fn is_unit() -> bool { false }
+}
+
+impl IsUnit for () {
+    fn is_unit() -> bool { true }
+}
+
+fn specialization() -> (bool, bool) {
+    (i32::is_unit(), <()>::is_unit())
+}
+
+fn main() {
+    assert_eq!(specialization(), (false, true));
+}
diff --git a/src/tools/miri/tests/run-pass/static_memory_modification.rs b/src/tools/miri/tests/run-pass/static_memory_modification.rs
new file mode 100644
index 0000000000..a68f727322
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/static_memory_modification.rs
@@ -0,0 +1,8 @@
+static mut X: usize = 5;
+
+fn main() {
+    unsafe {
+        X = 6;
+        assert_eq!(X, 6);
+    }
+}
diff --git a/src/tools/miri/tests/run-pass/static_mut.rs b/src/tools/miri/tests/run-pass/static_mut.rs
new file mode 100644
index 0000000000..be5830698b
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/static_mut.rs
@@ -0,0 +1,17 @@
+#![allow(dead_code)]
+
+static mut FOO: i32 = 42;
+static BAR: Foo = Foo(unsafe { &FOO as *const _} );
+
+struct Foo(*const i32);
+
+unsafe impl Sync for Foo {}
+
+fn main() {
+    unsafe {
+        assert_eq!(*BAR.0, 42);
+        FOO = 5;
+        assert_eq!(FOO, 5);
+        assert_eq!(*BAR.0, 5);
+    }
+}
diff --git a/src/tools/miri/tests/run-pass/std.rs b/src/tools/miri/tests/run-pass/std.rs
new file mode 100644
index 0000000000..e0e23812d2
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/std.rs
@@ -0,0 +1,33 @@
+use std::cell::{Cell, RefCell};
+use std::rc::Rc;
+use std::sync::Arc;
+
+fn rc_cell() -> Rc<Cell<i32>> {
+    let r = Rc::new(Cell::new(42));
+    let x = r.get();
+    r.set(x + x);
+    r
+}
+
+fn rc_refcell() -> i32 {
+    let r = Rc::new(RefCell::new(42));
+    *r.borrow_mut() += 10;
+    let x = *r.borrow();
+    x
+}
+
+fn arc() -> Arc<i32> {
+    let a = Arc::new(42);
+    a
+}
+
+fn true_assert() {
+    assert_eq!(1, 1);
+}
+
+fn main() {
+    assert_eq!(*arc(), 42);
+    assert_eq!(rc_cell().get(), 84);
+    assert_eq!(rc_refcell(), 52);
+    true_assert();
+}
diff --git a/src/tools/miri/tests/run-pass/strings.rs b/src/tools/miri/tests/run-pass/strings.rs
new file mode 100644
index 0000000000..d5fc80b41f
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/strings.rs
@@ -0,0 +1,27 @@
+fn empty() -> &'static str {
+    ""
+}
+
+fn hello() -> &'static str {
+    "Hello, world!"
+}
+
+fn hello_bytes() -> &'static [u8; 13] {
+    b"Hello, world!"
+}
+
+fn hello_bytes_fat() -> &'static [u8] {
+    b"Hello, world!"
+}
+
+fn fat_pointer_on_32_bit() {
+    Some(5).expect("foo");
+}
+
+fn main() {
+    assert_eq!(empty(), "");
+    assert_eq!(hello(), "Hello, world!");
+    assert_eq!(hello_bytes(), b"Hello, world!");
+    assert_eq!(hello_bytes_fat(), b"Hello, world!");
+    fat_pointer_on_32_bit(); // Should run without crashing.
+}
diff --git a/src/tools/miri/tests/run-pass/subslice_array.rs b/src/tools/miri/tests/run-pass/subslice_array.rs
new file mode 100644
index 0000000000..468cc9f094
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/subslice_array.rs
@@ -0,0 +1,14 @@
+#![feature(advanced_slice_patterns)]
+#![feature(slice_patterns)]
+
+fn bar(a: &'static str, b: &'static str) -> [&'static str; 4] {
+    [a, b, b, a]
+}
+
+fn main() {
+    let out = bar("baz", "foo");
+    let [a, xs.., d] = out;
+    assert_eq!(a, "baz");
+    assert_eq!(xs, ["foo", "foo"]);
+    assert_eq!(d, "baz");
+}
diff --git a/src/tools/miri/tests/run-pass/sums.rs b/src/tools/miri/tests/run-pass/sums.rs
new file mode 100644
index 0000000000..a8dfd5ed66
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/sums.rs
@@ -0,0 +1,59 @@
+// FIXME(solson): 32-bit mode doesn't test anything currently.
+#![cfg_attr(target_pointer_width = "32", allow(dead_code))]
+
+#[derive(Debug, PartialEq)]
+enum Unit { Unit(()) } // Force non-C-enum representation.
+
+fn return_unit() -> Unit {
+    Unit::Unit(())
+}
+
+#[derive(Debug, PartialEq)]
+enum MyBool { False(()), True(()) } // Force non-C-enum representation.
+
+fn return_true() -> MyBool {
+    MyBool::True(())
+}
+
+fn return_false() -> MyBool {
+    MyBool::False(())
+}
+
+fn return_none() -> Option<i8> {
+    None
+}
+
+fn return_some() -> Option<i8> {
+    Some(42)
+}
+
+fn match_opt_none() -> i8 {
+    let x = None;
+    match x {
+        Some(data) => data,
+        None => 42,
+    }
+}
+
+fn match_opt_some() -> i8 {
+    let x = Some(13);
+    match x {
+        Some(data) => data,
+        None => 20,
+    }
+}
+
+fn two_nones() -> (Option<i8>, Option<i8>) {
+    (None, None)
+}
+
+fn main() {
+    assert_eq!(two_nones(), (None, None));
+    assert_eq!(match_opt_some(), 13);
+    assert_eq!(match_opt_none(), 42);
+    assert_eq!(return_some(), Some(42));
+    assert_eq!(return_none(), None);
+    assert_eq!(return_false(), MyBool::False(()));
+    assert_eq!(return_true(), MyBool::True(()));
+    assert_eq!(return_unit(), Unit::Unit(()));
+}
diff --git a/src/tools/miri/tests/run-pass/tag-align-dyn-u64.rs b/src/tools/miri/tests/run-pass/tag-align-dyn-u64.rs
new file mode 100644
index 0000000000..81c19022ab
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/tag-align-dyn-u64.rs
@@ -0,0 +1,37 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)]
+
+use std::mem;
+
+enum Tag<A> {
+    Tag2(A)
+}
+
+struct Rec {
+    c8: u8,
+    t: Tag<u64>
+}
+
+fn mk_rec() -> Rec {
+    return Rec { c8:0, t:Tag::Tag2(0) };
+}
+
+fn is_u64_aligned(u: &Tag<u64>) -> bool {
+    let p: usize = unsafe { mem::transmute(u) };
+    let u64_align = std::mem::align_of::<u64>();
+    return (p & (u64_align - 1)) == 0;
+}
+
+pub fn main() {
+    let x = mk_rec();
+    assert!(is_u64_aligned(&x.t));
+}
diff --git a/src/tools/miri/tests/run-pass/thread-local.rs b/src/tools/miri/tests/run-pass/thread-local.rs
new file mode 100644
index 0000000000..db00e42d99
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/thread-local.rs
@@ -0,0 +1,67 @@
+//ignore-windows
+
+#![feature(libc)]
+extern crate libc;
+
+use std::mem;
+
+pub type Key = libc::pthread_key_t;
+
+static mut RECORD : usize = 0;
+static mut KEYS : [Key; 2] = [0; 2];
+static mut GLOBALS : [u64; 2] = [1, 0];
+
+static mut CANNARY : *mut u64 = 0 as *mut _; // this serves as a cannary: if TLS dtors are not run properly, this will not get deallocated, making the test fail.
+
+pub unsafe fn create(dtor: Option<unsafe extern fn(*mut u8)>) -> Key {
+    let mut key = 0;
+    assert_eq!(libc::pthread_key_create(&mut key, mem::transmute(dtor)), 0);
+    key
+}
+
+pub unsafe fn set(key: Key, value: *mut u8) {
+    let r = libc::pthread_setspecific(key, value as *mut _);
+    assert_eq!(r, 0);
+}
+
+pub fn record(r: usize) {
+    assert!(r < 10);
+    unsafe { RECORD = RECORD*10 + r };
+}
+
+unsafe extern fn dtor(ptr: *mut u64) {
+    assert!(CANNARY != 0 as *mut _); // make sure we do not get run too often
+    let val = *ptr;
+
+    let which_key = GLOBALS.iter().position(|global| global as *const _ == ptr).expect("Should find my global");
+    record(which_key);
+
+    if val > 0 {
+        *ptr = val-1;
+        set(KEYS[which_key], ptr as *mut _);
+    }
+
+    // Check if the records matches what we expect. If yes, clear the cannary.
+    // If the record is wrong, the cannary will never get cleared, leading to a leak -> test fails.
+    // If the record is incomplete (i.e., more dtor calls happen), the check at the beginning of this function will fail -> test fails.
+    // The correct sequence is: First key 0, then key 1, then key 0.
+    if RECORD == 0_1_0 {
+        drop(Box::from_raw(CANNARY));
+        CANNARY = 0 as *mut _;
+    }
+}
+
+fn main() {
+    unsafe {
+        create(None); // check that the no-dtor case works
+
+        // Initialize the keys we use to check destructor ordering
+        for (key, global) in KEYS.iter_mut().zip(GLOBALS.iter()) {
+            *key = create(Some(mem::transmute(dtor as unsafe extern fn(*mut u64))));
+            set(*key, global as *const _ as *mut _);
+        }
+
+        // Initialize cannary
+        CANNARY = Box::into_raw(Box::new(0u64));
+    }
+}
diff --git a/src/tools/miri/tests/run-pass/too-large-primval-write-problem.rs b/src/tools/miri/tests/run-pass/too-large-primval-write-problem.rs
new file mode 100644
index 0000000000..1bbe45277c
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/too-large-primval-write-problem.rs
@@ -0,0 +1,23 @@
+// PrimVals in Miri are represented with 8 bytes (u64) and at the time of writing, the `-x`
+// will sign extend into the entire 8 bytes. Then, if you tried to write the `-x` into
+// something smaller than 8 bytes, like a 4 byte pointer, it would crash in byteorder crate
+// code that assumed only the low 4 bytes would be set. Actually, we were masking properly for
+// everything except pointers before I fixed it, so this was probably impossible to reproduce on
+// 64-bit.
+//
+// This is just intended as a regression test to make sure we don't reintroduce this problem.
+
+#[cfg(target_pointer_width = "32")]
+fn main() {
+    use std::mem::transmute;
+
+    // Make the weird PrimVal.
+    let x = 1i32;
+    let bad = unsafe { transmute::<i32, *const i32>(-x) };
+
+    // Force it through the Memory::write_primval code.
+    Box::new(bad);
+}
+
+#[cfg(not(target_pointer_width = "32"))]
+fn main() {}
diff --git a/src/tools/miri/tests/run-pass/traits.rs b/src/tools/miri/tests/run-pass/traits.rs
new file mode 100644
index 0000000000..b2eae5d04f
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/traits.rs
@@ -0,0 +1,30 @@
+struct Struct(i32);
+
+trait Trait {
+    fn method(&self);
+}
+
+impl Trait for Struct {
+    fn method(&self) {
+        assert_eq!(self.0, 42);
+    }
+}
+
+struct Foo<T: ?Sized>(T);
+
+fn main() {
+    let y: &Trait = &Struct(42);
+    y.method();
+    let x: Foo<Struct> = Foo(Struct(42));
+    let y: &Foo<Trait> = &x;
+    y.0.method();
+
+    let x: Box<Fn(i32) -> i32> = Box::new(|x| x * 2);
+    assert_eq!(x(21), 42);
+    let mut i = 5;
+    {
+        let mut x: Box<FnMut()> = Box::new(|| i *= 2);
+        x(); x();
+    }
+    assert_eq!(i, 20);
+}
diff --git a/src/tools/miri/tests/run-pass/trivial.rs b/src/tools/miri/tests/run-pass/trivial.rs
new file mode 100644
index 0000000000..891d115206
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/trivial.rs
@@ -0,0 +1,11 @@
+fn empty() {}
+
+fn unit_var() {
+    let x = ();
+    x
+}
+
+fn main() {
+    empty();
+    unit_var();
+}
diff --git a/src/tools/miri/tests/run-pass/try-operator-custom.rs b/src/tools/miri/tests/run-pass/try-operator-custom.rs
new file mode 100644
index 0000000000..3b447f36ec
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/try-operator-custom.rs
@@ -0,0 +1,13 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+    assert!(Ok::<i32, i32>(42) == Ok(42));
+}
diff --git a/src/tools/miri/tests/run-pass/tuple_like_enum_variant_constructor.rs b/src/tools/miri/tests/run-pass/tuple_like_enum_variant_constructor.rs
new file mode 100644
index 0000000000..5cf91b3f4d
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/tuple_like_enum_variant_constructor.rs
@@ -0,0 +1,3 @@
+fn main() {
+    assert_eq!(Some(42).map(Some), Some(Some(42)));
+}
diff --git a/src/tools/miri/tests/run-pass/tuple_like_enum_variant_constructor_pointer_opt.rs b/src/tools/miri/tests/run-pass/tuple_like_enum_variant_constructor_pointer_opt.rs
new file mode 100644
index 0000000000..fb57d4f4c1
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/tuple_like_enum_variant_constructor_pointer_opt.rs
@@ -0,0 +1,4 @@
+fn main() {
+    let x = 5;
+    assert_eq!(Some(&x).map(Some), Some(Some(&x)));
+}
diff --git a/src/tools/miri/tests/run-pass/tuple_like_enum_variant_constructor_struct_pointer_opt.rs b/src/tools/miri/tests/run-pass/tuple_like_enum_variant_constructor_struct_pointer_opt.rs
new file mode 100644
index 0000000000..44441ed1d3
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/tuple_like_enum_variant_constructor_struct_pointer_opt.rs
@@ -0,0 +1,33 @@
+#[derive(Copy, Clone, PartialEq, Debug)]
+struct A<'a> {
+    x: i32,
+    y: &'a i32,
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+struct B<'a>(i32, &'a i32);
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+enum C<'a> {
+    Value(i32, &'a i32),
+    #[allow(dead_code)]
+    NoValue,
+}
+
+fn main() {
+    let x = 5;
+    let a = A { x: 99, y: &x };
+    assert_eq!(Some(a).map(Some), Some(Some(a)));
+    let f = B;
+    assert_eq!(Some(B(42, &x)), Some(f(42, &x)));
+    // the following doesn't compile :(
+    //let f: for<'a> fn(i32, &'a i32) -> B<'a> = B;
+    //assert_eq!(Some(B(42, &x)), Some(f(42, &x)));
+    assert_eq!(B(42, &x), foo(&x, B));
+    let f = C::Value;
+    assert_eq!(C::Value(42, &x), f(42, &x));
+}
+
+fn foo<'a, F: Fn(i32, &'a i32) -> B<'a>>(i: &'a i32, f: F) -> B<'a> {
+    f(42, i)
+}
diff --git a/src/tools/miri/tests/run-pass/tuple_like_struct_constructor.rs b/src/tools/miri/tests/run-pass/tuple_like_struct_constructor.rs
new file mode 100644
index 0000000000..05e8893de1
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/tuple_like_struct_constructor.rs
@@ -0,0 +1,5 @@
+fn main() {
+    #[derive(PartialEq, Eq, Debug)]
+    struct A(i32);
+    assert_eq!(Some(42).map(A), Some(A(42)));
+}
diff --git a/src/tools/miri/tests/run-pass/union-overwrite.rs b/src/tools/miri/tests/run-pass/union-overwrite.rs
new file mode 100644
index 0000000000..df2ff6e51a
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/union-overwrite.rs
@@ -0,0 +1,81 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(untagged_unions)]
+#![allow(unions_with_drop_fields)]
+
+#[repr(C)]
+struct Pair<T, U>(T, U);
+#[repr(C)]
+struct Triple<T>(T, T, T);
+
+#[repr(C)]
+union U<A, B> {
+    a: Pair<A, A>,
+    b: B,
+}
+
+#[repr(C)]
+union W<A, B> {
+    a: A,
+    b: B,
+}
+
+#[cfg(target_endian = "little")]
+unsafe fn check() {
+    let mut u = U::<u8, u16> { b: 0xDE_DE };
+    u.a.0 = 0xBE;
+    assert_eq!(u.b, 0xDE_BE);
+
+    let mut u = U::<u16, u32> { b: 0xDEAD_DEAD };
+    u.a.0 = 0xBEEF;
+    assert_eq!(u.b, 0xDEAD_BEEF);
+
+    let mut u = U::<u32, u64> { b: 0xDEADBEEF_DEADBEEF };
+    u.a.0 = 0xBAADF00D;
+    assert_eq!(u.b, 0xDEADBEEF_BAADF00D);
+
+    let mut w = W::<Pair<Triple<u8>, u8>, u32> { b: 0xDEAD_DEAD };
+    w.a.0 = Triple(0, 0, 0);
+    assert_eq!(w.b, 0xDE00_0000);
+
+    let mut w = W::<Pair<u8, Triple<u8>>, u32> { b: 0xDEAD_DEAD };
+    w.a.1 = Triple(0, 0, 0);
+    assert_eq!(w.b, 0x0000_00AD);
+}
+
+#[cfg(target_endian = "big")]
+unsafe fn check() {
+    let mut u = U::<u8, u16> { b: 0xDE_DE };
+    u.a.0 = 0xBE;
+    assert_eq!(u.b, 0xBE_DE);
+
+    let mut u = U::<u16, u32> { b: 0xDEAD_DEAD };
+    u.a.0 = 0xBEEF;
+    assert_eq!(u.b, 0xBEEF_DEAD);
+
+    let mut u = U::<u32, u64> { b: 0xDEADBEEF_DEADBEEF };
+    u.a.0 = 0xBAADF00D;
+    assert_eq!(u.b, 0xBAADF00D_DEADBEEF);
+
+    let mut w = W::<Pair<Triple<u8>, u8>, u32> { b: 0xDEAD_DEAD };
+    w.a.0 = Triple(0, 0, 0);
+    assert_eq!(w.b, 0x0000_00AD);
+
+    let mut w = W::<Pair<u8, Triple<u8>>, u32> { b: 0xDEAD_DEAD };
+    w.a.1 = Triple(0, 0, 0);
+    assert_eq!(w.b, 0xDE00_0000);
+}
+
+fn main() {
+    unsafe {
+        check();
+    }
+}
diff --git a/src/tools/miri/tests/run-pass/union.rs b/src/tools/miri/tests/run-pass/union.rs
new file mode 100644
index 0000000000..342c94f3d4
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/union.rs
@@ -0,0 +1,88 @@
+#![feature(untagged_unions)]
+#![allow(dead_code, unused_variables)]
+
+fn main() {
+    a();
+    b();
+    c();
+    d();
+}
+
+fn a() {
+    union U {
+        f1: u32,
+        f2: f32,
+    }
+    let mut u = U { f1: 1 };
+    unsafe {
+        let b1 = &mut u.f1;
+        *b1 = 5;
+    }
+    assert_eq!(unsafe { u.f1 }, 5);
+}
+
+fn b() {
+    struct S {
+        x: u32,
+        y: u32,
+    }
+
+    union U {
+        s: S,
+        both: u64,
+    }
+    let mut u = U { s: S { x: 1, y: 2 } };
+    unsafe {
+        let bx = &mut u.s.x;
+        let by = &mut u.s.y;
+        *bx = 5;
+        *by = 10;
+    }
+    assert_eq!(unsafe { u.s.x }, 5);
+    assert_eq!(unsafe { u.s.y }, 10);
+}
+
+fn c() {
+    #[repr(u32)]
+    enum Tag { I, F }
+
+    #[repr(C)]
+    union U {
+        i: i32,
+        f: f32,
+    }
+
+    #[repr(C)]
+    struct Value {
+        tag: Tag,
+        u: U,
+    }
+
+    fn is_zero(v: Value) -> bool {
+        unsafe {
+            match v {
+                Value { tag: Tag::I, u: U { i: 0 } } => true,
+                Value { tag: Tag::F, u: U { f } } => f == 0.0,
+                _ => false,
+            }
+        }
+    }
+    assert!(is_zero(Value { tag: Tag::I, u: U { i: 0 }}));
+    assert!(is_zero(Value { tag: Tag::F, u: U { f: 0.0 }}));
+    assert!(!is_zero(Value { tag: Tag::I, u: U { i: 1 }}));
+    assert!(!is_zero(Value { tag: Tag::F, u: U { f: 42.0 }}));
+}
+
+fn d() {
+    union MyUnion {
+        f1: u32,
+        f2: f32,
+    }
+    let u = MyUnion { f1: 10 };
+    unsafe {
+        match u {
+            MyUnion { f1: 10 } => { }
+            MyUnion { f2 } => { panic!("foo"); }
+        }
+    }
+}
diff --git a/src/tools/miri/tests/run-pass/unique-send.rs b/src/tools/miri/tests/run-pass/unique-send.rs
new file mode 100644
index 0000000000..7644da08e4
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/unique-send.rs
@@ -0,0 +1,20 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(box_syntax)]
+
+use std::sync::mpsc::channel;
+
+pub fn main() {
+    let (tx, rx) = channel::<Box<_>>();
+    tx.send(box 100).unwrap();
+    let v = rx.recv().unwrap();
+    assert_eq!(v, box 100);
+}
diff --git a/src/tools/miri/tests/run-pass/validation_lifetime_resolution.rs b/src/tools/miri/tests/run-pass/validation_lifetime_resolution.rs
new file mode 100644
index 0000000000..4d919f7352
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/validation_lifetime_resolution.rs
@@ -0,0 +1,30 @@
+trait Id {
+    type Out;
+
+    fn id(self) -> Self::Out;
+}
+
+impl<'a> Id for &'a mut i32 {
+    type Out = &'a mut i32;
+
+    fn id(self) -> Self { self }
+}
+
+impl<'a> Id for &'a mut u32 {
+    type Out = &'a mut u32;
+
+    fn id(self) -> Self { self }
+}
+
+fn foo<T>(mut x: T) where for<'a> &'a mut T: Id
+{
+    let x = &mut x;
+    let _y = x.id();
+    // Inspecting the trace should show that _y has a type involving a local lifetime, when it gets validated.
+    // Unfortunately, there doesn't seem to be a way to actually have a test fail if it does not have the right
+    // type. Currently, this is NOT working correctly; see .
+}
+
+fn main() {
+    foo(3)
+}
diff --git a/src/tools/miri/tests/run-pass/vec-matching-fold.rs b/src/tools/miri/tests/run-pass/vec-matching-fold.rs
new file mode 100644
index 0000000000..ac80a4211a
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/vec-matching-fold.rs
@@ -0,0 +1,58 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+#![feature(advanced_slice_patterns)]
+#![feature(slice_patterns)]
+
+use std::fmt::Debug;
+
+fn foldl<T, U, F>(values: &[T],
+                  initial: U,
+                  mut function: F)
+                  -> U where
+    U: Clone+Debug, T:Debug,
+    F: FnMut(U, &T) -> U,
+{ match values {
+        &[ref head, ref tail..] =>
+            foldl(tail, function(initial, head), function),
+        &[] => {
+            // FIXME: call guards
+            let res = initial.clone(); res
+        }
+    }
+}
+
+fn foldr<T, U, F>(values: &[T],
+                  initial: U,
+                  mut function: F)
+                  -> U where
+    U: Clone,
+    F: FnMut(&T, U) -> U,
+{
+    match values {
+        &[ref head.., ref tail] =>
+            foldr(head, function(tail, initial), function),
+        &[] => {
+            // FIXME: call guards
+            let res = initial.clone(); res
+        }
+    }
+}
+
+pub fn main() {
+    let x = &[1, 2, 3, 4, 5];
+
+    let product = foldl(x, 1, |a, b| a * *b);
+    assert_eq!(product, 120);
+
+    let sum = foldr(x, 0, |a, b| *a + b);
+    assert_eq!(sum, 15);
+}
diff --git a/src/tools/miri/tests/run-pass/write-bytes.rs b/src/tools/miri/tests/run-pass/write-bytes.rs
new file mode 100644
index 0000000000..7c9a38fca6
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/write-bytes.rs
@@ -0,0 +1,45 @@
+#[repr(C)]
+#[derive(Copy, Clone)]
+struct Foo {
+    a: u64,
+    b: u64,
+    c: u64,
+}
+
+fn main() {
+    const LENGTH: usize = 10;
+    let mut v: [u64; LENGTH] = [0; LENGTH];
+
+    for idx in 0..LENGTH {
+        assert_eq!(v[idx], 0);
+    }
+
+    unsafe {
+        let p = v.as_mut_ptr();
+        ::std::ptr::write_bytes(p, 0xab, LENGTH);
+    }
+
+    for idx in 0..LENGTH {
+        assert_eq!(v[idx], 0xabababababababab);
+    }
+
+    // -----
+
+    let mut w: [Foo; LENGTH] = [Foo { a: 0, b: 0, c: 0 }; LENGTH];
+    for idx in 0..LENGTH {
+        assert_eq!(w[idx].a, 0);
+        assert_eq!(w[idx].b, 0);
+        assert_eq!(w[idx].c, 0);
+    }
+
+    unsafe {
+        let p = w.as_mut_ptr();
+        ::std::ptr::write_bytes(p, 0xcd, LENGTH);
+    }
+
+    for idx in 0..LENGTH {
+        assert_eq!(w[idx].a, 0xcdcdcdcdcdcdcdcd);
+        assert_eq!(w[idx].b, 0xcdcdcdcdcdcdcdcd);
+        assert_eq!(w[idx].c, 0xcdcdcdcdcdcdcdcd);
+    }
+}
diff --git a/src/tools/miri/tests/run-pass/zero-sized-binary-heap-push.rs b/src/tools/miri/tests/run-pass/zero-sized-binary-heap-push.rs
new file mode 100644
index 0000000000..63a0d65f01
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/zero-sized-binary-heap-push.rs
@@ -0,0 +1,28 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::collections::BinaryHeap;
+use std::iter::Iterator;
+
+fn main() {
+    const N: usize = 8;
+
+    for len in 0..N {
+        let mut tester = BinaryHeap::with_capacity(len);
+        assert_eq!(tester.len(), 0);
+        assert!(tester.capacity() >= len);
+        for _ in 0..len {
+            tester.push(());
+        }
+        assert_eq!(tester.len(), len);
+        assert_eq!(tester.iter().count(), len);
+        tester.clear();
+    }
+}
diff --git a/src/tools/miri/tests/run-pass/zst.rs b/src/tools/miri/tests/run-pass/zst.rs
new file mode 100644
index 0000000000..c1c88875c5
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/zst.rs
@@ -0,0 +1,18 @@
+#[derive(PartialEq, Debug)]
+struct A;
+
+fn zst_ret() -> A {
+    A
+}
+
+fn use_zst() -> A {
+    let a = A;
+    a
+}
+
+fn main() {
+    assert_eq!(zst_ret(), A);
+    assert_eq!(use_zst(), A);
+    let x = 42 as *mut ();
+    unsafe { *x = (); }
+}
diff --git a/src/tools/miri/tests/run-pass/zst2.rs b/src/tools/miri/tests/run-pass/zst2.rs
new file mode 100644
index 0000000000..c2d7b88ea0
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/zst2.rs
@@ -0,0 +1,12 @@
+#![allow(dead_code)]
+
+#[derive(Debug)]
+struct A;
+
+fn main() {
+    // can't use assert_eq, b/c that will try to print the pointer addresses with full MIR enabled
+
+    // FIXME: Test disabled for now, see .
+    //assert!(&A as *const A as *const () == &() as *const _);
+    //assert!(&A as *const A == &A as *const A);
+}
diff --git a/src/tools/miri/tests/run-pass/zst_box.rs b/src/tools/miri/tests/run-pass/zst_box.rs
new file mode 100644
index 0000000000..12138be5af
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/zst_box.rs
@@ -0,0 +1,8 @@
+fn main() {
+    let x = Box::new(());
+    let y = Box::new(());
+    drop(y);
+    let z = Box::new(());
+    drop(x);
+    drop(z);
+}
diff --git a/src/tools/miri/tests/run-pass/zst_variant_drop.rs b/src/tools/miri/tests/run-pass/zst_variant_drop.rs
new file mode 100644
index 0000000000..a76f64ce29
--- /dev/null
+++ b/src/tools/miri/tests/run-pass/zst_variant_drop.rs
@@ -0,0 +1,23 @@
+struct Foo;
+impl Drop for Foo {
+    fn drop(&mut self) {
+        unsafe {
+            FOO = true;
+        }
+    }
+}
+
+static mut FOO: bool = false;
+
+enum Bar {
+    A(Box<i32>),
+    B(Foo),
+}
+
+fn main() {
+    assert!(unsafe { !FOO });
+    drop(Bar::A(Box::new(42)));
+    assert!(unsafe { !FOO });
+    drop(Bar::B(Foo));
+    assert!(unsafe { FOO });
+}
diff --git a/src/tools/miri/tex/final-presentation/latexmkrc b/src/tools/miri/tex/final-presentation/latexmkrc
new file mode 100644
index 0000000000..23aa1a481b
--- /dev/null
+++ b/src/tools/miri/tex/final-presentation/latexmkrc
@@ -0,0 +1,12 @@
+# vim: ft=perl
+
+$pdf_mode = 1;
+$pdflatex = 'lualatex --shell-escape %O %S';
+$out_dir = 'out';
+
+# This improves latexmk's detection of source files and generated files.
+$recorder = 1;
+
+# Ignore always-regenerated *.pyg files from the minted package when considering
+# whether to run pdflatex again.
+$hash_calc_ignore_pattern{'pyg'} = '.*';
diff --git a/src/tools/miri/tex/final-presentation/rust-logo-512x512.png b/src/tools/miri/tex/final-presentation/rust-logo-512x512.png
new file mode 100644
index 0000000000000000000000000000000000000000..38484c670e01f3f355672d6c95f79f035a963a44
GIT binary patch
literal 96029
[binary data for rust-logo-512x512.png omitted]
z>a4=`1+te9!Lv|g@7N!*b2|?aD4ynv+nQ*=JvdVTI+H`sB$ZqsckWV~7 z-}B2l&<1yxVpNVdp=c8`^aO2EY%hrFdl?VQCO>|V(e^~?23niX2xi?{^Ki@R^n2HQ zYRHWTit`}w4hd_(CIey%B4!=e0`3pMx;@l>-H6&!!_8Lyq$enRi##Jjk%}|ss*5x( z1fIFl*|L1zMF8>2;es#L($QoOd1#FP&=vI}tbw0KgfMsM#39Mh)O3f2^k6D0N-?HK zz}rg;A0PfXdek75J40@|8j1%X`>QmXp`jNIFX(9Rg5G?tr3V6{j0%nLl=CU#nbXVv zGPnLbwtKmM{A1UzUA1fIds5?9Kqaew(q9vAqwk3`1|xw*ghGC4_7VNmZMwRyq0-^ zchIkxzt|rWuxR*WJDk0m47pA zi4X0G&2xgvZG!gfr37}8utTvrrUSjTx(&{^?%u#tvDd<2Q?O{0RUe(uiLhxXpM1;0 z0ikq5Tvf(hMTn*@CcxY=ercA3zqN+>=E924Ir0nTu#*sUovTBYxR=*(aW&Fd2`g-7 z_Pn~}Ldz^c$u|L6#nJpze^%M(L7%^UO=PkGrSG%qF2G4>$YJb$irCncMwYB>3D@?q zB+SfAnmGDpeX64zyM!~QZVI+2)R1|T)3eSSReqcFePXh#Y7|Bg{prhFsh;6Cm4bj{ zg)9YZ!ED_nS@vcwyFnXHem4GZ=J14dV|;fiQ^6+osRk^|=QG2;vD@PLidDmuv$h|K2Zjhf;n~&TR z-EyO-1|UZ~(gxYG;m)Ic1iFo!vlW>Kd&z$m3RnS|Due?FE`@YZ`-(MVy3Y8Ye-x3q4s@l~B!&xlJjl9)N~7kD{g)!q3{N^^my-b|P@x6RE$2F!n5%^DwME z?7_)zIlGZ!|62%VFOiOR^B+CpdsOr1e8gP{Ogocyj9|>eQ-ccA)+YqU5xi=4ozy^W zUz-iWshFZ8N?(-Xs_XO4lTip+Lv9(>l!HpGv0r=BSGVZ~Em@ZwNs16YQ3}e?BhpeA zw`tcmCB1MFTPtRE72JqK>}!8|5eLB!{ugQ;_lhSeesKB9nUR} zW2QB8Lq>uQ)^e)(gOSO2@oE6cxbmPTv5H{h7jc4Ch8{d!zz9`dmPjHBq8f+xDP>0B zW%r{^6KWtB<#>eUE>lQueRW@y)^cG)vNWnVR-NLIKFRFe^PWcOX2w^1tYIQpB?@i< z!AutfOE-h~mRnKmfJHsWmBSDJVTai@syYc)y4`G03O7xn%qS)re?KdJyU!48YOL62 zx-xD-*#a!U5yfmvb7rmBvICN@itLxIEL$RfaaGKc;iEXW)J{f!4eM-_7AIWT6eXHN zfc5gQzSeDd7az;`>b&YE;i$F3%6PbK!%>X&Br)JLvLAD_**rPcut5d7gEDv2lU!PL zhhED2lo5O`WXSvd6nARZo4a0&UK&m6_84Mq7T3Pzr*C*#Y#6@ub77Us2QNJV*fg@V zp-AB}M5%RvMWK9m?|M%QQQNz_#_Fsy+S_WhFM1cXml$_QtjlF(CQxA_{k2g#6exK- zU)-7YN~3tHplBA+)CZzY;mTcX0{G(skatoFcy{L*pq?a~S=8>>W}ER}hOUfDim&wl z%wrndj{8=H_wxH6{uyT;%iR1~eXcJ#&>VLr<-hsN({ufGb6B6(ltC-;S0`H3&km^? z%*^73jNUWLrp;W$2o&+HYv~4Mb^`}CJz<|X%tU#Ik7M9}bQ=Q6ag)KW{}L&Gu?wbFoA!xDs1`1FyfpFBGLYTYd1wWX#_~Z zCWwJE)cd$6jOM3@hTj*SowNVSPOUZmJCL<3Lq-5e6-B_()eHm3hZmk^7C4X!l(wg* z*M}IyFSILDY3Atm{C8@ew;X}!J2-uRarkYe2LPcJqJF?0a5YRmCD}oEkiNwB9GWU) z-XUcsEUdbtMs9QyJ6%zpOyd}$eOebJKs zPGPzAy-bMP$Y_2$6N4hfYV%F}4c`eV$R{0tc_iQj$B-C4Ql5l;8E|@ra=)jgI8SCT zgdQ)X#Y5$KyIRp+i+dNeKd2mjoH}lK2cS&`rcE zgRyh%Uk=H^T^T#zCV1K-cSY{tqaF#9?}M?;T)WjSTc($^4QBV*&J(eog>hZN+Mf_*u7ZAa0)Pd7_dFQmT^Uhq+AXh%)rT!Olrj=0K(fA%@S0Wh zznL&@6)hG%*vEdD_tyXWhNV97{`k=*LU_Binty^rWzr{c@!aNRV|0T+Iodmsb3rQG zi;re)1;OxX`S52scsL$F;paPGxJ=Bce+kVyTa)NE;+F?@V0=n=mgF@P6<4|a=v|BN zkCh{lFZjdPM6)iU4sFRbO|e^P+1fP3MNij(-TE)OZ2aigyQdcHLGdS2j*XyVT10gg zq-uuhq#t9)dXv_N;~5si9fw_PugJKSXX;gYhg%O$-@*sjZby#$4=aFljaAyW>*H0-44WX ztSA2P{Ug{+3WUcSoHR-Kkg@=wvMI~Hsa6c{oE2HLNcLHsK4zywbTvw*ACzq*(K4u* z9zB&i_csZFx{V|T%2N~9wWY{5c%eKS5(+~I4=>5wG4BSfPi#GOUijpX z%r6MGQKkTznm{|3L)|-9S7e;I9VJ~B^0 zmE%m7+%q754L-NzcBxX|3nW|)YV4w4IYoM;vfEHRr{!g=e+T`8`kzsh+zGd>0k4CzE4lObBLhHB4V2nxUsam5apSO zS#F*r3xGMdYMA=esU@FvZpoglrBWJ&%CU|PH!tdjIzsq-so(hQty>fJ?jnQvGm9;R zo00f8Jl={mbvgn=@YT;@s?c!HtBP^YPq?7^g8@&JIyEn91GX3*L1nf8glTw}gjF-d zbsHm)khD*)UTL4sd`3NzcW&eaN{Vq6Ac3NXIk<{C^Vbd_LUQwtC)yOz^x+@`Fc%UQ z^N<3xq7gtCE~#7;zaT>{G2x04G<;;3d`YOm|I5$XfIB5XW35t@?WI88MyDvb0QIj8G?KQavlYv114CBY^#YB zPX=vJjI||eA=S0!ko=`pQ1d%uXy^gn%AsR1B40*J5PtDBg+C=nem_os=!PXUo-w`eg6e zt0Lp9d_o_|AzM-75$D*5Po*{ODPwszMQT%y{r4%ZvTqB+fPj5XdoYldLnvh~mPmge z!a+gy_7c2Ou_r6EC&UIPe{i8)=X-|$N1gcSPL=`qdyb)0A~uUh>ck*K_y^?S<$xaW zdK2U)2WyE9&j0uu*jCnYw;nd3MypQ9?kJqUA-U)yw`p}oE4@#P?AN^3p&3l(ns7W$ zBQraI7ar;?8MVX}e*XHfsss-a4?Y@2s27vL^2Kmd;&Y)nE?(gAQW?p1(>H7*ZJ}}p z2#Rit+}vaF*j?*KPGuWaB1vH8_;i6VTf6P3A7VpbEbeCh)7tdn5mzgot9F;K7Ljp> zXQ*58hk?yPQ2+rBn7|xKFnQ$rb<*3?>W5ks^j3fZl*5yz&h5BZW_J9@P07rWRIT45 zKm0Rqi7jb72R*9bgpA{U8ya^x&Os7-Z)1M#P@Q`mlZlf~{WV zk79m%#rtP28oA?oPTa$->e~~H;9RRHjQZzaBNQ8728=I$VpYov!~>khLV3E_J}0%4 
zu1}0qcO{SHalMi|?EHxLP>kKkdcSz;yQw%kaMrMj``9L&@G%yycU~jG7Zf?pWV|im z=&^QXJ{wE%#@ko`)dm1RtIFX^Gxh{+6`g_>n=<67H*CQzU$lebB-+rfpzHfRa?q(C z^T=gu85Nd6I)xNmq~rDKI}tv3{9vCkvWk=AX~P#O{hDkLXs%jV=6q?zm86<%E8~fN z#W>H(k>FHwfrB$oD&H+*QXMm=dyMdX@T*4V<7ITb+B|&jS&3%%iAN6pg|(JaA~Y*7 zdUBS*vo=kPh?B#zz2|E9?;r7x;;!_&vUc+>cUm8VaT)RddwkcKYdZ*tcSD>oIJk7b zzxtfKggOrFh`2OAt;vvCDIN=(4W$RqnQlnE2HBN$w9EgA zoGXt+VWyL(2>2H<52BwEpbN1OEaxV_loK#g>4ELuiqRM1eKd3hQx+f`#vLh_7vsxU zoBbp!F4;k458n=-HKhdIpc~37q|U-O;H1mS%wXAWaex2R_sbLIPlV-(yt=(gGD2LM zO>->Zb!O^U6nTnwYQU5qQo=`cwxdgOt!Zo5xu`%PzJGC%0FHFXA-Bwq#v+4WG2av* z5DUf0IjX-LJYR^=Lge7LA@mU}TFwCN^BdfwU^9E*?)@Z81*g(0{{+w!@GuLbut^c9 zMz9DRQYV7l$}tb z0#xZFjkg>glF<2A%BoziOk5T*omMp}7DD=cLt85O$feN@70Oc?h zOXM9@+ha$~yfE6feD=W^*`q8a2P%DCENi8(wCmnpJ$N|8R|}LW;zk%M#=mm@DrA3XTV8j5cvz zWz%lrX1RHIH-alTwz!#%bCYs;Ke|WomFkz&9>w2m2TI$NA3!i1U5?d8I?+a=QvXMWq4};H@-#J zFy)mjma~Bt-g$BCFJHJ~ha%Ck?mgIS*omu4LrlO4q4w*;cne@E$YB%d&kdzQLgv4! zyW#I#j#4e&i1qGTSjUsxbAm6G@}93^qBR@YHwRx7qnHK}c1e>Q>~_K?;O&9=BYBHr z_gU;uR*vnivnj2U$7D4N;FmTp8!*$?@?ESSQyzJVo4dKL59n4ihqs4Z|1cYz%kEa7 z6~fbxx|};<->4&D(Qo;W-^Z}tP2O58YPzXej!@>rG4f6Sd?hf-s7067fX!;&!Yk69 zIN6J1OzQYxLOj=O7}*Hak_p24$3NokE&E4TZA@yS45#^Q#+dv-W4D#z9;m<`a?g%0 z`1V=3o4t>W^|T_cBph2kZQ{9C^%25IpIR(h%5gdBBs7{_s_DQ`I* zlqB1gw$3z`*OSkvQ}r|jpVbWY*A(PGk%0*=I8p&E8pOxE2e|wUcd99DTBL)H=u#Cb zzx{V%%T0T0o=~}5Y0y3@S zpkysLBSP}eHU-GnlttoZ;?fcvt>F*f9nKcbUqzwE{ZIW*CR#a`_!)pM4QgaDzGNNN z&FIuUBw%=vAd2F1&KuVN^U_BACcOyQ2KDg4kRl)B%6<80%Vx6wvy96SJ(8hb_hJ1OL%Jbb`{wt`Ars$b2O7 zWz%qpFv@I3sy_Am-0CzQqc^?CbcEgfGVoZ=8gJwzV0fW-I*bIR8X*S`ZaR#6EsNq{ z8NrGGmbejai(HEvZnx?STTrDcf6N*)B!JM3C$xO4EQCB%NntMD>Z`}%iq(-Vr)YHYJl(sU^{VXkr^b@bx$c#*^=9U z#mr1QtW)E3NdNd$I72e|2L&Ej8<cWt#i^eJg%#!CE1K9ST7hQ7=kkC)v{p^k#fMKQI97_p~dIn1wRPzVDAP@(5FokvC(I2%-{MoTC*Gayb0P>kiEv&^7WBFX$LMiH zwYn2 zEe^=4NJ}2L1uRZz>rTx%Inon%j+v8Ag*mPn1Vmuyg+mRf{q&vZ9{TbhS5-J^d@{T; ze_vrOfs=hGeoAMM0v~GlZ`0X5jV$H&GfrA8>$UWcDJ$>rCP1q7=E^mVb_w&BIeb^xCDB#7q-UeDAydE_`!S9v$BtyL2Po5CNh__Pu@&tty@c z$gKz#T&x_@n*!e|3yv)P4}e5!mngTqkp> z3*$$%W`&tODPWtU{ku;#sjuuEPhU)%EdRi_Pd;OxEW2+xoNLb@VUM1p{`)B6(ZDPT z2?QFzYuF0dh$BD@8TVZ#Yy@8D8+O|lBb#zU#G{@EF=r3?AQevCVcGp*2cL7b*2oUu zikJCw9!&D47bBRv)KW-BJI@tapA&I)iisdJ%13^ZsMI2Lm#*Ie_!%If+_I#y(Hm%;F30`Hb7^j9a@~JVN?i3d2{cc<%a%S&9Dyyxemj zQ?x&a6r?ys7JR{BucsHIo*jXpYohXv-FAZ0mk9G!uK$+;j zt|@^}LEPnsNIqLOdF}umsioUjo98kT)w0Kj7CrnQ0>Zk))?4*9QL9vbJ88h9TUK)U zJK%&PLr>9i^;2n0onJsvfsL*3AMeR+e{lRX%L$^hRI4lmPLs1~`zB{Pb6Q>$<5P{! z868=0yLp<)B87W6NT=|aWQbSDZka#jVngy_?;r|N4cW{_>1K7j*-Q7>rDVs{LkCQ^ zo>=wR`IW2hYTrnV^U+2?A&!|GNpSVae0{HzYuie5EU3yrvZpPVq@eI{YBLCN8M-P}MkhjL(n1Cq1U~;yLL5BCf2Jlm`Z*A`XoWlk+A6|> zWV~LnyaAlAHT23^TlQ_8HCE$D1fwb>0&H`hG8}Ju(DGwIcTZYIngppdED9YZFcbyoLKrL+yrlr4TQ}2zGLIq91t^uV zR}XCIIM4tf_c_uXXvRIC$$u2w*byNt-=V#o3yf=V=7^iZ?KNZXoE4ArLJM9F($fEp z2-aApO5Vs2i0u$;JH_Yb4ZNgI!Z!4b9Z|WYr#tw%&^q8MhloC?SLL91K5XhG@1w@T z2fqNzm{)Z+WQ{Hnmk4Nr=V`kQ5N<<%cb#m{WtBwCHoJhjWE zv3>o($st|?VI&dzV{`-=FoiXu<99Sx!aOIUM5|D`1H}eafC|gDiJ9dUqU%eh%6@fe zcNtWiL09)iv^(oQlp`w1CPPCFz#$g9-;zZ!C0c$M>D5l1jS`Ja36~$1nlfx$cqL1X z>U*+PkgZOQmk|f$qH9^SW6XaN_yiE(x(0;TTCIujUJiXYN>+)EiNLSyp2=W+J*bK1=-jb=&A7ohQ!gExHbgEEgDJ$CfPd7c^g{OW+ z_hwSaX+(+6Myr1LVx8!|wB!$y! 
zszL{vR*UVgN)X4Rl}hX7unVH^nG?7~mV(wEbitTBPApy#y#wKYo1&Uk(9YU7xP})O zx#L6W^#c|x+0`xYb5o-YE#6D5Tg%Y+shC+D5QhT^eRzLJ58=O9r0V~Xbmf6e|NsB> ze(x}I%`Id@xvwa6usNc1ARQ{PQc)DDPbtUV=ui}WR8-2+K~#FsDWVe{wkRpe z<{X>7f3Lp3zy0IA*Y$ipo{!@hj9D4_eIUC5hg!3ehl}ZVJXunN9#Ebo-hu!dxlagWjBAj_sj5S<3nVXzxCf4!(8=!k(@4vPRz7 zxwuut)Z(g73yC*mM5OI#>Jb+IDX;4CdIfB^F?M8{A=^pG2CTl6uD%mPhKCTF&scXuxK$^SOY6(LzxWJ-~SGnh6BT_4L~tyxJF27gVXB`j(}gql$5thzF{`-<9n zAT0T9q#W=JPhM%AB7Km37jA>+#mO{?oyN){^8oe%VLot??aD}MP(Nm?MCqz*k-O|X z(RFKF61Mp_N>gyww~$vfo#a_RV-6lT?5_Z8Ito+VqT6wbB{ZgZZ<-BCj$?DGWgeK| ztZ{6nkId>fg%!m3Lx?w@K-rsxZs%pg8&PZlQFNa_T(J$yAxfmHPjux}xKCGPG30tt zg*&(&m;>QfCaohBWM?S^HTN3vE2_1zBFssg-GgH76%TO5qqr`>vixC1Tsw>Ra!oHI zU?at$@xp&fTu+dsmI?OG^|3-&H?9EgIv51HkvClqbvf#&$z1?WA4Swy!h|T4HS|Un zUHnlKY)q=z;qfb&F-iXH#Nt;{Hfeb$pnpcyJ)#vDjAazsX7nZ7WOs z1MB(C$9C;T?(Bw6e{46hY1(~Ln`2~8OqGPb`s5|ke7ZK-jP4U%Qo z1#y*C0Bh*`8L(#qrl)(u)Z>6TzuHcZP^wl&0zz#qX^Ktl6R_eVdqK%s}1 zHaFxr?~0yX7%82U0UzsRbv$>KF23pD_}jlWD<5d0I(LdlVU9$RxDq#evI!Q15h-4j zW$y1*V!IhM_GUg?+4~1C{>*uOf;NRE_SZiaHWN$$Z`-g*3V5wPvF#LrFTWmlkP5Ps z9B;2uxpQd~Qu@F!-HU%o9lf`AE|IknPo(=vXWbo&fz$zlHaJ*5M{VPjy3BAslAIQd zd~LSk|5{r{2AloNh977{W2Sfwz2tT(7Tc`~0_yq%5IO1oDZu{a9DBY5e(wp)I>wk= z!R|1mmER@XJa7iC7`8B8r_l`R>vEw%B4xfl;Wxta1lTjf0!n;t`Eg*)gz~J18cp4! zw~bM(FCu7&2-q>npHXo9fERklxVXTDF)>(*9rWCiFtcG z%eK?&k3&T;sSsZSpr;c!xYP3y)X^w$9{D*X#1V^A6&-XQ*Z)@g(Wn6;)x?J^2O+QA z2@LFA0^*^78cDe@g7Bl^XUTkN1H`fV9T9#d3K2}XkR2ew)kgXkvmY=0zreY$N# zZ755kuDUDFi+_c zJz8#ivYI?9+_Eb4FCkFpDrDSh9`--p zMR%o2;`3}_Ro4{AS9pIKULsz%vXnUUzv^xPv+tog%;YDhu|?qXUh$;GU=dAiNENIl zV{0FR4nvkmjpBNT?7fKs?zOff<}{JB`#MPT6@R*TPVhCjAVkX>d5_8#IA~Z*CwkD< zsd#gZ15$xN%XR5sYHr*PhpUvU1-Hs<+g4Y0W>2+esdOY^sc^h>-2(+Xf6st;U-km` zd-=-FS5@{}QlQ(y=3f5njZpym zOof3fr;$u;%+nE4j~6#crEc3ap!4I3Mx$d#=-aZ;O_X^UN=wqoxyS4MqqKkG9kJvI6s*=Dyk zSU3-ZurAX#4Lhn1T7A>XO<1(-wdTY>_m@VR+pJXcA~9N2uR_C>CF~Pf=1WQJye6dz zylgfN`czmb02o6TmAY9K-FGs;^|JC8uE-P!Q|O7DB~ENLFt1*XQIvSK0$?GD7zEm8 zxVd$YCrv-)J$`x6BYw z-4MX5zm|x%HqwMfwE1d88~Lh74O>8bTUh;_a&YI;PoQ<+y&6_nR;GN_`M0v(%@L?3 z@AASRHo46n+*1k7RIZ57M4XvM+nZzP70+H;d^)BG4ljydT4Pkb^Aa@}aScW|{8N9u z>n&}^GEx>%j><^LVHyX37MWKo0Aj|;BksGEt)xnEoo0mX4*`kUq>`oqXp}j?5D9>8 zJ;9f2UrrcJSUn+1##u&*zl5}pG~YHdWY}dfgknI zgI`03VmUpL3PrMLc}nNus)>(f`Y5CCL@QljaErhatyqUS2o8rT@Ko#yps#kKcsw}6Nj z0f62zIQa(yM~CAcXn~PSVt>W6Gf?z>8E_@h=|K2ABpa{~_o{xH3n8*F1wb)0*IG;# z?H`@PZ)(l{pjv(v-bBcoYNI?I;w&KippOVPp)6lnsTK~8y#m&0=t&hBG=Ye5iaNva?~P5ZCLwHLK?d2LA?NTrrSeEd9%LRrd%xl zO<~+4`2el~LIu%F({g_k!23LT{_sTEf4_$b;q zM(<*ke+d-;Dg9moT&>c5o_?zaSR4`S_h2Cn^wiq`p1XI}z;}NanKG-Ppak_4W~~I+ zP2BY6WERuzbPD~4*Cqpi2yOL{s#^(Nfj6?25FvbBhwQe-5>TIj);*g6j2mO`J;!3H zbJ9CjDS+L&_@zpYT7w|%e;ba#pIi^HX*Ty7Blc@JsBY*y@Q@}tqIz|yQ_fA`Q1l2j z328C@)O`Nwg&ZW_A$>K)e~k_GEn~JuAw5V{xh-vwEVGUB zMz0GgieU69yl|fNbzX^SZDkwweyb>*#+T#cNuIt`#>`h2K+ zdp2N5WeCFW>66Y?NV{PD0P#CS$o#2GIIoi8NCVC};nB>6t*XY|(2eNh#XW$S|LVjm z1mr>L3uiN+LCgL5`jJ=3w2N~0%)zfzHkAY%-)QN*g#lrM!K7fGnh$^3Z8f$4g+U*} zfVG6!C)_Q=1$2`Bwq1{@k+JfZLt#Ghg9OZfT7K6OYBh=#(f&N6-+*v6jeoPffw^s5 zzl?+yPpWOrQ_kz_=U17u(zvJaLW(vTxq`iK(?*>e_mp)YYjZD1bTnf7o2NpD_|2o5 ze?MkV182)cT8A%T_~dJr6A)Q}M@WE7ZJF#umq#kSJ+1oVxCZ*l=-5a^z$s>m1%={z z4~0}xHdH=R0U_FcUc*H1R>+YecuMxmNAe92|9_FqrrfW! 
z=VrFk)6<-DGX^lpSrQ)dITQ?}bKH9Ejk^B&69T)_G~xV@3dG%#$$^Z<9ZI84TjN&= zy6QfZo&b${>8Wa?c%>D07IJM37vbk8yT`xQ(m(@bx^dd&l{U+cVRIi(H>bIuIfIE~ z(pACN1VwPWAuoAB$34iVd1QAq%o8YJTX(o%7)I3PB=X38skbG118>vG;J^}SNA>B7 zO2X*pDT(X5@vngcWCnJ{gxD$`;mrnybNvv8nh6~B9JZW@R|&Wa@-kE3P$VyN%I~Uo zr~=j2oh*yN@EFdDJXwI`dnH$kXs2eCGh=G=`SWU!9{PCCbQH9T1*|V z`WbWSanmeU=sst5IobfFc!Au7ZWK!7-oMAgd*{Lb49);n+#_5n}XU)Mn^|* zkSFn3035N)voG5>LW-4-l{z*i8B~vV-sF|yRQS2z@^5Q7Crxf}i1#Bc`52m!O176wlL`LIy4H65KvB{@1^sN^o zZbqbi|0&cC(gqudqXyvlT&2|Q*6ejD*s4{sg@*cdeWY{|+`)Zet|vc5glGvn8``invI-LS52cGcSxskbzdWNP zR$%MB5cq)Dmt&nxu-L;s0`AMrAkv*Se-^Y87*WxXQyLy7UV%!h^Ch5cPZO#F=>#5H z$&IeX0$O<_fkNS0!ekXJIn_lJwWO}y8g&0q>xM0@9fnb%Eb;)LT~8UXd_@=^qjR~`qn25c_k^90?OfA&_5kEP*n8wrJ8>}%7P%%4HN`MftHO!M2n4DbDX6Y zUC?{0d)_C4??4y;`14kpL|gar!Y$$Py~m=6DMd!nN=G|)PRe+#gQ+{lvH1x|Y?3H5c~P)Rh{c$G^3dy&&(Od>Ff!6z-FESvBbb@i?5n)k*)1Nbeo0Y;g7;^(d{JO?O^>u|KJuzQgubd~5hh-yZ7dzO1!_K2!SO2z~}|66ZFSa=w8#Op;mQrJs{hTE=vc>h$iX)rYy>+D?! zPi5!bPc#v^a=gTttrG+IH-cmyiXiels5Q;j%1DW|$7-lxMw%;%YCCKA%Toc%Z}Wrf?h!k_$Gdn_zG1;NGzEwv^57H6vghmQA(PS5@{uTo0|(Lx zVD)V7AX8;y?YfBqpV!T{SeKQD(mJsJ8cVAeqd5X@I236 z1;DSfRM3SB%tq<$a&_RiJkuc_efG!T9k0&lbQBORqm$MmkoJMSrnq{aNZIEYJ1;wr z5>N^>sAr;~YqKoeu|;n65d_OQLC4hC1uHRGRM+H&cESqK1>*eMQYC$4R=!s39697T zrK@a2`6VjA<}$)IdTt?Kfr9*We6fKz?0~q%zchn);+eh4VUl`9M)TFC5VzOwOdYd? zJS4SysRD7y32?b1Ea-dLt|MwlzcYVu`%&J8tNy=&xwFuorhh;@ELJp78ffk287hQi zW|kKPdD$p}Sdp?WSlT7e*#!}`E!vopw3-ZFE~nIzxT_R0JAS-8U>y7JG``Ev$G7Y1 z9e5`86npmF_&8zX0($L0O)~vK=jGd3S^m#Al&A-MBC&hOks?G{zZv;)f7aMND9|x4;P9CYQUsYP1I?b7#A-#rAx*GyJpr7OWW+j2 zt2FSr&AnlpC&HoxJX863dT$SP`4`Vagx6oZ!GM(lIN$5cXE~K3<6itb5ww7|WDu>= zg|gN#8AtnBgEOG-vvTA-jdT=;bFA0id2Sa$8$sN>tlj?<$pcWUZD~%)nna0b z-k!lU5F*dcIAB$T;(}{iMuTnvB2o5f|BR#1dJI`v><*;P*kN=2g)T~U%0*_nyN_Rk z*@Z1Fgn%6rFcYg7oZE>{&W^x`2V3Mb>!*j)Ii?MQe^Oq$)KFu$s1fJuQ1^x{;9Amz z)5%!+X*&CvgTBOPv)Yoq8%e_7n4&8B0M%2*X!h4@aa@PRLixKBR}x@?v&6{z^J8iN z6%Y|uZEQ2Rt?#gsg#qk4=iP{Xo%DEdq~iH6L;3cb<~row6<+Ii170&5UEU)Jkb~KUBP#w#D6s=);=soxlI0_ zP;f=*>MUrrzEop>J!N3efpfM?^tS$2!B{cZ+(UNV=7}g^GtJw$UQhzB?QBe$ZbclJ zl^ukQS_ zJQvLehBKcotGuo>vR31(&zhR<&TaM@8n*ZC8U~29kl-*(;D&9Mj{p7yj%YJ1rtxWt z@B(e-obk$w%g=Tig2`xZF{`;PG(S@H$?92FIx~xsxO?Y$Ic}xj^VfJHX&V zrQ;)QtP^NEG4zQ)&+zkW=hD8OeOGWI+?WY(fcVc7YA~@YOf&w(QLpX0-<38e(RkA z?L#wizMhHl>%R=~mi`nNHg`BmJ`F;<1!yn0O9}#Hga-ySTEMRqj2R`j+rB0*%c#L0 zw8(>?80#7URQHFf{Q2q9g4dEN)#1*6HC5W>mp4vNmJEl@EDt->AmDA}6%`JfgY@~Z zeN98~6xMwPcULkno*3CKin75=HPLu`V9Z_}qI~20({M7SW0Jt0!i^GUV)NYe)jV+~ zOf0Eb$Ps7&(LP~qur@tzf@dlLSuhmO1W{al4k`~0*tcx*Cc&oEUFuD{qXhhL|Cb~Y{9gAw)jw( zq?Gn-+x-IbHWW;572*-YZI62W{{8365u#h_-VdA3TPmj79Z4N9pfDnr`gcrHc33@m z(P~w)yX;|mS6}9J-;VD!?@R|qLVl^o6OEp>$MpYkN_>^)r2i4rWNjr-jJ?e~WCa&z zcJ4J6+8GO-bIIaC{Vy8D&E!=S3zvH;w~~2+Er>GlqWZkc`dZ&x490L@L%_Y7yuZI} z6G9u=@pwnMMOrNqRie#akIW~wSc0F5+?)TLP!aPyLr@SZznQBBgmW8kjdhrHl^VO_ zJ=xV_ey_#kEo&gr_VGpm6~$nBYH$RrUUWzqwb4yy#9r1Abu9?@Y>cn8A`~LC6Yy z@a_N>svf-_`Buq2`U`JfCM#2WVOnr7!}OWKRzlf zff64!v>$?(WPbNR*wUo2#c`^1zBDuQt#fJ0t})TO{garEY!3-+J}!xQv-#ZiU*9c! 
z6i!_7!RLeMe&4<4XY2)5Qh(;M3EUwG57oqra>fN~#*QGseVhtD+bS>7qI`BE9za3W zG3@d*a!H4Ht&HjK#-VPXnIjurI!oyRrKK1ZMKUfDYPNI))7~XUvGb(3=Dcp){Ob=n z-cYOopB>2&L}>4T@M~?ehpF6VeNCjmC8GlRTsDSXeVHQZR4>@_7QASU&7|@VH{Dm_ zu8IWl8{%#IpW(*q=c&f0$MFyS>gRn}jD#-y&ans4CijbmlFb)$Zvp+A_z}xzog{^m zOag1(#~l-RY&hr8mT4ZZ9JXIaVWh(NcF0_w=9zY_YVS!>`fnmrJe(m78puoF1= zyWt_NQ%*lV3jdu?TC;+$23lsA3X>etAN*oqW`&o`p7ubo95UDZ`D-$)z(bITaA|d$ zZ0FiRD=T~?|Nl1`8n?ra=(vH%8w5bkcZH3dpomT9j}@tePZ1w=o_CYI3r;~PocSB*`y=*tU_6`DvVXe z!iLvkulo)mz#FDIQPBX!y!Na`-#KpD!qC}w8en+u7#?*!Eq~_if_mK&oL4wOxKq5; zW5w=G{3@QlF!p=}x7foBCz^Gx4{nfE0N z3MGP^@+}k3qMipq<_D4zdKB{&6Nu};zLRDk){|$X56TA!U~~ZA&{v6b0#?P5gprQg zv3o@6_Qa0Q%mjogZl~eXVMFuG6f4W1;avNH4SdB_irJTWB#_@93hiKLD?qwtH*7Jh z0FAGV~CPEWmFLuP_t7noxKmey@LJE1h6U{xAfpkoFWy+G>8Yj zP=!5sUS1miRq1AQM}sI>|Lm^g*uT(}N#1FT^;-eJR{qY(#82RP|j-X`@hMVHH&!HhH%RZ@`6eUGdmj|vBV?0L5C7Ubk_r9Fx#er z!onJBbvWr$hOJ35{q?)g$$!Qv&;~}8!cRA{j=hHe<23Gc-79}a^RIKjSvOzN>CJ?s z60o3j=u4)C&NTCI9XR0**k=fD{DhFBG z4m6S#fu%06L4i(yykMnrx(OsYC$7!+kon(o8o3H&pB`9seIj@0Dj46pTX^V`YD;a& zmUtg@1_+nnO$g?eE)RM88#AIf{FyC$|6D6_uAz=+FD2$sU|e#@yCH}OSJ4b`yEz|`*rs0mc$LCFwqcxoXP(q$Qr-!xjs60Iow@^? zt{z-;-6~scbU<(fn;O0yfoBCy^P=7sXyZi+z`u>smGK_?#NvOO?ZvxSNSs>T` zjLDxnONSqi39OTFsRlk0q=knPu&j1PuvZmrpN)Pb041I(3RIBCDy&%HG0ZbH?0IcH zcJMriSnAM_bm1j#d=8gh8DUO=+A5N>2iQV9rveA+$@@X|m?lSKh>)qcWZr@n5w0!mC;_HiqEWYMTc?8{9 zimz|vH4{s60e8X>5zZ??vXqdjNkXgV&fbWvC+_-#Vd_kD@oqI|L5vUT- zyH%&OgZOR_#%Vv3j(Zq-j0p;<0lo%o&QQX8$N29W_#|z5)C|P`MzOR+<}GwOq!$hn z$g{;U0DPI4pV;go|Kcp=T)K(>Fq6C4_~q7(9l?fwyzu0|O~s^|tQ;F=n}1H;J3T^{sK0u9mBRm^ZvyJ5LV;OmHq| z)EpwTsCz^ap{m?{a5gCN=`LUk8J#~I(NMj0;0sw&XEjU?UTPq89iPp)qV&)2;ftDU zRpQ%p(Zj}=8CtB(Wx_eV%Zwl1{Z$oV7cZA>cw8+Wo>z$%g-loV;zL0AcE194Ifb9Z zVpS9wGh*j=5P%A0*%OtNV$}{G{=&zG#U#)%z({(vc99*m!uTrH_3-At;HNxa6F~`; z{fN-BW-&Lrx@mJ1d59&mZ(WQ?LV?CM3Z4?E;yM8nW9`rPKS2(5kX^@zKu0R)XngMZ zD)ed|5ZGX43K)wJ}$4pC$RU@c%BLGmTr zwCfu^ZW*O=!xCah^IgzOcd?-Hi0Qb|z2Hbe35be&fV`r+TK=q=c%oZ3^tvKov6 z!u2fY{!p5?ndh{jx5{vCq47K;15EN`;VS0fH2f$8uh7i{vBFs;=(;$1rtdwVCrutN=uHT#MP<>dt6{AoL0J16}DNxSb* z-b_}~{%(A~I&N;p%B`E2#)f}>gq!XtmLMkZy9L`R-ZjvhC4Xjco(BPq|G`;kyYuBp zC@9hhFwDtCzsAkNC`+8r>u((u>^A-b`1|VP0z1|?f1D$Gxw_gH?bcgnKzKR!_$qZ{ zV+pi#SWNGm=Omi1-GrRSAPii zIV;iZ!7!Ay5(TS}a~j)@73SBz9}sM5oe{u-_S_2Gd644>L{pmy*bg{E0yQA>+<=_9f7+R^h9OW(O&Hp$FJ!!3GNNlEj|l;>FYczgIm%Sjh|v}f z+!~HX@?NCl13x7A&QphF6)$3Z^@x9-$nd+{apzI^P;uq9C~J}-D$hZ(-cvjNK5#|o zIrZ2JeiArg}L>_1xXQgmyndSn!z2HS_SEjq@5} z7DIk=mWODI$33!~v-5c4EI&M>7`-!Bp3zAhwBnyFqQ({~rxg8aKch@>c#8d@auL#N(8}K{|E}GuVLaG!$1d za}+47*5*5)M~pFN!mF?QXG*~RV-JdV_|B8kn}#@R4Yv*rN5=U@uexy=Fj|!Odk}ri zZ!1zwLgX$*$OZ8cRk5!0yJ$i0_bO|7W6Fx7A`~>vGFoh__UITE#9Q|?{pC*&F(hE% zTV}ICu=iZH`FntAKBNq+9iOn*0*21(kRA~c8%?e+aN9)edV_Du;U*Pcuc{2u} zqisOo1>wG$u^&G#$a#w5;T@2Qf0w|oLXic!K*9nm6aLKg*y#MHO}`J*KtE5CI}Wm( zk@YeFI4(>xvw6~jES1q!9P|Bi!B)Lu1VGBpHj+do#e?bSR&JxXk`!95!n zpC6}7rom^x3T#*L3yhgJ?b|B|K_a)~rNy+wjBfn-Ec}jy=>;eo!N3+ubr67hue*@O zF{HtHu57WH@aGCRevZQF1WF=(pqj5Zv*gpWz3KCmrVj4qRPo*Pg=SfxW8QeVoU3ds+T`?z_y?0zq9 zh(MKukt%xR8r9)#A(a&l^G&%3?SQ>!qo z@B(#lKW}^_GJHAh#gtO9yNz)F0?L6kB!)Qbgk;pVGQDn}Ch-Ow%P!+g{xq4n;%Yyp zq=5RVP&$l&3iwQQ_(;40!1yHRvK|{qT(-@dF4(5JlW&`cOgdFkxw^<~$+?;FaH$cc z!=sZJMTykF55jZ}mV~%BLjHtjfE!s$huhxZF6Qz&RiFz`uRHO96VcCBxkDqA1{V-k}`Op z(osyT9Ty`Vk60*nv)3X*K1Z$(E|r3>)(sgk9VCk7xxTtkb@D zVg5|n`F~-hx#FDoaxU@q-ITQ?p*t^2m*vGb*(vX~*qe^8zg;71?`;PTCaQj>?T6Nh zmR8_{2^9Q^C-ed?Krq62Y&3+fP=mLNDilGe8Wv{FPt3e0cWeDGD#^Y?J#NZ*uDm9j z_C(@T^7q<2b(WQq z{k<(8QAH{&{`C#WqAlvmPdi`hfEV3O=byIp7wx-VnEsks0t{FG2$ogNoBr{54rLb8 zmip#v40$aLo5wnGH?%bK)R#(|so@JZB~my;M6kiMH=@za+A^<&2d3)Z*#o3^b_R!; 
z#$Vc>5O?;1ST*kI-aY7&ppEj|YQ+3ecdV(ffPfJ__ywbi4-lUQK}n?wD;e99N&J%l zgh%@pD9v2dNUTyX_g~sqaV45(02a|8-gSpI@8b}@d)MQeiLh&n*h`9?4}@39KqFxfaZVaH>+5jKq1g}w zJFE!kCC_`qUMQ6#nhX!V&&c9?=zNAy#U5X{Aw1>Bs@`EWs6FZOwq_Cd`hfi~V~D5j zKz`E}7@P0@Z9otn0pDF)&}GFBI*v&li9G;x3{D;c@L634^wsiTy@b0T!U=oV7`Clz zF!I5d$$ngEWj-&ASx>yAL%2rra3NJrW$jfa65olZIar1G`4DJMj;mGy%KHs39A~;K ziE#&aQE#)j0}8PL7O)PRqzynriU@!Ok{FbL-V#AQP|yHfyCT0p;fCZ&^^J|VPC4Toh;5Z@n#-j9DcFKh}S+(|n6WTZs=I)+q zi3P?za4%Q1pGPl5cR7Ej5B`E z{W5k20Cs9)saBv?zw7=;CtShL(hc3bR4i@Cx~Ksk zm0&?6ZW8!o#lLLl0}$<+WSDhN00)1T2#2_)a_c>fJOsS7SYT^FD?ge=FO>G)4f4R!Qw>Ux)1k zl2-s6(iVmcley#k{z`h5rRaeo(2vXKJcq=bP~~^_rC?qOTDxx@GO>BP{GJu?g~UT* zv>vU)UD}*;Dv%X>-6=P=G(tx6p+&p#tc!!uBeNPN)3pyLB5Z=qxi9@(BoU zb$~Yes7lJqwWM@N+%mjr1(Nosn`zrG{G4>&H?&J;KR*t`=6!w^V z5U8DTUqW2sqs`Vo?)I3D9Ym|cVlE-<55cpRM65@Y0e6V!)6Gc?_+~Ow7`n;{a$JGV zSK*;1c`AdIwE8-U;l$AdkCere3K*2N>UZPm2H=IE{}UC_%WqtkHf#PvB>QX?j0T{w zf{^Xxu@Kd7Bv@A?zU~0p!I}1QjFZK;(em<~INID~&S+SeFJM)ipFMO(=Km6NAV;2J zHm-61pbkDC9Bcny7rF>^t>=&4Ls#b1WbKrVJF~`q?ed`cTApax=i1MFtkt4`?v#`W z0?nlbH-?xR8tZD^OvJv?t1}mztfK?T6a2a=u_G!TioPb4UQmKZF5tecBe)ZS-F{m3 z_x&sd@5l^OM(^ySH5J+cwxra!EXQPL{QLK0a9=-mn*>pPCn>0ZGkHaI$-!S6Cq@Sw z7+!#sfSBMXt@x86B!)dOke%|vuzD6nbU{A(4)&s64VJxmO@#AZWu9GWU+}FPZ~+2J z&~t0D_R8;f^GF!1q3wUK<&WM$WBZ}-q|X1cG+1`|uPo$bQXv9%dogU49Pi`N)%3!G zonof<1dnxDKJbz4Kf$DblTD_8>3CC@&qS~&^**ldE6che$;cVe6et29X;_Dy?6<;< z6yL2#dDY7Og2VmeG-$sRhuXcjGsDNQ{RFg_D)LWfHZGP)oxb8kitPOsQe+8u0B_|P zC(|?U`sPf$R3$!u&l`cSk%(lR(dAaPa-5}Apd*cz{U{Z0WpLMEB6Ape#9~#>1$pnm z1W~kudEG{SZ-}E+xHx<}K1Ys#j^)}|PgvXA54_?o8~>J0&>mAo*NdoZJMzE1_EK4jCgLahemePY$H?t{2Y?1{>TfO7T?=DhEoP2G`Y@%KVQT32_eo5t{c$n9cHnH6m#)}g@3m_th7C_I-7x$ zRcOd}j>f()82dB-c?^qndj0eo2?n0vx91AmFM7aW}n^Vd!_4pl{x|VTmt-@nK1eHg@zkRdXZryZX33z7P`=MHRXWuVJ{Qh{|G=pSI$M_WzU9UAAQy$CmQKjxm9a^dxf}-(rfn1O9RTc8Fl^ znTSES&BnqU?U^5eB=#tsz;dis3GlvE;P1MD6!cS|guc`0fH8}lS1oc&=O#;O*5XM* zz=JewsTytB4;Uu?XhECY76ely>lwl`fSVtKVwV)a&XMUC z!gvBI*~Z8+2U(o~a{I;;pSwA>?Y;i1DnHIj_E+7otr z`?bbS&~<7e_fM1^BDfl1&hD@)8QZjaP*B(FUs1G@BOSmYZ*qhHoTT(ARLq&m{c$Au z5~U7+caUTrNRmJ=jSo=HsYnYTVetj$$c>6JwAo>>rfFtm-wHu;GJh8-|6Z#K zx_fBItLDaSBPtAGLdAt6(EGD?qul+8^!ri|1&9$VBi6?yE+#__o5%7EDXapH;(dHf z?=*PPgLhqdj2B#=f|sfe_{ic%NJkSt!a4$7eO`1aEi=rEKmRu&wh8QCV#zgTe=r{q z|6IB9oW^sLj1heOg1#5>B!ru`3;SoTd=cybViA_9bTSi9@<@aprT|%pw9hBk_ebN^w?Eq_A6)&wh2HQ9{;Lq07XCsk(p51!rkCy29t820xr76Jb(o~;v_6@~u)DpPo z_L*2ndA4AzWUBoX9OQjc9e~p%ffcJc0qH;);t0Sh<6WTd89_y%09KpJEN)!_Tyk6i zYog%v>GWM|H79gSwsGpM)|V{p(SW&q19u%2?6!v+0X^jA7`a*IfpZ?Ffh*K)z_rL5+A%>vhTSYPI=-at~Gp$?v(ff60+Q-%r{ z>>YQ)qQ5KL9hJOksP2mTlly&% zzF4U0Hw9!rZKg(6{6Z!Up4Z6WBHS~FIR|IF23#r$oiw?9dRDp;U>#vD7CkGGxEZE% zD1mAGFcUy7=9t6=e~EHd{`M28v2+1E%KCrTb((qYZ40}QiNc#;0O@#$_bX@ ztX%nj5|@;#%=2>H?10&=8ydcbEWdLus8&CZ!L~E#AhZ5qyASRneaqRPG}VyX7&5f) ziPqg1t!KMRTk6)#YpI7L!eyrJQNVSqdz4ec5E>5S?(Ph`|CAgE<`KWn7bEC}HA&kR zzZHBVbuBRFx66u35a}vFQ&sMqV47Zdx9=pWU5i!r|2M7hApty2DXpX}(->jNSyo=k?9Pr;2%Sy@c2kJifE|Xu(C0K(J6_IwJZ}2saqju`h zpla5=j6pMYd$}!Vg<++J4p}I_xl6GgA{N? 
z)^06#)B;Psj^Mw2@ju)C2>u?;ODy5d1(cp7Yhh>!_|68g3>E?GQJvGtJ3F)C-#3!U zNAs;z8FH-^@tju<_~GY7>{v)E71un5J-&m^CqSHVKbV`PEcB|KmaLnPj8G%Jy+iHx{oY>-_= z)QNXw`4h6mG5c(6MqVS;_z6R?;OZ-vy^7HUK*R0VFE4=aagE8fqD%;LpExIu*wOF* z6mW_1Nz08SNw}F+e<4TPshAj@S+B8pS0;I9YJ=c27`TU49|FsshW(@h5z(dyj&PlN zHJ%MOCZPIenG~sK} z5Qh-X(FZLG4nH1SxjvmFLR0$a3A_Q}-a8h0!c_i^#G1m3EoS59=helBAgU4WZue@1 z@jOkreT9Z}^}BW2+y$UI?N4gxM7R9M>6?(;mA3*)qsnL>w(QPwn;2_I5pf>x(&k=w z27c0L;+&^NDwxBcRUTvitTgcGW!`O;XV1LdY%3?Cf1*+YZ7m4;FE-|?2YB$!r8!AP zG|_t^oJ;D1f!<@+O-_%ioGiH2*SpWzN`Bgp?_7x=PQ@A~w=JIeSVZm62dytTvdzGw zmmatGf}=IKI)X7f=q82FvwHw`Y(PnBJbrj2LVEpm9YH2DB|qSWf|8U zuFaV}$qB#AJvq`>og12VY{@)71p|A|*7VvI}wCq!XNR!$8{n~Wku4O|%-knaKW%{n3 zvq_xbX#j`FG|pP`EMO5iNPmd-Jfl@F*om^jhhmyd<-U~1iZ7x-;>+GLJ78s&WRL&U z02V_z?~D?L4l9c}Db3wzLxSkelMNQ$tMM*;-4-@I&(^ zicUikCYHFE5t$CFAhmO z?rtVKw^DAr=0E&q{Q&KmzXN~ZFrXKc^cbROf8$)ED)Rr}MF-JxL`Bb#ndlW0SXe{?-Y*?#86B7;_p0DOl4EratPfmvmYAki4G#Xs~e&@lGvLkxb>pa602@s%x^*9!YXsux6rG?Y^|7o$UZ>#!$K zDj+M`D(`!QC~@9tkKTN+a5AQ#`0sx5O6Sb1K8UV8bw&~-NPe+-dU6-C(h+RG1U+R{ z<+~S|{2xhY8VKe0zVY+S!q~^YGZM;PDwSo%lJ-!RLXD`j2`PlkgNP_fq9l_N6-mjG zWu_u3g(PGdi70!v!OT4W^Zotbc;OAt%z5r}&V60i=l)k>bKTDAPfi`f?-6+8SONP2 z>)#MXkYIlb?JDkvK08eo8_Swi-o)D(TMO9fFua-?*wQc>i4|@KT>l+qxMSWVFV%}i zWz)kJ8Z(?;gjT2}m;%*sg3mRgWr@`%hVi&#A6!Y5)M=CDJAyN!AZ=v$)PFM0J|*OC z%#@@40>jAsFJP~$6rA`?P5ep?$CC2k{gtktf42yIiL^cweV&@c?-mT^bVko$vP;-N z#oCpUKaq_TDQH?aKy(@?eusxVV+$im09L3q92bMl0PA=+ARcWYL>S}Z*hb&6C;zCg za4`@w;D`6_70iy8G~A0IGcpAqJ9=T$v8z@QH&RAD^2;A#%X4`?Uyc)3*2%qK==1%2 z-sRp^mXA2sZc@hx)*n7itX>0JhS`4WE$+QE`ksH5!ZSI*lZT%6hRlVnW22(z2A~f* z=V+>Y5LIs$L0y`XX6S`^iWW-v{kLO}S7FIR_c=MQ+p z1=CZ)Wz>8K@87<0Ie%LP^opeA93Yz3vkXYg9Y7h6AhNd~HtK4xNK3f#xl5#kN|%nv zT+A7^K+XsQOD#%A0sV*Hn|%oSwM9=RLb4PV^}vrE*!(7P;C{7PBJ8rvR%15TJRm}7 zO4X4VdD)E}>LU@)HgY9SiOKNi#!@GjI75$GslFIB07-uJS%3%VJSJP>L3$2r=mcyl z^~^?r1HU!T+q&W9fD#}3f>1tKgFE5{zs+(@Z;^dVOzmH<3i;izX_>Ce~U3l+4!NM!W zk8YXc5?X%~921JIgV`MkCJACkgiOZGI`0ifS%yk(Y=uSV&xQ=b?!6f7i9R7^y29qc zjaveWZqkWYV&0`X!08V|hV1m^q5Wo!pI@=>U+;F>EV8V#7ZO-caH+~Iz9jKNocCUs zSc4fQ`o7WmuI`yGjIBcfw=r^(RT4|`5Wjm8u!F4Yx-k;r8cFEpGTj~LrHWDh^+0Dl zujHn@!DH)qRHqd8_fIV#kv5f#ttD(|#>(+1gn;Tjju2(TZ=pEspS2Q!)=yt?^6!lt z6Z7iD$uuGoaqsa6{aoNEg}v-KIQCkot>r{-p5p{ehl?`O zeDZGinA|GDStgD#0DP|X>HNo{fw<@e9D93-GqO=%;${EjQ*|ILii<=*5L&$8XQUs+ z$dYG$^YIZXLhrPS2HMdY<+BQe&aFkBk02;>jG@6=7rd(I6#{ZQBC5>1PgZBTl?yI9 zc=b;FrDyC@a*e)`=O1w|iA#TS>Q+Gb(MiR*^(Un+cxv0%Mk#g>++PXP7Qw@1>S#BX z->}-_3iT*G)+x+3Qt}HtdpmC^6nTfo`^x;7jUGwRg5n> zbsRt60g|bWE4rx^x1mL}69?8&m|B8veYb3VS_C-mF35r4>X&B#vtsTV`e)jf!owGB#~+UNWB~PCdny3WEy<{TiLmlS>92$JC?CsiV zFA>8?{;mu^RsxSFMR?XEUl%ylhxM(TTiyhN zlwuJ02XEIK$?Q|nTd&qKcFD=S^AXR(bZ!p9l9x#qYxjY?omvOBSMK;aw*H|9S%7&O zNBgF$Qxnpc$HHRB!)nEK$^+|WRC2BQ0UPO?I4oe~6wkG1EM0Pu_jWoudQ~5M=B9DY zK#LK>RQ}$`4$vR-6eMWs4!VG7w#VPAv-ti~>RNl`0E8!bglv3{d;ZOVS3@>+KLY^W z@3D^?aqdMm+3D$^^_S$~(F8~Gaxv-WRSg30u=Y_5>YQ-_{imc77#&5il)yh_qE2y% z#aaRf<`~`Rx2ivAP*V~I`;JByosGtf(*-*p99Cc~y;OHyqUSUT_ehQi!?l8#-{c01 zF-PW+RnzX$7f$5-u{vwKmXmKn;pxK9CN&DiT-WlZ6=>(B$#}HuSM$0BL1>sIc_{K- z&(@r(;y%5f9b_7gx;+ZrxQG;lw=};k{sq$G{2RoT0uiUA8w`l0dt^#uK+J zXLzYjOVD>{Q&W1pQDn~t0?iXN)<5KGf6_BSh0p^g|Mk|@6R}=AZEhY=ptyKXLlIN7 z+PlPR8&QH1`fnBlPIv}bpdo;;1Z2FHa+udyN)CdS1$x%Um)w0#iu>**MXQi$8GOT` z+_2B;(b&mt#z#NJ)vxtgqZr-a5a|h$xE9@g|EQO7=wAD=TPr%M@}6I`3Ll%X``r#n zQ!QZX3w-0Ms4Y+l!=5RENQ!quG#cL@uoI8F``ZVYzSHH0Kt#AY_}jdjXEBes z-0YIl5x9~23klz4k)aiS+~`A~eZeGmGAB`dDza9A@egFab&R1ltGZ~Dm!wNFRQGoA z$oj=HFvietpsAd(9uFRdaUhv2y_n$-t zm%GFp0dp6ly0_VjpA|GI-~`2e5!m@WCFvWAZ&z~avbsR}^3l#xnnnpcq-<-@Zy^-j z-h{>gb5QtBIM5U9XX>5YatsT$8N!5d=!eUA;2H8VKLhEXDHj1pGrDtfjR=7{6zwPS 
zmC{aU#Ae3`%rVUw!ORC_jr0X=x3zWy9bFH(rdcvo`%WCns7*e7QC6f~_t3tM^Mel? zPfdp+v}Xt|>u+<}bg0`d;Is@z=FY#e)ZYMbs(BR!89s&r2|%KE`|*8`VZZPuoD`{q zq<^sa2}L=|e_XxQy2x?ERDDqT-*5?X#=jbQRNGgPCJR!ZYBHK11G0F_AV`kw=H8Lh zgi*cx7}S3rA1O>_pGp>)|G)=Apk<3qFOJ3(W~zdb3XJm-c$)`xwS5AC{LeUAJrs2j ze~9Y$K>mjIMd*PF!~-N}e>Bf8`JSryQK6RX$Qn)%jEM^ggm3K{>Wa8?`4CxwFOnd<8I z8KJnA5sh)s0yEv6--MV!Ec|-pdD&Gq=v)2;(*|E9IB8R_u6lqpWD-t$5cxf}m*%(L-`cgfwe8G={bryEyYJ~eON zAKmP@FZPdAS}cxCft@WgN`Dg(fg98!a$K6cTS%?7o(8vkwVME0H92{|iE)YfCt5wh zB7-=JR&6jn_Vt5k|8F8~twMFm z&+Qhf)x;^7zb53EOid9!PxM`!cMLj_!=Im}!1Ts<;Iw)%iKU23=40jv(C0fcC#mU5 zWSs;?jp^Il)I`iAZEwrt)2zex>M8!J5R{*T|8s$;Q%+vY`7VvS0>>r zW>|Agp^heY7E z`*n=sbpxjfJ+@=5zqm8_^@4K$x@!%c<;4n^6K~_10U8YF+OT)crtjZ5w}}kLn}eir zH|YKWR?(ljyr)?vcxDcFv_NhG4&yRD6A>;AYGLbk>Vx3v;GoG7*CS!oj@P(DgA)w> z=}>Xx$z<^@{*eb-B^x)aii)1XY}Q;htd{Y6LpQ_wu!N{=6)$WM>HJP+uaIi}B2{&X z?uWgwCI9N!*)htT3BNrg%2sJ&c<@47X3UX(rp$FwBUFu@gV+lGASLc{!SAbI@ zGoON^0rK8V1ZS5k_SVBOFtN@}n10-EAxo=x#JXHOWBVA21_LfOmHJ{Ok%~IryyLUZSAcz zem88$)@PHGVECY{6Kq_8pDZ4F=acQl4_qjAci`7+PAkX)iD|$8y24xQw4gW8sUhw$#m|&PPkKkHF(ba)cwCDGf=ZRqESKKv((q{_h_Y(V~;C7V9mw29kxsJ;B z`p7e!ZJb}^LpR-k1^{xhVG<;GuS=x<<{S7rADT^bDBLqt-{o}+0S*=k7do>SwMXe& zq!@5&Vacr&7;Ml(P%pu$S--edb}{s+1WgZQCS4p)+s%1$C8hlHS9t6BItL}fE@;8} zmDuRF>^a3A?&!Ln!TOTyz3xbYR{vHBA^S+PITq>B}K!c1S7d^x%2C^o)?g<2y{#-#-G zFHFPq3WPe_>_FgRA)s9rL1t^agxJfjOX^Yt)CDgSrbmN`E~lkt-TJW2x-X^=U(ekc zr7PwW@yu}k_YQK`s*JhNnxl-5aF^R3*I4{`JKT^wrrzbq1}M9T^oIaTd=?u0>a1-c zWrj0*OOVc;{PPvQ&V(13h!BJWcutXq%e$uhTyTU}IO|=1+|WyCD2fKhqc*}gC9va| z0W=DT@De>prNtpQwcrY*KeZhDPzAU$TAAR(x@|$El7(2*w@q%N@-Y({@%ODCBNRLK z0KBt2XpFZO3QC8eLfRL}5C=f2{$ zEk6xwLI4|B)ux{c0~=f;m_{J=aH`#z<+R%jSeLWMa&};{&Ou&R4{5qmsAVGS7s=?Y zV`^wDWDb2Xc=tloG>g3aYrC^JOL}NqPNuTDjM!~cJ2tGvj9m-3Kvm|pbf~I~%YYwy zz|9%=eR(`$?gKpwJVLv_gG>W01f(1Xx`W|a2=xEeh+Fj7t<;iUtJTa}-fJb8-cx%y z1Qz8B$FNT)drb14@wRX@S>0b&qMTLCO0Cp7&5Mk`3u)Z80XSvz%2ki+Nc8)EWt7<$ z%^WgA{pl)xF^%WGGiYMG&G$Sm-Zs;0`PByS+*woxD`5P|@$ou_kr_dj%CEXGZ95vN z2p!E)r?ON3;mHK1jFa-98@p;ub=vp=%!2;j__94 zJ$$&}u_#RzH{4!Pk@?|DFd;pIgwt=ehH2+xr2S6hmK{7k47Y-ogvvsTQB zsYT+Nt{11m<#0_CIFBhRlCjX+9Rqi@Xb37A4}4dk$FRp3V`V)i`apx8`iHUUV_Yqb zC#|J^bqAoJH1=K6#=+Y#(N*%+M;XVk-1HuHm2af1B_uc4isDvC5)4CD39@l8K-;U5 z>G)caF4u@;ogxCd^!;u@B{10UrMow7|&TCcNiU1Y_Kkgr!|^i1(?ox-9P98ZczZC^Ta<3>C-x z##%>NF;k2KM!H_eH0Dvn91j_J=8bI>28*cv|Lx$CoX}UE1rFU zL(|m?g7n8}P$RtZ(^>@a)rQaA0~H_7XLlyv7$*xMAQ8)+FH^!z5I_a*_|Aa&S*Y&! z1yxedZRK=)Iujxr_!|yrNUemENzmxHL*ocL2f|y+?_!~KRC(85Q69X8O}n7(&UAfq zT&M8Z5SQcdy=F>yT7Hrm2 z<<+O@mqH)^+1w9@(_BtktWX6Hx%6NBjQIn{n;)pB{z*z7YwTDb<@4GGJ;P5M;K11> z%MYyOZP*ZkcuvK4ZK91cAh^ileV)bcKn*BMJE>%1_MAgStqcPqfWf2GCvN&|+cJ)TsAX6tP$-O)-(WFXl`KTDE z2u#K7`oD5Bd$JEE9_(AdOcE}RNJ1gt8mDt6b3pTN+&zXc$M3c4B+Cdm5ospEwl1+R zpcQ!J6`Md_HI&Bn^EvySQBTHPoA~B*(QgSS>TLIXSqxZL2d8sK&%?O07xYT(54ptA z1gkcD+Af5enGljClaww03|o~WeM(@~$UPxlP24DQz3%7MYY9cX=bcaMwsBWKla^ZX zgv@g^p4J-q&oa1qo{|XNjf|j!1b)Ve%U%HR~>>7+& zJtFN`Ol~aLLM=TX@X4Lm3!Fab?B}|>U<{+-QZT0U+^Xh$NN_A>qU;LCOQji2MV7cH3 zoIW<@(MHBi1{4FR)VcwdyV7;Q^ube}+xeQLcYeCeV+c)W971=LJbmgQ0iNGRtx`ll zc@5+a#zwJ*2K2J)g*WkZ#E)F0F{gv{09zhUbJ%2|@6LNWYi{m*WSYt@#H89p`UcHY z40CRz#r;#NvwC}qQ7-Y5{`rGx2KLP#a`#VT~T{N1CM_!8J&fvf~ zvs=deNjxD$+8Rvtn|s*sPF)l3KL%)rhtG~f_1BS_{tqHrE59Z6Z^9zqN%hCXo@={? 
zx&f!WF>03F(5|oUyz-Mo=VtS|90V;}mVXdwRV;-1U;OF#T>^}Tw+DoD_Oboo|H9U@ zS}MOE38KJrIRJi(ddHw?8_1>{A}IXH=@y`j#lz`m{;2nK zenGb%(<%i3*kPJg;+PY79~FVI?-iF<)~OT?e@@xRD0^JTP;ZakO~bj1oaGIdE%`YH+>L$P(((RZ4x&L6hrtv8 zOnrsPfyvfgt=)I<2$4o>$0JaHf6qD%38+iqQScKKXd}Rmme;9-7z$6yhq~jvAe~&O zba)NuFEa<$S5O-^i|dAXD1*ZS-!$g*ekM1i9$(K;xTgbQw~y?jtPJ#Skbv6`6eMr6 zNC)yR>dXz%dZ*`_?=@J#Aeh)|-LJ0@Q|j>^q(vO;^atpR{G%&gQ7?(o_BA(59#hB1maI{pjM=c^d38cK*$=N|6NXYxV9xChXW_ zv6Uo(w7lFi=B*;HcH$??QmnkvU0JOPDY}Wj*k7H#%hdxgAFm zoy_zOR)wNa;=CmV^n8%`&#S-k@5%SnK#|l#DlUziArlD8RKN<0x$q8uoBwx@ytjR! zZ~{D$^slLs?1A6c*4wovUh~k;8dP@zuU0|v2EGP%aGKYkG>>@&>a`e@jNI96N*a8a`zvPiWDlW%_TpB3rU<0nhR`4y$ z2WZrPEgBgG%t}=evyz3x+-r$P6R>mK^suZj?G5j3eay^I`ict}%FzHa7lnZ+#Xo@> z7ZsW0iP%XkeHt{x{gW5$%_^ZOg3jxd2n`u9@mGkwgim4=bpK;9l6U-9x*oiPtGYc# z1{1O4?Hk+au|AHIIaxfKN6iGxPU!3=-Zx*yp0R)RJz*c)bf~;}TZG1tn%~k_2@q%p z{Cj_WjxZ%?w0Oe&wh*0s1V?L91l68c!PP0d3w1qE^9@cFL6^=Pt$MfZyI1-Sy&Y1*m@YczTX%Ts<3=SwMKA$d}0`i zL3c3GAjVG^>;xp(hdYO6<6aE6!gZuo?TW_spzH9PrH=+c^50dFXMe}h#wNv`j~_L0 z0>({_&PNClc&gJDG;fJqsw64%T8+Za$5Fz#4X$Is;?ojwYr7YOk(2oT8F*?4RVF7= zZv*}@L3fexnLgqF6(FrT>bI4G&1ZxVsrhPk6UApdI1Xb0H+Zq0N>w>J-K_i882fXsU$J3gaGAY{3g$Te$AGQsS*0SsjUN4$!U{(*%cYd39Tckt5!bn2|ji=j%7_%RZz=hlhLHKTdky6foKmmn!_kWu zY2LM&2uR)zD5;GfYXq!sD&)X~d78W{z#(_YotI3!oAF4*COmBBxA@MkFE?0I;x zQKk0;#=g!zL*{X`yw6eqc|I?-fS2vG_klhLZ3D1;O|QxWMKQ-c^B(E!2=lLjX|~e2`@#Y+>BU zhtGUQ^0?9OBdXr&F}qpfl7MMN1J6mc&VKq6n|Zv`7*l2H{w*@=bADZMlJfMK%^%37 zHnO;BYiD7ov{Sr2BM|t+N?HhT)j7VFX2b5Nibqf8f$5B3>=E+(=V^m1P6D%bxiu?)@J9UwSJYZ?_OTYmBU+kRByuz zs>SkOe^OU`7j<4}vK}8d-?8wc-jU(Jd@g>U{xbsUk;NH=_3%{AYaq!gw;4?a0=fSl zxHI_H_;NB?m!LMJ^*2w5u<;Rl0~wBre$8`a?aJyxB+vfKaz)E8D0!cKckNApa7ohq z+dVqQ#=qANWaQx*9L1FHkuz}w!@9nN&o?gt5s9_GiigGbQLK(B5jzl^gHHeVakPCe zj~E`4qwB0u_;!EY0Y*JQSkkn~1{H$-7Al_ECZtpjkE|0AaS#AjI(M}0;B~7KC*^Qw zRtQ|8du(sPv^iZxoxHC#Y7G%oD5YGXrgJf;oi9vJP3KEnPjIpTI82{8&1qK+xy%{ggdIZEhl}{b_Bbg$!6d@rS7%tl8hh=#wD+RhE zL%}x&JwRh`rg-aFQSQHiYvGsw%k}8Tm>2-w zOkLf1nvAU*I4RVg^uebL0dhk=W`KK;@NH=W&Amm(;ECh%SL>bQmG7EZUzV{NTTKGN zMk6}JI(qND4RgJsK8rs%yw3bHm*@e1J)5`Gg@^Aq)D?2tJ|_EeI+c5Lt+7DlC#D`0 zEvntylV6|nB23|oK9-SpysbOk_FH<_sbOkjc_E+__)m~3uLuTI77<>3X1f9^Au@}JEDVn;FLs8yNgiZOox`&{Z?}&k*8aZir-|}$Ev-S zc&@!WLXCi*r2WP2sLEqv_rMixCCPkm@xuD0x)n{)2LEk{lmOk8tsG~gXsccoQ;N6x zO;-Hcp3Sbrvv%$YVv&con(X0vyt?W$C{|qTzjyR*w%fpTHutuk@-J7VZmILM%3O=@vw9@y@N7+pm3m;rmm0Mq`k?}`p z-x0w~m_+W=hR1`fR5dbVYZ(K6I>G~mDj6N~n+G(H{-nFb7$cNFB+F%-OJdFw#PbP} zhGTg#TaP(nInawp2#24}%6hseqe(|#vDSFp?EAgT2>N#$|De{&FP=2C5GQV44JwgJ zd05o>5OleYF!pcc(62QyF%A)w!rx0r#}u}zP5T8iV`5}8@sDv+lgc8<@Da&NiGFGn z)1>e0e@Z_jU+X~l;OU&ciSQq|bFgAr7b0ke$n@~A5JruROe`ptUvZ*(cd_5)lYdK^ z6HXnlNl4nh%Y5TNvCj>+#BhYt@<$T@c};JUaq*$Y(?;DI&+}z}7He5sSn=JI`RWMT zQ_orXW#wQ->6_xKBP^F8!PBx&7n4&?WMDpSqSG=v1aRt+^LEK3+vG!<@ta0F1{iBJ ze(4Wg5mgA$>D*hIW+;IE3Z%p@g|=zp$oy=;Gj1k0x<%_tvAQ8oU*$Hnj@7eL;8QO1 zs+%@`Av1W5vf!MvhTP;oevCfiCrOyaMaMms7V!vB_B3n&5xBhnp3KjexJ3v$3_&@$foKH?OO#psme$CJ8g!%6?JI= zp58UI0qWZ<;)hpNS=vsip2YQXOskf#oE4`8T?v^V>LEwb4JjZxIkf>BV+;g)C#`>z zeVYE#6^V%a$TQLUl96D*iMnXs6=?ycE*uhqJh#lt@42-U>#qx&ZaeZl4~_-5KYq$l z#=B71p_gbQ99y1W%cRKz4*lAL-5amVF}~#O=l+qr;VaWOMv6#R0ynvz^l9Q|*!CmX z8}Qs?2V|OeKoig1Aoj*bh>rs_FRsP>8Pkdp>6z=1?itfz{yFa7+|4Wc6yay{MV&N~ zK}I*mOliK*a*Fd185@H$Kmx^>5`;{^wgxwcLO+%`6nteh{T4n*4g}JIpcVR2L* zME|Q(3P`voQ^dln^GW_RAie(c=-)CnyrdXoZAZW|o@9EYaQNJ|`}7A9#9bs)nzxps zzYus$^;*E%S9#C8=GZQzA-(>-ZLZN7^UD0)VvK{YuBd27L^W%(^l2@@Me=}9D)9pW zViiBWbo^PXspk|Fd{z7Fq-d0|`C4wThN?_(BKeT)l9P^jqS@>+!~Fm@xeC*4895YU zFLZTsXeH_NiobG=ov)?>4P5rHug-DUC7!sjg+{Znp;@ooBZjB^J{)xEg~jKg 
zeLAQDLH&d#TCy54D7}FbxgU?)DFm7nZ&cH(M1YgDX&guVpQxJaTS(h)o=Gru$zL7t$FS+?Cj(dUlw`|LlznW!3{FLP)!MS>3sWLaNH2r$B zoF}}JhTp9_^for)BvaL9@>o7=0p=&4agOy5G$iEQo1_y-Jmd{_Sq96|lbs^Osox*2 zpI!)H; zBh_*6nDt1y#Sm+=Mk~*%un2o+L$#`92r7Jr{-5IMnr<1<#yQ7R(T@kFW_ICmUvyU} zO`u0)}jHP&GMy;%MsFB`uFQE+ZPAO-_8EnUGqm!4{DNk!pQ|nG8Dn#{8nt$RO=)>y8denfq zh5xabbAbHN}(8Pd3)j5BD%Oel%)p!3Q;y-b`ZB# z$UM(*KV~V4$0=kPi@Y`Qv;Nxn@6{R;gKYlnrX>|`<3GQq!`(z9)N#CD=c-#}Q0KrC zyUGZ<06+6!oeo-Tw{p+zi`E1BbCb$swqiskh1O-eXNdrBZ;O^!1wglDPp3o6aMJ@7 z+S`Emc<_3A;h%Zd4%lHg8txl=p7$pk zpRgZ=%xi!7DaP$RE;7c{fs^aWq^6Zi^}0-N1MOn@?SlLhM zetlph@kZ>^qbKB4mbbErg3W)ycH|LzM@gOtgpqo4#-D}s>OQ|0-KWUEgDW3pM@8yU zMiv$}-<~sH8k=Ykd!zh%k>4^?^+%;|{+ht;E%o;9D;I9^+@1dA#E&RmpKE?F=qPqE zDrO41!j+ieTcF(VFLd6CV{|e8i`SLSa*VpIb$+D_&$jowKh^=|P&f|P`T3Hk_tWBc z#|~a-&21{2D!n(6DwCDwk$Z-LAFEuL`Q-CY%!VzD#~H^^m}{FJ(PB{TSNfe^ies5L z2|#hTE(dGz9ZC(0;?N@W@be_{R)Cv!h5d(Lcbe;aK&rbxuFm)<=VSlYB=p);z#XIL znGXd8I_(ACg^q=XcX*ra9w)38RH^Ol4=wGF@X^z&XS_Qu<y|(-_q8NVV2<$9;r7O`J+#dM$jiy{8F1J}xg9Db6-Hlun+!F6|VuhioceUqC^< zW%uE`PX?u~9veMblP`kv+~jLZyC|7~gKhNTZ!xBb_ic$70ibnU36VhLH?7zfdavs3 zB(ZXNeNoXsl8n}{ zLH_LA6@mG(^J9i&`&*rx-{I17)Ed>yFJzocHD5gO6O-0q+z}iw6OZ~`$ZFz21JT2n zT@1OfF@xOkV2$*~jN@Y8$ePm@Sh_wT>)d$p-{s&*j<%rh9`!bVUbkm}v2N%HY-SGX zEq5Poj?L!W+M5G#XGrL#sf$;WqYsft57g0uRlo@$al@U|7D;rN4fYK`*jR7ufuL|p zO8fAs;HfqLLzq2|eOfdiZGfzqriVU7f(}S9>up>l_RrAJiJ?uPUkP{g^o-G=HN(Lj zXRn5hp1?y2&u?4G>-{8?J84D9PTAGooa!y!;ld=6v433u9y8t-A}f7$vs@@EYk4#I z-?f(*`n7f^H}+mc5kPDA(6YQ0SlCYGSDkvR{g%bfC27{p1sLc|mk-5oB?d4JW9({m z&y4ee5|0O9R>{M+4-?a1T{jo9!W0wMUBYChk9zJFJDu+PCZ3wXZAjUo4#ahKc)$gL{kG+n*Q81`;y5ItT_sySu z?G7-eHWL4)dcb&`&AYYvHNIa) z9Ng@^nRK<51z}3h2=9ldR%!Xsq7LkvzYniiN0}0FWjk6nR~4@gTN;?1uKF<6&9@Br z+A$f@no{20?>q6}fZ!H{Cbi5-L>ZKR(q!crSoSH(=#Vf%-#uH?O{4V21diL4%sI!` z4ohVou=gmF9J^953PszQ(IhQ_Rsu%6gqaO^h+z{Io;<1D(_6)4-(qv8L%s?2bj=(m zJK+hD{0q`~>*LXicyu(h?RqnciqY6LelcXhPAceJ{|MYw$L{T5U&G-cZ~QR%8+2wN zzY2tI0$f^wkl`btu)Q=l{E;opi7>oFMR|ksjS|~{KYI}EnGP7Y4ETvmc@peR9;%31 zW&qb#Gk+r3^Zk69)%b#}il}kBQ832^+sikHBQA3zN+utLLfyJr`++(_d-LIK)}@}} zoXO#_;m?s5+;yAB^<8Ty-s3L34?ABb7UtYq8aYHwq$)3#wodAuzb2BiOYoG#Dv4HTeOustgn>%nPg(S9?-)gR3-sgr$OLN3WxJqHe(F;z^JC;i9;?;Bm@WJdqk2s|vF z#4r&wjvRU zdfVDAY5fq3v^(@T)y;X|@{Gxam-l#mwSC)lIs7B>So3#-kB4Z-7pkY8jm??$T@Nfb zO%=L5$E(rbB%*ADeJM!Zwbzg`nzM&JxHW?JxPu;O!CO6=Klk%yeR0I4+C%Jfl2P>4 zJes%ND?%A0;4tS{^YR@qKr05UZ-eLc$^xcrsZ5w%*6i_`fT&CLQ7LY++9APO31<_^ zh3?E?=YCP#@xL9iPlBT}UJ|sX`zp_`l(BVp#XJ>PJbaV)pw^1vPg$2Q1goB+P=UTv zT;{0heF_{B)5pl~y0==Uh6u`|Y3hNU%1xgKG44rK9z6bAeWzuw6$}ZsUA=a9{^6Tnr+-AcenXSjy*|D^c z^bL{n;H@56vwY=LiWU4b^j&Zc@}iCVnD?Mxm5jS@xZRjUXY;&;4cYq&OHzfijI^Uv zNI|sY#c#aLtUg~DgDWP(6t8vxISLk%zO5e*ZT{8Y_ELP^Yh|_<3XNhdQB|t ze%hMXS-1Xtu-}R+h}pU>;B`$&TAYcx{ASjgH7i=6y7X?=RyPw=bqB+D1? 
zf0O#Dd3h?ME$I4Ahr0de_77?a0uJ+*436KXG9(4!=hJb>9ta9A*-}{jL?$@x_9x7)oHRP$E z*Ht5mZJF}%{j!9c()*@F44FpHca@9(y+V6-L72Vnpv*Tduq}-a9dZlAK~ELLv-WuC zX8rG1_xH_ls+tRH(xhKAPuiMe>P2U24311nf*(eZxUkWN-Qts7JmaN5WD(W(>KQJ1 z^N?WeM}Ayufl|`dgx1E?0$t&}4*}Lj*50Klb(nZC|689~@3Lyt#f#cW0eZai!b&%I z-+5~PTWnTDAT0v)e{+jE#9;Ws=%mQtkx6<(84jrTon+toGV-D2@{YGehqH~UQ)UM> zMadgC{J_bV5>HMEw-`qcmA0xl0ZIojcSHzwv?rBB<|n=i?#e+N`t$XfO5zts!`DV} z!WYLIaOk6YGxJsBSu47=2&KKj&GOO4fSHwE-J6NWNA0osZTZZnRQ|m7$SA>0&LxMM zMy_|+_3+nW1;M#pdzyKd^m{JH6>@_fuk33+S{JhVhvn=1+=(^(l0*~Aa&d$i!$@#* zvC7)76NkcBzY3t#;e-K2Tg&@RyJ%oD7Vu_{^?3qAs;OW=^BjU+dtGAS{TT&UdZ#ai zu~LUmU!tHsZXD5e`-sTX{-E)f; z)_|?e$a}ak^GkxJ;@N@W#Wj5!=c7tieV@kJnthNhAbq<@?f3VHVM8*5ECNzRM;1q4 zFn;V)RtmmK2oVEclEU(H`}{8~mMln~`!`wsdUIDm>4Z>v^0gPfEK!s*wA-dVea}bC zyt1=la@bx&<_mA9byCdx9~OLd5U9#4XwKRDmU>-9I={{L-|Iu|qr+zz&f_B{r9$ZL zoK_a3BEM5!oVmhypICaa{Z+%gdk8w!Q672zxe!n$AWv^g3js-gp(8*kWNG>7H>&-H zH!)w1)!c`!pm*egIu`kt_$B=0KF|JAm0$Y6Prq^Rm*E*7Wtt7y^nP+oFo;e(x69>a z5D_=A0~8~_&qkq|G-bXTWtC!b9If*A29S~6u=$io>1rgD01Q`M_##0kH}P1 z(U~Wzy|_yYZ&jB9zCWrxgRrIEdKd+q9C98hBeqP;1o)_kpAJoa{6x0a*t;yIVaLeK zuokZi;zt~8bK^^DWoH*Lv>&sp4|uhb-uIwGroMLW;CKfW zJ*@5;dmefaVoO&36OIh?v|ms8M4Qr1jyWQ3=+O6kD@9RLVaUZknif-Re3Z1W+e*c;buHzlEAL%|@pN+EB$BWkbsF!B3hqpj$S3TB`4A|x7mT|{# zYF~Sq!&}dK*URv@b-QXISA+VKE@28b;~XC_7PpOxf;nvG*}E0@rmtY+I3j?RG(9*~ zBoR;I)k;Uyl3NOJryq}ouim%+Q3OkRypFNk6_ou^SaXC_Dzv|$?4!au@v%3WQ@$|q zLtDm2xjMW#fYS8WCImgQA>y1-S`UIK_bvmTkL~T3VL}!;wFg9A%eHxm1*0UcP|;$NANn;UNWonGb-;K_bJ9Jj*_nAy(Eg?E@U@xS z=@KE_3>&FA4H#iBn3wQ{VWP_!ZH*$r*#yL{II{}-`IkM+N) zHJYiTOERrzbfeMg?@Z5KQ7mILtyE8=)Sq3iw}UJYA|+SQn0Lm7AoLz}_L(FBk5`C? z3-aTRxCM;6{lN7exnI7TE(|F^PcHUre~red26<&hXc<=2hiQffL{+&~>3$1JKUTQd zyq^53TiJVJK7Tl^nN_5R+qJRPO0YJZx46<_X)4lWi*Vhp6)X3QEZ}1thXhhnt*P7` zm&fvo^UzX6q+R^-NNe7IdHG~fK(MNJh4k&ibCp4rE4Llvd;I@$c60?--VHEK4V4Hu zeFW|y*YHT}wbf2p{IZ|;0`~#6?zrcjkBGKmo+b_45~G|kj_U86Nkn1K`v(K>lB=^% zM?7o#i9_t&qS+g1-Yc|im_HNZ&Tl+ct5*?>kJS$vG_~!>G9&w$goD!Clnx0=F)-iR2B}Gj33h-j{RF&Lfnk--8Cz7Cuzw_- zE4=ls&eY~#<{i9ePg6#9SN7Krl8T~>x^Cc+?qGS8)% ze=Z&B_yLSDk`-(_J;EDNbd9b2RWys`!d1^b)s|KzQL!Kx#&JVeb}qu=itt4H+x~~- zc5c9AJb(lY_LL>v0_~^BG2^J;Lk9YD?U=pGZw2a+;UoM-+~c0~^u7?@{zC2hvD7ye ziJyU>4VG$CEiWJxVmxu0seVsH=s6fvrL;x@QU*BuBk&n+3dMg}{U}AV{k=CtwSD1G zUBP&X#bkGLikTW}<-Ag`Qu)HA?u*OapSKr*Pu>d1n?hG+wK)LqnQ^sFUmicN??bo6 zd1`m1B&x^Do_;t06*;R#jx)ce2NgVft$)|}4aY+sPyWHOK3?)q#ro$Pc9#aRbu9cb zR?K->sxZE*qi_zvj5_CHd%;K%;kh)t{l=zM6!&&a<4>bEk$se~Qv%m@6g5nSvP_}X zHpO<)!WIW`QT{e>JLGZ`jsrWjvuowX$Uf@9&vB{CfGH{oH3vmm9?oJG3UHKYhmV^H z;$P8lM!CrcJM{1z~>#JoXVW6+bz_$nZiCvV!6+C&tSnOpNUiI6TyP?FS zQIz4SFEX=KhdFw+EJny%3ugvbbJq#0i@vo*>8s!VzO;Qj;V<6o14#hN{ys$d?_GY(EIKtyK@$}I&>?<3Hzsmcs;RB3TF0L?^A zPp`OJ1~gj?OkTd>2P-hsnuv=edOkCB#>Z6vzqRU*ld)lwh>4n7Pwx4m)x+~Wc4{M# zT9b+JAkps?%gTLSajYDHMi?M96RrP!)L*PfGcu%Bioexz90K85a%83B6)i2TCuyVg zt$1JHDUfw=iW*jSV|2mQad$T)cUP;XV76lOe;bt>sTpD)c8Vmf_Ai*5_WNHh@d=RJNiNaN_fjA2}jHt2QyR#(uNNoCJ+D?YpI5)qQ4f0B6TR? 
zNjM?rmHr>e@I8bFN0#HtVueCs3u60|4K z!SDiaFeGxq{jV#2vJ`0wH{b{BYFOUSxZbudgF?-eY~&75<>si__EN0$Esw+PD^mGR zLUxPQh?9rs@AzP17e}aq%T5{P?7xWV2tG#YPfj*G>3c@p8cpU;>At+JO1=7%_r&R& zUq&rO;K<9FdNxI-UJM59AnGkFM-8yQ&js)MdeTx3NeF4WGwFxXXrbk`Fylg!zV&?| zcyBOt$D*)&^<7bd?2~+q1^s}2e-aZ&31vnnS0;ep0bDTo0LaGQ$)*>jERI#xF1*rE zU&~mL(Tk+y7y;#?ovk_U5wLzs$r6h`^RwtRG{lb(*)KB7`}Meh>c_{64!J1p9y(C` zG12^1)YbsP+#KFUn4uu-(BC6I1d>}MjOk~P`mk&Tc+}RjEe7gwtIrF+?39;QY~VH7 z%BiPR9H?8mEljWZX!tMYk*=d3uo#BYi`4zH@p~jBId9eTZ|23FjnypbAge^ol2EA1NrWr-juX0EodIYB7>cvz53 z_};j}kcv^5%9_Q~MC$9LX(kc?!G_w}-SG5z`=04!JS4c2P6Mn7P=5o!?YIh4a>S_y z1?$4wfT0{4c9wCXU8RX?-)gk<7n;$|2npV}VHaT9eZ z{t546gtyEDU(=JcrkO%93S;wPRWT27VF`Zis(0^(gwrOTacm_+a5F>F6oYcTqWFq=ut#)+G&KIckoh3oBPcz|m z6?FQ^N@>c7cDURe%X|b(IXbl9$ z{TH-b&h~9m!xK6H>5rJ6_(e}9&Xq3@Xtyuk2RE=~s;~Xt1BD)aI`Fc416U9=&SwqA zONRJBqMoCtQF=+{SFt_PrAhDome~{FQikj~f5GaOczF};o6HlS9#YP|hrg&K2sGqv zp-t1%!=*cOrbrgAz2~CfKFcVCD{uH4lFNJ=LaFe91fas`holZ|E)&BmN@~o*6sjPy z#FAC|?8WA88F!GKnd17#I{>vIh-L3{(Rp1R-{D)uOvixz!gezMkI=F|*It?RkI}jd zE%yEX>Eq66Nc*hL+sbC6`?tGhNBSp6UI{Pqv`#)Zm+L6&Q%jo^jbBfn1{GkS{Fkr! zLWy%P=jsT~z+gbZWxEL$3kB@nJws0Y!j~hhy!mu0%Pfv9Nsyt`Mp*^S(r9#92M5i3w;*i&gzxQZ}^nwK5lJ(${SkM z9C2SS%j(dF3<>a*b4aku`ex)1DBBXip;tilBBpID*``WFnl(Cdr~0Hi05+(2A^ti6 zJFkXDgn+-||DA!EYnj)@QgMGs79JYnp$oP);aYnuMO0M}aK^Li;c$(`q1xCn1?+Jl{kZB4&7eXfl%%?59*;+ zGK~p{H3GoVTQ7%CCQwa&jcrnKe((e8r|FXm#?OjX5v}<7$Y%k$c{wkwLK1*g+wVjr zjkDF^H{S?FLmvZ%zMYKVl~!f|BSy1-kcR@p3EIwgwxmi{&>b~W>t~d57Bh75N1ZLn ziM_Sj^W^NgjSm>UO3nJPcq|=iY6SLykY1K}5yOM4HCCw8(np90gPR~*mA7n>p<}2y zJ{_szCgob1p4a4yL$PTk?-aH!6fciRGWa?24OV8W$0Q0(w>d z{(2mc6yEs1LGN1myv|k0CVkST5_MQApN7p3COd^SL_f850`@pO0Ef#Dm8%>f1`wUj zf}j;|M1TARety~$%VHxgzyjPtfCStL9A>W-Fx^L7#jBJ|Z~;zOxg2i9P-OiPw7h^F zDJot-cnLn-R%XYtB1!}NMRi3$Eb062=p8R|O?V=mp*yqOz?QG0EPY7TcBKg)hp7c+ z0hotC=rO6twLX5Og_7#d*8^CIn_pCM=BB%waeF1o&8hB@^N8a;dDu%uKVuWGjwmJb zBCK2{v|^26Hi+I`{8wrnQuYWclCliosSmJPT^N)7`ayhcN%k;;Dzvh)Xj~-mQK+s+ zZ*P&6v0$X%$@BrwuJ@dJKMX#eS-dnlP@((#{lzcAmDXFqCZk>S+C*Vx;fl7T9{6bl z`hLsZD&Dmb=S@L|$U-991En!jn3vBU|0!auWLtu)$fOD;%4zqRpXQ`2Ke+3j_{uEQ zBk3Q|L4u9AOQv47h=K!jnS<|)r|i|=KPX-P(_*_{5fXJ4ctECl6$>AOa-w^;#amRcd zoah6Mw&-}>+(8O1xuZl70xmH!6(W2i_n!lM?ccG5z-|7*8&CScfznwS> z8b(Ug#utWBLDhg0Q_Q!A`!}kEtnWi>qWfXbD7%+<)zzrAeD|~q*f)!4#AP#(WMApG zHPOekodbj0in|K-fsDz7(JIz)Nh-M^CtDK2<tM z4RDG_AWrpBM~@2XY!NA~sY)*yScD?!hpiLA;ZWDq5BNLoWyPE~&3A!9z@c8DhI&?a zB?(3!GQ@C!o)`~7n_ac}lV+dvZyrCHHtTsB@TxGv4S0-Ne0WFj)ki!274#Q^i+g+@ zX{V-&10A$E|N4hyLf*Gt_~h%tD*8Gr7QXYw_qh^cYt@t<%IQ)(jOGK_;FE?@1Dv*-*5IW9aoD(Y7nRX7sCX`mHb&grPi{L3A^y3OW+Ms%^gqGC+6A*SI|I*5ZE3PkmGZJ3R0I|JV z|GQ#opT4Qiv+hhtXBNbfX}kh_B3643Y$>c?{MK2JYifr7`1m&`?&MMoW{uWMM-sxFW;%e;g zKJC|PLbQ!$=qpQX4hP09|E7CH%~9^ev_Rz%Izco|MTea~k}LAWOGOy7y08ec_r>+F z*WC&gpSl$Ws#X5rt1aM|-bI{XWn9(_sJ?bLmJy7I>&Xj64u&WdVir(DU?mNnm!ssz zNjQ~~$stFZ;7|PD_Vv&9B|k3oMgsLdO#S?3Y0e0?&&nRThB_&F8q`waSwr!h{&Jb- z{37`0%^kG4R$?L59Hu!x}nOE5T(f?!aa5SNBUPkg!*2 z?IE!D`={COdV`Cv+ARuis;%3uj1a-N=s-9dpUI4F$XI5LU;%yv1x|OLDyN4SHDgRj zDM2r_&EbYOWd44a<`sOn4O}AzGrZSynzpHoEB${&q5CdkCHi2@Nl$?Ks&GaBLa2H8 zvJLqc_dC23(o*4ow2>C|!BaxMfMt|KZSQD5 z!Ql(1JwqbC|);Vs$qIn6m^F# zF1q4&$ej#=sAo zBeEw3%$#=`0^D!!n>-5U$EJLkl1;ETe?fR{v_7+{-G;pxrAvTWQ)3HxB7w+mw9osZ zbzza16C**AfH%7;e+Dpf<|P!AX&74+-X_dh6K)UDOagw8F8&OtxK#V6c~#}rCGetZ zq-L`5emFZB%j0Rn=j283wqoY*vD5$H}-&33CX zVevyIz+-buic1MGStK!&jdABY(#@9v52jCL$98PTjh_a{olg(rPJsm!naR~V zmLjB-i7KgLrzPrTy&|X_|Hg_F!Cowf*}1CD5JIu5!7F3wv&x^=0?i>ar=lmn8BN-A z@=kwnV4HTn8Np=_v6I<6i~gwLh*dAMiT=!CzFF@+>xK!t=ivzO&D z8cMj59#y-m=hM>XpPiQ#zEX@^oz1*v`fH~A=IX>|63NPBM$3b(9OG#WV0K8j3QVJ{ 
zeMxpyf{EzzlVC`OJxCc&qHi?Q`JuxJrNJW#p&|*wpDzFs`mwsJfBScql-f8jZXQs+Eq3e1QKfr-YO7om>Z?rP@nCNb1+qEwIwemkDs8V{jD5-!s zoh}Pn|1)1vLQ{pMiG3lpH58(F73@s(CGk30pij_VWK)w7aZ%6tJ2}_ZxOeU+(4JFP zDZNv>?@b!7;6@Pb4V(P7a`g+Qr{8^r2Gp_wCs0g4b&v*X`ZmE9u}AhzKiaD%tO zoV1NsTacB1XdUFM0!*15v%#S5s-`yegQnYb|5a%BU9LO*v&9(%0^eo($;7Q07GZ8o z#X9Sv+)ryS&p`U)ITR2WE8&3E^B+jGAF+RmC)l#?e(|Z?g5YF{LQa2*Fcg8V@d5`g!C)t%%=b zG;A79>e4X~qVjWN-~ETW`t1HsBQ@z;x_Y56%c8y=-9*pEuaZOgx{Nsru!Rq&kxe+D z`@<6z1j}1uO@EKm9&NRk)(*}mTKJ-Mp1KY)c0H+Y`28PU7#@( zcC^J62aFh>Q+VVdENR5S!d=lB-|bAM(hcx4g!V3$EFb-R+LQm0CnaSz%PWGB73fbAmBZf>q2_p~D$b+w{h};U6$_)xmFnrfLGxmEuHn*DnMkL?eMd zy%yvIF((4YpFu!l6D|%tNPr-VCGgw*o*cG6=HG_ggRm$WN-=e0A@1C!d!^u8$M za->#(W4OoR(eV88=cI$1w)vGssNx0fu39m5v#@y*qqqi?luE*+yv-(G$R+zom+CA)1L;D%H3o>$@ zI5Y+@AZ09pbEt>TB&A)e)Oaa>peK~T$0#-SSe;K^RD@zt$N1nstHbbMkJ6@=10vwA z23&%{C8J^aG?PHos`MIiK8ty8#c%5trg0nrDd^HVMMRxP3{L88bHVP7NT2)5Jp#OM zZ2MfdKaGjX7A!XFkoq_2p@rJY>s9(b)b#~7KR>F;9{2uR)~vQLruP+QfO!azauU?< zetqWeF4}BWBf61rPwAWG{VLf}3$p+G?9)co{y?9R3P>K)$h}(y zFJE+CM1THn8a`F-g#@EMA-Jo4uq9W1k6V>yWj3U&<(S(V9t~0;nIM8B7zT}5oSwW~ z5Z>ROpz=ECJ*c5vJr3LBxJ}}?ZaMcU{Qj8h-`BHYM zaBR3~(x2e@-cN+oe$OItT2wLCgE*72RK%4WI0xiCevt71^?4b%QmHg20`tD}4yp}C z6wfl{@u~}d{ry7Yb*6q)ofVnAyJu=7;moD_P za)9eC`?zp_oW}-RqzjYES*Qb5!3E#>B(cMvUU7qPEv=h}`Dw<|52p!M5RT@JYlFII zS`D7RN~;y zhjb@h=~im!Q%M|x2UD8s;FeRBtMGBfNDw2jJMCddt@45GiOnf(Os~TOrVbmUa{I4? z14&J-$3#Fy;o4^;Kwg3rAvI1YNn8PAZEVjgo2eUE^>IKP<0yw}0mb z&BMIusiJ;-d+GSVj6cAGUe&j@H0L3w_r7#<*w^guE}-J)zw1N+uIL&{z=5RUwg!F= z6dNlFPsVb|C2I%af|r`_bU;yIYK>G`%C1MZ+!{0F5aRw8U;B0rv>38-Kd{V|+ul&g zCMnTlqwD$1U1}d9LgOa*j@$j7*wvd}ToQ!VZjKts8%GIJlXP&# zFWmjy<0~Kx&0MDe;&SF3e1#?C(Vdrk6nm~@&Gm0ZA&cbCGF60;9%6Wo`mkACi2CgU{`&ZY4_grc&i|dXW^WzW*1-j}2Mu_jp8QMJ za&f~gtUBnREK$r}6o+(WeR-oZ7aof;&FRpZa%k~I*>e~C2a?5okSOkMT#x#*`)5fu zi}>bc7`m1p6fJ2((>jIztGUR8v_p~lXbiz>GU%H2if8Td+@_bJr1#B?MpDQ@v7X}} zSDV7|T)w6KV$c*}Bqm5Cwirj-?%k!U3m_%$mj!NxI)`x;DSZ$(Q>cA>_3>`?QkoQr z{abLDK84!5mZ)me!Nzi-%<{Pm_0I5+Ejn1U^e&sh+i)e_^uN{kF}=ZN!R?*lA%-3g z-IjM;9_vW1Ipb56eD2wGdbVchW?JrxP<$7o=)e^K0k&{JGE!|pu5UAMX8Y-Bs2W8c zj-9s9U&=ui%WrMRLN#VzEv&S7F#-o|)Ajiy&stMn!bzO~p3#V=Z>Is?O0$kdJ)b?k zr7(?|KHXW}Y66`avA-x>8H$Xk+Rrg_d-Ga)pxWW@c8>ofWadn zh(b_UZa#>9jIQ;^0Kt7gWXr9(t6Qpn2}P;&Gi%!oau6JiH~Oz{6LC!mD&Bx7QmZSn z5NxuuCy0)lQW{*A7^9r`zShvDVQGdi@$$Ru^xMv%SWCBF?VA_ZmcHhyW^`yoMAEta zMDDZx>QP?eL%@INNWo5Czl^%YG0MfcZ`z?C#SuNyV9O{t!vt@%nmLC60aAqnTzR;U z=~bLKpwh+Hgd0J?WgitNlLgXTl`3pE8)XHM z5^S8T`*RmzW`>edxJ>W zUq^cGUZ13QW**0wbRW%;4#EC@mU!iiK-~#OYTO77cpKhkob#R_2IQT81l>m-*j+}~ z(BMAF1gT%mqby6(M+Z7WHQ1SRwBZtfMuG%iaSnma-f4)U+H0zNh~*nt5A4cx9;Ser z>%k>#r;i^v-OEBeH^4xNG%@KP$jXu89-(i=LgBZrqE#||d z_4KF8RdpTionLsc%$l2L>ZX?+0iEM%vWh-Ow+YI|L5V{>6EC%u*v+kRB%kn?G3VWa zQ%2;R5&ny)^sb$0Y>ygw`qAl=XdJrhp(nqDV;2RJ#9u<9%uT|h8Lk*O3Bq|nZ9ZO= zkvF@LO_gcpDHqq4Fjg&S$};ONFG;>-tCLR+Th3-n87ZTfbONyZhZ^)kr``5|) zi2H3I2l~iSV+HwCQ$<7})E=y*O(Gq}kxIK7dAX11&J%>6dxw{I6X>y#Ik?f06cYxBk(h&MG(q)SiPtS%WnEv>y-)=b8&s?cVn?U zSf=-Hk$Ikq2Cr=yw_+Qh#(xahiP97ULWiJFeWkyuy;RC;omV%AbO{XPz=-taYUxtX@wtf2~U)KR@Eg2R%;&PSw`813MqX3aqH)hL?2W4rG5+A z|BNC`k`o`WGHmdgPV)N4_vo|-poRo8Z|~zEu4wg}yWHkY=>#1@@>k+R-o

WQOwr zeybucG!3XCdRd1EJn0|b#2%%0pu#pI`20&SEYG-n2`JB(o%EJ?*x4Z;PhCr2iT*x$ zx4-sn!tk!z{!W4L1eJKc8d_d-mX6WZxG?n0C4H^0jNCZCN zYE?@tzEwPA&R?2#aTsZ+B9?FdO^4sM{fFFYYp>9-RUpMr4-6iNB2R;PBBuo*3ZgG2 zGhrN0%^A4K^#too4U<9{KSe^jN%PTjX_qJ&;f!kg8oNr@Nt>|T5S4ik6<;gXmb}tW zp?(>5;GY!SHv;u7+xuY~K_}*@P3*gA=>!--IZUe@%lYUA>EW#vu4MIXO&Ao@du2aL z5Uy>rcNMh*J1~#*5It{vO{a0Nr##7R9aFky$%q)~%V#=yqwJFxo{$`xavJsBLC2P` z#n51+fYAIQr% z3+l{p@FY8wl21JT8Al#>!>8Nlw zNKC$`qOy+;+$>O=IOs%@(vd($P57!xKjsSIM9`}s{pt{E;4x4y%tW(u&lRUSxuDoI zv)-jg3Dr6^(NszPXM6vWQss(v6gOQY*F~9s&D)-tic34NtrnWmY=H}W#IFZGCyEJ1 zDIfCdG_JCy-(#260ZS1dy5s_$*TRHofqSNTemfFbh+%#*32-?Rf21)#Lmig#K9*m9i(#UlPOV{meOsHNpsIO z;%L7s=IX_9@RDYFIM3u96-OBRB*}FA(FIbY|3vr$Zf(Xu4ZQnvzh)~u=-l7?W7^&F zh1pY@&-_ZrlO&KRDH>TEwu++V|NBAh8z)=BN3+mhj7s5OEZBnDZ&xiSGK$oeVj*^! z9eFz!69vG9uk&E?AX?LV-Ma-!?bNT$C;txva{4WTuJpt+A0a*@EHO}{Gp zD|>R^g(s#xtkUZVTfY)J65j7PcyhMC=wD$%>AQNF&X6D^&+ zyXc&YshEH5Db$@YJJJhB)ox)CcO4yN+pa2u{uhim-}{DzA7-^fPIM1N1QJuflrn7z zX+`mplK1riHqS>;?HgdFx2+~ygsjX8d%ZdeeSz&xC<`nr$F0|zW=&$GgwT^IM|3&e`@VyTUD!j1+6VE?fAJIS-p*Y-{P*s{-&FUk`rZHDNe|E#4vdAo&fgN# zQqy6uiST?E=|Pq4dhY&3_j$*+8=CN&7ytRT^9`IN z2jY_uU0t)h=Ut=19jLrfG3L!qPqAJ{xBx>@-ogLH_{Oa*s^DTHiAX)Zwa^5>)#V@_ z7svbct`KRz=MF3?U)eL#51+TIkNA6WB^0&n;W8Bj8{2qJru==^RrMg6WfitH7uoW0 zM4Cc3Mqo546W=Q@awoMjMXc!NKRfqA04uR=rvi^?Y_$Jt6~RUkd45_qg>C`iMy}D! ze|I>by%PcVCQs1e|NW6*`vCdqk@2eFM*M1X9A{Tt#7PG-O=wJUi`2wD|KBbD|4Z^} aJeJ~~sNZ&L7?B?U_tVuf)cmUM5dD7*zoIh$ literal 0 HcmV?d00001 diff --git a/src/tools/miri/tex/final-presentation/slides.tex b/src/tools/miri/tex/final-presentation/slides.tex new file mode 100644 index 0000000000..c5e1f51ba1 --- /dev/null +++ b/src/tools/miri/tex/final-presentation/slides.tex @@ -0,0 +1,444 @@ +\documentclass{beamer} +\usecolortheme{beaver} +\beamertemplatenavigationsymbolsempty + +% Fonts +\usepackage{fontspec} +\setmainfont{Source Serif Pro}[Ligatures=TeX] +\setsansfont{Source Sans Pro}[Ligatures=TeX] +\setmonofont{Source Code Pro}[ + BoldFont={* Medium}, + BoldItalicFont={* Medium Italic}, +] + +\usepackage[outputdir=out]{minted} +\usepackage{tikz} +\usetikzlibrary{positioning, fit} + +\tikzset{ + invisible/.style={opacity=0,text opacity=0}, + highlight/.style={color=red}, + intro/.code args={<#1>}{% + \only<#1>{\pgfkeysalso{highlight}} + \alt<#1->{}{\pgfkeysalso{invisible}} + }, +} + +\title{Miri} +\subtitle{An interpreter for Rust's mid-level intermediate representation} +\author{ + Scott Olson + \texorpdfstring{\\ \scriptsize{Supervisor: Christopher Dutchyn}}{} +} +\institute{ + CMPT 400 \\ + University of Saskatchewan +} +\date{} +\titlegraphic{ + \includegraphics[width=64px,height=64px]{rust-logo-512x512.png} \\ + \scriptsize{\url{https://www.rust-lang.org}} +} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% Intro slides +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\begin{document} + +\maketitle + +\begin{frame}[fragile] + \frametitle{What is Rust? \small{[review]}} + + According to the website\dots + + \begin{quote} + \textbf{Rust} is a systems programming language that runs blazingly fast, + prevents nearly all segfaults, and guarantees thread safety. 
+ \end{quote} + + It's a new programming language from Mozilla, and it looks like this: + + \begin{minted}[ + autogobble, + fontsize=\footnotesize, + mathescape, + xleftmargin=.3in, + ]{rust} + fn factorial(n: u64) -> u64 { + (1..n).fold(1, |a, b| a * b) + } + + fn main() { + for x in 1..6 { + println!("{}", factorial(x)); + } + // $\Rightarrow$ 1 + // $\Rightarrow$ 1 + // $\Rightarrow$ 2 + // $\Rightarrow$ 6 + // $\Rightarrow$ 24 + } + \end{minted} +\end{frame} + +\begin{frame} + \frametitle{How does Rust compile code? \onslide<-6>{\small{[review]}}} + + \begin{center} + \begin{tikzpicture}[x=4cm, y=3.5cm, auto, rounded corners] + \tikzstyle{basic-stage}=[rectangle, draw, thick, align=center] + \tikzstyle{stage}=[basic-stage, font=\tiny] + \tikzstyle{pass}=[thick, -stealth] + \tikzstyle{pass-label}=[font=\footnotesize] + + \node[basic-stage] (src) at (0,0) {Source\\Code}; + \node[basic-stage] (mach) at (2,-1) {Machine\\Code}; + + \draw<1>[pass, out=0, in=180] + (src.east) to node[font=\Huge] {?} (mach.west); + + \node[stage, intro=<2>] (ast) at (1,0) + {\normalsize{AST} \\ Abstract Syntax Tree}; + \draw[pass, intro=<2>] + (src) -- node[pass-label] {Parse} (ast); + + \node[stage, intro=<3>] (hir) at (2,0) + {\normalsize{HIR} \\ High-level Intermediate\\Representation}; + \draw[pass, intro=<3>] + (ast) -- node[pass-label] {Simplify} (hir); + + \node[stage, intro=<4>] (mir) at (0,-1) + {\normalsize{MIR} \\ Mid-level Intermediate\\Representation}; + \path (hir.south) -- coordinate (middle) (mir.north); + \draw[pass, intro=<4>] + (hir.south) |- (middle) -| (mir.north); + \node[pass-label, above, intro=<4>] at (middle) {Lower}; + + \node[stage, intro=<5>] (llvm) at (1,-1) + {\normalsize{LLVM IR} \\ Low-level Intermediate\\Representation}; + \draw[pass, intro=<5>] + (mir) -- node[pass-label] {Translate} (llvm); + + \draw<6->[pass, intro=<6>] + (llvm) -- node[pass-label] {Magic} (mach); + + \node[stage, intro=<7>] (exec) at (1,-1.75) + {\normalsize{Execution}}; + \draw[pass, intro=<7>] + (mach) -- node[pass-label] {CPU} (exec); + + \draw[pass, intro=<8>] + (mir) -- node[pass-label] {Miri} (exec); + \end{tikzpicture} + \end{center} +\end{frame} + +\begin{frame} + \frametitle{Why build Miri?} + \begin{itemize} + \item For fun and learning. + + \item I originally planned to use it for testing the compiler and execution + of unsafe code, but shifted my goals along the way. \pause + + \item Now it serves as an experimental implementation of the upcoming + compile-time function evaluation feature in Rust. \pause + + \begin{itemize} + \item Similar to C++14's \mintinline{cpp}{constexpr} feature. + + \item You can do complicated calculations at compile time and compile + their \emph{results} into the executable. \pause + + \item For example, you can compute a ``perfect hash function'' for a + statically-known map at compile-time and have guaranteed no-collision + lookup at runtime. \pause + + \item Miri actually supports far more of Rust than C++14's + \mintinline{cpp}{constexpr} does of C++ --- even heap allocation and + unsafe code. + \end{itemize} + \end{itemize} +\end{frame} + +\begin{frame} + \frametitle{How was it built?} + + At first I wrote a naive version with a number of downsides: + + \begin{itemize} + \item represented values in a traditional dynamic language format, where + every value was the same size. + + \item didn't work well for aggregates (structs, enums, arrays, etc.). 
+ + \item made unsafe programming tricks that make assumptions about low-level + value layout essentially impossible. + \end{itemize} +\end{frame} + +\begin{frame} + \frametitle{How was it built?} + \begin{itemize} + \item Later, a Rust compiler team member proposed a ``Rust abstract + machine'' with specialized value layout which solved my previous problems. + \pause + + \item His proposal was intended for a compile-time function evaluator in the + Rust compiler, so I effectively implemented an experimental version of + that. \pause + + \item After this point, making Miri work well was primarily a software + engineering problem. + \end{itemize} +\end{frame} + +\begin{frame} + \frametitle{Data layout} + \begin{itemize} + \item Memory in Miri is literally a HashMap from ``allocation IDs'' to + ``abstract allocations''. + + \item Allocations are represented by: \pause + \begin{enumerate} + \item An array of \textbf{raw bytes} with a size based on the type of + the value \pause + \item A set of \textbf{relocations} --- pointers into other abstract + allocations \pause + \item A mask determining which bytes are \textbf{undefined} + \end{enumerate} + \end{itemize} +\end{frame} + +\begin{frame}[fragile] + \frametitle{\texttt{square} example} + \begin{center} + \begin{minted}[autogobble,fontsize=\scriptsize]{rust} + // Rust + fn square(n: u64) -> u64 { + n * n + } + + // Generated MIR + fn square(arg0: u64) -> u64 { + let var0: u64; // n // On function entry, Miri creates + // virtual allocations for all the + // arguments, variables, and + // temporaries. + + bb0: { + var0 = arg0; // Copy the argument into `n`. + return = Mul(var0, var0); // Multiply `n` with itself. + goto -> bb1; // Jump to basic block `bb1`. + } + + bb1: { + return; // Return from the current fn. + } + } + \end{minted} + \end{center} +\end{frame} + +\begin{frame}[fragile] + \frametitle{\texttt{sum} example} + \begin{center} + \begin{minted}[autogobble,fontsize=\tiny]{rust} + // Rust + fn sum() -> u64 { + let mut sum = 0; let mut i = 0; + while i < 10 { sum += i; i += 1; } + sum + } + + // Generated MIR + fn sum() -> u64 { + let mut var0: u64; // sum + let mut var1: u64; // i + let mut tmp0: bool; + + bb0: { + // sum = 0; i = 0; + var0 = const 0u64; var1 = const 0u64; goto -> bb1; + } + bb1: { + // if i < 10 { goto bb2; } else { goto bb3; } + tmp0 = Lt(var1, const 10u64); + if(tmp0) -> [true: bb2, false: bb3]; + } + bb2: { + var0 = Add(var0, var1); // sum = sum + i; + var1 = Add(var1, const 1u64); // i = i + 1; + goto -> bb1; + } + bb3: { + return = var0; goto -> bb4; + } + bb4: { return; } + } + \end{minted} + \end{center} +\end{frame} + +\begin{frame}[fragile] + \frametitle{Heap allocations!} + \begin{minted}[autogobble,fontsize=\scriptsize]{rust} + fn make_vec() -> Vec { + // Empty array with space for 4 bytes - allocated on the heap! + let mut vec = Vec::with_capacity(4); + // Initialize the first two slots. 
+ vec.push(1); + vec.push(2); + vec + } + + // For reference: + // struct Vec { capacity: usize, data: *mut T, length: usize } + + // Resulting allocations (on 32-bit little-endian architectures): + // Region A: + // 04 00 00 00 00 00 00 00 02 00 00 00 + // └───(B)───┘ + // + // Region B: + // 01 02 __ __ (underscores denote undefined bytes) + \end{minted} + + \footnotesize{Evaluating the above involves a number of compiler built-ins, + ``unsafe'' code blocks, and more inside the standard library, + but Miri handles it all.} +\end{frame} + +\begin{frame}[fragile] + \frametitle{Unsafe code!} + \begin{minted}[autogobble,fontsize=\scriptsize]{rust} + fn out_of_bounds() -> u8 { + let mut vec = vec![1, 2] + unsafe { *vec.get_unchecked(5) } + } + + // test.rs:3: error: pointer offset outside bounds of allocation + // test.rs:3: unsafe { *vec.get_unchecked(5) } + // ^~~~~~~~~~~~~~~~~~~~~ + + fn undefined_bytes() -> u8 { + let mut vec = Vec::with_capacity(10); + unsafe { *vec.get_unchecked(5) } + } + + // test.rs:3: error: attempted to read undefined bytes + // test.rs:3: unsafe { *vec.get_unchecked(5) } + // ^~~~~~~~~~~~~~~~~~~~~ + \end{minted} +\end{frame} + +\begin{frame} + \frametitle{What can't Miri do?} + \begin{itemize} + \item Miri can't do all the stuff I didn't implement yet. :) + \begin{itemize} + \item non-trivial casts + \item function pointers + \item calling destructors and freeing memory + \item taking target architecture endianess and alignment information + into account when computing data layout + \item handling all constants properly (but, well, Miri might be + replacing the old constants system) + \end{itemize} + \pause + + \item Miri can't do foreign function calls (e.g. calling functions defined + in C or C++), but there is a reasonable way it could be done with libffi. + \begin{itemize} + \item On the other hand, for constant evaluation in the compiler, you + want the evaluator to be deterministic and safe, so FFI calls might be + banned anyway. + \end{itemize} + \pause + + \item Without quite some effort, Miri will probably never handle inline + assembly... + \end{itemize} +\end{frame} + +\begin{frame} + \begin{center} + \LARGE{Questions?} + \end{center} +\end{frame} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% Extra slides +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\begin{frame}[fragile] + \frametitle{\texttt{varN} vs. \texttt{argN}} + \begin{center} + \begin{minted}[autogobble,fontsize=\scriptsize]{rust} + // Rust + type Pair = (u64, u64); + fn swap((a, b): Pair) -> Pair { + (b, a) + } + + // Generated MIR + fn swap(arg0: (u64, u64)) -> (u64, u64) { + let var0: u64; // a + let var1: u64; // b + + bb0: { + var0 = arg0.0; // get the 1st part of the pair + var1 = arg0.1; // get the 2nd part of the pair + return = (var1, var0); // build a new pair in the result + goto -> bb1; + } + + bb1: { + return; + } + } + \end{minted} + \end{center} +\end{frame} + +\begin{frame}[fragile] + \frametitle{\texttt{factorial} example} + \begin{center} + \begin{minted}[autogobble,fontsize=\tiny]{rust} + // Rust + fn factorial(n: u64) -> u64 { + (1..n).fold(1, |a, b| a * b) + } + + // Generated MIR + fn factorial(arg0: u64) -> u64 { + let var0: u64; // n + let mut tmp0: Range; // Miri calculates sizes for generics like Range. 
+ let mut tmp1: [closure]; + + bb0: { + var0 = arg0; + + // tmp0 = 1..n + tmp0 = Range { start: const 1u64, end: var0 }; + + // tmp1 = |a, b| a * b + tmp1 = [closure]; + + // This loads the MIR for the `fold` fn from the standard library. + // In general, MIR for any function from any library can be loaded. + // return tmp0.fold(1, tmp1) + return = Range::fold(tmp0, const 1u64, tmp1) -> bb1; + } + + bb1: { + return; + } + } + \end{minted} + \end{center} +\end{frame} + +\end{document} diff --git a/src/tools/miri/tex/report/latexmkrc b/src/tools/miri/tex/report/latexmkrc new file mode 100644 index 0000000000..23aa1a481b --- /dev/null +++ b/src/tools/miri/tex/report/latexmkrc @@ -0,0 +1,12 @@ +# vim: ft=perl + +$pdf_mode = 1; +$pdflatex = 'lualatex --shell-escape %O %S'; +$out_dir = 'out'; + +# This improves latexmk's detection of source files and generated files. +$recorder = 1; + +# Ignore always-regenerated *.pyg files from the minted package when considering +# whether to run pdflatex again. +$hash_calc_ignore_pattern{'pyg'} = '.*'; diff --git a/src/tools/miri/tex/report/miri-report.tex b/src/tools/miri/tex/report/miri-report.tex new file mode 100644 index 0000000000..f8bb37b911 --- /dev/null +++ b/src/tools/miri/tex/report/miri-report.tex @@ -0,0 +1,663 @@ +% vim: tw=100 + +\documentclass[twocolumn]{article} +\usepackage{blindtext} +\usepackage[hypcap]{caption} +\usepackage{fontspec} +\usepackage[colorlinks, urlcolor={blue!80!black}]{hyperref} +\usepackage[outputdir=out]{minted} +\usepackage{relsize} +\usepackage{xcolor} + +\setmonofont{Source Code Pro}[ + BoldFont={* Medium}, + BoldItalicFont={* Medium Italic}, + Scale=MatchLowercase, +] + +\newcommand{\rust}[1]{\mintinline{rust}{#1}} + +\begin{document} + +\title{Miri: \\ \smaller{An interpreter for Rust's mid-level intermediate representation}} +\author{Scott Olson\footnote{\href{mailto:scott@solson.me}{scott@solson.me}} \\ + \smaller{Supervised by Christopher Dutchyn}} +\date{April 12th, 2016} +\maketitle + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\section{Abstract} + +The increasing need for safe low-level code in contexts like operating systems and browsers is +driving the development of Rust\footnote{\url{https://www.rust-lang.org}}, a programming language +promising high performance without the risk of memory unsafety. To make programming more convenient, +it's often desirable to be able to generate code or perform some computation at compile-time. The +former is mostly covered by Rust's existing macro feature or build-time code generation, but the +latter is currently restricted to a limited form of constant evaluation capable of little beyond +simple math. + +The architecture of the compiler at the time the existing constant evaluator was built limited its +potential for future extension. However, a new intermediate representation was recently +added\footnote{\href{https://github.com/rust-lang/rfcs/blob/master/text/1211-mir.md}{Rust RFC \#1211: Mid-level IR (MIR)}} +to the Rust compiler between the abstract syntax tree and the back-end LLVM IR, called mid-level +intermediate representation, or MIR for short. This report will demonstrate that writing an +interpreter for MIR is a surprisingly effective approach for supporting a large proportion of Rust's +features in compile-time execution. 
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{Background}
+
+The Rust compiler generates an instance of \rust{Mir} for each function [\autoref{fig:mir}]. Each
+\rust{Mir} structure represents a control-flow graph for a given function, and contains a list of
+``basic blocks'' which in turn contain a list of statements followed by a single terminator. Each
+statement is of the form \rust{lvalue = rvalue}. An \rust{Lvalue} is used for referencing variables
+and calculating addresses such as when dereferencing pointers, accessing fields, or indexing arrays.
+An \rust{Rvalue} represents the core set of operations possible in MIR, including reading a value
+from an lvalue, performing math operations, creating new pointers, structures, and arrays, and so
+on. Finally, a terminator decides where control will flow next, optionally based on the value of a
+boolean or integer.
+
+\begin{figure}[ht]
+  \begin{minted}[autogobble]{rust}
+    struct Mir {
+        basic_blocks: Vec<BasicBlockData>,
+        // ...
+    }
+
+    struct BasicBlockData {
+        statements: Vec<Statement>,
+        terminator: Terminator,
+        // ...
+    }
+
+    struct Statement {
+        lvalue: Lvalue,
+        rvalue: Rvalue
+    }
+
+    enum Terminator {
+        Goto { target: BasicBlock },
+        If {
+            cond: Operand,
+            targets: [BasicBlock; 2]
+        },
+        // ...
+    }
+  \end{minted}
+  \caption{MIR (simplified)}
+  \label{fig:mir}
+\end{figure}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{First implementation}
+
+\subsection{Basic operation}
+
+To investigate the possibility of executing Rust at compile-time I wrote an interpreter for MIR
+called Miri\footnote{\url{https://github.com/solson/miri}}. The structure of the interpreter closely
+mirrors the structure of MIR itself. It starts executing a function by iterating the statement list
+in the starting basic block, translating the lvalue into a pointer and using the rvalue to decide
+what to write into that pointer. Evaluating the rvalue may involve reads (such as for the two sides
+of a binary operation) or construction of new values. When the terminator is reached, it is used to
+decide which basic block to jump to next. Finally, Miri repeats this entire process, reading
+statements from the new block.
+
+\subsection{Function calls}
+
+To handle function call terminators\footnote{Calls occur only as terminators, never as rvalues.},
+Miri is required to store some information in a virtual call stack so that it may pick up where it
+left off when the callee returns. Each stack frame stores a reference to the \rust{Mir} for the
+function being executed, its local variables, its return value location\footnote{Return value
+pointers are passed in by callers.}, and the basic block where execution should resume. When Miri
+encounters a \rust{Return} terminator in the MIR, it pops one frame off the stack and resumes the
+previous function. Miri's execution ends when the function it was initially invoked with returns,
+leaving the call stack empty.
+
+It should be noted that Miri does not itself recurse when a function is called; it merely pushes a
+virtual stack frame and jumps to the top of the interpreter loop. Consequently, Miri can interpret
+deeply recursive programs without overflowing its native call stack. This approach would allow Miri
+to set a virtual stack depth limit and report an error when a program exceeds it.
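+
+As a rough sketch (using hypothetical names rather than Miri's actual types), the call stack can be
+pictured as a plain vector of frames, each of which records just enough state to resume the caller:
+
+\begin{minted}[autogobble]{rust}
+    // Illustrative only; these names are invented for this report.
+    struct Frame<'mir> {
+        mir: &'mir Mir,           // MIR of the function being executed
+        locals: Vec<Pointer>,     // this frame's local variables
+        return_ptr: Pointer,      // where the caller wants the return value
+        resume_block: BasicBlock, // block to continue in after the call
+    }
+
+    struct Interpreter<'mir> {
+        stack: Vec<Frame<'mir>>, // pushed on calls, popped on `Return`
+    }
+\end{minted}
+
+A function call terminator pushes a \rust{Frame} and a \rust{Return} terminator pops one, so the
+depth of the native call stack stays constant no matter how deeply the interpreted program recurses.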
+
+\subsection{Flaws}
+
+This version of Miri supported quite a bit of the Rust language, including booleans, integers,
+if-conditions, while-loops, structures, enums, arrays, tuples, pointers, and function calls,
+requiring approximately 400 lines of Rust code. However, it had a particularly naive value
+representation with a number of downsides. It resembled the data layout of a dynamic language like
+Ruby or Python, where every value has the same size\footnote{An \rust{enum} is a discriminated union
+with a tag and space to fit the largest variant, regardless of which variant it contains.} in the
+interpreter:
+
+\begin{minted}[autogobble]{rust}
+    enum Value {
+        Uninitialized,
+        Bool(bool),
+        Int(i64),
+        Pointer(Pointer), // index into stack
+        Aggregate {
+            variant: usize,
+            data: Pointer,
+        },
+    }
+\end{minted}
+
+This representation did not work well for aggregate types\footnote{That is, structures, enums,
+arrays, tuples, and closures.} and required strange hacks to support them. Their contained values
+were allocated elsewhere on the stack and pointed to by the aggregate value, which made it more
+complicated to implement copying aggregate values from place to place.
+
+Moreover, while the aggregate issues could be worked around, this value representation made common
+unsafe programming tricks (which make assumptions about the low-level value layout) fundamentally
+impossible.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{Current implementation}
+
+Roughly halfway through my time working on Miri, Eduard
+Burtescu\footnote{\href{https://github.com/eddyb}{eddyb on GitHub}} from the Rust compiler
+team\footnote{\url{https://www.rust-lang.org/team.html\#Compiler}} made a post on Rust's internal
+forums about a ``Rust Abstract Machine''
+specification\footnote{\href{https://internals.rust-lang.org/t/mir-constant-evaluation/3143/31}{Burtescu's
+reply on ``MIR constant evaluation''}} which could be used to implement more powerful compile-time
+function execution, similar to what is supported by C++14's \mintinline{cpp}{constexpr} feature.
+After clarifying some of the details of the data layout with Burtescu via IRC, I started
+implementing it in Miri.
+
+\subsection{Raw value representation}
+
+The main difference in the new value representation was to represent values by ``abstract
+allocations'' containing arrays of raw bytes with different sizes depending on their types. This
+mimics how Rust values are represented when compiled for physical machines. In addition to the raw
+bytes, allocations carry information about pointers and undefined bytes.
+
+\begin{minted}[autogobble]{rust}
+    struct Memory {
+        map: HashMap<AllocId, Allocation>,
+        next_id: AllocId,
+    }
+
+    struct Allocation {
+        bytes: Vec<u8>,
+        relocations: BTreeMap<usize, AllocId>,
+        undef_mask: UndefMask,
+    }
+\end{minted}
+
+\subsubsection{Relocations}
+
+The abstract machine represents pointers through ``relocations'', which are analogous to relocations
+in linkers\footnote{\href{https://en.wikipedia.org/wiki/Relocation_(computing)}{Relocation
+(computing) - Wikipedia}}. Instead of storing a global memory address in the raw byte representation
+like on a physical machine, we store an offset from the start of the target allocation and add an
+entry to the relocation table which maps the index of the offset bytes to the target allocation.
+
+In \autoref{fig:reloc}, the relocation stored at offset 0 in \rust{y} points to offset 2 in \rust{x}
+(the 2nd 16-bit integer).
Thus, the relocation table for \rust{y} is \texttt{\{0 => +x\}}, meaning the next $N$ bytes after offset 0 denote an offset into allocation \rust{x} where $N$ +is the size of a pointer (4 in this example). The example shows this as a labelled line beneath the +offset bytes. + +In effect, the abstract machine represents pointers as \rust{(allocation_id, offset)} pairs. This +makes it easy to detect when pointer accesses go out of bounds. + +\begin{figure}[hb] + \begin{minted}[autogobble]{rust} + let x: [i16; 3] = [0xAABB, 0xCCDD, 0xEEFF]; + let y = &x[1]; + // x: BB AA DD CC FF EE (6 bytes) + // y: 02 00 00 00 (4 bytes) + // └───(x)───┘ + \end{minted} + \caption{Example relocation on 32-bit little-endian} + \label{fig:reloc} +\end{figure} + +\subsubsection{Undefined byte mask} + +The final piece of an abstract allocation is the undefined byte mask. Logically, we store a boolean +for the definedness of every byte in the allocation, but there are multiple ways to make the storage +more compact. I tried two implementations: one based on the endpoints of alternating ranges of +defined and undefined bytes and the other based on a bitmask. The former is more compact but I found +it surprisingly difficult to update cleanly. I currently use the much simpler bitmask system. + +See \autoref{fig:undef} for an example of an undefined byte in a value, represented by underscores. +Note that there is a value for the second byte in the byte array, but it doesn't matter what it is. +The bitmask would be $10_2$, i.e.\ \rust{[true, false]}. + +\begin{figure}[hb] + \begin{minted}[autogobble]{rust} + let x: [u8; 2] = unsafe { + [1, std::mem::uninitialized()] + }; + // x: 01 __ (2 bytes) + \end{minted} + \caption{Example undefined byte} + \label{fig:undef} +\end{figure} + +\subsection{Computing data layout} + +Currently, the Rust compiler's data layouts for types are hidden from Miri, so it does its own data +layout computation which will not always match what the compiler does, since Miri doesn't take +target type alignments into account. In the future, the Rust compiler may be modified so that Miri +can use the exact same data layout. + +Miri's data layout calculation is a relatively simple transformation from Rust types to a structure +with constant size values for primitives and sets of fields with offsets for aggregate types. These +layouts are cached for performance. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\section{Deterministic execution} +\label{sec:deterministic} + +In order to be effective as a compile-time evaluator, Miri must have \emph{deterministic execution}, +as explained by Burtescu in the ``Rust Abstract Machine'' post. That is, given a function and +arguments to that function, Miri should always produce identical results. This is important for +coherence in the type checker when constant evaluations are involved in types, such as for sizes of +array types: + +\begin{minted}[autogobble,mathescape]{rust} + const fn get_size() -> usize { /* $\ldots$ */ } + let array: [i32; get_size()]; +\end{minted} + +Since Miri allows execution of unsafe code\footnote{In fact, the distinction between safe and unsafe +doesn't exist at the MIR level.}, it is specifically designed to remain safe while interpreting +potentially unsafe code. When Miri encounters an unrecoverable error, it reports it via the Rust +compiler's usual error reporting mechanism, pointing to the part of the original code where the +error occurred. 
Below is an example from Miri's +repository.\footnote{\href{https://github.com/solson/miri/blob/master/test/errors.rs}{miri/test/errors.rs}} + +\begin{minted}[autogobble]{rust} + let b = Box::new(42); + let p: *const i32 = &*b; + drop(b); + unsafe { *p } + // ~~ error: dangling pointer + // was dereferenced +\end{minted} +\label{dangling-pointer} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\section{Language support} + +In its current state, Miri supports a large proportion of the Rust language, detailed below. The +major exception is a lack of support for FFI\footnote{Foreign Function Interface, e.g.\ calling +functions defined in Assembly, C, or C++.}, which eliminates possibilities like reading and writing +files, user input, graphics, and more. However, for compile-time evaluation in Rust, this limitation +is desired. + +\subsection{Primitives} + +Miri supports booleans, integers of various sizes and signed-ness (i.e.\ \rust{i8}, \rust{i16}, +\rust{i32}, \rust{i64}, \rust{isize}, \rust{u8}, \rust{u16}, \rust{u32}, \rust{u64}, \rust{usize}), +and unary and binary operations over these types. The \rust{isize} and \rust{usize} types will be +sized according to the target machine's pointer size just like in compiled Rust. The \rust{char} and +float types (\rust{f32}, \rust{f64}) are not supported yet, but there are no known barriers to doing +so. + +When examining a boolean in an \rust{if} condition, Miri will report an error if its byte +representation is not precisely 0 or 1, since having any other value for a boolean is undefined +behaviour in Rust. The \rust{char} type will have similar restrictions once it is implemented. + +\subsection{Pointers} + +Both references and raw pointers are supported, with essentially no difference between them in Miri. +It is also possible to do pointer comparisons and math. However, a few operations are considered +errors and a few require special support. + +Firstly, pointers into the same allocations may be compared for ordering, but pointers into +different allocations are considered unordered and Miri will complain if you attempt this. The +reasoning is that different allocations may have different orderings in the global address space at +runtime, making this non-deterministic. However, pointers into different allocations \emph{may} be +compared for direct equality (they are always unequal). + +Secondly, pointers represented using relocations may be compared against pointers casted from +integers (e.g.\ \rust{0 as *const i32}) for things like null pointer checks. To handle these cases, +Miri has a concept of ``integer pointers'' which are always unequal to abstract pointers. Integer +pointers can be compared and operated upon freely. However, note that it is impossible to go from an +integer pointer to an abstract pointer backed by a relocation. It is not valid to dereference an +integer pointer. + +\subsubsection{Slice pointers} + +Rust supports pointers to ``dynamically-sized types'' such as \rust{[T]} and \rust{str} which +represent arrays of indeterminate size. Pointers to such types contain an address \emph{and} the +length of the referenced array. Miri supports these fully. + +\subsubsection{Trait objects} + +Rust also supports pointers to ``trait objects'' which represent some type that implements a trait, +with the specific type unknown at compile-time. These are implemented using virtual dispatch with a +vtable, similar to virtual methods in C++. 
Miri does not currently support these at all. + +\subsection{Aggregates} + +Aggregates include types declared with \rust{struct} or \rust{enum} as well as tuples, arrays, and +closures. Miri supports all common usage of all of these types. The main missing piece is to handle +\texttt{\#[repr(..)]} annotations which adjust the layout of a \rust{struct} or \rust{enum}. + +\subsection{Lvalue projections} + +This category includes field accesses, dereferencing, accessing data in an \rust{enum} variant, and +indexing arrays. Miri supports all of these, including nested projections such as +\rust{*foo.bar[2]}. + +\subsection{Control flow} + +All of Rust's standard control flow features, including \rust{loop}, \rust{while}, \rust{for}, +\rust{if}, \rust{if let}, \rust{while let}, \rust{match}, \rust{break}, \rust{continue}, and +\rust{return} are supported. In fact, supporting these was quite easy since the Rust compiler +reduces them all down to a small set of control-flow graph primitives in MIR. + +\subsection{Function calls} + +As previously described, Miri supports arbitrary function calls without growing the native stack +(only its virtual call stack). It is somewhat limited by the fact that cross-crate\footnote{A crate +is a single Rust library (or executable).} calls only work for functions whose MIR is stored in +crate metadata. This is currently true for \rust{const}, generic, and inline functions. +A branch of the compiler could be made that stores MIR for all functions. This would be a non-issue +for a compile-time evaluator based on Miri, since it would only call \rust{const fn}s. + +\subsubsection{Method calls} + +Miri supports trait method calls, including invoking all the compiler-internal lookup needed to find +the correct implementation of the method. + +\subsubsection{Closures} + +Calls to closures are also supported with the exception of one edge case\footnote{Calling a closure +that takes a reference to its captures via a closure interface that passes the captures by value is +not yet supported.}. The value part of a closure that holds the captured variables is handled as an +aggregate and the function call part is mostly the same as a trait method call, but with the added +complication that closures use a separate calling convention within the compiler. + +\subsubsection{Function pointers} + +Function pointers are not currently supported by Miri, but there is a relatively simple way they +could be encoded using a relocation with a special reserved allocation identifier. The offset of the +relocation would determine which function it points to in a special array of functions in the +interpreter. + +\subsubsection{Intrinsics} + +To support unsafe code, and in particular to support Rust's standard library, it became clear that +Miri would have to support calls to compiler +intrinsics\footnote{\url{https://doc.rust-lang.org/stable/std/intrinsics/index.html}}. Intrinsics +are function calls which cause the Rust compiler to produce special-purpose code instead of a +regular function call. Miri simply recognizes intrinsic calls by their unique +ABI\footnote{Application Binary Interface, which defines calling conventions. Includes ``C'', +``Rust'', and ``rust-intrinsic''.} and name and runs special-purpose code to handle them. + +An example of an important intrinsic is \rust{size_of} which will cause Miri to write the size of +the type in question to the return value location. 
The Rust standard library uses intrinsics heavily
+to implement various data structures, so this was a major step toward supporting them. Intrinsics
+have been implemented on a case-by-case basis as tests which required them were written, and not all
+intrinsics are supported yet.
+
+\subsubsection{Generic function calls}
+
+Miri needs special support for generic function calls since Rust is a \emph{monomorphizing}
+compiler, meaning it generates a special version of each function for each distinct set of type
+parameters it gets called with. Since functions in MIR are still polymorphic, Miri has to do the
+same thing and substitute function type parameters into all types it encounters to get fully
+concrete, monomorphized types. For example, in\ldots
+
+\begin{minted}[autogobble]{rust}
+    fn some<T>(t: T) -> Option<T> { Some(t) }
+\end{minted}
+
+\ldots{}Miri needs to know the size of \rust{T} to copy the right amount of bytes from the argument
+to the return value. If we call \rust{some(10i32)} Miri will execute \rust{some} knowing that
+\rust{T = i32} and generate a representation for \rust{Option<i32>}.
+
+Miri currently does this monomorphization lazily on-demand unlike the Rust back-end which does it
+all ahead of time.
+
+\subsection{Heap allocations}
+
+The next piece of the puzzle for supporting interesting programs (and the standard library) was heap
+allocations. There are two main interfaces for heap allocation in Rust: the built-in \rust{Box}
+rvalue in MIR and a set of C ABI foreign functions including \rust{__rust_allocate},
+\rust{__rust_reallocate}, and \rust{__rust_deallocate}. These correspond approximately to
+\mintinline{c}{malloc}, \mintinline{c}{realloc}, and \mintinline{c}{free} in C.
+
+The \rust{Box} rvalue allocates enough space for a single value of a given type. This was easy to
+support in Miri. It simply creates a new abstract allocation in the same manner as for
+stack-allocated values, since there's no major difference between them in Miri.
+
+The allocator functions, which are used to implement things like Rust's standard \rust{Vec} type,
+were a bit trickier. Rust declares them as \rust{extern "C" fn} so that different allocator
+libraries can be linked in at the user's option. Since Miri doesn't actually support FFI and wants
+full control of allocations for safety, it ``cheats'' and recognizes these allocator functions in
+essentially the same way it recognizes compiler intrinsics. Then, a call to \rust{__rust_allocate}
+simply creates another abstract allocation with the requested size and \rust{__rust_reallocate}
+grows one.
+
+In the future, Miri should also track which allocations came from \rust{__rust_allocate} so it can
+reject reallocate or deallocate calls on stack allocations.
+
+\subsection{Destructors}
+
+When a value which ``owns'' some resource (like a heap allocation or file handle) goes out of scope,
+Rust inserts \emph{drop glue} that calls the user-defined destructor for the type if it has one, and
+then drops all of the subfields. Destructors for types like \rust{Box<T>} and \rust{Vec<T>}
+deallocate heap memory.
+
+Miri doesn't yet support calling user-defined destructors, but it has most of the machinery in place
+to do so already. There \emph{is} support for dropping \rust{Box} types, including deallocating
+their associated allocations. This is enough to properly execute the dangling pointer example in
+\autoref{sec:deterministic}.
+
+\subsection{Constants}
+
+Only basic integer, boolean, string, and byte-string literals are currently supported.
Evaluating
+more complicated constant expressions in their current form would be a somewhat pointless exercise
+for Miri. Instead, we should lower constant expressions to MIR so Miri can run them directly, which
+is precisely what would need to be done to use Miri as the compiler's constant evaluator.
+
+\subsection{Static variables}
+
+Miri doesn't currently support statics, but they would need support similar to constants. Also note
+that while it would be invalid to write to static (i.e.\ global) variables in Miri executions, it
+would probably be fine to allow reads.
+
+\subsection{Standard library}
+
+Throughout the implementation of the above features, I often followed this process:
+
+\begin{enumerate}
+  \item Try using a feature from the standard library.
+  \item See where Miri runs into stuff it can't handle.
+  \item Fix the problem.
+  \item Go to 1.
+\end{enumerate}
+
+At present, Miri supports a number of major non-trivial features from the standard library along
+with tons of minor features. Smart pointer types such as \rust{Box}, \rust{Rc}\footnote{Reference
+counted shared pointer} and \rust{Arc}\footnote{Atomically reference-counted thread-safe shared
+pointer} all seem to work. I've also tested using the shared smart pointer types with \rust{Cell}
+and \rust{RefCell}\footnote{\href{https://doc.rust-lang.org/stable/std/cell/index.html}{Rust
+documentation for cell types}} for internal mutability, and that works as well, although
+\rust{RefCell} can't ever be borrowed twice until I implement destructor calls, since a destructor
+is what releases the borrow.
+
+But the standard library collection I spent the most time on was \rust{Vec}, the standard
+dynamically-growable array type, similar to C++'s \texttt{std::vector} or Java's
+\texttt{java.util.ArrayList}. In Rust, \rust{Vec} is an extremely pervasive collection, so
+supporting it is a big win for supporting a larger swath of Rust programs in Miri.
+
+See \autoref{fig:vec} for an example (working in Miri today) of initializing a \rust{Vec} with a
+small amount of space on the heap and then pushing enough elements to force it to reallocate its
+data array. This involves cross-crate generic function calls, unsafe code using raw pointers, heap
+allocation, handling of uninitialized memory, compiler intrinsics, and more.
+
+\begin{figure}[t]
+  \begin{minted}[autogobble]{rust}
+    struct Vec<T> {
+        data: *mut T,    // 4 byte pointer
+        capacity: usize, // 4 byte integer
+        length: usize,   // 4 byte integer
+    }
+
+    let mut v: Vec<u8> =
+        Vec::with_capacity(2);
+    // v: 00 00 00 00 02 00 00 00 00 00 00 00
+    //    └─(data)──┘
+    // data: __ __
+
+    v.push(1);
+    // v: 00 00 00 00 02 00 00 00 01 00 00 00
+    //    └─(data)──┘
+    // data: 01 __
+
+    v.push(2);
+    // v: 00 00 00 00 02 00 00 00 02 00 00 00
+    //    └─(data)──┘
+    // data: 01 02
+
+    v.push(3);
+    // v: 00 00 00 00 04 00 00 00 03 00 00 00
+    //    └─(data)──┘
+    // data: 01 02 03 __
+  \end{minted}
+  \caption{\rust{Vec} example on 32-bit little-endian}
+  \label{fig:vec}
+\end{figure}
+
+Miri supports unsafe operations on \rust{Vec} like \rust{v.set_len(10)} or
+\rust{v.get_unchecked(2)}, provided that such calls do not invoke undefined behaviour. If a call
+\emph{does} invoke undefined behaviour, Miri will abort with an appropriate error message (see
+\autoref{fig:vec-error}).
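+
+By contrast, well-defined uses of the same unsafe APIs pass through Miri without complaint. The
+following snippet (a hypothetical example, not taken from Miri's test suite) reads an element that
+is in bounds and initialized and then shrinks the vector's length, neither of which is undefined
+behaviour:
+
+\begin{minted}[autogobble]{rust}
+    let mut v = vec![1u8, 2, 3];
+    let second = unsafe { *v.get_unchecked(1) }; // in bounds and initialized: ok
+    unsafe { v.set_len(2) };                     // shrinking the length: ok
+    // second == 2, v == [1, 2]
+\end{minted}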
+
+\begin{figure}[t]
+  \begin{minted}[autogobble]{rust}
+    fn out_of_bounds() -> u8 {
+        let v = vec![1, 2];
+        let p = unsafe { v.get_unchecked(5) };
+        *p + 10
+        // ~~ error: pointer offset outside
+        //    bounds of allocation
+    }
+
+    fn undefined_bytes() -> u8 {
+        let v = Vec::<u8>::with_capacity(10);
+        let p = unsafe { v.get_unchecked(5) };
+        *p + 10
+        // ~~~~~~~ error: attempted to read
+        //         undefined bytes
+    }
+  \end{minted}
+  \caption{\rust{Vec} examples with undefined behaviour}
+  \label{fig:vec-error}
+\end{figure}
+
+\newpage
+
+Here is one final code sample Miri can execute that demonstrates many features at once, including
+vectors, heap allocation, iterators, closures, raw pointers, and math:
+
+\begin{minted}[autogobble]{rust}
+    let x: u8 = vec![1, 2, 3, 4]
+        .into_iter()
+        .map(|x| x * x)
+        .fold(0, |x, y| x + y);
+    // x: 1e (that is, the hex value
+    //        0x1e = 30 = 1 + 4 + 9 + 16)
+\end{minted}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{Future directions}
+
+\subsection{Finishing the implementation}
+
+There are a number of pressing items on my to-do list for Miri, including:
+
+\begin{itemize}
+  \item A much more comprehensive and automated test suite.
+  \item User-defined destructor calls.
+  \item Non-trivial casts between primitive types like integers and pointers.
+  \item Handling statics and global memory.
+  \item Reporting errors for all undefined behaviour.\footnote{\href{https://doc.rust-lang.org/reference.html\#behavior-considered-undefined}{The Rust reference on what is considered undefined behaviour}}
+  \item Function pointers.
+  \item Accounting for target machine primitive type alignment and endianness.
+  \item Optimizations (undefined byte masks, tail-calls).
+  \item Benchmarking Miri vs. unoptimized Rust.
+  \item Various \texttt{TODO}s and \texttt{FIXME}s left in the code.
+  \item Integrating into the compiler proper.
+\end{itemize}
+
+\subsection{Future projects}
+
+Other possible Miri-related projects include:
+
+\begin{itemize}
+  \item A read-eval-print-loop (REPL) for Rust, which may be easier to implement on top of Miri than
+    the usual LLVM back-end.
+  \item A graphical or text-mode debugger that steps through MIR execution one statement at a time,
+    for figuring out why some compile-time execution is raising an error or simply learning how Rust
+    works at a low level.
+  \item A less restricted version of Miri that is able to run foreign functions from C/C++ and
+    generally has full access to the operating system. Such an interpreter could be used to more
+    quickly prototype changes to the Rust language that would otherwise require changes to the LLVM
+    back-end.
+  \item Unit-testing the compiler by comparing the results of Miri's execution against the results
+    of LLVM-compiled machine code's execution. This would help to guarantee that compile-time
+    execution works the same as runtime execution.
+  \item Some kind of Miri-based symbolic evaluator that examines multiple possible code paths at
+    once to determine if undefined behaviour could be observed on any of them.
+\end{itemize}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{Final thoughts}
+
+Writing an interpreter which models values of varying sizes, stack and heap allocation, unsafe
+memory operations, and more requires some unconventional techniques compared to conventional
+interpreters targeting dynamically-typed languages.
However, aside from the somewhat complicated +abstract memory model, making Miri work was primarily a software engineering problem, and not a +particularly tricky one. This is a testament to MIR's suitability as an intermediate representation +for Rust---removing enough unnecessary abstraction to keep it simple. For example, Miri doesn't even +need to know that there are different kinds of loops, or how to match patterns in a \rust{match} +expression. + +Another advantage to targeting MIR is that any new features at the syntax-level or type-level +generally require little to no change in Miri. For example, when the new ``question mark'' syntax +for error handling\footnote{ + \href{https://github.com/rust-lang/rfcs/blob/master/text/0243-trait-based-exception-handling.md} + {Question mark syntax RFC}} +was added to rustc, Miri required no change to support it. +When specialization\footnote{ + \href{https://github.com/rust-lang/rfcs/blob/master/text/1210-impl-specialization.md} + {Specialization RFC}} +was added, Miri supported it with just minor changes to trait method lookup. + +Of course, Miri also has limitations. The inability to execute FFI and inline assembly reduces the +amount of Rust programs Miri could ever execute. The good news is that in the constant evaluator, +FFI can be stubbed out in cases where it makes sense, like I did with \rust{__rust_allocate}. For a +version of Miri not intended for constant evaluation, it may be possible to use libffi to call C +functions from the interpreter. + +In conclusion, Miri is a surprisingly effective project, and a lot of fun to implement. Due to MIR's +tendency to collapse multiple source-level features into one, I often ended up supporting features I +hadn't explicitly intended to. I am excited to work with the compiler team going forward to try to +make Miri useful for constant evaluation in Rust. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\section{Thanks} + +A big thanks goes to Eduard Burtescu for writing the abstract machine specification and answering my +incessant questions on IRC, to Niko Matsakis for coming up with the idea for Miri and supporting my +desire to work with the Rust compiler, and to my research supervisor Christopher Dutchyn. Thanks +also to everyone else on the compiler team and on Mozilla IRC who helped me figure stuff out. +Finally, thanks to Daniel Keep and everyone else who helped fix my numerous writing mistakes. + +\end{document} diff --git a/src/tools/miri/xargo/Cargo.lock b/src/tools/miri/xargo/Cargo.lock new file mode 100644 index 0000000000..031ad9a879 --- /dev/null +++ b/src/tools/miri/xargo/Cargo.lock @@ -0,0 +1,4 @@ +[root] +name = "miri-xargo" +version = "0.0.0" + diff --git a/src/tools/miri/xargo/Cargo.toml b/src/tools/miri/xargo/Cargo.toml new file mode 100644 index 0000000000..9129c105b1 --- /dev/null +++ b/src/tools/miri/xargo/Cargo.toml @@ -0,0 +1,6 @@ +[package] +name = "miri-xargo" +description = "A dummy project for building libstd with xargo." 
+version = "0.0.0" + +[dependencies] diff --git a/src/tools/miri/xargo/Xargo.toml b/src/tools/miri/xargo/Xargo.toml new file mode 100644 index 0000000000..4b650b97de --- /dev/null +++ b/src/tools/miri/xargo/Xargo.toml @@ -0,0 +1,2 @@ +[dependencies] +std = {features = ["panic_unwind", "jemalloc", "backtrace"]} diff --git a/src/tools/miri/xargo/build.sh b/src/tools/miri/xargo/build.sh new file mode 100755 index 0000000000..15a7c77091 --- /dev/null +++ b/src/tools/miri/xargo/build.sh @@ -0,0 +1,3 @@ +#!/bin/sh +cd "$(dirname "$0")" +RUSTFLAGS='-Zalways-encode-mir -Zmir-emit-validate=1' xargo build diff --git a/src/vendor/unicode-segmentation/.cargo-ok b/src/tools/miri/xargo/src/lib.rs similarity index 100% rename from src/vendor/unicode-segmentation/.cargo-ok rename to src/tools/miri/xargo/src/lib.rs diff --git a/src/tools/rust-installer/Cargo.toml b/src/tools/rust-installer/Cargo.toml index 93f50eb585..d054e3d623 100644 --- a/src/tools/rust-installer/Cargo.toml +++ b/src/tools/rust-installer/Cargo.toml @@ -5,11 +5,11 @@ version = "0.0.0" [[bin]] doc = false -name = "rust-installer" +name = "fabricate" path = "src/main.rs" [dependencies] -error-chain = "0.10.0" +error-chain = "0.11.0" flate2 = "0.2.19" tar = "0.4.13" walkdir = "1.0.7" diff --git a/src/tools/rust-installer/combine-installers.sh b/src/tools/rust-installer/combine-installers.sh index e56dc8dbe8..4931c34ddc 100755 --- a/src/tools/rust-installer/combine-installers.sh +++ b/src/tools/rust-installer/combine-installers.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # Copyright 2014 The Rust Project Developers. See the COPYRIGHT # file at the top-level directory of this distribution and at # http://rust-lang.org/COPYRIGHT. diff --git a/src/tools/rust-installer/gen-install-script.sh b/src/tools/rust-installer/gen-install-script.sh index 1420814a41..b4559d147a 100755 --- a/src/tools/rust-installer/gen-install-script.sh +++ b/src/tools/rust-installer/gen-install-script.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # Copyright 2014 The Rust Project Developers. See the COPYRIGHT # file at the top-level directory of this distribution and at # http://rust-lang.org/COPYRIGHT. diff --git a/src/tools/rust-installer/gen-installer.sh b/src/tools/rust-installer/gen-installer.sh index 60fac3b949..198cfe7425 100755 --- a/src/tools/rust-installer/gen-installer.sh +++ b/src/tools/rust-installer/gen-installer.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # Copyright 2014 The Rust Project Developers. See the COPYRIGHT # file at the top-level directory of this distribution and at # http://rust-lang.org/COPYRIGHT. diff --git a/src/tools/rust-installer/install-template.sh b/src/tools/rust-installer/install-template.sh index 44a85f5631..e584aa0b2e 100644 --- a/src/tools/rust-installer/install-template.sh +++ b/src/tools/rust-installer/install-template.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # Copyright 2014 The Rust Project Developers. See the COPYRIGHT # file at the top-level directory of this distribution and at # http://rust-lang.org/COPYRIGHT. 
@@ -319,62 +319,6 @@ abs_path() { (unset CDPATH && cd "$path" > /dev/null && pwd) } -get_host_triple() { - local _uname_value=$(uname -s) - local _ostype - case $_uname_value in - - Linux) - _ostype=unknown-linux-gnu - ;; - - FreeBSD) - _ostype=unknown-freebsd - ;; - - DragonFly) - _ostype=unknown-dragonfly - ;; - - Bitrig) - _ostype=unknown-bitrig - ;; - - NetBSD) - _ostype=unknown-netbsd - ;; - - OpenBSD) - _ostype=unknown-openbsd - ;; - - Darwin) - _ostype=apple-darwin - ;; - - MINGW*) - _ostype=pc-windows-gnu - ;; - - MSYS*) - _ostype=pc-windows-gnu - ;; - - CYGWIN*) - _ostype=pc-windows-gnu - ;; - Haiku) - _ostype=unknown-haiku - ;; - - *) - err "unknown value from uname -s: $_uname_value" - ;; - esac - - RETVAL="$_ostype" -} - uninstall_legacy() { local _abs_libdir="$1" @@ -675,11 +619,13 @@ install_components() { maybe_backup_path "$_file_install_path" - if echo "$_file" | grep "^bin/" > /dev/null + if echo "$_file" | grep "^bin/" > /dev/null || test -x "$_src_dir/$_component/$_file" then - run install -m755 "$_src_dir/$_component/$_file" "$_file_install_path" + run cp "$_src_dir/$_component/$_file" "$_file_install_path" + run chmod 755 "$_file_install_path" else - run install -m644 "$_src_dir/$_component/$_file" "$_file_install_path" + run cp "$_src_dir/$_component/$_file" "$_file_install_path" + run chmod 644 "$_file_install_path" fi critical_need_ok "file creation failed" @@ -719,11 +665,10 @@ install_components() { maybe_configure_ld() { local _abs_libdir="$1" - get_host_triple - local _ostype="$RETVAL" + local _ostype="$(uname -s)" assert_nz "$_ostype" "ostype" - if [ "$_ostype" = "unknown-linux-gnu" -a ! -n "${CFG_DISABLE_LDCONFIG-}" ]; then + if [ "$_ostype" = "Linux" -a ! -n "${CFG_DISABLE_LDCONFIG-}" ]; then # Fedora-based systems do not configure the dynamic linker to look # /usr/local/lib, which is our default installation directory. To @@ -756,11 +701,10 @@ maybe_configure_ld() { } maybe_unconfigure_ld() { - get_host_triple - local _ostype="$RETVAL" + local _ostype="$(uname -s)" assert_nz "$_ostype" "ostype" - if [ "$_ostype" != "unknown-linux-gnu" ]; then + if [ "$_ostype" != "Linux" ]; then return 0 fi diff --git a/src/tools/rust-installer/src/tarballer.rs b/src/tools/rust-installer/src/tarballer.rs index ec0cc974e0..f1eeb901c4 100644 --- a/src/tools/rust-installer/src/tarballer.rs +++ b/src/tools/rust-installer/src/tarballer.rs @@ -9,8 +9,10 @@ // except according to those terms. use std::fs::File; -use std::io::{self, Write}; +use std::io::Write; use std::path::Path; +use std::sync::Arc; +use std::thread; use flate2; use flate2::write::GzEncoder; @@ -55,15 +57,9 @@ impl Tarballer { .chain_err(|| "failed to collect file paths")?; files.sort_by(|a, b| a.bytes().rev().cmp(b.bytes().rev())); - // Prepare the .tar.gz file - let gz = GzEncoder::new(create_new_file(tar_gz)?, flate2::Compression::Best); - - // Prepare the .tar.xz file - let xz = XzEncoder::new(create_new_file(tar_xz)?, 9); - // Write the tar into both encoded files. We write all directories // first, so files may be directly created. 
(see rustup.rs#1092) - let mut builder = Builder::new(Tee(gz, xz)); + let mut builder = Builder::new(Vec::new()); for path in dirs { let src = Path::new(&self.work_dir).join(&path); builder.append_dir(&path, &src) @@ -75,12 +71,25 @@ impl Tarballer { builder.append_data(&mut header(&src, &file)?, &path, &file) .chain_err(|| format!("failed to tar file '{}'", src.display()))?; } - let Tee(gz, xz) = builder.into_inner() + let contents = builder.into_inner() .chain_err(|| "failed to finish writing .tar stream")?; + let contents = Arc::new(contents); + + // Prepare the .tar.gz file + let contents2 = contents.clone(); + let t = thread::spawn(move || { + let mut gz = GzEncoder::new(create_new_file(tar_gz)?, + flate2::Compression::Best); + gz.write_all(&contents2).chain_err(|| "failed to write .gz")?; + gz.finish().chain_err(|| "failed to finish .gz") + }); + + // Prepare the .tar.xz file + let mut xz = XzEncoder::new(create_new_file(tar_xz)?, 9); + xz.write_all(&contents).chain_err(|| "failed to write .xz")?; + xz.finish().chain_err(|| "failed to finish .xz")?; - // Finish both encoded files - gz.finish().chain_err(|| "failed to finish .tar.gz file")?; - xz.finish().chain_err(|| "failed to finish .tar.xz file")?; + t.join().unwrap()?; Ok(()) } @@ -129,17 +138,3 @@ fn get_recursive_paths(root: P, name: Q) -> Result<(Vec, Vec(A, B); - -impl Write for Tee { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.0.write_all(buf) - .and(self.1.write_all(buf)) - .and(Ok(buf.len())) - } - - fn flush(&mut self) -> io::Result<()> { - self.0.flush().and(self.1.flush()) - } -} diff --git a/src/tools/rust-installer/test.sh b/src/tools/rust-installer/test.sh index c85e4d3ae9..bb066866ae 100755 --- a/src/tools/rust-installer/test.sh +++ b/src/tools/rust-installer/test.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash set -e -u diff --git a/src/tools/rustbook/Cargo.toml b/src/tools/rustbook/Cargo.toml index e759e0bc13..4aa096246b 100644 --- a/src/tools/rustbook/Cargo.toml +++ b/src/tools/rustbook/Cargo.toml @@ -8,5 +8,5 @@ license = "MIT/Apache-2.0" clap = "2.25.0" [dependencies.mdbook] -version = "0.0.22" +version = "0.0.26" default-features = false diff --git a/src/tools/rustbook/src/main.rs b/src/tools/rustbook/src/main.rs index 33326de9c1..a0c3e811a7 100644 --- a/src/tools/rustbook/src/main.rs +++ b/src/tools/rustbook/src/main.rs @@ -13,13 +13,13 @@ extern crate mdbook; extern crate clap; use std::env; -use std::error::Error; use std::io::{self, Write}; use std::path::{Path, PathBuf}; use clap::{App, ArgMatches, SubCommand, AppSettings}; use mdbook::MDBook; +use mdbook::errors::Result; fn main() { let d_message = "-d, --dest-dir=[dest-dir] @@ -41,7 +41,6 @@ fn main() { // Check which subcomamnd the user ran... 
let res = match matches.subcommand() { ("build", Some(sub_matches)) => build(sub_matches), - ("test", Some(sub_matches)) => test(sub_matches), (_, _) => unreachable!(), }; @@ -50,42 +49,21 @@ fn main() { ::std::process::exit(101); } } - // Build command implementation -fn build(args: &ArgMatches) -> Result<(), Box> { - let book = build_mdbook_struct(args); +pub fn build(args: &ArgMatches) -> Result<()> { + let book_dir = get_book_dir(args); + let book = MDBook::new(&book_dir).read_config()?; let mut book = match args.value_of("dest-dir") { - Some(dest_dir) => book.set_dest(Path::new(dest_dir)), - None => book + Some(dest_dir) => book.with_destination(dest_dir), + None => book, }; - try!(book.build()); - - Ok(()) -} - -fn test(args: &ArgMatches) -> Result<(), Box> { - let mut book = build_mdbook_struct(args); - - try!(book.test()); + book.build()?; Ok(()) } -fn build_mdbook_struct(args: &ArgMatches) -> mdbook::MDBook { - let book_dir = get_book_dir(args); - let mut book = MDBook::new(&book_dir).read_config(); - - // By default mdbook will attempt to create non-existent files referenced - // from SUMMARY.md files. This is problematic on CI where we mount the - // source directory as readonly. To avoid any issues, we'll disabled - // mdbook's implicit file creation feature. - book.create_missing = false; - - book -} - fn get_book_dir(args: &ArgMatches) -> PathBuf { if let Some(dir) = args.value_of("dir") { // Check if path is relative from current dir, or absolute... diff --git a/src/tools/tidy/src/deps.rs b/src/tools/tidy/src/deps.rs index f572ad9cd0..38df657769 100644 --- a/src/tools/tidy/src/deps.rs +++ b/src/tools/tidy/src/deps.rs @@ -33,6 +33,15 @@ static EXCEPTIONS: &'static [&'static str] = &[ "openssl", // BSD+advertising clause, cargo, mdbook "pest", // MPL2, mdbook via handlebars "thread-id", // Apache-2.0, mdbook + "cssparser", // MPL-2.0, rustdoc + "smallvec", // MPL-2.0, rustdoc + // FIXME: remove magenta references when "everything" has moved over to using the zircon name. + "magenta-sys", // BSD-3-Clause, rustdoc + "magenta", // BSD-3-Clause, rustdoc + "zircon-sys", // BSD-3-Clause, rustdoc + "zircon", // BSD-3-Clause, rustdoc + "cssparser-macros", // MPL-2.0, rustdoc + "selectors", // MPL-2.0, rustdoc ]; pub fn check(path: &Path, bad: &mut bool) { diff --git a/src/tools/tidy/src/lib.rs b/src/tools/tidy/src/lib.rs index 020570e61d..90bf7a5e0a 100644 --- a/src/tools/tidy/src/lib.rs +++ b/src/tools/tidy/src/lib.rs @@ -62,7 +62,10 @@ fn filter_dirs(path: &Path) -> bool { "src/rt/hoedown", "src/tools/cargo", "src/tools/rls", + "src/tools/clippy", "src/tools/rust-installer", + "src/tools/rustfmt", + "src/tools/miri", ]; skip.iter().any(|p| path.ends_with(p)) } diff --git a/src/tools/tidy/src/pal.rs b/src/tools/tidy/src/pal.rs index 10c9971382..fdbcfd10bd 100644 --- a/src/tools/tidy/src/pal.rs +++ b/src/tools/tidy/src/pal.rs @@ -65,11 +65,10 @@ const EXCEPTION_PATHS: &'static [&'static str] = &[ "src/rtstartup", // Not sure what to do about this. 
magic stuff for mingw // temporary exceptions - "src/libstd/rtdeps.rs", // Until rustbuild replaces make + "src/libstd/lib.rs", // FIXME(#44217) "src/libstd/path.rs", "src/libstd/f32.rs", "src/libstd/f64.rs", - "src/libstd/lib.rs", // Until next stage0 snapshot bump "src/libstd/sys_common/mod.rs", "src/libstd/sys_common/net.rs", "src/libterm", // Not sure how to make this crate portable, but test needs it @@ -77,6 +76,7 @@ const EXCEPTION_PATHS: &'static [&'static str] = &[ // std testing crates, ok for now at least "src/libcore/tests", + "src/liballoc/tests/lib.rs", // non-std crates "src/test", diff --git a/src/tools/toolstate.toml b/src/tools/toolstate.toml new file mode 100644 index 0000000000..1700daa0af --- /dev/null +++ b/src/tools/toolstate.toml @@ -0,0 +1,36 @@ +# This file reflects the current status of all tools which are allowed +# to fail without failing the build. +# +# There are three states a tool can be in: +# 1. Broken: The tool doesn't build +# 2. Building: The tool builds but its tests are failing +# 3. Testing: The tool builds and its tests are passing +# +# In the future there will be further states like "Distributing", which +# configures whether the tool is included in the Rust distribution. +# +# If a tool was working before your PR but is broken now, consider +# opening a PR against the tool so that it works with your changes. +# If the tool stops compiling, change its state to `Broken`. If it +# still builds, change it to `Compiling`. +# How to do that is described in +# "CONTRIBUTING.md#External Dependencies". If the effort required is not +# warranted (e.g. due to the tool abusing some API that you changed, and +# fixing the tool would mean a significant refactoring) remember to ping +# the tool authors, so they can fix it, instead of being surprised by the +# breakage. 
+# +# Each tool has a list of people to ping + +# ping @oli-obk @RalfJung @eddyb +miri = "Broken" + +# ping @Manishearth @llogiq @mcarton @oli-obk +clippy = "Broken" + +# ping @nrc +rls = "Testing" + +# ping @nrc +rustfmt = "Testing" + diff --git a/src/vendor/backtrace-sys/.cargo-checksum.json b/src/vendor/backtrace-sys/.cargo-checksum.json index 1ce10630a0..67d5a77e33 100644 --- a/src/vendor/backtrace-sys/.cargo-checksum.json +++ b/src/vendor/backtrace-sys/.cargo-checksum.json @@ -1 +1 @@ -{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","Cargo.toml":"744f124e8a67a644dfee3665baad89b5f98ac93d79034aca685e0ac79802a5d6","Cargo.toml.orig":"81529e8587962787e1201bc677e501dc81064b37b8766d5c76a153230c1a38f5","build.rs":"dd3a9eeb5f071ebf073657b691593aa9432a5280d3c7719bd8357e20e6ccdf7f","src/lib.rs":"cb45ba047240bceac6ea74da50c2f48ae81a965b578c833a766a3ea0db1075f3","src/libbacktrace/ChangeLog":"ad6f9058524b950aed734db83419e0624df55a48af8091c8bcad7a125aeeffdd","src/libbacktrace/ChangeLog.jit":"ee143b5c9dc571d9aca006be246a83b1f019c39a3fd7b0025eb37463e30200ce","src/libbacktrace/Makefile.am":"74d0036c9715cb7fa6c749cead794ba96283f47d243cf2b25bc6fac8f04667d5","src/libbacktrace/Makefile.in":"9ca92df95c8d62b8a0e106357a796ef4d2c5ec7fd02ab133fd0a65de1984e096","src/libbacktrace/README":"3d1a15714583197945df8db054da6f30613316ec311c5ec2d68b92fab12364a2","src/libbacktrace/aclocal.m4":"e3a6dfa4ce929624ac289b260987a7fa9b512cfbf3ff6f39c5d1e38cbad1a0ea","src/libbacktrace/alloc.c":"829ceab369210fc60758f339807fe0bf55832c77adb3a4450fae586a90a9673d","src/libbacktrace/ansidecl.h":"14acfcd6c044bc73de85b120cbc3e626771a01712911ee1f6137585fd710fb1e","src/libbacktrace/atomic.c":"3e1882a520e0e1115107e194fbd97daa8d3feef15a70047969f4976e7542c263","src/libbacktrace/backtrace-supported.h.in":"42277f3c383386b6cfa3d3d889336e92303fac0ae1a9fb8a6a56737245dfb8f3","src/libbacktrace/backtrace.c":"76bdabddc4eb8a46a7eec98ce610c86909c89c629eb850a0cde98f5a78c9b99d","src/libbacktrace/backtrace.h":"cd2db6474556b655e42c0a973b871c1b6cb4796809490bf59cc8d3d0e8c083f5","src/libbacktrace/btest.c":"fff55e4acf6ad02a6f719046fc4e3f8bf795166ed9956122a79e17c6be266738","src/libbacktrace/config.guess":"8131853dbc8c3be5171fa96353de7a884a79d3f1d6b3fbf48f99037f5f95fe27","src/libbacktrace/config.h.in":"9b0d30416c93906e5c0ce36c95a7426bfd41228114f381949e89d263305b5b65","src/libbacktrace/config.sub":"3b739084e4b409aacf8531f87b57efa602eccdd17b5cddbc4ae1313a2c866f34","src/libbacktrace/configure":"2b45c89d06809c6ed49d70bfdf51a93e44370fc81f689169a073f47b14b366e5","src/libbacktrace/configure.ac":"360b1a033e33d2c58d2f36aaa6644a2aac0dd8960417df6d64202cc099a1c3f3","src/libbacktrace/dwarf.c":"b969c3abb07a09aa4ac5094dba5777d2c23fee77ade4ec0934142d6b81049785","src/libbacktrace/dwarf2.def":"d947302d8b259f0abdd9dd2f50a0713f4e7395c05ce88ddfdb37a7145b045d82","src/libbacktrace/dwarf2.h":"b3b13fa412a1573e3ee8dcb1d603ec33de7140f00b3bbbaa5292724f0087ace3","src/libbacktrace/elf.c":"728d7e47bf077de4dad6eecc76c134b4b4898dc5866572587692848f29aaec5c","src/libbacktrace/fileline.c":"bae25b6662bfd01a43bccc645206e00aeea27f18a51f6303f00f4fa1d9f2e861","src/libbacktrace/filenames.h":"bce400200dbc7452ca2a7f27dd36516949c4c94a135a6a322f011f3ce7817377","src/libbacktrace/filetype.awk":"9dd5352f288c3737fccd24a2c7583f4d0dbca2b95b2735a90a26b644478018af","src/libbacktrace/hashtab.h":"4cb7f68db5c2e8ff1190ecf859e7feb58447ea20bd9818bb1f447b46e2813c58","src/libbacktrace/install-sh":"d7c113e5484fce8b48f9a34a7a91e385ec279247d415b0b7d749bd56ad8ee8a2","src/libbacktrace/intern
al.h":"ed657e14aa33b4b5217919435e69019c5a677a00a9c134bf0297f000f146b093","src/libbacktrace/ltmain.sh":"ee4019f80b993cdb1bd13bf39434ad4f1ef15ae57a97594c53fb7b1d76d776a4","src/libbacktrace/missing":"b1a337b731bbb58846d31ca2f5143c293a455fa41b481e236e89a9016d96b034","src/libbacktrace/mmap.c":"fec72286a2d8a0c53fd56ea61b3766b87031f6f803f6de6c3c656e39bc85cdc2","src/libbacktrace/mmapio.c":"32a672fd2b8b3395c8a16ef11d839437ca35570fbc235bb0e2e70ef8097a12fc","src/libbacktrace/nounwind.c":"7694636af38ef488c814ab9b71854d6a259c1f3f635bd4c3ed99a88ca2beb5f1","src/libbacktrace/pecoff.c":"00cf6976cfc1e018f5e5473d4fbef4ae86de825dd2749897c45dfcebcefc6abf","src/libbacktrace/posix.c":"73754fcfa0a5b0342e8fe32166e7228b61e022699a8a3c9a2ef8e46260736704","src/libbacktrace/print.c":"ec1e80e17e27130bf9412470e8dc9155a0dc9ca3a78125081f5073b438b54157","src/libbacktrace/read.c":"399458fb06389aa4bd5ce376c4620cf733b5555d1dabe7ef19f185da1e37e614","src/libbacktrace/simple.c":"686d976c2bdb1a074f9be05af026d4b4c7a4978c2602268747d492563f18e0dc","src/libbacktrace/sort.c":"1a1e44af4e4d02fc568b318aa1517792edd6972a27029ba78e938856ef8ba1a0","src/libbacktrace/state.c":"49d547f5622b0ea0c39bc873b09a7dce276fdc6469118f5ab2e72c55bd0457fb","src/libbacktrace/stest.c":"aa1b90fe2d6dc8ac65ac0168776c2d242efc6460ac596deb14ac1e72bf764871","src/libbacktrace/unknown.c":"914aa113cd17ed678d40927391b35f6027e87b3a02e36e9f1822ec1efe9c0e20","symbol-map":"b941b2516efbb9e46521eeb166c60b31d52b2a4aaaf35d7f9af24946bbe1328a"},"package":"afccc5772ba333abccdf60d55200fa3406f8c59dcf54d5f7998c9107d3799c7c"} \ No newline at end of file +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","Cargo.toml":"95f05ff22dfc6584b02c1ea002712ae419c1cac5b8e145883478c2f05b5df672","Cargo.toml.orig":"2169d4eedf762f95111c63e9c54858687cceb80362b0d8a12783a76405191c6f","build.rs":"e1b627f7d46df4ade4aa3950f120b973f419970f984bf8d9f647e1669b2a30d7","src/lib.rs":"cb45ba047240bceac6ea74da50c2f48ae81a965b578c833a766a3ea0db1075f3","src/libbacktrace/ChangeLog":"ad6f9058524b950aed734db83419e0624df55a48af8091c8bcad7a125aeeffdd","src/libbacktrace/ChangeLog.jit":"ee143b5c9dc571d9aca006be246a83b1f019c39a3fd7b0025eb37463e30200ce","src/libbacktrace/Makefile.am":"74d0036c9715cb7fa6c749cead794ba96283f47d243cf2b25bc6fac8f04667d5","src/libbacktrace/Makefile.in":"9ca92df95c8d62b8a0e106357a796ef4d2c5ec7fd02ab133fd0a65de1984e096","src/libbacktrace/README":"3d1a15714583197945df8db054da6f30613316ec311c5ec2d68b92fab12364a2","src/libbacktrace/aclocal.m4":"e3a6dfa4ce929624ac289b260987a7fa9b512cfbf3ff6f39c5d1e38cbad1a0ea","src/libbacktrace/alloc.c":"829ceab369210fc60758f339807fe0bf55832c77adb3a4450fae586a90a9673d","src/libbacktrace/ansidecl.h":"14acfcd6c044bc73de85b120cbc3e626771a01712911ee1f6137585fd710fb1e","src/libbacktrace/atomic.c":"3e1882a520e0e1115107e194fbd97daa8d3feef15a70047969f4976e7542c263","src/libbacktrace/backtrace-supported.h.in":"42277f3c383386b6cfa3d3d889336e92303fac0ae1a9fb8a6a56737245dfb8f3","src/libbacktrace/backtrace.c":"76bdabddc4eb8a46a7eec98ce610c86909c89c629eb850a0cde98f5a78c9b99d","src/libbacktrace/backtrace.h":"cd2db6474556b655e42c0a973b871c1b6cb4796809490bf59cc8d3d0e8c083f5","src/libbacktrace/btest.c":"fff55e4acf6ad02a6f719046fc4e3f8bf795166ed9956122a79e17c6be266738","src/libbacktrace/config.guess":"8131853dbc8c3be5171fa96353de7a884a79d3f1d6b3fbf48f99037f5f95fe27","src/libbacktrace/config.h.in":"9b0d30416c93906e5c0ce36c95a7426bfd41228114f381949e89d263305b5b65","src/libbacktrace/config.sub":"3b739084e4b409aacf8531f87b57efa602eccdd17b5cddbc4ae1313a2c866f
34","src/libbacktrace/configure":"2b45c89d06809c6ed49d70bfdf51a93e44370fc81f689169a073f47b14b366e5","src/libbacktrace/configure.ac":"360b1a033e33d2c58d2f36aaa6644a2aac0dd8960417df6d64202cc099a1c3f3","src/libbacktrace/dwarf.c":"b969c3abb07a09aa4ac5094dba5777d2c23fee77ade4ec0934142d6b81049785","src/libbacktrace/dwarf2.def":"d947302d8b259f0abdd9dd2f50a0713f4e7395c05ce88ddfdb37a7145b045d82","src/libbacktrace/dwarf2.h":"b3b13fa412a1573e3ee8dcb1d603ec33de7140f00b3bbbaa5292724f0087ace3","src/libbacktrace/elf.c":"728d7e47bf077de4dad6eecc76c134b4b4898dc5866572587692848f29aaec5c","src/libbacktrace/fileline.c":"bae25b6662bfd01a43bccc645206e00aeea27f18a51f6303f00f4fa1d9f2e861","src/libbacktrace/filenames.h":"bce400200dbc7452ca2a7f27dd36516949c4c94a135a6a322f011f3ce7817377","src/libbacktrace/filetype.awk":"9dd5352f288c3737fccd24a2c7583f4d0dbca2b95b2735a90a26b644478018af","src/libbacktrace/hashtab.h":"4cb7f68db5c2e8ff1190ecf859e7feb58447ea20bd9818bb1f447b46e2813c58","src/libbacktrace/install-sh":"d7c113e5484fce8b48f9a34a7a91e385ec279247d415b0b7d749bd56ad8ee8a2","src/libbacktrace/internal.h":"ed657e14aa33b4b5217919435e69019c5a677a00a9c134bf0297f000f146b093","src/libbacktrace/ltmain.sh":"ee4019f80b993cdb1bd13bf39434ad4f1ef15ae57a97594c53fb7b1d76d776a4","src/libbacktrace/missing":"b1a337b731bbb58846d31ca2f5143c293a455fa41b481e236e89a9016d96b034","src/libbacktrace/mmap.c":"fec72286a2d8a0c53fd56ea61b3766b87031f6f803f6de6c3c656e39bc85cdc2","src/libbacktrace/mmapio.c":"32a672fd2b8b3395c8a16ef11d839437ca35570fbc235bb0e2e70ef8097a12fc","src/libbacktrace/nounwind.c":"7694636af38ef488c814ab9b71854d6a259c1f3f635bd4c3ed99a88ca2beb5f1","src/libbacktrace/pecoff.c":"00cf6976cfc1e018f5e5473d4fbef4ae86de825dd2749897c45dfcebcefc6abf","src/libbacktrace/posix.c":"73754fcfa0a5b0342e8fe32166e7228b61e022699a8a3c9a2ef8e46260736704","src/libbacktrace/print.c":"ec1e80e17e27130bf9412470e8dc9155a0dc9ca3a78125081f5073b438b54157","src/libbacktrace/read.c":"399458fb06389aa4bd5ce376c4620cf733b5555d1dabe7ef19f185da1e37e614","src/libbacktrace/simple.c":"686d976c2bdb1a074f9be05af026d4b4c7a4978c2602268747d492563f18e0dc","src/libbacktrace/sort.c":"1a1e44af4e4d02fc568b318aa1517792edd6972a27029ba78e938856ef8ba1a0","src/libbacktrace/state.c":"49d547f5622b0ea0c39bc873b09a7dce276fdc6469118f5ab2e72c55bd0457fb","src/libbacktrace/stest.c":"aa1b90fe2d6dc8ac65ac0168776c2d242efc6460ac596deb14ac1e72bf764871","src/libbacktrace/unknown.c":"914aa113cd17ed678d40927391b35f6027e87b3a02e36e9f1822ec1efe9c0e20","symbol-map":"b941b2516efbb9e46521eeb166c60b31d52b2a4aaaf35d7f9af24946bbe1328a"},"package":"c63ea141ef8fdb10409d0f5daf30ac51f84ef43bff66f16627773d2a292cd189"} \ No newline at end of file diff --git a/src/vendor/backtrace-sys/Cargo.toml b/src/vendor/backtrace-sys/Cargo.toml index 95bfc39bd8..a125257664 100644 --- a/src/vendor/backtrace-sys/Cargo.toml +++ b/src/vendor/backtrace-sys/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "backtrace-sys" -version = "0.1.12" +version = "0.1.14" authors = ["Alex Crichton "] build = "build.rs" description = "Bindings to the libbacktrace gcc library\n" @@ -22,5 +22,5 @@ license = "MIT/Apache-2.0" repository = "https://github.com/alexcrichton/backtrace-rs" [dependencies.libc] version = "0.2" -[build-dependencies.gcc] -version = "0.3" +[build-dependencies.cc] +version = "1.0" diff --git a/src/vendor/backtrace-sys/Cargo.toml.orig b/src/vendor/backtrace-sys/Cargo.toml.orig index dbad16f9d7..edd5aa6af6 100644 --- a/src/vendor/backtrace-sys/Cargo.toml.orig +++ b/src/vendor/backtrace-sys/Cargo.toml.orig @@ -1,6 +1,6 @@ [package] 
name = "backtrace-sys" -version = "0.1.12" +version = "0.1.14" authors = ["Alex Crichton "] build = "build.rs" license = "MIT/Apache-2.0" @@ -15,4 +15,4 @@ Bindings to the libbacktrace gcc library libc = "0.2" [build-dependencies] -gcc = "0.3" +cc = "1.0" diff --git a/src/vendor/backtrace-sys/build.rs b/src/vendor/backtrace-sys/build.rs index 95f89e1b96..28b10970af 100644 --- a/src/vendor/backtrace-sys/build.rs +++ b/src/vendor/backtrace-sys/build.rs @@ -1,4 +1,4 @@ -extern crate gcc; +extern crate cc; use std::env; use std::ffi::OsString; @@ -14,7 +14,7 @@ macro_rules! t { }) } -fn try_tool(compiler: &gcc::Tool, cc: &str, compiler_suffix: &str, tool_suffix: &str) +fn try_tool(compiler: &cc::Tool, cc: &str, compiler_suffix: &str, tool_suffix: &str) -> Option { if !cc.ends_with(compiler_suffix) { return None @@ -28,7 +28,7 @@ fn try_tool(compiler: &gcc::Tool, cc: &str, compiler_suffix: &str, tool_suffix: } } -fn find_tool(compiler: &gcc::Tool, cc: &str, tool: &str) -> PathBuf { +fn find_tool(compiler: &cc::Tool, cc: &str, tool: &str) -> PathBuf { // Allow overrides via env var if let Some(s) = env::var_os(tool.to_uppercase()) { return s.into() @@ -87,7 +87,7 @@ fn main() { OsString::from_wide(&chars) }; - let cfg = gcc::Config::new(); + let cfg = cc::Build::new(); let compiler = cfg.get_compiler(); let cc = compiler.path().file_name().unwrap().to_str().unwrap(); let mut flags = OsString::new(); @@ -98,11 +98,13 @@ fn main() { flags.push(flag); } let ar = find_tool(&compiler, cc, "ar"); + let ranlib = find_tool(&compiler, cc, "ranlib"); let mut cmd = Command::new("sh"); cmd.arg(configure) .current_dir(&dst) .env("AR", &ar) + .env("RANLIB", &ranlib) .env("CC", compiler.path()) .env("CFLAGS", flags) .arg("--with-pic") diff --git a/src/vendor/backtrace/.cargo-checksum.json b/src/vendor/backtrace/.cargo-checksum.json index c977f753e7..aa8527834e 100644 --- a/src/vendor/backtrace/.cargo-checksum.json +++ b/src/vendor/backtrace/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"6f11d19317924088c90d605e1d13e4af230b10e289c5df6139c40a62409eabd9","Cargo.toml":"f893b7b701611e48ab80db25670342084b11d8d4ed3f3471b68842c490d247b8","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"11061a4c1e27fde148b5b1fb6df553f8eb464e279be0f5e278d262bf35d7b0f8","appveyor.yml":"29d9f44137560479a75b02a7953cfa0c378c2577ed1925f579b400295c7937e3","examples/backtrace.rs":"fd6e1cc6c3378ec7d41cd03b2bef187051298dceb01147e71f207dbb8a0c4867","examples/raw.rs":"f07be26d1f97cd7ac79290ac99d19c4eec5d27031fe270ab5364c25d9c2ad9e0","src/backtrace/dbghelp.rs":"45c5052763857d4a3727c3082be1b2f6c28f7973afd66706657defda5d97c6ec","src/backtrace/libunwind.rs":"cc9cdc1d389571cdedf43dfc2d39b8c3af85531a3965ed700c724f436afb213e","src/backtrace/mod.rs":"d00f4a574fae44df81b1d40bf44acea84addb70b4c76c85bfaa1f3ab0bcd7f0d","src/backtrace/noop.rs":"dc4a6602e9852b945c382194402314d3d68c8ca90199af9a8159419fb91a3c99","src/backtrace/unix_backtrace.rs":"1bb4a4a2f1e56f8ac04002dd77411116d8b4920f905d1ddfcb289e242f939a86","src/capture.rs":"de3250fcb9ff941391dea24decc8166d058f17abe77d25ff82b766ac384f00ea","src/dylib.rs":"09f3d7f32849cf0daa4de9df48f8e4a4d5ba62e20723c79578201bd271dc4777","src/lib.rs":"7ccd8cc3679435f0e39efd26f23d9d7a01094ab09ba6d411c6587b2420c6886b","src/symbolize/coresymbolication.rs":"99280684791694f560824b39291ee7ad78a2b53f82e5972ff3d9b77b43671f60","src/symbolize/dbghelp.rs":"d743545bb3e64eafc4903e3e7aec115b64da2174e75afd7b465bc0b89573b88a","src/symbolize/dladdr.rs":"8287cbca440a9e92e74d88c5a7b920f6b4cf6d8f50bc8b0f61aca5ba42d5b5ec","src/symbolize/libbacktrace.rs":"0cdad7de2501baef9da193ee6aab21c453d26348a2071c805a133efe1209eaa1","src/symbolize/mod.rs":"7f2efe54ce40f42ba38673614cff2a510632123cb6d1bc3da88566f12bcba588","src/symbolize/noop.rs":"b622fcecb4e22b42c3d3e2ef5dc5a6ab14601fec83c7797ee1fbbacc12fe6ca1","tests/smoke.rs":"fc882d7db0f4842e4415e3319774a33ba27c4d6412a62f8ee194a5dafd8a6128"},"package":"72f9b4182546f4b04ebc4ab7f84948953a118bd6021a1b6a6c909e3e94f6be76"} \ No newline at end of file 
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"7c4c8bd9d72ba4186b6ff777a5168a1eacb5b2245bb675ab8f4abd5859ec906f","Cargo.toml":"e4c5543dd4144dec207c77df49246b1fec9dfcecde2b4ee50d2f016fd11d9a1b","Cargo.toml.orig":"71c38add9510bf39fda384f27f32fde1f527a810f4206b61a793a941e9e2407c","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"11061a4c1e27fde148b5b1fb6df553f8eb464e279be0f5e278d262bf35d7b0f8","appveyor.yml":"29d9f44137560479a75b02a7953cfa0c378c2577ed1925f579b400295c7937e3","examples/backtrace.rs":"fd6e1cc6c3378ec7d41cd03b2bef187051298dceb01147e71f207dbb8a0c4867","examples/raw.rs":"f07be26d1f97cd7ac79290ac99d19c4eec5d27031fe270ab5364c25d9c2ad9e0","src/backtrace/dbghelp.rs":"45c5052763857d4a3727c3082be1b2f6c28f7973afd66706657defda5d97c6ec","src/backtrace/libunwind.rs":"cc9cdc1d389571cdedf43dfc2d39b8c3af85531a3965ed700c724f436afb213e","src/backtrace/mod.rs":"91a544bd9e89da6b580e2580ab15ead354f13243bca50516ff5cefe68a8cd199","src/backtrace/noop.rs":"dc4a6602e9852b945c382194402314d3d68c8ca90199af9a8159419fb91a3c99","src/backtrace/unix_backtrace.rs":"31204989a8852428792a1c99d36717559aad14d93526e8a37744214adf188268","src/capture.rs":"a6f379300f6a578c52fce5927461fb0d084b2eb080113561a2e0cc11aa1f5c73","src/dylib.rs":"09f3d7f32849cf0daa4de9df48f8e4a4d5ba62e20723c79578201bd271dc4777","src/lib.rs":"20006ff65d1e9fd7f8fea52ae5515f99be99ffcc90b988f22d6cf276062cc9a0","src/symbolize/coresymbolication.rs":"99280684791694f560824b39291ee7ad78a2b53f82e5972ff3d9b77b43671f60","src/symbolize/dbghelp.rs":"6557ef254bb146609efd1534c4ac698e0278c4ce0b5f005108c6c8cd834f7250","src/symbolize/dladdr.rs":"8287cbca440a9e92e74d88c5a7b920f6b4cf6d8f50bc8b0f61aca5ba42d5b5ec","src/symbolize/gimli.rs":"44abf9436f53ed38e053d324548509731234577445eb5d2fe17f0adbb5834c03","src/symbolize/libbacktrace.rs":"0cdad7de2501baef9da193ee6aab21c453d26348a2071c805a133efe1209eaa1","src/symbolize/mod.rs":"5cce6abcdebe8f5766b0697fc47fdfa119afd231b99048d0b9c67af3a295b4ae","src/symbolize/noop.rs":"b622fcecb4e22b42c3d3e2ef5dc5a6ab14601fec83c7797ee1fbbacc12fe6ca1","tests/long_fn_name.rs":"1291e92a2dceab9ad3d424a26dd32bd8a3b77e86c2409ab8930c56918c3d961f","tests/smoke.rs":"85b5de78290cb1b247494fa2f118096c1fda2f249db47f3d94dd52446c5f0d7d"},"package":"99f2ce94e22b8e664d95c57fff45b98a966c2252b60691d0b7aeeccd88d70983"} \ No newline at end of file diff --git a/src/vendor/backtrace/.travis.yml b/src/vendor/backtrace/.travis.yml index c56985a739..a5a769d249 100644 --- a/src/vendor/backtrace/.travis.yml +++ b/src/vendor/backtrace/.travis.yml @@ -1,11 +1,28 @@ language: rust + rust: - stable - beta - nightly + sudo: false + +dist: trusty + +addons: + sources: + # Provides clang-3.9 + - llvm-toolchain-trusty-3.9 + apt: + packages: + # Required for `bindgen`, which is required by `findshlibs`, which is + # required by the `gimli` feature. 
+ - clang-3.9 + before_script: - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH + - export LIBCLANG_PATH=/usr/lib/llvm-3.9/lib + script: - cargo test - cargo test --no-default-features @@ -19,15 +36,19 @@ script: - cargo test --no-default-features --features 'serialize-rustc' - cargo test --no-default-features --features 'serialize-rustc serialize-serde' - cargo test --no-default-features --features 'cpp_demangle' + - cargo test --no-default-features --features 'gimli-symbolize' - cd ./cpp_smoke_test && cargo test && cd .. - cargo clean && cargo build - rustdoc --test README.md -L target/debug/deps -L target/debug - cargo doc --no-deps + notifications: email: on_success: never + after_success: - travis-cargo --only nightly doc-upload + env: global: # serde-codegen has historically needed a large stack to expand diff --git a/src/vendor/backtrace/Cargo.toml b/src/vendor/backtrace/Cargo.toml index f23135e50e..63d14fefd9 100644 --- a/src/vendor/backtrace/Cargo.toml +++ b/src/vendor/backtrace/Cargo.toml @@ -1,87 +1,81 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + [package] name = "backtrace" -version = "0.3.2" -authors = ["Alex Crichton ", - "The Rust Project Developers"] -license = "MIT/Apache-2.0" -readme = "README.md" -repository = "https://github.com/alexcrichton/backtrace-rs" +version = "0.3.3" +authors = ["Alex Crichton ", "The Rust Project Developers"] +description = "A library to acquire a stack trace (backtrace) at runtime in a Rust program.\n" homepage = "https://github.com/alexcrichton/backtrace-rs" documentation = "http://alexcrichton.com/backtrace-rs" -description = """ -A library to acquire a stack trace (backtrace) at runtime in a Rust program. -""" -[dependencies] -libc = "0.2" -cfg-if = "0.1" -rustc-demangle = "0.1.4" +readme = "README.md" +license = "MIT/Apache-2.0" +repository = "https://github.com/alexcrichton/backtrace-rs" +[dependencies.cpp_demangle] +version = "0.2.3" +optional = true +default-features = false -# Optionally enable the ability to serialize a `Backtrace` -serde = { version = "1.0", optional = true } -serde_derive = { version = "1.0", optional = true } -rustc-serialize = { version = "0.3", optional = true } +[dependencies.serde_derive] +version = "1.0" +optional = true -# Optionally demangle C++ frames' symbols in backtraces. 
-cpp_demangle = { default-features = false, version = "0.2.3", optional = true } +[dependencies.serde] +version = "1.0" +optional = true -[target.'cfg(windows)'.dependencies] -dbghelp-sys = { version = "0.2", optional = true } -kernel32-sys = { version = "0.2", optional = true } -winapi = { version = "0.2.5", optional = true } +[dependencies.rustc-demangle] +version = "0.1.4" -[target.'cfg(all(unix, not(target_os = "emscripten"), not(target_os = "macos"), not(target_os = "ios")))'.dependencies] -backtrace-sys = { path = "backtrace-sys", version = "0.1.3", optional = true } +[dependencies.cfg-if] +version = "0.1" + +[dependencies.findshlibs] +version = "0.3.3" +optional = true + +[dependencies.rustc-serialize] +version = "0.3" +optional = true + +[dependencies.addr2line] +version = "0.5.0" +optional = true -# Each feature controls the two phases of finding a backtrace: getting a -# backtrace and then resolving instruction pointers to symbols. The default -# feature enables all the necessary features for each platform this library -# supports, but it can be disabled to have finer grained control over the -# dependencies. -# -# Note that not all features are available on all platforms, so even though a -# feature is enabled some other feature may be used instead. [features] +gimli-symbolize = ["addr2line", "findshlibs"] +coresymbolication = [] +dladdr = [] +kernel32 = [] +unix-backtrace = [] +dbghelp = ["kernel32-sys", "winapi", "dbghelp-sys"] default = ["libunwind", "libbacktrace", "coresymbolication", "dladdr", "dbghelp"] +libunwind = [] +serialize-rustc = ["rustc-serialize"] +libbacktrace = ["backtrace-sys"] +serialize-serde = ["serde", "serde_derive"] +[target."cfg(windows)".dependencies.dbghelp-sys] +version = "0.2" +optional = true - #======================================= - # Methods of acquiring a backtrace - # - # - libunwind: when using this the libgcc library is linked against to get - # the unwinding support. This is generally the most reliable method to get - # a backtrace on unix. - # - unix-backtrace: this uses the backtrace(3) function to acquire a - # backtrace, but is not as reliable as libunwind. It is, however, - # generally found in more locations. - # - dbghelp: on windows this enables usage of dbghelp.dll to find a - # backtrace at runtime - # - kernel32: on windows this enables using RtlCaptureStackBackTrace as the - # function to acquire a backtrace - libunwind = [] - unix-backtrace = [] - dbghelp = ["kernel32-sys", "winapi", "dbghelp-sys"] - kernel32 = [] - - #======================================= - # Methods of resolving symbols - # - # - libbacktrace: this feature activates the `backtrace-sys` dependency, - # building the libbacktrace library found in gcc repos. This library - # parses the DWARF info of ELF executables to find symbol names, and it - # can also provide filename/line number information if debuginfo is - # compiled in. This library currently only primarily works on unixes that - # are not OSX, however. - # - dladdr: this feature uses the dladdr(3) function (a glibc extension) to - # resolve symbol names. This is fairly unreliable on linux, but works well - # enough on OSX. - # - coresymbolication: this feature uses the undocumented core symbolication - # framework on OS X to symbolize. 
- libbacktrace = ["backtrace-sys"] - dladdr = [] - coresymbolication = [] +[target."cfg(windows)".dependencies.winapi] +version = "0.2.5" +optional = true - #======================================= - # Methods of serialization - # - # Various features used for enabling rustc-serialize or syntex codegen. - serialize-rustc = ["rustc-serialize"] - serialize-serde = ["serde", "serde_derive"] +[target."cfg(windows)".dependencies.kernel32-sys] +version = "0.2" +optional = true +[target."cfg(all(unix, not(target_os = \"emscripten\"), not(target_os = \"macos\"), not(target_os = \"ios\")))".dependencies.backtrace-sys] +version = "0.1.3" +optional = true +[target."cfg(unix)".dependencies.libc] +version = "0.2" diff --git a/src/vendor/backtrace/Cargo.toml.orig b/src/vendor/backtrace/Cargo.toml.orig new file mode 100644 index 0000000000..4698eb84ed --- /dev/null +++ b/src/vendor/backtrace/Cargo.toml.orig @@ -0,0 +1,98 @@ +[package] +name = "backtrace" +version = "0.3.3" +authors = ["Alex Crichton ", + "The Rust Project Developers"] +license = "MIT/Apache-2.0" +readme = "README.md" +repository = "https://github.com/alexcrichton/backtrace-rs" +homepage = "https://github.com/alexcrichton/backtrace-rs" +documentation = "http://alexcrichton.com/backtrace-rs" +description = """ +A library to acquire a stack trace (backtrace) at runtime in a Rust program. +""" +[dependencies] +cfg-if = "0.1" +rustc-demangle = "0.1.4" + +# Optionally enable the ability to serialize a `Backtrace` +serde = { version = "1.0", optional = true } +serde_derive = { version = "1.0", optional = true } +rustc-serialize = { version = "0.3", optional = true } + +# Optionally demangle C++ frames' symbols in backtraces. +cpp_demangle = { default-features = false, version = "0.2.3", optional = true } + +addr2line = { version = "0.5.0", optional = true } +findshlibs = { version = "0.3.3", optional = true } + +[target.'cfg(unix)'.dependencies] +libc = "0.2" + +[target.'cfg(windows)'.dependencies] +dbghelp-sys = { version = "0.2", optional = true } +kernel32-sys = { version = "0.2", optional = true } +winapi = { version = "0.2.5", optional = true } + +[target.'cfg(all(unix, not(target_os = "emscripten"), not(target_os = "macos"), not(target_os = "ios")))'.dependencies] +backtrace-sys = { path = "backtrace-sys", version = "0.1.3", optional = true } + +# Each feature controls the two phases of finding a backtrace: getting a +# backtrace and then resolving instruction pointers to symbols. The default +# feature enables all the necessary features for each platform this library +# supports, but it can be disabled to have finer grained control over the +# dependencies. +# +# Note that not all features are available on all platforms, so even though a +# feature is enabled some other feature may be used instead. +[features] +default = ["libunwind", "libbacktrace", "coresymbolication", "dladdr", "dbghelp"] + + #======================================= + # Methods of acquiring a backtrace + # + # - libunwind: when using this the libgcc library is linked against to get + # the unwinding support. This is generally the most reliable method to get + # a backtrace on unix. + # - unix-backtrace: this uses the backtrace(3) function to acquire a + # backtrace, but is not as reliable as libunwind. It is, however, + # generally found in more locations. 
+ # - dbghelp: on windows this enables usage of dbghelp.dll to find a + # backtrace at runtime + # - kernel32: on windows this enables using RtlCaptureStackBackTrace as the + # function to acquire a backtrace + libunwind = [] + unix-backtrace = [] + dbghelp = ["kernel32-sys", "winapi", "dbghelp-sys"] + kernel32 = [] + + #======================================= + # Methods of resolving symbols + # + # - libbacktrace: this feature activates the `backtrace-sys` dependency, + # building the libbacktrace library found in gcc repos. This library + # parses the DWARF info of ELF executables to find symbol names, and it + # can also provide filename/line number information if debuginfo is + # compiled in. This library currently only primarily works on unixes that + # are not OSX, however. + # - dladdr: this feature uses the dladdr(3) function (a glibc extension) to + # resolve symbol names. This is fairly unreliable on linux, but works well + # enough on OSX. + # - coresymbolication: this feature uses the undocumented core symbolication + # framework on OS X to symbolize. + # - gimli-symbolize: use the `gimli-rs/addr2line` crate to symbolicate + # addresses into file, line, and name using DWARF debug information. At + # the moment, this is only possible when targetting Linux, since macOS + # splits DWARF out into a separate object file. Enabling this feature + # means one less C dependency. + libbacktrace = ["backtrace-sys"] + dladdr = [] + coresymbolication = [] + gimli-symbolize = ["addr2line", "findshlibs"] + + #======================================= + # Methods of serialization + # + # Various features used for enabling rustc-serialize or syntex codegen. + serialize-rustc = ["rustc-serialize"] + serialize-serde = ["serde", "serde_derive"] diff --git a/src/vendor/backtrace/src/backtrace/mod.rs b/src/vendor/backtrace/src/backtrace/mod.rs index d1d18dd8b8..53fe5b416c 100644 --- a/src/vendor/backtrace/src/backtrace/mod.rs +++ b/src/vendor/backtrace/src/backtrace/mod.rs @@ -36,7 +36,7 @@ use std::os::raw::c_void; /// }); /// } /// ``` -#[inline(never)] // if this is never inlined then the first frame can be konwn +#[inline(never)] // if this is never inlined then the first frame can be known // to be skipped pub fn trace bool>(mut cb: F) { trace_imp(&mut cb) @@ -90,6 +90,7 @@ impl fmt::Debug for Frame { cfg_if! { if #[cfg(all(unix, not(target_os = "emscripten"), + not(all(target_os = "ios", target_arch = "arm")), feature = "libunwind"))] { mod libunwind; use self::libunwind::trace as trace_imp; diff --git a/src/vendor/backtrace/src/backtrace/unix_backtrace.rs b/src/vendor/backtrace/src/backtrace/unix_backtrace.rs index a6088d7070..061bba9ef6 100644 --- a/src/vendor/backtrace/src/backtrace/unix_backtrace.rs +++ b/src/vendor/backtrace/src/backtrace/unix_backtrace.rs @@ -25,7 +25,7 @@ extern { } #[inline(always)] -pub fn trace(mut cb: &mut FnMut(&super::Frame) -> bool) { +pub fn trace(cb: &mut FnMut(&super::Frame) -> bool) { const SIZE: usize = 100; let mut buf: [*mut c_void; SIZE]; diff --git a/src/vendor/backtrace/src/capture.rs b/src/vendor/backtrace/src/capture.rs index 8846ea47e1..570b117b01 100644 --- a/src/vendor/backtrace/src/capture.rs +++ b/src/vendor/backtrace/src/capture.rs @@ -26,7 +26,7 @@ pub struct Backtrace { pub struct BacktraceFrame { ip: usize, symbol_address: usize, - symbols: Vec, + symbols: Option>, } /// Captured version of a symbol in a backtrace. @@ -49,7 +49,7 @@ impl Backtrace { /// /// This function is useful for representing a backtrace as an object in /// Rust. 
This returned value can be sent across threads and printed - /// elsewhere, and thie purpose of this value is to be entirely self + /// elsewhere, and the purpose of this value is to be entirely self /// contained. /// /// # Examples @@ -60,21 +60,36 @@ impl Backtrace { /// let current_backtrace = Backtrace::new(); /// ``` pub fn new() -> Backtrace { + let mut bt = Backtrace::new_unresolved(); + bt.resolve(); + return bt + } + + /// Similar to `new` except that this does not resolve any symbols, this + /// simply captures the backtrace as a list of addresses. + /// + /// At a later time the `resolve` function can be called to resolve this + /// backtrace's symbols into readable names. This function exists because + /// the resolution process can sometimes take a significant amount of time + /// whereas any one backtrace may only be rarely printed. + /// + /// # Examples + /// + /// ``` + /// use backtrace::Backtrace; + /// + /// let mut current_backtrace = Backtrace::new_unresolved(); + /// println!("{:?}", current_backtrace); // no symbol names + /// current_backtrace.resolve(); + /// println!("{:?}", current_backtrace); // symbol names now present + /// ``` + pub fn new_unresolved() -> Backtrace { let mut frames = Vec::new(); trace(|frame| { - let mut symbols = Vec::new(); - resolve(frame.ip(), |symbol| { - symbols.push(BacktraceSymbol { - name: symbol.name().map(|m| m.as_bytes().to_vec()), - addr: symbol.addr().map(|a| a as usize), - filename: symbol.filename().map(|m| m.to_path_buf()), - lineno: symbol.lineno(), - }); - }); frames.push(BacktraceFrame { ip: frame.ip() as usize, symbol_address: frame.symbol_address() as usize, - symbols: symbols, + symbols: None, }); true }); @@ -90,6 +105,26 @@ impl Backtrace { pub fn frames(&self) -> &[BacktraceFrame] { &self.frames } + + /// If this backtrace was created from `new_unresolved` then this function + /// will resolve all addresses in the backtrace to their symbolic names. + /// + /// If this backtrace has been previously resolved or was created through + /// `new`, this function does nothing. + pub fn resolve(&mut self) { + for frame in self.frames.iter_mut().filter(|f| f.symbols.is_none()) { + let mut symbols = Vec::new(); + resolve(frame.ip as *mut _, |symbol| { + symbols.push(BacktraceSymbol { + name: symbol.name().map(|m| m.as_bytes().to_vec()), + addr: symbol.addr().map(|a| a as usize), + filename: symbol.filename().map(|m| m.to_path_buf()), + lineno: symbol.lineno(), + }); + }); + frame.symbols = Some(symbols); + } + } } impl From> for Backtrace { @@ -116,17 +151,18 @@ impl BacktraceFrame { pub fn symbol_address(&self) -> *mut c_void { self.symbol_address as *mut c_void } -} -impl BacktraceFrame { /// Returns the list of symbols that this frame corresponds to. /// /// Normally there is only one symbol per frame, but sometimes if a number /// of functions are inlined into one frame then multiple symbols will be /// returned. The first symbol listed is the "innermost function", whereas /// the last symbol is the outermost (last caller). + /// + /// Note that if this frame came from an unresolved backtrace then this will + /// return an empty list. 
pub fn symbols(&self) -> &[BacktraceSymbol] { - &self.symbols + self.symbols.as_ref().map(|s| &s[..]).unwrap_or(&[]) } } @@ -162,11 +198,18 @@ impl fmt::Debug for Backtrace { let ip = frame.ip(); try!(write!(fmt, "\n{:4}: {:2$?}", idx, ip, hex_width)); - if frame.symbols.len() == 0 { + let symbols = match frame.symbols { + Some(ref s) => s, + None => { + try!(write!(fmt, " - ")); + continue + } + }; + if symbols.len() == 0 { try!(write!(fmt, " - ")); } - for (idx, symbol) in frame.symbols().iter().enumerate() { + for (idx, symbol) in symbols.iter().enumerate() { if idx != 0 { try!(write!(fmt, "\n {:1$}", "", hex_width)); } diff --git a/src/vendor/backtrace/src/lib.rs b/src/vendor/backtrace/src/lib.rs index c9b9c4eb1b..78e7700e22 100644 --- a/src/vendor/backtrace/src/lib.rs +++ b/src/vendor/backtrace/src/lib.rs @@ -25,7 +25,7 @@ //! //! # API Principles //! -//! This library attempts to be as flexible as possible to accomodate different +//! This library attempts to be as flexible as possible to accommodate different //! backend implementations of acquiring a backtrace. Consequently the currently //! exported functions are closure-based as opposed to the likely expected //! iterator-based versions. This is done due to limitations of the underlying @@ -69,14 +69,12 @@ #![deny(missing_docs)] #![deny(warnings)] +#[cfg(unix)] extern crate libc; #[cfg(all(windows, feature = "kernel32-sys"))] extern crate kernel32; #[cfg(all(windows, feature = "winapi"))] extern crate winapi; #[cfg(all(windows, feature = "dbghelp"))] extern crate dbghelp; -#[cfg(feature = "serde")] -extern crate serde; - #[cfg(feature = "serde_derive")] #[cfg_attr(feature = "serde_derive", macro_use)] extern crate serde_derive; @@ -92,6 +90,15 @@ extern crate rustc_demangle; #[cfg(feature = "cpp_demangle")] extern crate cpp_demangle; +#[cfg(all(feature = "gimli-symbolize", + unix, + target_os = "linux"))] +extern crate addr2line; +#[cfg(all(feature = "gimli-symbolize", + unix, + target_os = "linux"))] +extern crate findshlibs; + #[allow(dead_code)] // not used everywhere #[cfg(unix)] #[macro_use] diff --git a/src/vendor/backtrace/src/symbolize/dbghelp.rs b/src/vendor/backtrace/src/symbolize/dbghelp.rs index 160a7cb890..60a93b0c53 100644 --- a/src/vendor/backtrace/src/symbolize/dbghelp.rs +++ b/src/vendor/backtrace/src/symbolize/dbghelp.rs @@ -70,8 +70,15 @@ pub fn resolve(addr: *mut c_void, cb: &mut FnMut(&super::Symbol)) { if ret != TRUE { return } + + // If the symbol name is greater than MaxNameLen, SymFromAddrW will + // give a buffer of (MaxNameLen - 1) characters and set NameLen to + // the real value. + let name_len = ::std::cmp::min(info.NameLen as usize, + info.MaxNameLen as usize - 1); + let name = slice::from_raw_parts(info.Name.as_ptr() as *const u16, - info.NameLen as usize); + name_len); let name = OsString::from_wide(name); let mut line = mem::zeroed::(); diff --git a/src/vendor/backtrace/src/symbolize/gimli.rs b/src/vendor/backtrace/src/symbolize/gimli.rs new file mode 100644 index 0000000000..5ee7150538 --- /dev/null +++ b/src/vendor/backtrace/src/symbolize/gimli.rs @@ -0,0 +1,170 @@ +use addr2line; +use findshlibs::{self, Segment, SharedLibrary}; +use std::cell::RefCell; +use std::env; +use std::os::raw::c_void; +use std::path::{Path, PathBuf}; +use std::u32; + +use SymbolName; + +const MAPPINGS_CACHE_SIZE: usize = 4; + +thread_local! { + // A very small, very simple LRU cache for debug info mappings. 
+ // + // The hit rate should be very high, since the typical stack doesn't cross + // between many shared libraries. + // + // The `addr2line::Mapping` structures are pretty expensive to create. Its + // cost is expected to be amortized by subsequent `locate` queries, which + // leverage the structures built when constructing `addr2line::Mapping`s to + // get nice speedups. If we didn't have this cache, that amortization would + // never happen, and symbolicating backtraces would be ssssllllooooowwww. + static MAPPINGS_CACHE: RefCell> + = RefCell::new(Vec::with_capacity(MAPPINGS_CACHE_SIZE)); +} + +fn with_mapping_for_path(path: PathBuf, mut f: F) +where + F: FnMut(&mut addr2line::Mapping) +{ + MAPPINGS_CACHE.with(|cache| { + let mut cache = cache.borrow_mut(); + + let idx = cache.iter().position(|&(ref p, _)| p == &path); + + // Invariant: after this conditional completes without early returning + // from an error, the cache entry for this path is at index 0. + + if let Some(idx) = idx { + // When the mapping is already in the cache, move it to the front. + if idx != 0 { + let entry = cache.remove(idx); + cache.insert(0, entry); + } + } else { + // When the mapping is not in the cache, create a new mapping, + // insert it into the front of the cache, and evict the oldest cache + // entry if necessary. + let opts = addr2line::Options::default() + .with_functions(); + + let mapping = match opts.build(&path) { + Err(_) => return, + Ok(m) => m, + }; + + if cache.len() == MAPPINGS_CACHE_SIZE { + cache.pop(); + } + + cache.insert(0, (path, mapping)); + } + + f(&mut cache[0].1); + }); +} + +pub fn resolve(addr: *mut c_void, cb: &mut FnMut(&super::Symbol)) { + // First, find the file containing the segment that the given AVMA (after + // relocation) address falls within. Use the containing segment to compute + // the SVMA (before relocation) address. + // + // Note that the OS APIs that `SharedLibrary::each` is implemented with hold + // a lock for the duration of the `each` call, so we want to keep this + // section as short as possible to avoid contention with other threads + // capturing backtraces. + let addr = findshlibs::Avma(addr as *mut u8 as *const u8); + let mut so_info = None; + findshlibs::TargetSharedLibrary::each(|so| { + use findshlibs::IterationControl::*; + + for segment in so.segments() { + if segment.contains_avma(so, addr) { + let addr = so.avma_to_svma(addr); + let path = so.name().to_string_lossy(); + so_info = Some((addr, path.to_string())); + return Break; + } + } + + Continue + }); + let (addr, path) = match so_info { + None => return, + Some((a, p)) => (a, p), + }; + + // Second, fixup the path. Empty path means that this address falls within + // the main executable, not a shared library. + let path = if path.is_empty() { + match env::current_exe() { + Err(_) => return, + Ok(p) => p, + } + } else { + PathBuf::from(path) + }; + + // Finally, get a cached mapping or create a new mapping for this file, and + // evaluate the DWARF info to find the file/line/name for this address. 
+ with_mapping_for_path(path, |mapping| { + let (file, line, func) = match mapping.locate(addr.0 as u64) { + Ok(None) | Err(_) => return, + Ok(Some((file, line, func))) => (file, line, func), + }; + + let sym = super::Symbol { + inner: Symbol::new(addr.0 as usize, + file, + line, + func.map(|f| f.to_string())) + }; + + cb(&sym); + }); +} + +pub struct Symbol { + addr: usize, + file: PathBuf, + line: Option, + name: Option, +} + +impl Symbol { + fn new(addr: usize, + file: PathBuf, + line: Option, + name: Option) + -> Symbol { + Symbol { + addr, + file, + line, + name, + } + } + + pub fn name(&self) -> Option { + self.name.as_ref().map(|s| SymbolName::new(s.as_bytes())) + } + + pub fn addr(&self) -> Option<*mut c_void> { + Some(self.addr as *mut c_void) + } + + pub fn filename(&self) -> Option<&Path> { + Some(self.file.as_ref()) + } + + pub fn lineno(&self) -> Option { + self.line + .and_then(|l| if l > (u32::MAX as u64) { + None + } else { + Some(l as u32) + }) + } +} diff --git a/src/vendor/backtrace/src/symbolize/mod.rs b/src/vendor/backtrace/src/symbolize/mod.rs index 23c1c69719..7ed65479fc 100644 --- a/src/vendor/backtrace/src/symbolize/mod.rs +++ b/src/vendor/backtrace/src/symbolize/mod.rs @@ -17,7 +17,7 @@ use rustc_demangle::{try_demangle, Demangle}; /// also may be called more than once in the case of inlined functions. /// /// Symbols yielded represent the execution at the specified `addr`, returning -/// file/line pairs for that addres (if available). +/// file/line pairs for that address (if available). /// /// # Example /// @@ -46,7 +46,7 @@ pub fn resolve(addr: *mut c_void, mut cb: F) { /// `backtrace::resolve` function, and it is virtually dispatched as it's /// unknown which implementation is behind it. /// -/// A symbol can give contextual information about a funciton, for example the +/// A symbol can give contextual information about a function, for example the /// name, filename, line number, precise address, etc. Not all information is /// always available in a symbol, however, so all methods return an `Option`. pub struct Symbol { @@ -255,6 +255,12 @@ cfg_if! { mod dbghelp; use self::dbghelp::resolve as resolve_imp; use self::dbghelp::Symbol as SymbolImp; + } else if #[cfg(all(feature = "gimli-symbolize", + unix, + target_os = "linux"))] { + mod gimli; + use self::gimli::resolve as resolve_imp; + use self::gimli::Symbol as SymbolImp; } else if #[cfg(all(feature = "libbacktrace", unix, not(target_os = "emscripten"), diff --git a/src/vendor/backtrace/tests/long_fn_name.rs b/src/vendor/backtrace/tests/long_fn_name.rs new file mode 100644 index 0000000000..90750c1fe4 --- /dev/null +++ b/src/vendor/backtrace/tests/long_fn_name.rs @@ -0,0 +1,56 @@ +extern crate backtrace; + +#[cfg(all(windows, feature = "dbghelp"))] +extern crate winapi; + +use backtrace::Backtrace; + +// 50-character module name +mod _234567890_234567890_234567890_234567890_234567890 { + // 50-character struct name + #[allow(non_camel_case_types)] + pub struct _234567890_234567890_234567890_234567890_234567890(T); + impl _234567890_234567890_234567890_234567890_234567890 { + #[allow(dead_code)] + pub fn new() -> ::Backtrace { + ::Backtrace::new() + } + } +} + +// Long function names must be truncated to (MAX_SYM_NAME - 1) characters. +// Only run this test for msvc, since gnu prints "" for all frames. 
+#[test] +#[cfg(all(windows, feature = "dbghelp", target_env = "msvc"))] +fn test_long_fn_name() { + use _234567890_234567890_234567890_234567890_234567890:: + _234567890_234567890_234567890_234567890_234567890 as S; + + // 10 repetitions of struct name, so fully qualified function name is + // atleast 10 * (50 + 50) * 2 = 2000 characters long. + // It's actually longer since it also includes `::`, `<>` and the + // name of the current module + let bt = S::>>>>>>>>>::new(); + println!("{:?}", bt); + + let mut found_long_name_frame = false; + + for frame in bt.frames() { + let symbols = frame.symbols(); + if symbols.is_empty() { + continue; + } + + if let Some(function_name) = symbols[0].name() { + let function_name = function_name.as_str().unwrap(); + if function_name.contains( + "::_234567890_234567890_234567890_234567890_234567890") + { + found_long_name_frame = true; + assert_eq!(function_name.len(), winapi::MAX_SYM_NAME - 1); + } + } + } + + assert!(found_long_name_frame); +} diff --git a/src/vendor/backtrace/tests/smoke.rs b/src/vendor/backtrace/tests/smoke.rs index bef71af37e..1ec6f2b17c 100644 --- a/src/vendor/backtrace/tests/smoke.rs +++ b/src/vendor/backtrace/tests/smoke.rs @@ -12,6 +12,9 @@ static CORESYMBOLICATION: bool = cfg!(all(any(target_os = "macos", target_os = " static DLADDR: bool = cfg!(all(unix, feature = "dladdr")); static DBGHELP: bool = cfg!(all(windows, feature = "dbghelp")); static MSVC: bool = cfg!(target_env = "msvc"); +static GIMLI_SYMBOLIZE: bool = cfg!(all(feature = "gimli-symbolize", + unix, + target_os = "linux")); #[test] fn smoke_test_frames() { @@ -71,7 +74,7 @@ fn smoke_test_frames() { } let mut resolved = 0; - let can_resolve = DLADDR || LIBBACKTRACE || CORESYMBOLICATION || DBGHELP; + let can_resolve = DLADDR || LIBBACKTRACE || CORESYMBOLICATION || DBGHELP || GIMLI_SYMBOLIZE; let mut name = None; let mut addr = None; diff --git a/src/vendor/bitflags-0.7.0/.cargo-checksum.json b/src/vendor/bitflags-0.7.0/.cargo-checksum.json new file mode 100644 index 0000000000..c2012b5454 --- /dev/null +++ b/src/vendor/bitflags-0.7.0/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"c1e953ee360e77de57f7b02f1b7880bd6a3dc22d1a69e953c2ac2c52cc52d247",".travis.yml":"2b615144d3f4b2e63ba6ec435cc18df7d76354aa07c2a02d6c707028cc448784","Cargo.toml":"db8c2e9ea912c5f3d2d89cf4cf936c448300e356b0fb533db8875923cb135256","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"8cfbc986af45867d9e620188af2392320fe6e0d9536753ba415c94ab522f5fb5","src/lib.rs":"618ce383bb219725363fba174fc66beb4874d9682e5da953f9e3e9cb3f786d5f","tests/external.rs":"546e549ec831876a5dc272bd0537adc9e9886c6da54656c825e7bffc079e2c74","tests/external_no_std.rs":"48929f5109aabc156442d5ae2ab07b4bce5d648488bf49dba725f6ab23bcb48a"},"package":"aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d"} \ No newline at end of file diff --git a/src/vendor/bitflags-0.7.0/.cargo-ok b/src/vendor/bitflags-0.7.0/.cargo-ok new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/vendor/bitflags-0.7.0/.gitignore b/src/vendor/bitflags-0.7.0/.gitignore new file mode 100644 index 0000000000..4fffb2f89c --- /dev/null +++ b/src/vendor/bitflags-0.7.0/.gitignore @@ -0,0 +1,2 @@ +/target +/Cargo.lock diff --git a/src/vendor/bitflags-0.7.0/.travis.yml b/src/vendor/bitflags-0.7.0/.travis.yml new file mode 100644 
index 0000000000..60344466a1 --- /dev/null +++ b/src/vendor/bitflags-0.7.0/.travis.yml @@ -0,0 +1,24 @@ +language: rust +rust: + - stable + - beta + - nightly +sudo: false +script: + - cargo build --verbose + - cargo test --verbose + - cargo doc +after_success: | + [ $TRAVIS_BRANCH = master ] && + [ $TRAVIS_PULL_REQUEST = false ] && + [ $TRAVIS_RUST_VERSION = nightly ] && + echo '' > target/doc/index.html && + pip install ghp-import --user $USER && + $HOME/.local/bin/ghp-import -n target/doc && + git push -qf https://${TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages +env: + global: + secure: d+l63TtlF6cfFVDGauYRexgx4lBww4ORqqK4Vt75nWbiCbjZYsKXbcTUdhAr193nIVGiNW50A8SekM01F3EngHwHwr6u5kFleOggm+HA0kkBVeX+k2A4WCVVfYI+gth+zk99WaF8h46MA0evhx6FYDoqeyl9oqmVifI4kaqhMwc= +notifications: + email: + on_success: never diff --git a/src/vendor/bitflags-0.7.0/Cargo.toml b/src/vendor/bitflags-0.7.0/Cargo.toml new file mode 100644 index 0000000000..042497e9ca --- /dev/null +++ b/src/vendor/bitflags-0.7.0/Cargo.toml @@ -0,0 +1,13 @@ +[package] + +name = "bitflags" +version = "0.7.0" +authors = ["The Rust Project Developers"] +license = "MIT/Apache-2.0" +readme = "README.md" +repository = "https://github.com/rust-lang/bitflags" +homepage = "https://github.com/rust-lang/bitflags" +documentation = "https://doc.rust-lang.org/bitflags" +description = """ +A macro to generate structures which behave like bitflags. +""" diff --git a/src/vendor/gcc/LICENSE-APACHE b/src/vendor/bitflags-0.7.0/LICENSE-APACHE similarity index 100% rename from src/vendor/gcc/LICENSE-APACHE rename to src/vendor/bitflags-0.7.0/LICENSE-APACHE diff --git a/src/vendor/bitflags-0.7.0/LICENSE-MIT b/src/vendor/bitflags-0.7.0/LICENSE-MIT new file mode 100644 index 0000000000..39d4bdb5ac --- /dev/null +++ b/src/vendor/bitflags-0.7.0/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/src/vendor/bitflags-0.7.0/README.md b/src/vendor/bitflags-0.7.0/README.md new file mode 100644 index 0000000000..3edd8a361e --- /dev/null +++ b/src/vendor/bitflags-0.7.0/README.md @@ -0,0 +1,24 @@ +bitflags +======== + +A Rust macro to generate structures which behave like a set of bitflags + +[![Build Status](https://travis-ci.org/rust-lang-nursery/bitflags.svg?branch=master)](https://travis-ci.org/rust-lang-nursery/bitflags) + +[Documentation](https://doc.rust-lang.org/bitflags) + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +bitflags = "0.6" +``` + +and this to your crate root: + +```rust +#[macro_use] +extern crate bitflags; +``` diff --git a/src/vendor/bitflags-0.7.0/src/lib.rs b/src/vendor/bitflags-0.7.0/src/lib.rs new file mode 100644 index 0000000000..698799dab2 --- /dev/null +++ b/src/vendor/bitflags-0.7.0/src/lib.rs @@ -0,0 +1,808 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A typesafe bitmask flag generator. + +#![no_std] + +#[cfg(test)] +#[macro_use] +extern crate std; + +// Re-export libstd/libcore using an alias so that the macros can work in no_std +// crates while remaining compatible with normal crates. +#[allow(private_in_public)] +#[doc(hidden)] +pub use core as __core; + +/// The `bitflags!` macro generates a `struct` that holds a set of C-style +/// bitmask flags. It is useful for creating typesafe wrappers for C APIs. +/// +/// The flags should only be defined for integer types, otherwise unexpected +/// type errors may occur at compile time. +/// +/// # Example +/// +/// ```{.rust} +/// #[macro_use] +/// extern crate bitflags; +/// +/// bitflags! { +/// flags Flags: u32 { +/// const FLAG_A = 0b00000001, +/// const FLAG_B = 0b00000010, +/// const FLAG_C = 0b00000100, +/// const FLAG_ABC = FLAG_A.bits +/// | FLAG_B.bits +/// | FLAG_C.bits, +/// } +/// } +/// +/// fn main() { +/// let e1 = FLAG_A | FLAG_C; +/// let e2 = FLAG_B | FLAG_C; +/// assert_eq!((e1 | e2), FLAG_ABC); // union +/// assert_eq!((e1 & e2), FLAG_C); // intersection +/// assert_eq!((e1 - e2), FLAG_A); // set difference +/// assert_eq!(!e2, FLAG_A); // set complement +/// } +/// ``` +/// +/// The generated `struct`s can also be extended with type and trait +/// implementations: +/// +/// ```{.rust} +/// #[macro_use] +/// extern crate bitflags; +/// +/// use std::fmt; +/// +/// bitflags! { +/// flags Flags: u32 { +/// const FLAG_A = 0b00000001, +/// const FLAG_B = 0b00000010, +/// } +/// } +/// +/// impl Flags { +/// pub fn clear(&mut self) { +/// self.bits = 0; // The `bits` field can be accessed from within the +/// // same module where the `bitflags!` macro was invoked. 
+/// } +/// } +/// +/// impl fmt::Display for Flags { +/// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +/// write!(f, "hi!") +/// } +/// } +/// +/// fn main() { +/// let mut flags = FLAG_A | FLAG_B; +/// flags.clear(); +/// assert!(flags.is_empty()); +/// assert_eq!(format!("{}", flags), "hi!"); +/// assert_eq!(format!("{:?}", FLAG_A | FLAG_B), "FLAG_A | FLAG_B"); +/// assert_eq!(format!("{:?}", FLAG_B), "FLAG_B"); +/// } +/// ``` +/// +/// # Visibility +/// +/// The generated struct and its associated flag constants are not exported +/// out of the current module by default. A definition can be exported out of +/// the current module by adding `pub` before `flags`: +/// +/// ```{.rust},ignore +/// #[macro_use] +/// extern crate bitflags; +/// +/// mod example { +/// bitflags! { +/// pub flags Flags1: u32 { +/// const FLAG_A = 0b00000001, +/// } +/// } +/// bitflags! { +/// flags Flags2: u32 { +/// const FLAG_B = 0b00000010, +/// } +/// } +/// } +/// +/// fn main() { +/// let flag1 = example::FLAG_A; +/// let flag2 = example::FLAG_B; // error: const `FLAG_B` is private +/// } +/// ``` +/// +/// # Attributes +/// +/// Attributes can be attached to the generated `struct` by placing them +/// before the `flags` keyword. +/// +/// # Trait implementations +/// +/// The `Copy`, `Clone`, `PartialEq`, `Eq`, `PartialOrd`, `Ord` and `Hash` +/// traits automatically derived for the `struct` using the `derive` attribute. +/// Additional traits can be derived by providing an explicit `derive` +/// attribute on `flags`. +/// +/// The `Extend` and `FromIterator` traits are implemented for the `struct`, +/// too: `Extend` adds the union of the instances of the `struct` iterated over, +/// while `FromIterator` calculates the union. +/// +/// The `Debug` trait is also implemented by displaying the bits value of the +/// internal struct. +/// +/// ## Operators +/// +/// The following operator traits are implemented for the generated `struct`: +/// +/// - `BitOr` and `BitOrAssign`: union +/// - `BitAnd` and `BitAndAssign`: intersection +/// - `BitXor` and `BitXorAssign`: toggle +/// - `Sub` and `SubAssign`: set difference +/// - `Not`: set complement +/// +/// As long as the assignment operators are unstable rust feature they are only +/// available with the crate feature `assignment_ops` enabled. +/// +/// # Methods +/// +/// The following methods are defined for the generated `struct`: +/// +/// - `empty`: an empty set of flags +/// - `all`: the set of all flags +/// - `bits`: the raw value of the flags currently stored +/// - `from_bits`: convert from underlying bit representation, unless that +/// representation contains bits that do not correspond to a flag +/// - `from_bits_truncate`: convert from underlying bit representation, dropping +/// any bits that do not correspond to flags +/// - `is_empty`: `true` if no flags are currently stored +/// - `is_all`: `true` if all flags are currently set +/// - `intersects`: `true` if there are flags common to both `self` and `other` +/// - `contains`: `true` all of the flags in `other` are contained within `self` +/// - `insert`: inserts the specified flags in-place +/// - `remove`: removes the specified flags in-place +/// - `toggle`: the specified flags will be inserted if not present, and removed +/// if they are. +#[macro_export] +macro_rules! 
bitflags { + ($(#[$attr:meta])* pub flags $BitFlags:ident: $T:ty { + $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr),+ + }) => { + #[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)] + $(#[$attr])* + pub struct $BitFlags { + bits: $T, + } + + $($(#[$Flag_attr])* pub const $Flag: $BitFlags = $BitFlags { bits: $value };)+ + + bitflags! { + @_impl flags $BitFlags: $T { + $($(#[$Flag_attr])* const $Flag = $value),+ + } + } + }; + ($(#[$attr:meta])* flags $BitFlags:ident: $T:ty { + $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr),+ + }) => { + #[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)] + $(#[$attr])* + struct $BitFlags { + bits: $T, + } + + $($(#[$Flag_attr])* const $Flag: $BitFlags = $BitFlags { bits: $value };)+ + + bitflags! { + @_impl flags $BitFlags: $T { + $($(#[$Flag_attr])* const $Flag = $value),+ + } + } + }; + (@_impl flags $BitFlags:ident: $T:ty { + $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr),+ + }) => { + impl $crate::__core::fmt::Debug for $BitFlags { + fn fmt(&self, f: &mut $crate::__core::fmt::Formatter) -> $crate::__core::fmt::Result { + // This convoluted approach is to handle #[cfg]-based flag + // omission correctly. Some of the $Flag variants may not be + // defined in this module so we create an inner module which + // defines *all* flags to the value of 0. We then create a + // second inner module that defines all of the flags with #[cfg] + // to their real values. Afterwards the glob will import + // variants from the second inner module, shadowing all + // defined variants, leaving only the undefined ones with the + // bit value of 0. + #[allow(dead_code)] + #[allow(unused_assignments)] + mod dummy { + // We can't use the real $BitFlags struct because it may be + // private, which prevents us from using it to define + // public constants. + pub struct $BitFlags { + bits: u64, + } + mod real_flags { + use super::$BitFlags; + $($(#[$Flag_attr])* pub const $Flag: $BitFlags = $BitFlags { + bits: super::super::$Flag.bits as u64 + };)+ + } + // Now we define the "undefined" versions of the flags. + // This way, all the names exist, even if some are #[cfg]ed + // out. + $(const $Flag: $BitFlags = $BitFlags { bits: 0 };)+ + + #[inline] + pub fn fmt(self_: u64, + f: &mut $crate::__core::fmt::Formatter) + -> $crate::__core::fmt::Result { + // Now we import the real values for the flags. + // Only ones that are #[cfg]ed out will be 0. + use self::real_flags::*; + + let mut first = true; + $( + // $Flag.bits == 0 means that $Flag doesn't exist + if $Flag.bits != 0 && self_ & $Flag.bits as u64 == $Flag.bits as u64 { + if !first { + try!(f.write_str(" | ")); + } + first = false; + try!(f.write_str(stringify!($Flag))); + } + )+ + Ok(()) + } + } + dummy::fmt(self.bits as u64, f) + } + } + + #[allow(dead_code)] + impl $BitFlags { + /// Returns an empty set of flags. + #[inline] + pub fn empty() -> $BitFlags { + $BitFlags { bits: 0 } + } + + /// Returns the set containing all flags. + #[inline] + pub fn all() -> $BitFlags { + // See above `dummy` module for why this approach is taken. 
+ #[allow(dead_code)] + mod dummy { + pub struct $BitFlags { + bits: u64, + } + mod real_flags { + use super::$BitFlags; + $($(#[$Flag_attr])* pub const $Flag: $BitFlags = $BitFlags { + bits: super::super::$Flag.bits as u64 + };)+ + } + $(const $Flag: $BitFlags = $BitFlags { bits: 0 };)+ + + #[inline] + pub fn all() -> u64 { + use self::real_flags::*; + $($Flag.bits)|+ + } + } + $BitFlags { bits: dummy::all() as $T } + } + + /// Returns the raw value of the flags currently stored. + #[inline] + pub fn bits(&self) -> $T { + self.bits + } + + /// Convert from underlying bit representation, unless that + /// representation contains bits that do not correspond to a flag. + #[inline] + pub fn from_bits(bits: $T) -> $crate::__core::option::Option<$BitFlags> { + if (bits & !$BitFlags::all().bits()) == 0 { + $crate::__core::option::Option::Some($BitFlags { bits: bits }) + } else { + $crate::__core::option::Option::None + } + } + + /// Convert from underlying bit representation, dropping any bits + /// that do not correspond to flags. + #[inline] + pub fn from_bits_truncate(bits: $T) -> $BitFlags { + $BitFlags { bits: bits } & $BitFlags::all() + } + + /// Returns `true` if no flags are currently stored. + #[inline] + pub fn is_empty(&self) -> bool { + *self == $BitFlags::empty() + } + + /// Returns `true` if all flags are currently set. + #[inline] + pub fn is_all(&self) -> bool { + *self == $BitFlags::all() + } + + /// Returns `true` if there are flags common to both `self` and `other`. + #[inline] + pub fn intersects(&self, other: $BitFlags) -> bool { + !(*self & other).is_empty() + } + + /// Returns `true` all of the flags in `other` are contained within `self`. + #[inline] + pub fn contains(&self, other: $BitFlags) -> bool { + (*self & other) == other + } + + /// Inserts the specified flags in-place. + #[inline] + pub fn insert(&mut self, other: $BitFlags) { + self.bits |= other.bits; + } + + /// Removes the specified flags in-place. + #[inline] + pub fn remove(&mut self, other: $BitFlags) { + self.bits &= !other.bits; + } + + /// Toggles the specified flags in-place. + #[inline] + pub fn toggle(&mut self, other: $BitFlags) { + self.bits ^= other.bits; + } + } + + impl $crate::__core::ops::BitOr for $BitFlags { + type Output = $BitFlags; + + /// Returns the union of the two sets of flags. + #[inline] + fn bitor(self, other: $BitFlags) -> $BitFlags { + $BitFlags { bits: self.bits | other.bits } + } + } + + impl $crate::__core::ops::BitOrAssign for $BitFlags { + + /// Adds the set of flags. + #[inline] + fn bitor_assign(&mut self, other: $BitFlags) { + self.bits |= other.bits; + } + } + + impl $crate::__core::ops::BitXor for $BitFlags { + type Output = $BitFlags; + + /// Returns the left flags, but with all the right flags toggled. + #[inline] + fn bitxor(self, other: $BitFlags) -> $BitFlags { + $BitFlags { bits: self.bits ^ other.bits } + } + } + + impl $crate::__core::ops::BitXorAssign for $BitFlags { + + /// Toggles the set of flags. + #[inline] + fn bitxor_assign(&mut self, other: $BitFlags) { + self.bits ^= other.bits; + } + } + + impl $crate::__core::ops::BitAnd for $BitFlags { + type Output = $BitFlags; + + /// Returns the intersection between the two sets of flags. + #[inline] + fn bitand(self, other: $BitFlags) -> $BitFlags { + $BitFlags { bits: self.bits & other.bits } + } + } + + impl $crate::__core::ops::BitAndAssign for $BitFlags { + + /// Disables all flags disabled in the set. 
+ #[inline] + fn bitand_assign(&mut self, other: $BitFlags) { + self.bits &= other.bits; + } + } + + impl $crate::__core::ops::Sub for $BitFlags { + type Output = $BitFlags; + + /// Returns the set difference of the two sets of flags. + #[inline] + fn sub(self, other: $BitFlags) -> $BitFlags { + $BitFlags { bits: self.bits & !other.bits } + } + } + + impl $crate::__core::ops::SubAssign for $BitFlags { + + /// Disables all flags enabled in the set. + #[inline] + fn sub_assign(&mut self, other: $BitFlags) { + self.bits &= !other.bits; + } + } + + impl $crate::__core::ops::Not for $BitFlags { + type Output = $BitFlags; + + /// Returns the complement of this set of flags. + #[inline] + fn not(self) -> $BitFlags { + $BitFlags { bits: !self.bits } & $BitFlags::all() + } + } + + impl $crate::__core::iter::Extend<$BitFlags> for $BitFlags { + fn extend>(&mut self, iterator: T) { + for item in iterator { + self.insert(item) + } + } + } + + impl $crate::__core::iter::FromIterator<$BitFlags> for $BitFlags { + fn from_iter>(iterator: T) -> $BitFlags { + let mut result = Self::empty(); + result.extend(iterator); + result + } + } + }; + ($(#[$attr:meta])* pub flags $BitFlags:ident: $T:ty { + $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr),+, + }) => { + bitflags! { + $(#[$attr])* + pub flags $BitFlags: $T { + $($(#[$Flag_attr])* const $Flag = $value),+ + } + } + }; + ($(#[$attr:meta])* flags $BitFlags:ident: $T:ty { + $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr),+, + }) => { + bitflags! { + $(#[$attr])* + flags $BitFlags: $T { + $($(#[$Flag_attr])* const $Flag = $value),+ + } + } + }; +} + +#[cfg(test)] +#[allow(non_upper_case_globals, dead_code)] +mod tests { + use std::hash::{SipHasher, Hash, Hasher}; + + bitflags! { + #[doc = "> The first principle is that you must not fool yourself — and"] + #[doc = "> you are the easiest person to fool."] + #[doc = "> "] + #[doc = "> - Richard Feynman"] + flags Flags: u32 { + const FlagA = 0b00000001, + #[doc = " macros are way better at generating code than trans is"] + const FlagB = 0b00000010, + const FlagC = 0b00000100, + #[doc = "* cmr bed"] + #[doc = "* strcat table"] + #[doc = " wait what?"] + const FlagABC = FlagA.bits + | FlagB.bits + | FlagC.bits, + } + } + + bitflags! { + flags _CfgFlags: u32 { + #[cfg(windows)] + const _CfgA = 0b01, + #[cfg(unix)] + const _CfgB = 0b01, + #[cfg(windows)] + const _CfgC = _CfgA.bits | 0b10, + } + } + + bitflags! 
{ + flags AnotherSetOfFlags: i8 { + const AnotherFlag = -1_i8, + } + } + + #[test] + fn test_bits(){ + assert_eq!(Flags::empty().bits(), 0b00000000); + assert_eq!(FlagA.bits(), 0b00000001); + assert_eq!(FlagABC.bits(), 0b00000111); + + assert_eq!(AnotherSetOfFlags::empty().bits(), 0b00); + assert_eq!(AnotherFlag.bits(), !0_i8); + } + + #[test] + fn test_from_bits() { + assert_eq!(Flags::from_bits(0), Some(Flags::empty())); + assert_eq!(Flags::from_bits(0b1), Some(FlagA)); + assert_eq!(Flags::from_bits(0b10), Some(FlagB)); + assert_eq!(Flags::from_bits(0b11), Some(FlagA | FlagB)); + assert_eq!(Flags::from_bits(0b1000), None); + + assert_eq!(AnotherSetOfFlags::from_bits(!0_i8), Some(AnotherFlag)); + } + + #[test] + fn test_from_bits_truncate() { + assert_eq!(Flags::from_bits_truncate(0), Flags::empty()); + assert_eq!(Flags::from_bits_truncate(0b1), FlagA); + assert_eq!(Flags::from_bits_truncate(0b10), FlagB); + assert_eq!(Flags::from_bits_truncate(0b11), (FlagA | FlagB)); + assert_eq!(Flags::from_bits_truncate(0b1000), Flags::empty()); + assert_eq!(Flags::from_bits_truncate(0b1001), FlagA); + + assert_eq!(AnotherSetOfFlags::from_bits_truncate(0_i8), AnotherSetOfFlags::empty()); + } + + #[test] + fn test_is_empty(){ + assert!(Flags::empty().is_empty()); + assert!(!FlagA.is_empty()); + assert!(!FlagABC.is_empty()); + + assert!(!AnotherFlag.is_empty()); + } + + #[test] + fn test_is_all() { + assert!(Flags::all().is_all()); + assert!(!FlagA.is_all()); + assert!(FlagABC.is_all()); + + assert!(AnotherFlag.is_all()); + } + + #[test] + fn test_two_empties_do_not_intersect() { + let e1 = Flags::empty(); + let e2 = Flags::empty(); + assert!(!e1.intersects(e2)); + + assert!(AnotherFlag.intersects(AnotherFlag)); + } + + #[test] + fn test_empty_does_not_intersect_with_full() { + let e1 = Flags::empty(); + let e2 = FlagABC; + assert!(!e1.intersects(e2)); + } + + #[test] + fn test_disjoint_intersects() { + let e1 = FlagA; + let e2 = FlagB; + assert!(!e1.intersects(e2)); + } + + #[test] + fn test_overlapping_intersects() { + let e1 = FlagA; + let e2 = FlagA | FlagB; + assert!(e1.intersects(e2)); + } + + #[test] + fn test_contains() { + let e1 = FlagA; + let e2 = FlagA | FlagB; + assert!(!e1.contains(e2)); + assert!(e2.contains(e1)); + assert!(FlagABC.contains(e2)); + + assert!(AnotherFlag.contains(AnotherFlag)); + } + + #[test] + fn test_insert(){ + let mut e1 = FlagA; + let e2 = FlagA | FlagB; + e1.insert(e2); + assert_eq!(e1, e2); + + let mut e3 = AnotherSetOfFlags::empty(); + e3.insert(AnotherFlag); + assert_eq!(e3, AnotherFlag); + } + + #[test] + fn test_remove(){ + let mut e1 = FlagA | FlagB; + let e2 = FlagA | FlagC; + e1.remove(e2); + assert_eq!(e1, FlagB); + + let mut e3 = AnotherFlag; + e3.remove(AnotherFlag); + assert_eq!(e3, AnotherSetOfFlags::empty()); + } + + #[test] + fn test_operators() { + let e1 = FlagA | FlagC; + let e2 = FlagB | FlagC; + assert_eq!((e1 | e2), FlagABC); // union + assert_eq!((e1 & e2), FlagC); // intersection + assert_eq!((e1 - e2), FlagA); // set difference + assert_eq!(!e2, FlagA); // set complement + assert_eq!(e1 ^ e2, FlagA | FlagB); // toggle + let mut e3 = e1; + e3.toggle(e2); + assert_eq!(e3, FlagA | FlagB); + + let mut m4 = AnotherSetOfFlags::empty(); + m4.toggle(AnotherSetOfFlags::empty()); + assert_eq!(m4, AnotherSetOfFlags::empty()); + } + + #[test] + fn test_assignment_operators() { + let mut m1 = Flags::empty(); + let e1 = FlagA | FlagC; + // union + m1 |= FlagA; + assert_eq!(m1, FlagA); + // intersection + m1 &= e1; + assert_eq!(m1, FlagA); + // set 
difference + m1 -= m1; + assert_eq!(m1, Flags::empty()); + // toggle + m1 ^= e1; + assert_eq!(m1, e1); + } + + #[test] + fn test_extend() { + let mut flags; + + flags = Flags::empty(); + flags.extend([].iter().cloned()); + assert_eq!(flags, Flags::empty()); + + flags = Flags::empty(); + flags.extend([FlagA, FlagB].iter().cloned()); + assert_eq!(flags, FlagA | FlagB); + + flags = FlagA; + flags.extend([FlagA, FlagB].iter().cloned()); + assert_eq!(flags, FlagA | FlagB); + + flags = FlagB; + flags.extend([FlagA, FlagABC].iter().cloned()); + assert_eq!(flags, FlagABC); + } + + #[test] + fn test_from_iterator() { + assert_eq!([].iter().cloned().collect::(), Flags::empty()); + assert_eq!([FlagA, FlagB].iter().cloned().collect::(), FlagA | FlagB); + assert_eq!([FlagA, FlagABC].iter().cloned().collect::(), FlagABC); + } + + #[test] + fn test_lt() { + let mut a = Flags::empty(); + let mut b = Flags::empty(); + + assert!(!(a < b) && !(b < a)); + b = FlagB; + assert!(a < b); + a = FlagC; + assert!(!(a < b) && b < a); + b = FlagC | FlagB; + assert!(a < b); + } + + #[test] + fn test_ord() { + let mut a = Flags::empty(); + let mut b = Flags::empty(); + + assert!(a <= b && a >= b); + a = FlagA; + assert!(a > b && a >= b); + assert!(b < a && b <= a); + b = FlagB; + assert!(b > a && b >= a); + assert!(a < b && a <= b); + } + + fn hash(t: &T) -> u64 { + let mut s = SipHasher::new_with_keys(0, 0); + t.hash(&mut s); + s.finish() + } + + #[test] + fn test_hash() { + let mut x = Flags::empty(); + let mut y = Flags::empty(); + assert_eq!(hash(&x), hash(&y)); + x = Flags::all(); + y = FlagABC; + assert_eq!(hash(&x), hash(&y)); + } + + #[test] + fn test_debug() { + assert_eq!(format!("{:?}", FlagA | FlagB), "FlagA | FlagB"); + assert_eq!(format!("{:?}", FlagABC), "FlagA | FlagB | FlagC | FlagABC"); + } + + mod submodule { + bitflags! { + pub flags PublicFlags: i8 { + const FlagX = 0, + } + } + bitflags! { + flags PrivateFlags: i8 { + const FlagY = 0, + } + } + + #[test] + fn test_private() { + let _ = FlagY; + } + } + + #[test] + fn test_public() { + let _ = submodule::FlagX; + } + + mod t1 { + mod foo { + pub type Bar = i32; + } + + bitflags! { + /// baz + flags Flags: foo::Bar { + const A = 0b00000001, + #[cfg(foo)] + const B = 0b00000010, + #[cfg(foo)] + const C = 0b00000010, + } + } + } +} diff --git a/src/vendor/bitflags-0.7.0/tests/external.rs b/src/vendor/bitflags-0.7.0/tests/external.rs new file mode 100644 index 0000000000..0f0c7f665f --- /dev/null +++ b/src/vendor/bitflags-0.7.0/tests/external.rs @@ -0,0 +1,21 @@ +#![allow(dead_code)] + +#[macro_use] +extern crate bitflags; + +bitflags! { + /// baz + flags Flags: u32 { + const A = 0b00000001, + #[doc = "bar"] + const B = 0b00000010, + const C = 0b00000100, + #[doc = "foo"] + const ABC = A.bits | B.bits | C.bits, + } +} + +#[test] +fn smoke() { + assert_eq!(ABC, A | B | C); +} diff --git a/src/vendor/bitflags-0.7.0/tests/external_no_std.rs b/src/vendor/bitflags-0.7.0/tests/external_no_std.rs new file mode 100644 index 0000000000..46526fd712 --- /dev/null +++ b/src/vendor/bitflags-0.7.0/tests/external_no_std.rs @@ -0,0 +1,22 @@ +#![allow(dead_code)] +#![no_std] + +#[macro_use] +extern crate bitflags; + +bitflags! 
{ + /// baz + flags Flags: u32 { + const A = 0b00000001, + #[doc = "bar"] + const B = 0b00000010, + const C = 0b00000100, + #[doc = "foo"] + const ABC = A.bits | B.bits | C.bits, + } +} + +#[test] +fn smoke() { + assert_eq!(ABC, A | B | C); +} diff --git a/src/vendor/bitflags-0.9.1/.cargo-checksum.json b/src/vendor/bitflags-0.9.1/.cargo-checksum.json new file mode 100644 index 0000000000..b22367f34d --- /dev/null +++ b/src/vendor/bitflags-0.9.1/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"c1e953ee360e77de57f7b02f1b7880bd6a3dc22d1a69e953c2ac2c52cc52d247",".travis.yml":"e7a77c1800f9852e4c9a2acb9df041773ecd0bc005bd1b0657ae0512c67100ac","Cargo.toml":"f35826eec96c765ae8aee4f8a66c6b3cb0d918b49935baf05bae79b6df8e1077","Cargo.toml.orig":"46baf2141cf0a39944cd90ff114df4e42570b781e704589da2a6abf4e8ba723f","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"602c63819e332e93c85dc8426db4855f18fe0fabbd642c5b2303ed83f1ba926f","src/example_generated.rs":"161b69d92cf6e5fa4b5dc30f06031f3a0fb590b44be2bcf0f31cb8be4fab36fa","src/lib.rs":"56e86a16356d9322fa6b4e9b910041e2e7558c08b52ffbdacc647eba36b37abc","tests/conflicting_trait_impls.rs":"79993ea67ef09a5f99fddd69d8b73b1c137e41d0e8f8535f03865d6766dcc498","tests/external.rs":"15f7901698e286197666ccd309ad1debd3c35eaff680ca090368494e8b06ccf2","tests/external_no_std.rs":"c3556fd19dd91d1b093eb6a65d09a9d0985544f0377ba3d30c0e265c956f7237","tests/i128_bitflags.rs":"c955ef2c9fd385848195bb416e660e946ccbe59acc87862ef2646eb082d82e3f"},"package":"4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5"} \ No newline at end of file diff --git a/src/vendor/bitflags-0.9.1/.cargo-ok b/src/vendor/bitflags-0.9.1/.cargo-ok new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/vendor/bitflags-0.9.1/.gitignore b/src/vendor/bitflags-0.9.1/.gitignore new file mode 100644 index 0000000000..4fffb2f89c --- /dev/null +++ b/src/vendor/bitflags-0.9.1/.gitignore @@ -0,0 +1,2 @@ +/target +/Cargo.lock diff --git a/src/vendor/bitflags-0.9.1/.travis.yml b/src/vendor/bitflags-0.9.1/.travis.yml new file mode 100644 index 0000000000..5eb31843a7 --- /dev/null +++ b/src/vendor/bitflags-0.9.1/.travis.yml @@ -0,0 +1,29 @@ +os: + - linux + - osx +language: rust +rust: + - stable + - beta + - nightly +sudo: false +before_script: + - pip install -v 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH + - if [[ -e ~/Library/Python/2.7/bin ]]; then export PATH=~/Library/Python/2.7/bin:$PATH; fi +script: + - cargo build --verbose + - cargo test --verbose + - travis-cargo --only nightly test + - cargo doc --no-deps +after_success: + - travis-cargo --only nightly doc-upload +env: + global: + - TRAVIS_CARGO_NIGHTLY_FEATURE=unstable_testing + - secure: "DoZ8g8iPs+X3xEEucke0Ae02JbkQ1qd1SSv/L2aQqxULmREtRcbzRauhiT+ToQO5Ft1Lul8uck14nPfs4gMr/O3jFFBhEBVpSlbkJx7eNL3kwUdp95UNroA8I43xPN/nccJaHDN6TMTD3+uajTQTje2SyzOQP+1gvdKg17kguvE=" + + + +notifications: + email: + on_success: never diff --git a/src/vendor/bitflags-0.9.1/Cargo.toml b/src/vendor/bitflags-0.9.1/Cargo.toml new file mode 100644 index 0000000000..8ac9adbfc1 --- /dev/null +++ b/src/vendor/bitflags-0.9.1/Cargo.toml @@ -0,0 +1,31 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with 
all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "bitflags" +version = "0.9.1" +authors = ["The Rust Project Developers"] +description = "A macro to generate structures which behave like bitflags.\n" +homepage = "https://github.com/rust-lang-nursery/bitflags" +documentation = "https://docs.rs/bitflags" +readme = "README.md" +keywords = ["bit", "bitmask", "bitflags"] +categories = ["no-std"] +license = "MIT/Apache-2.0" +repository = "https://github.com/rust-lang-nursery/bitflags" + +[features] +example_generated = [] +unstable_testing = [] +default = ["example_generated"] +[badges.travis-ci] +repository = "rust-lang-nursery/bitflags" diff --git a/src/vendor/bitflags-0.9.1/Cargo.toml.orig b/src/vendor/bitflags-0.9.1/Cargo.toml.orig new file mode 100644 index 0000000000..261bdee2e0 --- /dev/null +++ b/src/vendor/bitflags-0.9.1/Cargo.toml.orig @@ -0,0 +1,26 @@ +[package] + +name = "bitflags" +# NB: When modifying, also modify: +# 1. html_root_url in lib.rs +# 2. number in readme (for breaking changes) +version = "0.9.1" +authors = ["The Rust Project Developers"] +license = "MIT/Apache-2.0" +keywords = ["bit", "bitmask", "bitflags"] +readme = "README.md" +repository = "https://github.com/rust-lang-nursery/bitflags" +homepage = "https://github.com/rust-lang-nursery/bitflags" +documentation = "https://docs.rs/bitflags" +categories = ["no-std"] +description = """ +A macro to generate structures which behave like bitflags. +""" + +[badges] +travis-ci = { repository = "rust-lang-nursery/bitflags" } + +[features] +default = ["example_generated"] +unstable_testing = [] +example_generated = [] diff --git a/src/vendor/unicode-segmentation/LICENSE-APACHE b/src/vendor/bitflags-0.9.1/LICENSE-APACHE similarity index 100% rename from src/vendor/unicode-segmentation/LICENSE-APACHE rename to src/vendor/bitflags-0.9.1/LICENSE-APACHE diff --git a/src/vendor/bitflags-0.9.1/LICENSE-MIT b/src/vendor/bitflags-0.9.1/LICENSE-MIT new file mode 100644 index 0000000000..39d4bdb5ac --- /dev/null +++ b/src/vendor/bitflags-0.9.1/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
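
The bitflags-0.9.1 manifests above enable the `example_generated` feature by default and keep the crate usable without `std`. As a hedged sketch only, not part of the vendored sources or of this patch, a downstream consumer of this 0.9-series API could look like the following; the crate, type, and flag names here are illustrative assumptions.

```rust
// Hypothetical consumer of the vendored bitflags 0.9.x API.
// The `Permissions` type and its flags are made up for illustration.
#[macro_use]
extern crate bitflags;

bitflags! {
    /// Illustrative permission flags (not defined anywhere in the patch).
    struct Permissions: u32 {
        const READ  = 0b0001;
        const WRITE = 0b0010;
        const EXEC  = 0b0100;
    }
}

fn main() {
    // In the 0.9 series the generated flag constants are module-level items,
    // so they are referenced bare rather than as `Permissions::READ`.
    let rw = READ | WRITE;
    assert!(rw.contains(READ));
    assert!(!rw.intersects(EXEC));
    assert_eq!(rw.bits(), 0b0011);
    assert_eq!(format!("{:?}", rw), "READ | WRITE");
}
```

The 1.0.0 update further down in this patch moves these generated constants to associated constants, which is why its hunks rewrite references such as `FLAG_A` into `Flags::FLAG_A`.
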
diff --git a/src/vendor/bitflags-0.9.1/README.md b/src/vendor/bitflags-0.9.1/README.md new file mode 100644 index 0000000000..714ca9d9e5 --- /dev/null +++ b/src/vendor/bitflags-0.9.1/README.md @@ -0,0 +1,24 @@ +bitflags +======== + +A Rust macro to generate structures which behave like a set of bitflags + +[![Build Status](https://travis-ci.org/rust-lang-nursery/bitflags.svg?branch=master)](https://travis-ci.org/rust-lang-nursery/bitflags) + +[Documentation](https://docs.rs/bitflags) + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +bitflags = "0.9" +``` + +and this to your crate root: + +```rust +#[macro_use] +extern crate bitflags; +``` diff --git a/src/vendor/bitflags-0.9.1/src/example_generated.rs b/src/vendor/bitflags-0.9.1/src/example_generated.rs new file mode 100644 index 0000000000..05b99e8f04 --- /dev/null +++ b/src/vendor/bitflags-0.9.1/src/example_generated.rs @@ -0,0 +1,16 @@ +//! This module shows an example of code generated by the macro. **IT MUST NOT BE USED OUTSIDE THIS +//! CRATE**. + +bitflags! { + /// This is the same `Flags` struct defined in the [crate level example](../index.html#example). + /// Note that this struct is just for documentation purposes only, it must not be used outside + /// this crate. + pub struct Flags: u32 { + const FLAG_A = 0b00000001; + const FLAG_B = 0b00000010; + const FLAG_C = 0b00000100; + const FLAG_ABC = FLAG_A.bits + | FLAG_B.bits + | FLAG_C.bits; + } +} diff --git a/src/vendor/bitflags-0.9.1/src/lib.rs b/src/vendor/bitflags-0.9.1/src/lib.rs new file mode 100644 index 0000000000..5840643a82 --- /dev/null +++ b/src/vendor/bitflags-0.9.1/src/lib.rs @@ -0,0 +1,990 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A typesafe bitmask flag generator useful for sets of C-style bitmask flags. +//! It can be used for creating typesafe wrappers around C APIs. +//! +//! The `bitflags!` macro generates a `struct` that manages a set of flags. The +//! flags should only be defined for integer types, otherwise unexpected type +//! errors may occur at compile time. +//! +//! # Example +//! +//! ``` +//! #[macro_use] +//! extern crate bitflags; +//! +//! bitflags! { +//! struct Flags: u32 { +//! const FLAG_A = 0b00000001; +//! const FLAG_B = 0b00000010; +//! const FLAG_C = 0b00000100; +//! const FLAG_ABC = FLAG_A.bits +//! | FLAG_B.bits +//! | FLAG_C.bits; +//! } +//! } +//! +//! fn main() { +//! let e1 = FLAG_A | FLAG_C; +//! let e2 = FLAG_B | FLAG_C; +//! assert_eq!((e1 | e2), FLAG_ABC); // union +//! assert_eq!((e1 & e2), FLAG_C); // intersection +//! assert_eq!((e1 - e2), FLAG_A); // set difference +//! assert_eq!(!e2, FLAG_A); // set complement +//! } +//! ``` +//! +//! See [`example_generated::Flags`](./example_generated/struct.Flags.html) for documentation of code +//! generated by the above `bitflags!` expansion. +//! +//! The generated `struct`s can also be extended with type and trait +//! implementations: +//! +//! ``` +//! #[macro_use] +//! extern crate bitflags; +//! +//! use std::fmt; +//! +//! bitflags! { +//! struct Flags: u32 { +//! const FLAG_A = 0b00000001; +//! const FLAG_B = 0b00000010; +//! } +//! } +//! +//! impl Flags { +//! pub fn clear(&mut self) { +//! 
self.bits = 0; // The `bits` field can be accessed from within the +//! // same module where the `bitflags!` macro was invoked. +//! } +//! } +//! +//! impl fmt::Display for Flags { +//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +//! write!(f, "hi!") +//! } +//! } +//! +//! fn main() { +//! let mut flags = FLAG_A | FLAG_B; +//! flags.clear(); +//! assert!(flags.is_empty()); +//! assert_eq!(format!("{}", flags), "hi!"); +//! assert_eq!(format!("{:?}", FLAG_A | FLAG_B), "FLAG_A | FLAG_B"); +//! assert_eq!(format!("{:?}", FLAG_B), "FLAG_B"); +//! } +//! ``` +//! +//! # Visibility +//! +//! The generated struct and its associated flag constants are not exported +//! out of the current module by default. A definition can be exported out of +//! the current module by adding `pub` before `flags`: +//! +//! ```ignore +//! #[macro_use] +//! extern crate bitflags; +//! +//! mod example { +//! bitflags! { +//! pub struct Flags1: u32 { +//! const FLAG_A = 0b00000001; +//! } +//! } +//! bitflags! { +//! struct Flags2: u32 { +//! const FLAG_B = 0b00000010; +//! } +//! } +//! } +//! +//! fn main() { +//! let flag1 = example::FLAG_A; +//! let flag2 = example::FLAG_B; // error: const `FLAG_B` is private +//! } +//! ``` +//! +//! # Attributes +//! +//! Attributes can be attached to the generated `struct` by placing them +//! before the `flags` keyword. +//! +//! # Trait implementations +//! +//! The `Copy`, `Clone`, `PartialEq`, `Eq`, `PartialOrd`, `Ord` and `Hash` +//! traits automatically derived for the `struct` using the `derive` attribute. +//! Additional traits can be derived by providing an explicit `derive` +//! attribute on `flags`. +//! +//! The `Extend` and `FromIterator` traits are implemented for the `struct`, +//! too: `Extend` adds the union of the instances of the `struct` iterated over, +//! while `FromIterator` calculates the union. +//! +//! The `Binary`, `Debug`, `LowerExp`, `Octal` and `UpperExp` trait is also +//! implemented by displaying the bits value of the internal struct. +//! +//! ## Operators +//! +//! The following operator traits are implemented for the generated `struct`: +//! +//! - `BitOr` and `BitOrAssign`: union +//! - `BitAnd` and `BitAndAssign`: intersection +//! - `BitXor` and `BitXorAssign`: toggle +//! - `Sub` and `SubAssign`: set difference +//! - `Not`: set complement +//! +//! # Methods +//! +//! The following methods are defined for the generated `struct`: +//! +//! - `empty`: an empty set of flags +//! - `all`: the set of all flags +//! - `bits`: the raw value of the flags currently stored +//! - `from_bits`: convert from underlying bit representation, unless that +//! representation contains bits that do not correspond to a flag +//! - `from_bits_truncate`: convert from underlying bit representation, dropping +//! any bits that do not correspond to flags +//! - `is_empty`: `true` if no flags are currently stored +//! - `is_all`: `true` if all flags are currently set +//! - `intersects`: `true` if there are flags common to both `self` and `other` +//! - `contains`: `true` all of the flags in `other` are contained within `self` +//! - `insert`: inserts the specified flags in-place +//! - `remove`: removes the specified flags in-place +//! - `toggle`: the specified flags will be inserted if not present, and removed +//! if they are. +//! +//! ## Default +//! +//! The `Default` trait is not automatically implemented for the generated struct. +//! +//! If your default value is equal to `0` (which is the same value as calling `empty()` +//! 
on the generated struct), you can simply derive `Default`: +//! +//! ``` +//! #[macro_use] +//! extern crate bitflags; +//! +//! bitflags! { +//! // Results in default value with bits: 0 +//! #[derive(Default)] +//! struct Flags: u32 { +//! const FLAG_A = 0b00000001; +//! const FLAG_B = 0b00000010; +//! const FLAG_C = 0b00000100; +//! } +//! } +//! +//! fn main() { +//! let derived_default: Flags = Default::default(); +//! assert_eq!(derived_default.bits(), 0); +//! } +//! ``` +//! +//! If your default value is not equal to `0` you need to implement `Default` yourself: +//! +//! ``` +//! #[macro_use] +//! extern crate bitflags; +//! +//! bitflags! { +//! struct Flags: u32 { +//! const FLAG_A = 0b00000001; +//! const FLAG_B = 0b00000010; +//! const FLAG_C = 0b00000100; +//! } +//! } +//! +//! // explicit `Default` implementation +//! impl Default for Flags { +//! fn default() -> Flags { +//! FLAG_A | FLAG_C +//! } +//! } +//! +//! fn main() { +//! let implemented_default: Flags = Default::default(); +//! assert_eq!(implemented_default, (FLAG_A | FLAG_C)); +//! } +//! ``` + +#![no_std] + +#![doc(html_root_url = "https://docs.rs/bitflags/0.9.1")] +// When compiled for the rustc compiler itself we want to make sure that this is +// an unstable crate. +#![cfg_attr(rustbuild, feature(staged_api))] +#![cfg_attr(rustbuild, unstable(feature = "rustc_private", issue = "27812"))] + +#[cfg(test)] +#[macro_use] +extern crate std; + +// Re-export libstd/libcore using an alias so that the macros can work in no_std +// crates while remaining compatible with normal crates. +#[doc(hidden)] +pub extern crate core as _core; + +/// The macro used to generate the flag structure. +/// +/// See the [crate level docs](../bitflags/index.html) for complete documentation. +/// +/// # Example +/// +/// ``` +/// #[macro_use] +/// extern crate bitflags; +/// +/// bitflags! { +/// struct Flags: u32 { +/// const FLAG_A = 0b00000001; +/// const FLAG_B = 0b00000010; +/// const FLAG_C = 0b00000100; +/// const FLAG_ABC = FLAG_A.bits +/// | FLAG_B.bits +/// | FLAG_C.bits; +/// } +/// } +/// +/// fn main() { +/// let e1 = FLAG_A | FLAG_C; +/// let e2 = FLAG_B | FLAG_C; +/// assert_eq!((e1 | e2), FLAG_ABC); // union +/// assert_eq!((e1 & e2), FLAG_C); // intersection +/// assert_eq!((e1 - e2), FLAG_A); // set difference +/// assert_eq!(!e2, FLAG_A); // set complement +/// } +/// ``` +/// +/// The generated `struct`s can also be extended with type and trait +/// implementations: +/// +/// ``` +/// #[macro_use] +/// extern crate bitflags; +/// +/// use std::fmt; +/// +/// bitflags! { +/// struct Flags: u32 { +/// const FLAG_A = 0b00000001; +/// const FLAG_B = 0b00000010; +/// } +/// } +/// +/// impl Flags { +/// pub fn clear(&mut self) { +/// self.bits = 0; // The `bits` field can be accessed from within the +/// // same module where the `bitflags!` macro was invoked. +/// } +/// } +/// +/// impl fmt::Display for Flags { +/// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +/// write!(f, "hi!") +/// } +/// } +/// +/// fn main() { +/// let mut flags = FLAG_A | FLAG_B; +/// flags.clear(); +/// assert!(flags.is_empty()); +/// assert_eq!(format!("{}", flags), "hi!"); +/// assert_eq!(format!("{:?}", FLAG_A | FLAG_B), "FLAG_A | FLAG_B"); +/// assert_eq!(format!("{:?}", FLAG_B), "FLAG_B"); +/// } +/// ``` +#[macro_export] +macro_rules! 
bitflags { + ($(#[$attr:meta])* pub struct $BitFlags:ident: $T:ty { + $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr;)+ + }) => { + #[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)] + $(#[$attr])* + pub struct $BitFlags { + bits: $T, + } + + $($(#[$Flag_attr])* pub const $Flag: $BitFlags = $BitFlags { bits: $value };)+ + + __impl_bitflags! { + struct $BitFlags: $T { + $($(#[$Flag_attr])* const $Flag = $value;)+ + } + } + }; + ($(#[$attr:meta])* struct $BitFlags:ident: $T:ty { + $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr;)+ + }) => { + #[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)] + $(#[$attr])* + struct $BitFlags { + bits: $T, + } + + $($(#[$Flag_attr])* const $Flag: $BitFlags = $BitFlags { bits: $value };)+ + + __impl_bitflags! { + struct $BitFlags: $T { + $($(#[$Flag_attr])* const $Flag = $value;)+ + } + } + + }; +} + +#[macro_export] +#[doc(hidden)] +macro_rules! __impl_bitflags { + (struct $BitFlags:ident: $T:ty { + $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr;)+ + }) => { + impl $crate::_core::fmt::Debug for $BitFlags { + fn fmt(&self, f: &mut $crate::_core::fmt::Formatter) -> $crate::_core::fmt::Result { + // This convoluted approach is to handle #[cfg]-based flag + // omission correctly. For example it needs to support: + // + // #[cfg(unix)] const A: Flag = /* ... */; + // #[cfg(windows)] const B: Flag = /* ... */; + + // Unconditionally define a check for every flag, even disabled + // ones. + #[allow(non_snake_case)] + trait __BitFlags { + $( + fn $Flag(&self) -> bool { false } + )+ + } + + // Conditionally override the check for just those flags that + // are not #[cfg]ed away. + impl __BitFlags for $BitFlags { + $( + $(#[$Flag_attr])* + fn $Flag(&self) -> bool { + self.bits & $Flag.bits == $Flag.bits + } + )+ + } + + let mut first = true; + $( + if <$BitFlags as __BitFlags>::$Flag(self) { + if !first { + try!(f.write_str(" | ")); + } + first = false; + try!(f.write_str(stringify!($Flag))); + } + )+ + if first { + try!(f.write_str("(empty)")); + } + Ok(()) + } + } + impl $crate::_core::fmt::Binary for $BitFlags { + fn fmt(&self, f: &mut $crate::_core::fmt::Formatter) -> $crate::_core::fmt::Result { + $crate::_core::fmt::Binary::fmt(&self.bits, f) + } + } + impl $crate::_core::fmt::Octal for $BitFlags { + fn fmt(&self, f: &mut $crate::_core::fmt::Formatter) -> $crate::_core::fmt::Result { + $crate::_core::fmt::Octal::fmt(&self.bits, f) + } + } + impl $crate::_core::fmt::LowerHex for $BitFlags { + fn fmt(&self, f: &mut $crate::_core::fmt::Formatter) -> $crate::_core::fmt::Result { + $crate::_core::fmt::LowerHex::fmt(&self.bits, f) + } + } + impl $crate::_core::fmt::UpperHex for $BitFlags { + fn fmt(&self, f: &mut $crate::_core::fmt::Formatter) -> $crate::_core::fmt::Result { + $crate::_core::fmt::UpperHex::fmt(&self.bits, f) + } + } + + #[allow(dead_code)] + impl $BitFlags { + /// Returns an empty set of flags. + #[inline] + pub fn empty() -> $BitFlags { + $BitFlags { bits: 0 } + } + + /// Returns the set containing all flags. + #[inline] + pub fn all() -> $BitFlags { + // See `Debug::fmt` for why this approach is taken. + #[allow(non_snake_case)] + trait __BitFlags { + $( + fn $Flag() -> $T { 0 } + )+ + } + impl __BitFlags for $BitFlags { + $( + $(#[$Flag_attr])* + fn $Flag() -> $T { $Flag.bits } + )+ + } + $BitFlags { bits: $(<$BitFlags as __BitFlags>::$Flag())|+ } + } + + /// Returns the raw value of the flags currently stored. 
+ #[inline] + pub fn bits(&self) -> $T { + self.bits + } + + /// Convert from underlying bit representation, unless that + /// representation contains bits that do not correspond to a flag. + #[inline] + pub fn from_bits(bits: $T) -> $crate::_core::option::Option<$BitFlags> { + if (bits & !$BitFlags::all().bits()) == 0 { + $crate::_core::option::Option::Some($BitFlags { bits: bits }) + } else { + $crate::_core::option::Option::None + } + } + + /// Convert from underlying bit representation, dropping any bits + /// that do not correspond to flags. + #[inline] + pub fn from_bits_truncate(bits: $T) -> $BitFlags { + $BitFlags { bits: bits } & $BitFlags::all() + } + + /// Returns `true` if no flags are currently stored. + #[inline] + pub fn is_empty(&self) -> bool { + *self == $BitFlags::empty() + } + + /// Returns `true` if all flags are currently set. + #[inline] + pub fn is_all(&self) -> bool { + *self == $BitFlags::all() + } + + /// Returns `true` if there are flags common to both `self` and `other`. + #[inline] + pub fn intersects(&self, other: $BitFlags) -> bool { + !(*self & other).is_empty() + } + + /// Returns `true` all of the flags in `other` are contained within `self`. + #[inline] + pub fn contains(&self, other: $BitFlags) -> bool { + (*self & other) == other + } + + /// Inserts the specified flags in-place. + #[inline] + pub fn insert(&mut self, other: $BitFlags) { + self.bits |= other.bits; + } + + /// Removes the specified flags in-place. + #[inline] + pub fn remove(&mut self, other: $BitFlags) { + self.bits &= !other.bits; + } + + /// Toggles the specified flags in-place. + #[inline] + pub fn toggle(&mut self, other: $BitFlags) { + self.bits ^= other.bits; + } + + /// Inserts or removes the specified flags depending on the passed value. + #[inline] + pub fn set(&mut self, other: $BitFlags, value: bool) { + if value { + self.insert(other); + } else { + self.remove(other); + } + } + } + + impl $crate::_core::ops::BitOr for $BitFlags { + type Output = $BitFlags; + + /// Returns the union of the two sets of flags. + #[inline] + fn bitor(self, other: $BitFlags) -> $BitFlags { + $BitFlags { bits: self.bits | other.bits } + } + } + + impl $crate::_core::ops::BitOrAssign for $BitFlags { + + /// Adds the set of flags. + #[inline] + fn bitor_assign(&mut self, other: $BitFlags) { + self.bits |= other.bits; + } + } + + impl $crate::_core::ops::BitXor for $BitFlags { + type Output = $BitFlags; + + /// Returns the left flags, but with all the right flags toggled. + #[inline] + fn bitxor(self, other: $BitFlags) -> $BitFlags { + $BitFlags { bits: self.bits ^ other.bits } + } + } + + impl $crate::_core::ops::BitXorAssign for $BitFlags { + + /// Toggles the set of flags. + #[inline] + fn bitxor_assign(&mut self, other: $BitFlags) { + self.bits ^= other.bits; + } + } + + impl $crate::_core::ops::BitAnd for $BitFlags { + type Output = $BitFlags; + + /// Returns the intersection between the two sets of flags. + #[inline] + fn bitand(self, other: $BitFlags) -> $BitFlags { + $BitFlags { bits: self.bits & other.bits } + } + } + + impl $crate::_core::ops::BitAndAssign for $BitFlags { + + /// Disables all flags disabled in the set. + #[inline] + fn bitand_assign(&mut self, other: $BitFlags) { + self.bits &= other.bits; + } + } + + impl $crate::_core::ops::Sub for $BitFlags { + type Output = $BitFlags; + + /// Returns the set difference of the two sets of flags. 
+ #[inline] + fn sub(self, other: $BitFlags) -> $BitFlags { + $BitFlags { bits: self.bits & !other.bits } + } + } + + impl $crate::_core::ops::SubAssign for $BitFlags { + + /// Disables all flags enabled in the set. + #[inline] + fn sub_assign(&mut self, other: $BitFlags) { + self.bits &= !other.bits; + } + } + + impl $crate::_core::ops::Not for $BitFlags { + type Output = $BitFlags; + + /// Returns the complement of this set of flags. + #[inline] + fn not(self) -> $BitFlags { + $BitFlags { bits: !self.bits } & $BitFlags::all() + } + } + + impl $crate::_core::iter::Extend<$BitFlags> for $BitFlags { + fn extend>(&mut self, iterator: T) { + for item in iterator { + self.insert(item) + } + } + } + + impl $crate::_core::iter::FromIterator<$BitFlags> for $BitFlags { + fn from_iter>(iterator: T) -> $BitFlags { + let mut result = Self::empty(); + result.extend(iterator); + result + } + } + }; +} + +#[cfg(feature = "example_generated")] +pub mod example_generated; + +#[cfg(test)] +#[allow(non_upper_case_globals, dead_code)] +mod tests { + use std::hash::{Hash, Hasher}; + use std::collections::hash_map::DefaultHasher; + + bitflags! { + #[doc = "> The first principle is that you must not fool yourself — and"] + #[doc = "> you are the easiest person to fool."] + #[doc = "> "] + #[doc = "> - Richard Feynman"] + struct Flags: u32 { + const FlagA = 0b00000001; + #[doc = " macros are way better at generating code than trans is"] + const FlagB = 0b00000010; + const FlagC = 0b00000100; + #[doc = "* cmr bed"] + #[doc = "* strcat table"] + #[doc = " wait what?"] + const FlagABC = FlagA.bits + | FlagB.bits + | FlagC.bits; + } + } + + bitflags! { + struct _CfgFlags: u32 { + #[cfg(windows)] + const _CfgA = 0b01; + #[cfg(unix)] + const _CfgB = 0b01; + #[cfg(windows)] + const _CfgC = _CfgA.bits | 0b10; + } + } + + bitflags! { + struct AnotherSetOfFlags: i8 { + const AnotherFlag = -1_i8; + } + } + + bitflags! 
{ + struct LongFlags: u32 { + const LongFlagA = 0b1111111111111111; + } + } + + #[test] + fn test_bits(){ + assert_eq!(Flags::empty().bits(), 0b00000000); + assert_eq!(FlagA.bits(), 0b00000001); + assert_eq!(FlagABC.bits(), 0b00000111); + + assert_eq!(AnotherSetOfFlags::empty().bits(), 0b00); + assert_eq!(AnotherFlag.bits(), !0_i8); + } + + #[test] + fn test_from_bits() { + assert_eq!(Flags::from_bits(0), Some(Flags::empty())); + assert_eq!(Flags::from_bits(0b1), Some(FlagA)); + assert_eq!(Flags::from_bits(0b10), Some(FlagB)); + assert_eq!(Flags::from_bits(0b11), Some(FlagA | FlagB)); + assert_eq!(Flags::from_bits(0b1000), None); + + assert_eq!(AnotherSetOfFlags::from_bits(!0_i8), Some(AnotherFlag)); + } + + #[test] + fn test_from_bits_truncate() { + assert_eq!(Flags::from_bits_truncate(0), Flags::empty()); + assert_eq!(Flags::from_bits_truncate(0b1), FlagA); + assert_eq!(Flags::from_bits_truncate(0b10), FlagB); + assert_eq!(Flags::from_bits_truncate(0b11), (FlagA | FlagB)); + assert_eq!(Flags::from_bits_truncate(0b1000), Flags::empty()); + assert_eq!(Flags::from_bits_truncate(0b1001), FlagA); + + assert_eq!(AnotherSetOfFlags::from_bits_truncate(0_i8), AnotherSetOfFlags::empty()); + } + + #[test] + fn test_is_empty(){ + assert!(Flags::empty().is_empty()); + assert!(!FlagA.is_empty()); + assert!(!FlagABC.is_empty()); + + assert!(!AnotherFlag.is_empty()); + } + + #[test] + fn test_is_all() { + assert!(Flags::all().is_all()); + assert!(!FlagA.is_all()); + assert!(FlagABC.is_all()); + + assert!(AnotherFlag.is_all()); + } + + #[test] + fn test_two_empties_do_not_intersect() { + let e1 = Flags::empty(); + let e2 = Flags::empty(); + assert!(!e1.intersects(e2)); + + assert!(AnotherFlag.intersects(AnotherFlag)); + } + + #[test] + fn test_empty_does_not_intersect_with_full() { + let e1 = Flags::empty(); + let e2 = FlagABC; + assert!(!e1.intersects(e2)); + } + + #[test] + fn test_disjoint_intersects() { + let e1 = FlagA; + let e2 = FlagB; + assert!(!e1.intersects(e2)); + } + + #[test] + fn test_overlapping_intersects() { + let e1 = FlagA; + let e2 = FlagA | FlagB; + assert!(e1.intersects(e2)); + } + + #[test] + fn test_contains() { + let e1 = FlagA; + let e2 = FlagA | FlagB; + assert!(!e1.contains(e2)); + assert!(e2.contains(e1)); + assert!(FlagABC.contains(e2)); + + assert!(AnotherFlag.contains(AnotherFlag)); + } + + #[test] + fn test_insert(){ + let mut e1 = FlagA; + let e2 = FlagA | FlagB; + e1.insert(e2); + assert_eq!(e1, e2); + + let mut e3 = AnotherSetOfFlags::empty(); + e3.insert(AnotherFlag); + assert_eq!(e3, AnotherFlag); + } + + #[test] + fn test_remove(){ + let mut e1 = FlagA | FlagB; + let e2 = FlagA | FlagC; + e1.remove(e2); + assert_eq!(e1, FlagB); + + let mut e3 = AnotherFlag; + e3.remove(AnotherFlag); + assert_eq!(e3, AnotherSetOfFlags::empty()); + } + + #[test] + fn test_operators() { + let e1 = FlagA | FlagC; + let e2 = FlagB | FlagC; + assert_eq!((e1 | e2), FlagABC); // union + assert_eq!((e1 & e2), FlagC); // intersection + assert_eq!((e1 - e2), FlagA); // set difference + assert_eq!(!e2, FlagA); // set complement + assert_eq!(e1 ^ e2, FlagA | FlagB); // toggle + let mut e3 = e1; + e3.toggle(e2); + assert_eq!(e3, FlagA | FlagB); + + let mut m4 = AnotherSetOfFlags::empty(); + m4.toggle(AnotherSetOfFlags::empty()); + assert_eq!(m4, AnotherSetOfFlags::empty()); + } + + #[test] + fn test_set() { + let mut e1 = FlagA | FlagC; + e1.set(FlagB, true); + e1.set(FlagC, false); + + assert_eq!(e1, FlagA | FlagB); + } + + #[test] + fn test_assignment_operators() { + let mut m1 = 
Flags::empty(); + let e1 = FlagA | FlagC; + // union + m1 |= FlagA; + assert_eq!(m1, FlagA); + // intersection + m1 &= e1; + assert_eq!(m1, FlagA); + // set difference + m1 -= m1; + assert_eq!(m1, Flags::empty()); + // toggle + m1 ^= e1; + assert_eq!(m1, e1); + } + + #[test] + fn test_extend() { + let mut flags; + + flags = Flags::empty(); + flags.extend([].iter().cloned()); + assert_eq!(flags, Flags::empty()); + + flags = Flags::empty(); + flags.extend([FlagA, FlagB].iter().cloned()); + assert_eq!(flags, FlagA | FlagB); + + flags = FlagA; + flags.extend([FlagA, FlagB].iter().cloned()); + assert_eq!(flags, FlagA | FlagB); + + flags = FlagB; + flags.extend([FlagA, FlagABC].iter().cloned()); + assert_eq!(flags, FlagABC); + } + + #[test] + fn test_from_iterator() { + assert_eq!([].iter().cloned().collect::(), Flags::empty()); + assert_eq!([FlagA, FlagB].iter().cloned().collect::(), FlagA | FlagB); + assert_eq!([FlagA, FlagABC].iter().cloned().collect::(), FlagABC); + } + + #[test] + fn test_lt() { + let mut a = Flags::empty(); + let mut b = Flags::empty(); + + assert!(!(a < b) && !(b < a)); + b = FlagB; + assert!(a < b); + a = FlagC; + assert!(!(a < b) && b < a); + b = FlagC | FlagB; + assert!(a < b); + } + + #[test] + fn test_ord() { + let mut a = Flags::empty(); + let mut b = Flags::empty(); + + assert!(a <= b && a >= b); + a = FlagA; + assert!(a > b && a >= b); + assert!(b < a && b <= a); + b = FlagB; + assert!(b > a && b >= a); + assert!(a < b && a <= b); + } + + fn hash(t: &T) -> u64 { + let mut s = DefaultHasher::new(); + t.hash(&mut s); + s.finish() + } + + #[test] + fn test_hash() { + let mut x = Flags::empty(); + let mut y = Flags::empty(); + assert_eq!(hash(&x), hash(&y)); + x = Flags::all(); + y = FlagABC; + assert_eq!(hash(&x), hash(&y)); + } + + #[test] + fn test_debug() { + assert_eq!(format!("{:?}", FlagA | FlagB), "FlagA | FlagB"); + assert_eq!(format!("{:?}", Flags::empty()), "(empty)"); + assert_eq!(format!("{:?}", FlagABC), "FlagA | FlagB | FlagC | FlagABC"); + } + + #[test] + fn test_binary() { + assert_eq!(format!("{:b}", FlagABC), "111"); + assert_eq!(format!("{:#b}", FlagABC), "0b111"); + } + + #[test] + fn test_octal() { + assert_eq!(format!("{:o}", LongFlagA), "177777"); + assert_eq!(format!("{:#o}", LongFlagA), "0o177777"); + } + + #[test] + fn test_lowerhex() { + assert_eq!(format!("{:x}", LongFlagA), "ffff"); + assert_eq!(format!("{:#x}", LongFlagA), "0xffff"); + } + + #[test] + fn test_upperhex() { + assert_eq!(format!("{:X}", LongFlagA), "FFFF"); + assert_eq!(format!("{:#X}", LongFlagA), "0xFFFF"); + } + + mod submodule { + bitflags! { + pub struct PublicFlags: i8 { + const FlagX = 0; + } + } + bitflags! { + struct PrivateFlags: i8 { + const FlagY = 0; + } + } + + #[test] + fn test_private() { + let _ = FlagY; + } + } + + #[test] + fn test_public() { + let _ = submodule::FlagX; + } + + mod t1 { + mod foo { + pub type Bar = i32; + } + + bitflags! { + /// baz + struct Flags: foo::Bar { + const A = 0b00000001; + #[cfg(foo)] + const B = 0b00000010; + #[cfg(foo)] + const C = 0b00000010; + } + } + } + + #[test] + fn test_in_function() { + bitflags! 
{ + struct Flags: u8 { + const A = 1; + #[cfg(any())] // false + const B = 2; + } + } + assert_eq!(Flags::all(), A); + assert_eq!(format!("{:?}", A), "A"); + } +} diff --git a/src/vendor/bitflags-0.9.1/tests/conflicting_trait_impls.rs b/src/vendor/bitflags-0.9.1/tests/conflicting_trait_impls.rs new file mode 100644 index 0000000000..4704dfaa3d --- /dev/null +++ b/src/vendor/bitflags-0.9.1/tests/conflicting_trait_impls.rs @@ -0,0 +1,20 @@ +#![allow(dead_code)] +#![no_std] + +#[macro_use] +extern crate bitflags; + +#[allow(unused_imports)] +use core::fmt::Display; + +bitflags! { + /// baz + struct Flags: u32 { + const A = 0b00000001; + } +} + +#[test] +fn main() { + +} diff --git a/src/vendor/bitflags-0.9.1/tests/external.rs b/src/vendor/bitflags-0.9.1/tests/external.rs new file mode 100644 index 0000000000..fc1c346709 --- /dev/null +++ b/src/vendor/bitflags-0.9.1/tests/external.rs @@ -0,0 +1,21 @@ +#![allow(dead_code)] + +#[macro_use] +extern crate bitflags; + +bitflags! { + /// baz + struct Flags: u32 { + const A = 0b00000001; + #[doc = "bar"] + const B = 0b00000010; + const C = 0b00000100; + #[doc = "foo"] + const ABC = A.bits | B.bits | C.bits; + } +} + +#[test] +fn smoke() { + assert_eq!(ABC, A | B | C); +} diff --git a/src/vendor/bitflags-0.9.1/tests/external_no_std.rs b/src/vendor/bitflags-0.9.1/tests/external_no_std.rs new file mode 100644 index 0000000000..8b8c7067f6 --- /dev/null +++ b/src/vendor/bitflags-0.9.1/tests/external_no_std.rs @@ -0,0 +1,22 @@ +#![allow(dead_code)] +#![no_std] + +#[macro_use] +extern crate bitflags; + +bitflags! { + /// baz + struct Flags: u32 { + const A = 0b00000001; + #[doc = "bar"] + const B = 0b00000010; + const C = 0b00000100; + #[doc = "foo"] + const ABC = A.bits | B.bits | C.bits; + } +} + +#[test] +fn smoke() { + assert_eq!(ABC, A | B | C); +} diff --git a/src/vendor/bitflags-0.9.1/tests/i128_bitflags.rs b/src/vendor/bitflags-0.9.1/tests/i128_bitflags.rs new file mode 100644 index 0000000000..acbb927821 --- /dev/null +++ b/src/vendor/bitflags-0.9.1/tests/i128_bitflags.rs @@ -0,0 +1,30 @@ +#![cfg(feature = "unstable_testing")] + +#![allow(dead_code, unused_imports)] +#![feature(i128_type)] + +#[macro_use] +extern crate bitflags; + +bitflags! 
{ + /// baz + struct Flags128: u128 { + const A = 0x0000_0000_0000_0000_0000_0000_0000_0001; + const B = 0x0000_0000_0000_1000_0000_0000_0000_0000; + const C = 0x8000_0000_0000_0000_0000_0000_0000_0000; + const ABC = A.bits | B.bits | C.bits; + } +} + +#[test] +fn test_i128_bitflags() { + assert_eq!(ABC, A | B | C); + assert_eq!(A.bits, 0x0000_0000_0000_0000_0000_0000_0000_0001); + assert_eq!(B.bits, 0x0000_0000_0000_1000_0000_0000_0000_0000); + assert_eq!(C.bits, 0x8000_0000_0000_0000_0000_0000_0000_0000); + assert_eq!(ABC.bits, 0x8000_0000_0000_1000_0000_0000_0000_0001); + assert_eq!(format!("{:?}", A), "A"); + assert_eq!(format!("{:?}", B), "B"); + assert_eq!(format!("{:?}", C), "C"); + assert_eq!(format!("{:?}", ABC), "A | B | C | ABC"); +} diff --git a/src/vendor/bitflags/.cargo-checksum.json b/src/vendor/bitflags/.cargo-checksum.json index b22367f34d..7005745705 100644 --- a/src/vendor/bitflags/.cargo-checksum.json +++ b/src/vendor/bitflags/.cargo-checksum.json @@ -1 +1 @@ -{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"c1e953ee360e77de57f7b02f1b7880bd6a3dc22d1a69e953c2ac2c52cc52d247",".travis.yml":"e7a77c1800f9852e4c9a2acb9df041773ecd0bc005bd1b0657ae0512c67100ac","Cargo.toml":"f35826eec96c765ae8aee4f8a66c6b3cb0d918b49935baf05bae79b6df8e1077","Cargo.toml.orig":"46baf2141cf0a39944cd90ff114df4e42570b781e704589da2a6abf4e8ba723f","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"602c63819e332e93c85dc8426db4855f18fe0fabbd642c5b2303ed83f1ba926f","src/example_generated.rs":"161b69d92cf6e5fa4b5dc30f06031f3a0fb590b44be2bcf0f31cb8be4fab36fa","src/lib.rs":"56e86a16356d9322fa6b4e9b910041e2e7558c08b52ffbdacc647eba36b37abc","tests/conflicting_trait_impls.rs":"79993ea67ef09a5f99fddd69d8b73b1c137e41d0e8f8535f03865d6766dcc498","tests/external.rs":"15f7901698e286197666ccd309ad1debd3c35eaff680ca090368494e8b06ccf2","tests/external_no_std.rs":"c3556fd19dd91d1b093eb6a65d09a9d0985544f0377ba3d30c0e265c956f7237","tests/i128_bitflags.rs":"c955ef2c9fd385848195bb416e660e946ccbe59acc87862ef2646eb082d82e3f"},"package":"4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5"} \ No newline at end of file 
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"c1e953ee360e77de57f7b02f1b7880bd6a3dc22d1a69e953c2ac2c52cc52d247",".travis.yml":"d0da6f63d93a5cfb6536e3be8714239386a79fbfdfe3e6ec92a78c8ddde62010","Cargo.toml":"5a87b6a8e0411a560659a73163f9a222b4b6fa46c6d3ef7bb4dc1e5152807b4a","Cargo.toml.orig":"6bb4fe17d992c94b7993182bf7368c7a92812b6b68ce3fe04b2f004d5d58aeb5","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"aedcbbbefde11d7d8df61645aa93324695e7035d847e934a92e0250920287072","src/example_generated.rs":"8e8f188edaaefb8cc75064bd9e6dfdb8d776132ad838cff377a1acf08216186f","src/lib.rs":"d66cedd360642fcaf89a84408ea6b336f9d89948800e0a8149465cc3a022a621","tests/conflicting_trait_impls.rs":"a2a2e28dee83ead220bbc76653ae203242d9024e05aa47217049c91dd66e3b91","tests/external.rs":"1ec3fb8681056383397f34a9ae87c3d24f031715f88602cab19fa3c820e021b0","tests/external_no_std.rs":"75e0e2d6257b67d4ab9d975a9851f0a874ca7d2cdb0940df6212eafbb0fc2d88","tests/i128_bitflags.rs":"e55fbd7529839bffb8e3a8bf00516b7aca9f4a51cfda5177da17aece8328ad55"},"package":"f5cde24d1b2e2216a726368b2363a273739c91f4e3eb4e0dd12d672d396ad989"} \ No newline at end of file diff --git a/src/vendor/bitflags/.travis.yml b/src/vendor/bitflags/.travis.yml index 5eb31843a7..60dc19e9a5 100644 --- a/src/vendor/bitflags/.travis.yml +++ b/src/vendor/bitflags/.travis.yml @@ -3,6 +3,8 @@ os: - osx language: rust rust: + # This version is tested to avoid unintentional bumping of the minimum supported Rust version + - 1.20.0 - stable - beta - nightly diff --git a/src/vendor/bitflags/Cargo.toml b/src/vendor/bitflags/Cargo.toml index 8ac9adbfc1..214adb511b 100644 --- a/src/vendor/bitflags/Cargo.toml +++ b/src/vendor/bitflags/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "bitflags" -version = "0.9.1" +version = "1.0.0" authors = ["The Rust Project Developers"] description = "A macro to generate structures which behave like bitflags.\n" homepage = "https://github.com/rust-lang-nursery/bitflags" @@ -24,8 +24,8 @@ license = "MIT/Apache-2.0" repository = "https://github.com/rust-lang-nursery/bitflags" [features] -example_generated = [] unstable_testing = [] default = ["example_generated"] +example_generated = [] [badges.travis-ci] repository = "rust-lang-nursery/bitflags" diff --git a/src/vendor/bitflags/Cargo.toml.orig b/src/vendor/bitflags/Cargo.toml.orig index 261bdee2e0..5470c98efc 100644 --- a/src/vendor/bitflags/Cargo.toml.orig +++ b/src/vendor/bitflags/Cargo.toml.orig @@ -4,7 +4,7 @@ name = "bitflags" # NB: When modifying, also modify: # 1. html_root_url in lib.rs # 2. 
number in readme (for breaking changes) -version = "0.9.1" +version = "1.0.0" authors = ["The Rust Project Developers"] license = "MIT/Apache-2.0" keywords = ["bit", "bitmask", "bitflags"] diff --git a/src/vendor/bitflags/README.md b/src/vendor/bitflags/README.md index 714ca9d9e5..8deb465d28 100644 --- a/src/vendor/bitflags/README.md +++ b/src/vendor/bitflags/README.md @@ -5,7 +5,8 @@ A Rust macro to generate structures which behave like a set of bitflags [![Build Status](https://travis-ci.org/rust-lang-nursery/bitflags.svg?branch=master)](https://travis-ci.org/rust-lang-nursery/bitflags) -[Documentation](https://docs.rs/bitflags) +- [Documentation](https://docs.rs/bitflags) +- [Release notes](https://github.com/rust-lang-nursery/bitflags/releases) ## Usage @@ -13,7 +14,7 @@ Add this to your `Cargo.toml`: ```toml [dependencies] -bitflags = "0.9" +bitflags = "1.0" ``` and this to your crate root: @@ -22,3 +23,9 @@ and this to your crate root: #[macro_use] extern crate bitflags; ``` + +## Rust Version Support + +The minimum supported* Rust version is 1.20 due to use of associated constants. + +_* As of the current master branch (unreleased)_ diff --git a/src/vendor/bitflags/src/example_generated.rs b/src/vendor/bitflags/src/example_generated.rs index 05b99e8f04..ee5b59b105 100644 --- a/src/vendor/bitflags/src/example_generated.rs +++ b/src/vendor/bitflags/src/example_generated.rs @@ -9,8 +9,8 @@ bitflags! { const FLAG_A = 0b00000001; const FLAG_B = 0b00000010; const FLAG_C = 0b00000100; - const FLAG_ABC = FLAG_A.bits - | FLAG_B.bits - | FLAG_C.bits; + const FLAG_ABC = Self::FLAG_A.bits + | Self::FLAG_B.bits + | Self::FLAG_C.bits; } } diff --git a/src/vendor/bitflags/src/lib.rs b/src/vendor/bitflags/src/lib.rs index 5840643a82..156558221b 100644 --- a/src/vendor/bitflags/src/lib.rs +++ b/src/vendor/bitflags/src/lib.rs @@ -26,19 +26,19 @@ //! const FLAG_A = 0b00000001; //! const FLAG_B = 0b00000010; //! const FLAG_C = 0b00000100; -//! const FLAG_ABC = FLAG_A.bits -//! | FLAG_B.bits -//! | FLAG_C.bits; +//! const FLAG_ABC = Self::FLAG_A.bits +//! | Self::FLAG_B.bits +//! | Self::FLAG_C.bits; //! } //! } //! //! fn main() { -//! let e1 = FLAG_A | FLAG_C; -//! let e2 = FLAG_B | FLAG_C; -//! assert_eq!((e1 | e2), FLAG_ABC); // union -//! assert_eq!((e1 & e2), FLAG_C); // intersection -//! assert_eq!((e1 - e2), FLAG_A); // set difference -//! assert_eq!(!e2, FLAG_A); // set complement +//! let e1 = Flags::FLAG_A | Flags::FLAG_C; +//! let e2 = Flags::FLAG_B | Flags::FLAG_C; +//! assert_eq!((e1 | e2), Flags::FLAG_ABC); // union +//! assert_eq!((e1 & e2), Flags::FLAG_C); // intersection +//! assert_eq!((e1 - e2), Flags::FLAG_A); // set difference +//! assert_eq!(!e2, Flags::FLAG_A); // set complement //! } //! ``` //! @@ -75,12 +75,12 @@ //! } //! //! fn main() { -//! let mut flags = FLAG_A | FLAG_B; +//! let mut flags = Flags::FLAG_A | Flags::FLAG_B; //! flags.clear(); //! assert!(flags.is_empty()); //! assert_eq!(format!("{}", flags), "hi!"); -//! assert_eq!(format!("{:?}", FLAG_A | FLAG_B), "FLAG_A | FLAG_B"); -//! assert_eq!(format!("{:?}", FLAG_B), "FLAG_B"); +//! assert_eq!(format!("{:?}", Flags::FLAG_A | Flags::FLAG_B), "FLAG_A | FLAG_B"); +//! assert_eq!(format!("{:?}", Flags::FLAG_B), "FLAG_B"); //! } //! ``` //! @@ -108,8 +108,8 @@ //! } //! //! fn main() { -//! let flag1 = example::FLAG_A; -//! let flag2 = example::FLAG_B; // error: const `FLAG_B` is private +//! let flag1 = example::Flags1::FLAG_A; +//! let flag2 = example::Flags2::FLAG_B; // error: const `FLAG_B` is private //! } //! 
``` //! @@ -161,6 +161,7 @@ //! - `remove`: removes the specified flags in-place //! - `toggle`: the specified flags will be inserted if not present, and removed //! if they are. +//! - `set`: inserts or removes the specified flags depending on the passed value //! //! ## Default //! @@ -206,19 +207,19 @@ //! // explicit `Default` implementation //! impl Default for Flags { //! fn default() -> Flags { -//! FLAG_A | FLAG_C +//! Flags::FLAG_A | Flags::FLAG_C //! } //! } //! //! fn main() { //! let implemented_default: Flags = Default::default(); -//! assert_eq!(implemented_default, (FLAG_A | FLAG_C)); +//! assert_eq!(implemented_default, (Flags::FLAG_A | Flags::FLAG_C)); //! } //! ``` #![no_std] -#![doc(html_root_url = "https://docs.rs/bitflags/0.9.1")] +#![doc(html_root_url = "https://docs.rs/bitflags/1.0.0")] // When compiled for the rustc compiler itself we want to make sure that this is // an unstable crate. #![cfg_attr(rustbuild, feature(staged_api))] @@ -248,19 +249,19 @@ pub extern crate core as _core; /// const FLAG_A = 0b00000001; /// const FLAG_B = 0b00000010; /// const FLAG_C = 0b00000100; -/// const FLAG_ABC = FLAG_A.bits -/// | FLAG_B.bits -/// | FLAG_C.bits; +/// const FLAG_ABC = Self::FLAG_A.bits +/// | Self::FLAG_B.bits +/// | Self::FLAG_C.bits; /// } /// } /// /// fn main() { -/// let e1 = FLAG_A | FLAG_C; -/// let e2 = FLAG_B | FLAG_C; -/// assert_eq!((e1 | e2), FLAG_ABC); // union -/// assert_eq!((e1 & e2), FLAG_C); // intersection -/// assert_eq!((e1 - e2), FLAG_A); // set difference -/// assert_eq!(!e2, FLAG_A); // set complement +/// let e1 = Flags::FLAG_A | Flags::FLAG_C; +/// let e2 = Flags::FLAG_B | Flags::FLAG_C; +/// assert_eq!((e1 | e2), Flags::FLAG_ABC); // union +/// assert_eq!((e1 & e2), Flags::FLAG_C); // intersection +/// assert_eq!((e1 - e2), Flags::FLAG_A); // set difference +/// assert_eq!(!e2, Flags::FLAG_A); // set complement /// } /// ``` /// @@ -294,59 +295,77 @@ pub extern crate core as _core; /// } /// /// fn main() { -/// let mut flags = FLAG_A | FLAG_B; +/// let mut flags = Flags::FLAG_A | Flags::FLAG_B; /// flags.clear(); /// assert!(flags.is_empty()); /// assert_eq!(format!("{}", flags), "hi!"); -/// assert_eq!(format!("{:?}", FLAG_A | FLAG_B), "FLAG_A | FLAG_B"); -/// assert_eq!(format!("{:?}", FLAG_B), "FLAG_B"); +/// assert_eq!(format!("{:?}", Flags::FLAG_A | Flags::FLAG_B), "FLAG_A | FLAG_B"); +/// assert_eq!(format!("{:?}", Flags::FLAG_B), "FLAG_B"); /// } /// ``` #[macro_export] macro_rules! bitflags { - ($(#[$attr:meta])* pub struct $BitFlags:ident: $T:ty { - $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr;)+ - }) => { + ( + $(#[$outer:meta])* + pub struct $BitFlags:ident: $T:ty { + $( + $(#[$inner:ident $($args:tt)*])* + const $Flag:ident = $value:expr; + )+ + } + ) => { #[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)] - $(#[$attr])* + $(#[$outer])* pub struct $BitFlags { bits: $T, } - $($(#[$Flag_attr])* pub const $Flag: $BitFlags = $BitFlags { bits: $value };)+ - __impl_bitflags! 
{ struct $BitFlags: $T { - $($(#[$Flag_attr])* const $Flag = $value;)+ + $( + $(#[$inner $($args)*])* + const $Flag = $value; + )+ } } }; - ($(#[$attr:meta])* struct $BitFlags:ident: $T:ty { - $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr;)+ - }) => { + ( + $(#[$outer:meta])* + struct $BitFlags:ident: $T:ty { + $( + $(#[$inner:ident $($args:tt)*])* + const $Flag:ident = $value:expr; + )+ + } + ) => { #[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)] - $(#[$attr])* + $(#[$outer])* struct $BitFlags { bits: $T, } - $($(#[$Flag_attr])* const $Flag: $BitFlags = $BitFlags { bits: $value };)+ - __impl_bitflags! { struct $BitFlags: $T { - $($(#[$Flag_attr])* const $Flag = $value;)+ + $( + $(#[$inner $($args)*])* + const $Flag = $value; + )+ } } - }; } #[macro_export] #[doc(hidden)] macro_rules! __impl_bitflags { - (struct $BitFlags:ident: $T:ty { - $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr;)+ - }) => { + ( + struct $BitFlags:ident: $T:ty { + $( + $(#[$attr:ident $($args:tt)*])* + const $Flag:ident = $value:expr; + )+ + } + ) => { impl $crate::_core::fmt::Debug for $BitFlags { fn fmt(&self, f: &mut $crate::_core::fmt::Formatter) -> $crate::_core::fmt::Result { // This convoluted approach is to handle #[cfg]-based flag @@ -368,9 +387,12 @@ macro_rules! __impl_bitflags { // are not #[cfg]ed away. impl __BitFlags for $BitFlags { $( - $(#[$Flag_attr])* - fn $Flag(&self) -> bool { - self.bits & $Flag.bits == $Flag.bits + __impl_bitflags! { + #[allow(deprecated)] + $(? #[$attr $($args)*])* + fn $Flag(&self) -> bool { + self.bits & Self::$Flag.bits == Self::$Flag.bits + } } )+ } @@ -414,6 +436,11 @@ macro_rules! __impl_bitflags { #[allow(dead_code)] impl $BitFlags { + $( + $(#[$attr $($args)*])* + pub const $Flag: $BitFlags = $BitFlags { bits: $value }; + )+ + /// Returns an empty set of flags. #[inline] pub fn empty() -> $BitFlags { @@ -432,8 +459,11 @@ macro_rules! __impl_bitflags { } impl __BitFlags for $BitFlags { $( - $(#[$Flag_attr])* - fn $Flag() -> $T { $Flag.bits } + __impl_bitflags! { + #[allow(deprecated)] + $(? #[$attr $($args)*])* + fn $Flag() -> $T { Self::$Flag.bits } + } )+ } $BitFlags { bits: $(<$BitFlags as __BitFlags>::$Flag())|+ } @@ -618,13 +648,65 @@ macro_rules! __impl_bitflags { } } }; + + // Every attribute that the user writes on a const is applied to the + // corresponding const that we generate, but within the implementation of + // Debug and all() we want to ignore everything but #[cfg] attributes. In + // particular, including a #[deprecated] attribute on those items would fail + // to compile. + // https://github.com/rust-lang-nursery/bitflags/issues/109 + // + // Input: + // + // ? #[cfg(feature = "advanced")] + // ? #[deprecated(note = "Use somthing else.")] + // ? #[doc = r"High quality documentation."] + // fn f() -> i32 { /* ... */ } + // + // Output: + // + // #[cfg(feature = "advanced")] + // fn f() -> i32 { /* ... */ } + ( + $(#[$filtered:meta])* + ? #[cfg $($cfgargs:tt)*] + $(? #[$rest:ident $($restargs:tt)*])* + fn $($item:tt)* + ) => { + __impl_bitflags! { + $(#[$filtered])* + #[cfg $($cfgargs)*] + $(? #[$rest $($restargs)*])* + fn $($item)* + } + }; + ( + $(#[$filtered:meta])* + // $next != `cfg` + ? #[$next:ident $($nextargs:tt)*] + $(? #[$rest:ident $($restargs:tt)*])* + fn $($item:tt)* + ) => { + __impl_bitflags! { + $(#[$filtered])* + // $next filtered out + $(? 
#[$rest $($restargs)*])* + fn $($item)* + } + }; + ( + $(#[$filtered:meta])* + fn $($item:tt)* + ) => { + $(#[$filtered])* + fn $($item)* + }; } #[cfg(feature = "example_generated")] pub mod example_generated; #[cfg(test)] -#[allow(non_upper_case_globals, dead_code)] mod tests { use std::hash::{Hash, Hasher}; use std::collections::hash_map::DefaultHasher; @@ -635,71 +717,71 @@ mod tests { #[doc = "> "] #[doc = "> - Richard Feynman"] struct Flags: u32 { - const FlagA = 0b00000001; + const FLAG_A = 0b00000001; #[doc = " macros are way better at generating code than trans is"] - const FlagB = 0b00000010; - const FlagC = 0b00000100; + const FLAG_B = 0b00000010; + const FLAG_C = 0b00000100; #[doc = "* cmr bed"] #[doc = "* strcat table"] #[doc = " wait what?"] - const FlagABC = FlagA.bits - | FlagB.bits - | FlagC.bits; + const FLAG_ABC = Self::FLAG_A.bits + | Self::FLAG_B.bits + | Self::FLAG_C.bits; } } bitflags! { struct _CfgFlags: u32 { #[cfg(windows)] - const _CfgA = 0b01; + const _CFG_A = 0b01; #[cfg(unix)] - const _CfgB = 0b01; + const _CFG_B = 0b01; #[cfg(windows)] - const _CfgC = _CfgA.bits | 0b10; + const _CFG_C = _CFG_A.bits | 0b10; } } bitflags! { struct AnotherSetOfFlags: i8 { - const AnotherFlag = -1_i8; + const ANOTHER_FLAG = -1_i8; } } bitflags! { struct LongFlags: u32 { - const LongFlagA = 0b1111111111111111; + const LONG_FLAG_A = 0b1111111111111111; } } #[test] fn test_bits(){ assert_eq!(Flags::empty().bits(), 0b00000000); - assert_eq!(FlagA.bits(), 0b00000001); - assert_eq!(FlagABC.bits(), 0b00000111); + assert_eq!(Flags::FLAG_A.bits(), 0b00000001); + assert_eq!(Flags::FLAG_ABC.bits(), 0b00000111); assert_eq!(AnotherSetOfFlags::empty().bits(), 0b00); - assert_eq!(AnotherFlag.bits(), !0_i8); + assert_eq!(AnotherSetOfFlags::ANOTHER_FLAG.bits(), !0_i8); } #[test] fn test_from_bits() { assert_eq!(Flags::from_bits(0), Some(Flags::empty())); - assert_eq!(Flags::from_bits(0b1), Some(FlagA)); - assert_eq!(Flags::from_bits(0b10), Some(FlagB)); - assert_eq!(Flags::from_bits(0b11), Some(FlagA | FlagB)); + assert_eq!(Flags::from_bits(0b1), Some(Flags::FLAG_A)); + assert_eq!(Flags::from_bits(0b10), Some(Flags::FLAG_B)); + assert_eq!(Flags::from_bits(0b11), Some(Flags::FLAG_A | Flags::FLAG_B)); assert_eq!(Flags::from_bits(0b1000), None); - assert_eq!(AnotherSetOfFlags::from_bits(!0_i8), Some(AnotherFlag)); + assert_eq!(AnotherSetOfFlags::from_bits(!0_i8), Some(AnotherSetOfFlags::ANOTHER_FLAG)); } #[test] fn test_from_bits_truncate() { assert_eq!(Flags::from_bits_truncate(0), Flags::empty()); - assert_eq!(Flags::from_bits_truncate(0b1), FlagA); - assert_eq!(Flags::from_bits_truncate(0b10), FlagB); - assert_eq!(Flags::from_bits_truncate(0b11), (FlagA | FlagB)); + assert_eq!(Flags::from_bits_truncate(0b1), Flags::FLAG_A); + assert_eq!(Flags::from_bits_truncate(0b10), Flags::FLAG_B); + assert_eq!(Flags::from_bits_truncate(0b11), (Flags::FLAG_A | Flags::FLAG_B)); assert_eq!(Flags::from_bits_truncate(0b1000), Flags::empty()); - assert_eq!(Flags::from_bits_truncate(0b1001), FlagA); + assert_eq!(Flags::from_bits_truncate(0b1001), Flags::FLAG_A); assert_eq!(AnotherSetOfFlags::from_bits_truncate(0_i8), AnotherSetOfFlags::empty()); } @@ -707,19 +789,19 @@ mod tests { #[test] fn test_is_empty(){ assert!(Flags::empty().is_empty()); - assert!(!FlagA.is_empty()); - assert!(!FlagABC.is_empty()); + assert!(!Flags::FLAG_A.is_empty()); + assert!(!Flags::FLAG_ABC.is_empty()); - assert!(!AnotherFlag.is_empty()); + assert!(!AnotherSetOfFlags::ANOTHER_FLAG.is_empty()); } #[test] fn test_is_all() { 
assert!(Flags::all().is_all()); - assert!(!FlagA.is_all()); - assert!(FlagABC.is_all()); + assert!(!Flags::FLAG_A.is_all()); + assert!(Flags::FLAG_ABC.is_all()); - assert!(AnotherFlag.is_all()); + assert!(AnotherSetOfFlags::ANOTHER_FLAG.is_all()); } #[test] @@ -728,77 +810,77 @@ mod tests { let e2 = Flags::empty(); assert!(!e1.intersects(e2)); - assert!(AnotherFlag.intersects(AnotherFlag)); + assert!(AnotherSetOfFlags::ANOTHER_FLAG.intersects(AnotherSetOfFlags::ANOTHER_FLAG)); } #[test] fn test_empty_does_not_intersect_with_full() { let e1 = Flags::empty(); - let e2 = FlagABC; + let e2 = Flags::FLAG_ABC; assert!(!e1.intersects(e2)); } #[test] fn test_disjoint_intersects() { - let e1 = FlagA; - let e2 = FlagB; + let e1 = Flags::FLAG_A; + let e2 = Flags::FLAG_B; assert!(!e1.intersects(e2)); } #[test] fn test_overlapping_intersects() { - let e1 = FlagA; - let e2 = FlagA | FlagB; + let e1 = Flags::FLAG_A; + let e2 = Flags::FLAG_A | Flags::FLAG_B; assert!(e1.intersects(e2)); } #[test] fn test_contains() { - let e1 = FlagA; - let e2 = FlagA | FlagB; + let e1 = Flags::FLAG_A; + let e2 = Flags::FLAG_A | Flags::FLAG_B; assert!(!e1.contains(e2)); assert!(e2.contains(e1)); - assert!(FlagABC.contains(e2)); + assert!(Flags::FLAG_ABC.contains(e2)); - assert!(AnotherFlag.contains(AnotherFlag)); + assert!(AnotherSetOfFlags::ANOTHER_FLAG.contains(AnotherSetOfFlags::ANOTHER_FLAG)); } #[test] fn test_insert(){ - let mut e1 = FlagA; - let e2 = FlagA | FlagB; + let mut e1 = Flags::FLAG_A; + let e2 = Flags::FLAG_A | Flags::FLAG_B; e1.insert(e2); assert_eq!(e1, e2); let mut e3 = AnotherSetOfFlags::empty(); - e3.insert(AnotherFlag); - assert_eq!(e3, AnotherFlag); + e3.insert(AnotherSetOfFlags::ANOTHER_FLAG); + assert_eq!(e3, AnotherSetOfFlags::ANOTHER_FLAG); } #[test] fn test_remove(){ - let mut e1 = FlagA | FlagB; - let e2 = FlagA | FlagC; + let mut e1 = Flags::FLAG_A | Flags::FLAG_B; + let e2 = Flags::FLAG_A | Flags::FLAG_C; e1.remove(e2); - assert_eq!(e1, FlagB); + assert_eq!(e1, Flags::FLAG_B); - let mut e3 = AnotherFlag; - e3.remove(AnotherFlag); + let mut e3 = AnotherSetOfFlags::ANOTHER_FLAG; + e3.remove(AnotherSetOfFlags::ANOTHER_FLAG); assert_eq!(e3, AnotherSetOfFlags::empty()); } #[test] fn test_operators() { - let e1 = FlagA | FlagC; - let e2 = FlagB | FlagC; - assert_eq!((e1 | e2), FlagABC); // union - assert_eq!((e1 & e2), FlagC); // intersection - assert_eq!((e1 - e2), FlagA); // set difference - assert_eq!(!e2, FlagA); // set complement - assert_eq!(e1 ^ e2, FlagA | FlagB); // toggle + let e1 = Flags::FLAG_A | Flags::FLAG_C; + let e2 = Flags::FLAG_B | Flags::FLAG_C; + assert_eq!((e1 | e2), Flags::FLAG_ABC); // union + assert_eq!((e1 & e2), Flags::FLAG_C); // intersection + assert_eq!((e1 - e2), Flags::FLAG_A); // set difference + assert_eq!(!e2, Flags::FLAG_A); // set complement + assert_eq!(e1 ^ e2, Flags::FLAG_A | Flags::FLAG_B); // toggle let mut e3 = e1; e3.toggle(e2); - assert_eq!(e3, FlagA | FlagB); + assert_eq!(e3, Flags::FLAG_A | Flags::FLAG_B); let mut m4 = AnotherSetOfFlags::empty(); m4.toggle(AnotherSetOfFlags::empty()); @@ -807,23 +889,23 @@ mod tests { #[test] fn test_set() { - let mut e1 = FlagA | FlagC; - e1.set(FlagB, true); - e1.set(FlagC, false); + let mut e1 = Flags::FLAG_A | Flags::FLAG_C; + e1.set(Flags::FLAG_B, true); + e1.set(Flags::FLAG_C, false); - assert_eq!(e1, FlagA | FlagB); + assert_eq!(e1, Flags::FLAG_A | Flags::FLAG_B); } #[test] fn test_assignment_operators() { let mut m1 = Flags::empty(); - let e1 = FlagA | FlagC; + let e1 = Flags::FLAG_A | Flags::FLAG_C; // union - 
m1 |= FlagA; - assert_eq!(m1, FlagA); + m1 |= Flags::FLAG_A; + assert_eq!(m1, Flags::FLAG_A); // intersection m1 &= e1; - assert_eq!(m1, FlagA); + assert_eq!(m1, Flags::FLAG_A); // set difference m1 -= m1; assert_eq!(m1, Flags::empty()); @@ -841,23 +923,25 @@ mod tests { assert_eq!(flags, Flags::empty()); flags = Flags::empty(); - flags.extend([FlagA, FlagB].iter().cloned()); - assert_eq!(flags, FlagA | FlagB); + flags.extend([Flags::FLAG_A, Flags::FLAG_B].iter().cloned()); + assert_eq!(flags, Flags::FLAG_A | Flags::FLAG_B); - flags = FlagA; - flags.extend([FlagA, FlagB].iter().cloned()); - assert_eq!(flags, FlagA | FlagB); + flags = Flags::FLAG_A; + flags.extend([Flags::FLAG_A, Flags::FLAG_B].iter().cloned()); + assert_eq!(flags, Flags::FLAG_A | Flags::FLAG_B); - flags = FlagB; - flags.extend([FlagA, FlagABC].iter().cloned()); - assert_eq!(flags, FlagABC); + flags = Flags::FLAG_B; + flags.extend([Flags::FLAG_A, Flags::FLAG_ABC].iter().cloned()); + assert_eq!(flags, Flags::FLAG_ABC); } #[test] fn test_from_iterator() { assert_eq!([].iter().cloned().collect::(), Flags::empty()); - assert_eq!([FlagA, FlagB].iter().cloned().collect::(), FlagA | FlagB); - assert_eq!([FlagA, FlagABC].iter().cloned().collect::(), FlagABC); + assert_eq!([Flags::FLAG_A, Flags::FLAG_B].iter().cloned().collect::(), + Flags::FLAG_A | Flags::FLAG_B); + assert_eq!([Flags::FLAG_A, Flags::FLAG_ABC].iter().cloned().collect::(), + Flags::FLAG_ABC); } #[test] @@ -866,11 +950,11 @@ mod tests { let mut b = Flags::empty(); assert!(!(a < b) && !(b < a)); - b = FlagB; + b = Flags::FLAG_B; assert!(a < b); - a = FlagC; + a = Flags::FLAG_C; assert!(!(a < b) && b < a); - b = FlagC | FlagB; + b = Flags::FLAG_C | Flags::FLAG_B; assert!(a < b); } @@ -880,10 +964,10 @@ mod tests { let mut b = Flags::empty(); assert!(a <= b && a >= b); - a = FlagA; + a = Flags::FLAG_A; assert!(a > b && a >= b); assert!(b < a && b <= a); - b = FlagB; + b = Flags::FLAG_B; assert!(b > a && b >= a); assert!(a < b && a <= b); } @@ -900,62 +984,63 @@ mod tests { let mut y = Flags::empty(); assert_eq!(hash(&x), hash(&y)); x = Flags::all(); - y = FlagABC; + y = Flags::FLAG_ABC; assert_eq!(hash(&x), hash(&y)); } #[test] fn test_debug() { - assert_eq!(format!("{:?}", FlagA | FlagB), "FlagA | FlagB"); + assert_eq!(format!("{:?}", Flags::FLAG_A | Flags::FLAG_B), "FLAG_A | FLAG_B"); assert_eq!(format!("{:?}", Flags::empty()), "(empty)"); - assert_eq!(format!("{:?}", FlagABC), "FlagA | FlagB | FlagC | FlagABC"); + assert_eq!(format!("{:?}", Flags::FLAG_ABC), "FLAG_A | FLAG_B | FLAG_C | FLAG_ABC"); } #[test] fn test_binary() { - assert_eq!(format!("{:b}", FlagABC), "111"); - assert_eq!(format!("{:#b}", FlagABC), "0b111"); + assert_eq!(format!("{:b}", Flags::FLAG_ABC), "111"); + assert_eq!(format!("{:#b}", Flags::FLAG_ABC), "0b111"); } #[test] fn test_octal() { - assert_eq!(format!("{:o}", LongFlagA), "177777"); - assert_eq!(format!("{:#o}", LongFlagA), "0o177777"); + assert_eq!(format!("{:o}", LongFlags::LONG_FLAG_A), "177777"); + assert_eq!(format!("{:#o}", LongFlags::LONG_FLAG_A), "0o177777"); } #[test] fn test_lowerhex() { - assert_eq!(format!("{:x}", LongFlagA), "ffff"); - assert_eq!(format!("{:#x}", LongFlagA), "0xffff"); + assert_eq!(format!("{:x}", LongFlags::LONG_FLAG_A), "ffff"); + assert_eq!(format!("{:#x}", LongFlags::LONG_FLAG_A), "0xffff"); } #[test] fn test_upperhex() { - assert_eq!(format!("{:X}", LongFlagA), "FFFF"); - assert_eq!(format!("{:#X}", LongFlagA), "0xFFFF"); + assert_eq!(format!("{:X}", LongFlags::LONG_FLAG_A), "FFFF"); + 
assert_eq!(format!("{:#X}", LongFlags::LONG_FLAG_A), "0xFFFF"); } mod submodule { bitflags! { pub struct PublicFlags: i8 { - const FlagX = 0; + const FLAG_X = 0; } } bitflags! { struct PrivateFlags: i8 { - const FlagY = 0; + const FLAG_Y = 0; } } #[test] fn test_private() { - let _ = FlagY; + + let _ = PrivateFlags::FLAG_Y; } } #[test] fn test_public() { - let _ = submodule::FlagX; + let _ = submodule::PublicFlags::FLAG_X; } mod t1 { @@ -984,7 +1069,17 @@ mod tests { const B = 2; } } - assert_eq!(Flags::all(), A); - assert_eq!(format!("{:?}", A), "A"); + assert_eq!(Flags::all(), Flags::A); + assert_eq!(format!("{:?}", Flags::A), "A"); + } + + #[test] + fn test_deprecated() { + bitflags! { + pub struct TestFlags: u32 { + #[deprecated(note = "Use something else.")] + const FLAG_ONE = 1; + } + } } } diff --git a/src/vendor/bitflags/tests/conflicting_trait_impls.rs b/src/vendor/bitflags/tests/conflicting_trait_impls.rs index 4704dfaa3d..933e31c985 100644 --- a/src/vendor/bitflags/tests/conflicting_trait_impls.rs +++ b/src/vendor/bitflags/tests/conflicting_trait_impls.rs @@ -1,4 +1,3 @@ -#![allow(dead_code)] #![no_std] #[macro_use] diff --git a/src/vendor/bitflags/tests/external.rs b/src/vendor/bitflags/tests/external.rs index fc1c346709..b393778292 100644 --- a/src/vendor/bitflags/tests/external.rs +++ b/src/vendor/bitflags/tests/external.rs @@ -1,5 +1,3 @@ -#![allow(dead_code)] - #[macro_use] extern crate bitflags; @@ -11,11 +9,11 @@ bitflags! { const B = 0b00000010; const C = 0b00000100; #[doc = "foo"] - const ABC = A.bits | B.bits | C.bits; + const ABC = Flags::A.bits | Flags::B.bits | Flags::C.bits; } } #[test] fn smoke() { - assert_eq!(ABC, A | B | C); + assert_eq!(Flags::ABC, Flags::A | Flags::B | Flags::C); } diff --git a/src/vendor/bitflags/tests/external_no_std.rs b/src/vendor/bitflags/tests/external_no_std.rs index 8b8c7067f6..7c5c9e0d9b 100644 --- a/src/vendor/bitflags/tests/external_no_std.rs +++ b/src/vendor/bitflags/tests/external_no_std.rs @@ -1,4 +1,3 @@ -#![allow(dead_code)] #![no_std] #[macro_use] @@ -12,11 +11,11 @@ bitflags! { const B = 0b00000010; const C = 0b00000100; #[doc = "foo"] - const ABC = A.bits | B.bits | C.bits; + const ABC = Flags::A.bits | Flags::B.bits | Flags::C.bits; } } #[test] fn smoke() { - assert_eq!(ABC, A | B | C); + assert_eq!(Flags::ABC, Flags::A | Flags::B | Flags::C); } diff --git a/src/vendor/bitflags/tests/i128_bitflags.rs b/src/vendor/bitflags/tests/i128_bitflags.rs index acbb927821..47bd06bfea 100644 --- a/src/vendor/bitflags/tests/i128_bitflags.rs +++ b/src/vendor/bitflags/tests/i128_bitflags.rs @@ -1,6 +1,5 @@ #![cfg(feature = "unstable_testing")] -#![allow(dead_code, unused_imports)] #![feature(i128_type)] #[macro_use] @@ -12,19 +11,19 @@ bitflags! 
{ const A = 0x0000_0000_0000_0000_0000_0000_0000_0001; const B = 0x0000_0000_0000_1000_0000_0000_0000_0000; const C = 0x8000_0000_0000_0000_0000_0000_0000_0000; - const ABC = A.bits | B.bits | C.bits; + const ABC = Self::A.bits | Self::B.bits | Self::C.bits; } } #[test] fn test_i128_bitflags() { - assert_eq!(ABC, A | B | C); - assert_eq!(A.bits, 0x0000_0000_0000_0000_0000_0000_0000_0001); - assert_eq!(B.bits, 0x0000_0000_0000_1000_0000_0000_0000_0000); - assert_eq!(C.bits, 0x8000_0000_0000_0000_0000_0000_0000_0000); - assert_eq!(ABC.bits, 0x8000_0000_0000_1000_0000_0000_0000_0001); - assert_eq!(format!("{:?}", A), "A"); - assert_eq!(format!("{:?}", B), "B"); - assert_eq!(format!("{:?}", C), "C"); - assert_eq!(format!("{:?}", ABC), "A | B | C | ABC"); + assert_eq!(Flags128::ABC, Flags128::A | Flags128::B | Flags128::C); + assert_eq!(Flags128::A.bits, 0x0000_0000_0000_0000_0000_0000_0000_0001); + assert_eq!(Flags128::B.bits, 0x0000_0000_0000_1000_0000_0000_0000_0000); + assert_eq!(Flags128::C.bits, 0x8000_0000_0000_0000_0000_0000_0000_0000); + assert_eq!(Flags128::ABC.bits, 0x8000_0000_0000_1000_0000_0000_0000_0001); + assert_eq!(format!("{:?}", Flags128::A), "A"); + assert_eq!(format!("{:?}", Flags128::B), "B"); + assert_eq!(format!("{:?}", Flags128::C), "C"); + assert_eq!(format!("{:?}", Flags128::ABC), "A | B | C | ABC"); } diff --git a/src/vendor/cc/.cargo-checksum.json b/src/vendor/cc/.cargo-checksum.json new file mode 100644 index 0000000000..60259edcb7 --- /dev/null +++ b/src/vendor/cc/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"4cc6445feac7e9a1f8f1e1c51cc3afd0cf7bb931e3c5a6f18c41258401652702",".travis.yml":"276069d13bb9d23f12fc0f8250fe43903a524b3694397f874eb586c34528aa86","Cargo.toml":"091db7e45f0ccec9c99ef6431dbbdd6613e6b50caffc7e1cd5946031286e1d43","Cargo.toml.orig":"2f4b35205c832e73fbece68f69afeef357e85ec8f9f9534062b090d0fd1b3124","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"2ce6a7ea74afb3f593ec31cdc773e51d09624043c1814ceb48a238876e56db88","appveyor.yml":"ef04ec11eba1462746866e02e0f5336cc50b337f61884b6d2547253ddfd0b957","src/bin/gcc-shim.rs":"d6be9137cb48b86891e7b263adbf492e1193ffe682db9ba4a88eb1079b874b58","src/com.rs":"0cb06f5db0fb70d27db0e5917ca337de6e7032119e6aabfea1bad9c719f5f34b","src/lib.rs":"5f254224f983fa9a5bae1c26bc94df6c434ba8a3ed0eb1deb8361dc0c6db3a41","src/registry.rs":"3876ef9573e3bbc050aef41a684b9a510cc1a91b15ae874fe032cf4377b4d116","src/setup_config.rs":"1a3eeb11c6847c31f2a4685b62ab35c76f0b6d5a17f7ed99e9df164283a771f7","src/winapi.rs":"cb5e6cab3eb570b0f97c660ca448ccfb5024262c0c7b245c181daad91a79f211","src/windows_registry.rs":"6de548aa94215e449f0e58e9a3b1702939d7c2f7b63a9040901c948bf138201d","tests/cc_env.rs":"61a2d5671fab7ffd4c9d749297d5f1db548a35bf488313eeac2eaa353d7d973b","tests/support/mod.rs":"7a375dc966001bd25456e88b02d985819b23b4aab6ce8ac2ba4802c388ed97a8","tests/test.rs":"31edefc38c3ec463d753855db730349e98e507c9388805fff6c0d4d22546a229"},"package":"7db2f146208d7e0fbee761b09cd65a7f51ccc38705d4e7262dad4d73b12a76b1"} \ No newline at end of file diff --git a/src/vendor/cc/.cargo-ok b/src/vendor/cc/.cargo-ok new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/vendor/gcc/.gitignore b/src/vendor/cc/.gitignore similarity index 100% rename from src/vendor/gcc/.gitignore rename to src/vendor/cc/.gitignore diff --git 
a/src/vendor/gcc/.travis.yml b/src/vendor/cc/.travis.yml similarity index 98% rename from src/vendor/gcc/.travis.yml rename to src/vendor/cc/.travis.yml index ff57da5d37..0f0a094ecf 100644 --- a/src/vendor/gcc/.travis.yml +++ b/src/vendor/cc/.travis.yml @@ -6,7 +6,7 @@ rust: matrix: include: # Minimum version supported - - rust: 1.6.0 + - rust: 1.13.0 install: script: cargo build diff --git a/src/vendor/cc/Cargo.toml b/src/vendor/cc/Cargo.toml new file mode 100644 index 0000000000..90e52bec9b --- /dev/null +++ b/src/vendor/cc/Cargo.toml @@ -0,0 +1,36 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "cc" +version = "1.0.0" +authors = ["Alex Crichton "] +description = "A build-time dependency for Cargo build scripts to assist in invoking the native\nC compiler to compile native C code into a static archive to be linked into Rust\ncode.\n" +documentation = "https://docs.rs/cc" +readme = "README.md" +keywords = ["build-dependencies"] +categories = ["development-tools"] +license = "MIT/Apache-2.0" +repository = "https://github.com/alexcrichton/cc-rs" +[dependencies.rayon] +version = "0.8" +optional = true +[dev-dependencies.tempdir] +version = "0.3" + +[features] +parallel = ["rayon"] +[badges.travis-ci] +repository = "alexcrichton/gcc-rs" + +[badges.appveyor] +repository = "alexcrichton/gcc-rs" diff --git a/src/vendor/gcc/Cargo.toml b/src/vendor/cc/Cargo.toml.orig similarity index 69% rename from src/vendor/gcc/Cargo.toml rename to src/vendor/cc/Cargo.toml.orig index 1ae2de816c..d1391ad0e8 100644 --- a/src/vendor/gcc/Cargo.toml +++ b/src/vendor/cc/Cargo.toml.orig @@ -1,24 +1,26 @@ [package] -name = "gcc" -version = "0.3.51" +name = "cc" +version = "1.0.0" authors = ["Alex Crichton "] license = "MIT/Apache-2.0" -repository = "https://github.com/alexcrichton/gcc-rs" -documentation = "https://docs.rs/gcc" +repository = "https://github.com/alexcrichton/cc-rs" +documentation = "https://docs.rs/cc" description = """ A build-time dependency for Cargo build scripts to assist in invoking the native C compiler to compile native C code into a static archive to be linked into Rust code. """ keywords = ["build-dependencies"] +readme = "README.md" +categories = ["development-tools"] [badges] travis-ci = { repository = "alexcrichton/gcc-rs" } appveyor = { repository = "alexcrichton/gcc-rs" } [dependencies] -rayon = { version = "0.7", optional = true } +rayon = { version = "0.8", optional = true } [features] parallel = ["rayon"] diff --git a/src/vendor/cc/LICENSE-APACHE b/src/vendor/cc/LICENSE-APACHE new file mode 100644 index 0000000000..16fe87b06e --- /dev/null +++ b/src/vendor/cc/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/src/vendor/gcc/LICENSE-MIT b/src/vendor/cc/LICENSE-MIT similarity index 100% rename from src/vendor/gcc/LICENSE-MIT rename to src/vendor/cc/LICENSE-MIT diff --git a/src/vendor/gcc/README.md b/src/vendor/cc/README.md similarity index 72% rename from src/vendor/gcc/README.md rename to src/vendor/cc/README.md index 2d3e5ed738..edb0d402c6 100644 --- a/src/vendor/gcc/README.md +++ b/src/vendor/cc/README.md @@ -1,30 +1,24 @@ -# gcc-rs +# cc-rs -A library to compile C/C++ code into a Rust library/application. +A library to compile C/C++/assembly into a Rust library/application. -[![Build Status](https://travis-ci.org/alexcrichton/gcc-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/gcc-rs) -[![Build status](https://ci.appveyor.com/api/projects/status/onu270iw98h81nwv?svg=true)](https://ci.appveyor.com/project/alexcrichton/gcc-rs) +[![Build Status](https://travis-ci.org/alexcrichton/cc-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/cc-rs) +[![Build status](https://ci.appveyor.com/api/projects/status/onu270iw98h81nwv?svg=true)](https://ci.appveyor.com/project/alexcrichton/cc-rs) -[Documentation](https://docs.rs/gcc) +[Documentation](https://docs.rs/cc) A simple library meant to be used as a build dependency with Cargo packages in -order to build a set of C/C++ files into a static archive. Note that while this -crate is called "gcc", it actually calls out to the most relevant compile for -a platform, for example using `cl` on MSVC. That is, this crate does indeed work -on MSVC! +order to build a set of C/C++ files into a static archive. This crate calls out +to the most relevant compiler for a platform, for example using `cl` on MSVC. -## Using gcc-rs +## Using cc-rs First, you'll want to both add a build script for your crate (`build.rs`) and also add this crate to your `Cargo.toml` via: ```toml -[package] -# ... -build = "build.rs" - [build-dependencies] -gcc = "0.3" +cc = "1.0" ``` Next up, you'll want to write a build script like so: @@ -32,16 +26,20 @@ Next up, you'll want to write a build script like so: ```rust,no_run // build.rs -extern crate gcc; +extern crate cc; fn main() { - gcc::compile_library("libfoo.a", &["foo.c", "bar.c"]); + cc::Build::new() + .file("foo.c") + .file("bar.c") + .compile("foo"); } ``` And that's it! Running `cargo build` should take care of the rest and your Rust -application will now have the C files `foo.c` and `bar.c` compiled into it. You -can call the functions in Rust by declaring functions in your Rust code like so: +application will now have the C files `foo.c` and `bar.c` compiled into a file +named libfoo.a. You can call the functions in Rust by declaring functions in +your Rust code like so: ``` extern { @@ -66,7 +64,7 @@ fn main() { To control the programs and flags used for building, the builder can set a number of different environment variables. -* `CFLAGS` - a series of space separated flags passed to "gcc". Note that +* `CFLAGS` - a series of space separated flags passed to compilers. Note that individual flags cannot currently contain spaces, so doing something like: "-L=foo\ bar" is not possible. * `CC` - the actual C compiler used. Note that this is used as an exact @@ -85,9 +83,9 @@ in the following prioritized order: 3. `_` - for example, `HOST_CC` or `TARGET_CFLAGS` 4. `` - a plain `CC`, `AR` as above. 
-If none of these variables exist, gcc-rs uses built-in defaults +If none of these variables exist, cc-rs uses built-in defaults -In addition to the the above optional environment variables, `gcc-rs` has some +In addition to the the above optional environment variables, `cc-rs` has some functions with hard requirements on some variables supplied by [cargo's build-script driver][cargo] that it has the `TARGET`, `OUT_DIR`, `OPT_LEVEL`, and `HOST` variables. @@ -96,16 +94,16 @@ and `HOST` variables. ## Optional features -Currently gcc-rs supports parallel compilation (think `make -jN`) but this -feature is turned off by default. To enable gcc-rs to compile C/C++ in parallel, +Currently cc-rs supports parallel compilation (think `make -jN`) but this +feature is turned off by default. To enable cc-rs to compile C/C++ in parallel, you can change your dependency to: ```toml [build-dependencies] -gcc = { version = "0.3", features = ["parallel"] } +cc = { version = "1.0", features = ["parallel"] } ``` -By default gcc-rs will limit parallelism to `$NUM_JOBS`, or if not present it +By default cc-rs will limit parallelism to `$NUM_JOBS`, or if not present it will limit it to the number of cpus on the machine. If you are using cargo, use `-jN` option of `build`, `test` and `run` commands as `$NUM_JOBS` is supplied by cargo. @@ -117,13 +115,13 @@ is being run. This crate does not ship a C compiler with it. The compiler required varies per platform, but there are three broad categories: * Unix platforms require `cc` to be the C compiler. This can be found by - installing gcc/clang on Linux distributions and Xcode on OSX, for example. + installing cc/clang on Linux distributions and Xcode on OSX, for example. * Windows platforms targeting MSVC (e.g. your target triple ends in `-msvc`) require `cl.exe` to be available and in `PATH`. This is typically found in standard Visual Studio installations and the `PATH` can be set up by running the appropriate developer tools shell. * Windows platforms targeting MinGW (e.g. your target triple ends in `-gnu`) - require `gcc` to be available in `PATH`. We recommend the + require `cc` to be available in `PATH`. We recommend the [MinGW-w64](http://mingw-w64.org) distribution, which is using the [Win-builds](http://win-builds.org) installation system. You may also acquire it via @@ -136,14 +134,14 @@ required varies per platform, but there are three broad categories: ## C++ support -`gcc-rs` supports C++ libraries compilation by using the `cpp` method on -`Config`: +`cc-rs` supports C++ libraries compilation by using the `cpp` method on +`Build`: ```rust,no_run -extern crate gcc; +extern crate cc; fn main() { - gcc::Config::new() + cc::Build::new() .cpp(true) // Switch to C++ library compilation. .file("foo.cpp") .compile("libfoo.a"); @@ -156,7 +154,7 @@ linked to the crate target. ## License -`gcc-rs` is primarily distributed under the terms of both the MIT license and +`cc-rs` is primarily distributed under the terms of both the MIT license and the Apache License (Version 2.0), with portions covered by various BSD-like licenses. 
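As a concrete illustration of the builder API this README diff documents, a `build.rs` along the following lines might be used; the source file name and the probed flag below are illustrative assumptions, not taken from the patch:

```rust,no_run
// build.rs -- a sketch of the `cc::Build` API described in the README above.
// `src/foo.c` and the probed flag are illustrative assumptions.
extern crate cc;

fn main() {
    cc::Build::new()
        .file("src/foo.c")
        .include("src")
        .define("FOO", "bar")
        // Dropped silently if the compiler rejects it.
        .flag_if_supported("-Wlogical-op")
        // Produces libfoo.a and prints the cargo link metadata.
        .compile("foo");
}
```

Per the `is_flag_supported` helper added in `src/lib.rs` below, `flag_if_supported` probes each flag by compiling a tiny `int main(void) { return 0; }` stub with it and keeps the flag only if the compiler produced no stderr output.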
diff --git a/src/vendor/gcc/appveyor.yml b/src/vendor/cc/appveyor.yml similarity index 56% rename from src/vendor/gcc/appveyor.yml rename to src/vendor/cc/appveyor.yml index f6108c6651..aa1edb5f4c 100644 --- a/src/vendor/gcc/appveyor.yml +++ b/src/vendor/cc/appveyor.yml @@ -1,4 +1,24 @@ environment: + + # At the time this was added AppVeyor was having troubles with checking + # revocation of SSL certificates of sites like static.rust-lang.org and what + # we think is crates.io. The libcurl HTTP client by default checks for + # revocation on Windows and according to a mailing list [1] this can be + # disabled. + # + # The `CARGO_HTTP_CHECK_REVOKE` env var here tells cargo to disable SSL + # revocation checking on Windows in libcurl. Note, though, that rustup, which + # we're using to download Rust here, also uses libcurl as the default backend. + # Unlike Cargo, however, rustup doesn't have a mechanism to disable revocation + # checking. To get rustup working we set `RUSTUP_USE_HYPER` which forces it to + # use the Hyper instead of libcurl backend. Both Hyper and libcurl use + # schannel on Windows but it appears that Hyper configures it slightly + # differently such that revocation checking isn't turned on by default. + # + # [1]: https://curl.haxx.se/mail/lib-2016-03/0202.html + RUSTUP_USE_HYPER: 1 + CARGO_HTTP_CHECK_REVOKE: false + matrix: - TARGET: x86_64-pc-windows-msvc ARCH: amd64 diff --git a/src/vendor/gcc/src/bin/gcc-shim.rs b/src/vendor/cc/src/bin/gcc-shim.rs similarity index 100% rename from src/vendor/gcc/src/bin/gcc-shim.rs rename to src/vendor/cc/src/bin/gcc-shim.rs diff --git a/src/vendor/gcc/src/com.rs b/src/vendor/cc/src/com.rs similarity index 100% rename from src/vendor/gcc/src/com.rs rename to src/vendor/cc/src/com.rs diff --git a/src/vendor/gcc/src/lib.rs b/src/vendor/cc/src/lib.rs similarity index 61% rename from src/vendor/gcc/src/lib.rs rename to src/vendor/cc/src/lib.rs index 16ab426c0e..ac966b4e54 100644 --- a/src/vendor/gcc/src/lib.rs +++ b/src/vendor/cc/src/lib.rs @@ -5,44 +5,39 @@ //! //! ```toml //! [build-dependencies] -//! gcc = "0.3" +//! cc = "1.0" //! ``` //! //! The purpose of this crate is to provide the utility functions necessary to //! compile C code into a static archive which is then linked into a Rust crate. -//! The top-level `compile_library` function serves as a convenience and more -//! advanced configuration is available through the `Config` builder. +//! Configuration is available through the `Build` struct. //! //! This crate will automatically detect situations such as cross compilation or //! other environment variables set by Cargo and will build code appropriately. //! -//! # Examples -//! -//! Use the default configuration: +//! The crate is not limited to C code, it can accept any source code that can +//! be passed to a C or C++ compiler. As such, assembly files with extensions +//! `.s` (gcc/clang) and `.asm` (MSVC) can also be compiled. //! -//! ```no_run -//! extern crate gcc; +//! [`Build`]: struct.Build.html //! -//! fn main() { -//! gcc::compile_library("libfoo.a", &["src/foo.c"]); -//! } -//! ``` +//! # Examples //! -//! Use more advanced configuration: +//! Use the `Build` struct to compile `src/foo.c`: //! //! ```no_run -//! extern crate gcc; +//! extern crate cc; //! //! fn main() { -//! gcc::Config::new() -//! .file("src/foo.c") -//! .define("FOO", Some("bar")) -//! .include("src") -//! .compile("libfoo.a"); +//! cc::Build::new() +//! .file("src/foo.c") +//! .define("FOO", Some("bar")) +//! .include("src") +//! 
.compile("foo"); //! } //! ``` -#![doc(html_root_url = "https://docs.rs/gcc/0.3")] +#![doc(html_root_url = "https://docs.rs/cc/1.0")] #![cfg_attr(test, deny(warnings))] #![deny(missing_docs)] @@ -57,6 +52,9 @@ use std::process::{Command, Stdio, Child}; use std::io::{self, BufReader, BufRead, Read, Write}; use std::thread::{self, JoinHandle}; +#[cfg(feature = "parallel")] +use std::sync::Mutex; + // These modules are all glue to support reading the MSVC version from // the registry and from COM interfaces #[cfg(windows)] @@ -71,12 +69,18 @@ mod setup_config; pub mod windows_registry; -/// Extra configuration to pass to gcc. -pub struct Config { +/// A builder for compilation of a native static library. +/// +/// A `Build` is the main type of the `cc` crate and is used to control all the +/// various configuration options and such of a compile. You'll find more +/// documentation on each method itself. +#[derive(Clone, Debug)] +pub struct Build { include_directories: Vec, definitions: Vec<(String, Option)>, objects: Vec, flags: Vec, + flags_supported: Vec, files: Vec, cpp: bool, cpp_link_stdlib: Option>, @@ -94,6 +98,44 @@ pub struct Config { static_crt: Option, shared_flag: Option, static_flag: Option, + warnings_into_errors: bool, + warnings: bool, +} + +/// Represents the types of errors that may occur while using cc-rs. +#[derive(Clone, Debug)] +enum ErrorKind { + /// Error occurred while performing I/O. + IOError, + /// Invalid architecture supplied. + ArchitectureInvalid, + /// Environment variable not found, with the var in question as extra info. + EnvVarNotFound, + /// Error occurred while using external tools (ie: invocation of compiler). + ToolExecError, + /// Error occurred due to missing external tools. + ToolNotFound, +} + +/// Represents an internal error that occurred, with an explaination. +#[derive(Clone, Debug)] +pub struct Error { + /// Describes the kind of error that occurred. + kind: ErrorKind, + /// More explaination of error that occurred. + message: String, +} + +impl Error { + fn new(kind: ErrorKind, message: &str) -> Error { + Error { kind: kind, message: message.to_owned() } + } +} + +impl From for Error { + fn from(e: io::Error) -> Error { + Error::new(ErrorKind::IOError, &format!("{}", e)) + } } /// Configuration used to represent an invocation of a C compiler. @@ -103,6 +145,7 @@ pub struct Config { /// This can be used to further configure other build systems (e.g. forward /// along CC and/or CFLAGS) or the `to_command` method can be used to run the /// compiler itself. +#[derive(Clone, Debug)] pub struct Tool { path: PathBuf, args: Vec, @@ -115,7 +158,7 @@ pub struct Tool { /// Each family of tools differs in how and what arguments they accept. /// /// Detection of a family is done on best-effort basis and may not accurately reflect the tool. -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, PartialEq)] enum ToolFamily { /// Tool is GNU Compiler Collection-like. Gnu, @@ -153,39 +196,42 @@ impl ToolFamily { ToolFamily::Clang => "-E", } } -} -/// Compile a library from the given set of input C files. -/// -/// This will simply compile all files into object files and then assemble them -/// into the output. This will read the standard environment variables to detect -/// cross compilations and such. -/// -/// This function will also print all metadata on standard output for Cargo. 
-/// -/// # Example -/// -/// ```no_run -/// gcc::compile_library("libfoo.a", &["foo.c", "bar.c"]); -/// ``` -pub fn compile_library(output: &str, files: &[&str]) { - let mut c = Config::new(); - for f in files.iter() { - c.file(*f); - } - c.compile(output); + /// What the flags to enable all warnings + fn warnings_flags(&self) -> &'static [&'static str] { + static MSVC_FLAGS: &'static [&'static str] = &["/W4"]; + static GNU_CLANG_FLAGS: &'static [&'static str] = &["-Wall", "-Wextra"]; + + match *self { + ToolFamily::Msvc => &MSVC_FLAGS, + ToolFamily::Gnu | + ToolFamily::Clang => &GNU_CLANG_FLAGS, + } + } + + /// What the flag to turn warning into errors + fn warnings_to_errors_flag(&self) -> &'static str { + match *self { + ToolFamily::Msvc => "/WX", + ToolFamily::Gnu | + ToolFamily::Clang => "-Werror" + } + } } -impl Config { +impl Build { /// Construct a new instance of a blank set of configuration. /// - /// This builder is finished with the `compile` function. - pub fn new() -> Config { - Config { + /// This builder is finished with the [`compile`] function. + /// + /// [`compile`]: struct.Build.html#method.compile + pub fn new() -> Build { + Build { include_directories: Vec::new(), definitions: Vec::new(), objects: Vec::new(), flags: Vec::new(), + flags_supported: Vec::new(), files: Vec::new(), shared_flag: None, static_flag: None, @@ -203,38 +249,136 @@ impl Config { cargo_metadata: true, pic: None, static_crt: None, + warnings: true, + warnings_into_errors: false, } } /// Add a directory to the `-I` or include path for headers - pub fn include>(&mut self, dir: P) -> &mut Config { + /// + /// # Example + /// + /// ```no_run + /// use std::path::Path; + /// + /// let library_path = Path::new("/path/to/library"); + /// + /// cc::Build::new() + /// .file("src/foo.c") + /// .include(library_path) + /// .include("src") + /// .compile("foo"); + /// ``` + pub fn include>(&mut self, dir: P) -> &mut Build { self.include_directories.push(dir.as_ref().to_path_buf()); self } /// Specify a `-D` variable with an optional value. 
- pub fn define(&mut self, var: &str, val: Option<&str>) -> &mut Config { - self.definitions.push((var.to_string(), val.map(|s| s.to_string()))); + /// + /// # Example + /// + /// ```no_run + /// cc::Build::new() + /// .file("src/foo.c") + /// .define("FOO", "BAR") + /// .define("BAZ", None) + /// .compile("foo"); + /// ``` + pub fn define<'a, V: Into>>(&mut self, var: &str, val: V) -> &mut Build { + self.definitions.push((var.to_string(), val.into().map(|s| s.to_string()))); self } /// Add an arbitrary object file to link in - pub fn object>(&mut self, obj: P) -> &mut Config { + pub fn object>(&mut self, obj: P) -> &mut Build { self.objects.push(obj.as_ref().to_path_buf()); self } /// Add an arbitrary flag to the invocation of the compiler - pub fn flag(&mut self, flag: &str) -> &mut Config { + /// + /// # Example + /// + /// ```no_run + /// cc::Build::new() + /// .file("src/foo.c") + /// .flag("-ffunction-sections") + /// .compile("foo"); + /// ``` + pub fn flag(&mut self, flag: &str) -> &mut Build { self.flags.push(flag.to_string()); self } + fn ensure_check_file(&self) -> Result { + let out_dir = self.get_out_dir()?; + let src = if self.cpp { + out_dir.join("flag_check.cpp") + } else { + out_dir.join("flag_check.c") + }; + + if !src.exists() { + let mut f = fs::File::create(&src)?; + write!(f, "int main(void) {{ return 0; }}")?; + } + + Ok(src) + } + + fn is_flag_supported(&self, flag: &str) -> Result { + let out_dir = self.get_out_dir()?; + let src = self.ensure_check_file()?; + let obj = out_dir.join("flag_check"); + let target = self.get_target()?; + let mut cfg = Build::new(); + cfg.flag(flag) + .target(&target) + .opt_level(0) + .host(&target) + .debug(false) + .cpp(self.cpp); + let compiler = cfg.try_get_compiler()?; + let mut cmd = compiler.to_command(); + command_add_output_file(&mut cmd, &obj, target.contains("msvc"), false); + cmd.arg(&src); + + let output = cmd.output()?; + Ok(output.stderr.is_empty()) + } + + /// Add an arbitrary flag to the invocation of the compiler if it supports it + /// + /// # Example + /// + /// ```no_run + /// cc::Build::new() + /// .file("src/foo.c") + /// .flag_if_supported("-Wlogical-op") // only supported by GCC + /// .flag_if_supported("-Wunreachable-code") // only supported by clang + /// .compile("foo"); + /// ``` + pub fn flag_if_supported(&mut self, flag: &str) -> &mut Build { + self.flags_supported.push(flag.to_string()); + self + } + /// Set the `-shared` flag. /// /// When enabled, the compiler will produce a shared object which can /// then be linked with other objects to form an executable. - pub fn shared_flag(&mut self, shared_flag: bool) -> &mut Config { + /// + /// # Example + /// + /// ```no_run + /// cc::Build::new() + /// .file("src/foo.c") + /// .shared_flag(true) + /// .compile("libfoo.so"); + /// ``` + + pub fn shared_flag(&mut self, shared_flag: bool) -> &mut Build { self.shared_flag = Some(shared_flag); self } @@ -243,26 +387,91 @@ impl Config { /// /// When enabled on systems that support dynamic linking, this prevents /// linking with the shared libraries. 
- pub fn static_flag(&mut self, static_flag: bool) -> &mut Config { + /// + /// # Example + /// + /// ```no_run + /// cc::Build::new() + /// .file("src/foo.c") + /// .shared_flag(true) + /// .static_flag(true) + /// .compile("foo"); + /// ``` + pub fn static_flag(&mut self, static_flag: bool) -> &mut Build { self.static_flag = Some(static_flag); self } /// Add a file which will be compiled - pub fn file>(&mut self, p: P) -> &mut Config { + pub fn file>(&mut self, p: P) -> &mut Build { self.files.push(p.as_ref().to_path_buf()); self } + /// Add files which will be compiled + pub fn files

+ +## Compatibility + +`conv` is compatible with Rust 1.2 and higher. + +## Change Log + +### v0.3.2 + +- Added integer ↔ `char` conversions. +- Added missing `isize`/`usize` → `f32`/`f64` conversions. +- Fixed the error type of `i64` → `usize` for 64-bit targets. + +### v0.3.1 + +- Change to `unwrap_ok` for better codegen (thanks bluss). +- Fix for Rust breaking change (code in question was dodgy anyway; thanks m4rw3r). + +### v0.3.0 + +- Added an `Error` constraint to all `Err` associated types. This will break any user-defined conversions where the `Err` type does not implement `Error`. +- Renamed the `Overflow` and `Underflow` errors to `PosOverflow` and `NegOverflow` respectively. In the context of floating point conversions, "underflow" usually means the value was too close to zero to correctly represent. + +### v0.2.1 + +- Added `ConvUtil::into_as` as a shortcut for `Into::::into`. +- Added `#[inline]` attributes. +- Added `Saturate::saturate`, which can saturate `Result`s arising from over/underflow. + +### v0.2.0 + +- Changed all error types to include the original input as payload. This breaks pretty much *everything*. Sorry about that. On the bright side, there's now no downside to using the conversion traits for non-`Copy` types. +- Added the normal rounding modes for float → int approximations: `RoundToNearest`, `RoundToNegInf`, `RoundToPosInf`, and `RoundToZero`. +- `ApproxWith` is now subsumed by a pair of extension traits (`ConvUtil` and `ConvAsUtil`), that also have shortcuts for `TryInto` and `ValueInto` so that you can specify the destination type on the method. + +# Overview + +The following traits are used to define various conversion semantics: + +- [`ApproxFrom`](./trait.ApproxFrom.html)/[`ApproxInto`](./trait.ApproxInto.html) - approximate conversions, with selectable approximation scheme (see [`ApproxScheme`](./trait.ApproxScheme.html)). +- [`TryFrom`](./trait.TryFrom.html)/[`TryInto`](./trait.TryInto.html) - general, potentially failing value conversions. +- [`ValueFrom`](./trait.ValueFrom.html)/[`ValueInto`](./trait.ValueInto.html) - exact, value-preserving conversions. + +When *defining* a conversion, try to implement the `*From` trait variant where possible. When *using* a conversion, try to depend on the `*Into` trait variant where possible. This is because the `*Into` traits automatically use `*From` implementations, but not the reverse. Implementing `*From` and using `*Into` ensures conversions work in as many contexts as possible. + +These extension methods are provided to help with some common cases: + +- [`ConvUtil::approx_as`](./trait.ConvUtil.html#method.approx_as) - approximates to `Dst` with the `DefaultApprox` scheme. +- [`ConvUtil::approx_as_by`](./trait.ConvUtil.html#method.approx_as_by) - approximates to `Dst` with the scheme `S`. +- [`ConvUtil::into_as`](./trait.ConvUtil.html#method.into_as) - converts to `Dst` using `Into::into`. +- [`ConvUtil::try_as`](./trait.ConvUtil.html#method.try_as) - converts to `Dst` using `TryInto::try_into`. +- [`ConvUtil::value_as`](./trait.ConvUtil.html#method.value_as) - converts to `Dst` using `ValueInto::value_into`. +- [`ConvAsUtil::approx`](./trait.ConvAsUtil.html#method.approx) - approximates to an inferred destination type with the `DefaultApprox` scheme. +- [`ConvAsUtil::approx_by`](./trait.ConvAsUtil.html#method.approx_by) - approximates to an inferred destination type with the scheme `S`. +- [`Saturate::saturate`](./errors/trait.Saturate.html#tymethod.saturate) - saturates on overflow. 
+- [`UnwrapOk::unwrap_ok`](./errors/trait.UnwrapOk.html#tymethod.unwrap_ok) - unwraps results from conversions that cannot fail. +- [`UnwrapOrInf::unwrap_or_inf`](./errors/trait.UnwrapOrInf.html#tymethod.unwrap_or_inf) - saturates to ±∞ on failure. +- [`UnwrapOrInvalid::unwrap_or_invalid`](./errors/trait.UnwrapOrInvalid.html#tymethod.unwrap_or_invalid) - substitutes the target type's "invalid" sentinel value on failure. +- [`UnwrapOrSaturate::unwrap_or_saturate`](./errors/trait.UnwrapOrSaturate.html#tymethod.unwrap_or_saturate) - saturates to the maximum or minimum value of the target type on failure. + +A macro is provided to assist in implementing conversions: + +- [`TryFrom!`](./macros/index.html#tryfrom!) - derives an implementation of [`TryFrom`](./trait.TryFrom.html). + +If you are implementing your own types, you may also be interested in the traits contained in the [`misc`](./misc/index.html) module. + +## Provided Implementations + +The crate provides several blanket implementations: + +- `*From for A` (all types can be converted from and into themselves). +- `*Into for Src where Dst: *From` (`*From` implementations imply a matching `*Into` implementation). + +Conversions for the builtin numeric (integer and floating point) types are provided. In general, `ValueFrom` conversions exist for all pairs except for float → integer (since such a conversion is generally unlikely to *exactly* succeed) and `f64 → f32` (for the same reason). `ApproxFrom` conversions with the `DefaultApprox` scheme exist between all pairs. `ApproxFrom` with the `Wrapping` scheme exist between integers. + +## Errors + +A number of error types are defined in the [`errors`](./errors/index.html) module. Generally, conversions use whichever error type most *narrowly* defines the kinds of failures that can occur. For example: + +- `ValueFrom for u16` cannot possibly fail, and as such it uses `NoError`. +- `ValueFrom for u16` can *only* fail with a negative overflow, thus it uses the `NegOverflow` type. +- `ValueFrom for u16` can overflow in either direction, hence it uses `RangeError`. +- Finally, `ApproxFrom for u16` can overflow (positive or negative), or attempt to convert NaN; `FloatError` covers those three cases. + +Because there are *numerous* error types, the `GeneralError` enum is provided. `From for GeneralError` exists for each error type `E` defined by this crate (even for `NoError`!), allowing errors to be translated automatically by `try!`. In fact, all errors can be "expanded" to *all* more general forms (*e.g.* `NoError` → `NegOverflow`, `PosOverflow` → `RangeError` → `FloatError`). + +Aside from `NoError`, the various error types wrap the input value that you attempted to convert. This is so that non-`Copy` types do not need to be pre-emptively cloned prior to conversion, just in case the conversion fails. A downside is that this means there are many, *many* incompatible error types. + +To help alleviate this, there is also `GeneralErrorKind`, which is simply `GeneralError` without the payload, and all errors can be converted into it directly. + +The reason for not just using `GeneralErrorKind` in the first place is to statically reduce the number of potential error cases you need to deal with. It also allows the `Unwrap*` extension traits to be defined *without* the possibility for runtime failure (*e.g.* you cannot use `unwrap_or_saturate` with a `FloatError`, because what do you do if the error is `NotANumber`; saturate to max or to min? Or panic?). 
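For instance, the automatic widening into `GeneralError` that `try!` performs might look like the following sketch (the `widen` function and its name are illustrative, not part of the crate; it assumes the `From` impls into `GeneralError` described above):

```
extern crate conv;

use conv::*;

// Both error types below are converted into `GeneralError<i16>`
// automatically by `try!`, as described in the Errors section.
fn widen(x: i16) -> Result<(u8, i32), GeneralError<i16>> {
    let byte: u8 = try!(x.value_into());  // Err type here is RangeError<i16>
    let word: i32 = try!(x.value_into()); // Err type here is NoError
    Ok((byte, word))
}

fn main() {
    assert_eq!(widen(7).unwrap(), (7u8, 7i32));
    assert!(widen(300).is_err()); // 300 overflows u8
    assert!(widen(-1).is_err());  // -1 underflows u8
}
```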
+ +# Examples + +``` +# extern crate conv; +# use conv::*; +# fn main() { +// This *cannot* fail, so we can use `unwrap_ok` to discard the `Result`. +assert_eq!(u8::value_from(0u8).unwrap_ok(), 0u8); + +// This *can* fail. Specifically, it can overflow toward negative infinity. +assert_eq!(u8::value_from(0i8), Ok(0u8)); +assert_eq!(u8::value_from(-1i8), Err(NegOverflow(-1))); + +// This can overflow in *either* direction; hence the change to `RangeError`. +assert_eq!(u8::value_from(-1i16), Err(RangeError::NegOverflow(-1))); +assert_eq!(u8::value_from(0i16), Ok(0u8)); +assert_eq!(u8::value_from(256i16), Err(RangeError::PosOverflow(256))); + +// We can use the extension traits to simplify this a little. +assert_eq!(u8::value_from(-1i16).unwrap_or_saturate(), 0u8); +assert_eq!(u8::value_from(0i16).unwrap_or_saturate(), 0u8); +assert_eq!(u8::value_from(256i16).unwrap_or_saturate(), 255u8); + +// Obviously, all integers can be "approximated" using the default scheme (it +// doesn't *do* anything), but they can *also* be approximated with the +// `Wrapping` scheme. +assert_eq!( + >::approx_from(400u16), + Err(PosOverflow(400))); +assert_eq!( + >::approx_from(400u16), + Ok(144u8)); + +// This is rather inconvenient; as such, there are a number of convenience +// extension methods available via `ConvUtil` and `ConvAsUtil`. +assert_eq!(400u16.approx(), Err::(PosOverflow(400))); +assert_eq!(400u16.approx_by::(), Ok::(144u8)); +assert_eq!(400u16.approx_as::(), Err(PosOverflow(400))); +assert_eq!(400u16.approx_as_by::(), Ok(144)); + +// Integer -> float conversions *can* fail due to limited precision. +// Once the continuous range of exactly representable integers is exceeded, the +// provided implementations fail with overflow errors. +assert_eq!(f32::value_from(16_777_216i32), Ok(16_777_216.0f32)); +assert_eq!(f32::value_from(16_777_217i32), Err(RangeError::PosOverflow(16_777_217))); + +// Float -> integer conversions have to be done using approximations. Although +// exact conversions are *possible*, "advertising" this with an implementation +// is misleading. +// +// Note that `DefaultApprox` for float -> integer uses whatever rounding +// mode is currently active (*i.e.* whatever `as` would do). +assert_eq!(41.0f32.approx(), Ok(41u8)); +assert_eq!(41.3f32.approx(), Ok(41u8)); +assert_eq!(41.5f32.approx(), Ok(41u8)); +assert_eq!(41.8f32.approx(), Ok(41u8)); +assert_eq!(42.0f32.approx(), Ok(42u8)); + +assert_eq!(255.0f32.approx(), Ok(255u8)); +assert_eq!(256.0f32.approx(), Err::(FloatError::PosOverflow(256.0))); + +// Sometimes, it can be useful to saturate the conversion from float to +// integer directly, then account for NaN as input separately. The `Saturate` +// extension trait exists for this reason. +assert_eq!((-23.0f32).approx_as::().saturate(), Ok(0)); +assert_eq!(302.0f32.approx_as::().saturate(), Ok(255u8)); +assert!(std::f32::NAN.approx_as::().saturate().is_err()); + +// If you really don't care about the specific kind of error, you can just rely +// on automatic conversion to `GeneralErrorKind`. +fn too_many_errors() -> Result<(), GeneralErrorKind> { + assert_eq!({let r: u8 = try!(0u8.value_into()); r}, 0u8); + assert_eq!({let r: u8 = try!(0i8.value_into()); r}, 0u8); + assert_eq!({let r: u8 = try!(0i16.value_into()); r}, 0u8); + assert_eq!({let r: u8 = try!(0.0f32.approx()); r}, 0u8); + Ok(()) +} +# let _ = too_many_errors(); +# } +``` + +*/ + +#![deny(missing_docs)] + +#[macro_use] extern crate custom_derive; + +// Exported macros. 
+pub mod macros; + +pub use errors::{ + NoError, GeneralError, GeneralErrorKind, Unrepresentable, + NegOverflow, PosOverflow, + FloatError, RangeError, RangeErrorKind, + Saturate, + UnwrapOk, UnwrapOrInf, UnwrapOrInvalid, UnwrapOrSaturate, +}; + +use std::error::Error; + +/** +Publicly re-exports the most generally useful set of items. + +Usage of the prelude should be considered **unstable**. Although items will likely *not* be removed without bumping the major version, new items *may* be added, which could potentially cause name conflicts in user code. +*/ +pub mod prelude { + pub use super::{ + ApproxFrom, ApproxInto, + ValueFrom, ValueInto, + GeneralError, GeneralErrorKind, + Saturate, + UnwrapOk, UnwrapOrInf, UnwrapOrInvalid, UnwrapOrSaturate, + ConvUtil, ConvAsUtil, + RoundToNearest, RoundToZero, Wrapping, + }; +} + +macro_rules! as_item { + ($($i:item)*) => {$($i)*}; +} + +macro_rules! item_for_each { + ( + $( ($($arg:tt)*) ),* $(,)* => { $($exp:tt)* } + ) => { + macro_rules! body { + $($exp)* + } + + $( + body! { $($arg)* } + )* + }; +} + +pub mod errors; +pub mod misc; + +mod impls; + +/** +This trait is used to perform a conversion that is permitted to approximate the result, but *not* to wrap or saturate the result to fit into the destination type's representable range. + +Where possible, prefer *implementing* this trait over `ApproxInto`, but prefer *using* `ApproxInto` for generic constraints. + +# Details + +All implementations of this trait must provide a conversion that can be separated into two logical steps: an approximation transform, and a representation transform. + +The "approximation transform" step involves transforming the input value into an approximately equivalent value which is supported by the target type *without* taking the target type's representable range into account. For example, this might involve rounding or truncating a floating point value to an integer, or reducing the accuracy of a floating point value. + +The "representation transform" step *exactly* rewrites the value from the source type's binary representation into the destination type's binary representation. This step *may not* transform the value in any way. If the result of the approximation is not representable, the conversion *must* fail. + +The major reason for this formulation is to exactly define what happens when converting between floating point and integer types. Often, it is unclear what happens to floating point values beyond the range of the target integer type. Do they saturate, wrap, or cause a failure? + +With this formulation, it is well-defined: if a floating point value is outside the representable range, the conversion fails. This allows users to distinguish between approximation and range violation, and act accordingly. +*/ +pub trait ApproxFrom: Sized where Scheme: ApproxScheme { + /// The error type produced by a failed conversion. + type Err: Error; + + /// Convert the given value into an approximately equivalent representation. + fn approx_from(src: Src) -> Result; +} + +impl ApproxFrom for Src where Scheme: ApproxScheme { + type Err = NoError; + fn approx_from(src: Src) -> Result { + Ok(src) + } +} + +/** +This is the dual of `ApproxFrom`; see that trait for information. + +Where possible, prefer *using* this trait over `ApproxFrom` for generic constraints, but prefer *implementing* `ApproxFrom`. +*/ +pub trait ApproxInto where Scheme: ApproxScheme { + /// The error type produced by a failed conversion. 
+ type Err: Error; + + /// Convert the subject into an approximately equivalent representation. + fn approx_into(self) -> Result; +} + +impl ApproxInto for Src +where + Dst: ApproxFrom, + Scheme: ApproxScheme, +{ + type Err = Dst::Err; + fn approx_into(self) -> Result { + ApproxFrom::approx_from(self) + } +} + +/** +This trait is used to mark approximation scheme types. +*/ +pub trait ApproxScheme {} + +/** +The "default" approximation scheme. This scheme does whatever would generally be expected of a lossy conversion, assuming no additional context or instruction is given. + +This is a double-edged sword: it has the loosest semantics, but is far more likely to exist than more complicated approximation schemes. +*/ +pub enum DefaultApprox {} +impl ApproxScheme for DefaultApprox {} + +/** +This scheme is used to convert a value by "wrapping" it into a narrower range. + +In abstract, this can be viewed as the opposite of rounding: rather than preserving the most significant bits of a value, it preserves the *least* significant bits of a value. +*/ +pub enum Wrapping {} +impl ApproxScheme for Wrapping {} + +/** +This scheme is used to convert a value by rounding it to the nearest representable value, with ties rounding away from zero. +*/ +pub enum RoundToNearest {} +impl ApproxScheme for RoundToNearest {} + +/** +This scheme is used to convert a value by rounding it toward negative infinity to the nearest representable value. +*/ +pub enum RoundToNegInf {} +impl ApproxScheme for RoundToNegInf {} + +/** +This scheme is used to convert a value by rounding it toward positive infinity to the nearest representable value. +*/ +pub enum RoundToPosInf {} +impl ApproxScheme for RoundToPosInf {} + +/** +This scheme is used to convert a value by rounding it toward zero to the nearest representable value. +*/ +pub enum RoundToZero {} +impl ApproxScheme for RoundToZero {} + +/** +This trait is used to perform a conversion between different semantic types which might fail. + +Where possible, prefer *implementing* this trait over `TryInto`, but prefer *using* `TryInto` for generic constraints. + +# Details + +Typically, this should be used in cases where you are converting between values whose ranges and/or representations only partially overlap. That the conversion may fail should be a reasonably expected outcome. A standard example of this is converting from integers to enums of unitary variants. +*/ +pub trait TryFrom: Sized { + /// The error type produced by a failed conversion. + type Err: Error; + + /// Convert the given value into the subject type. + fn try_from(src: Src) -> Result; +} + +impl TryFrom for Src { + type Err = NoError; + fn try_from(src: Src) -> Result { + Ok(src) + } +} + +/** +This is the dual of `TryFrom`; see that trait for information. + +Where possible, prefer *using* this trait over `TryFrom` for generic constraints, but prefer *implementing* `TryFrom`. +*/ +pub trait TryInto { + /// The error type produced by a failed conversion. + type Err: Error; + + /// Convert the subject into the destination type. + fn try_into(self) -> Result; +} + +impl TryInto for Src where Dst: TryFrom { + type Err = Dst::Err; + fn try_into(self) -> Result { + TryFrom::try_from(self) + } +} + +/** +This trait is used to perform an exact, value-preserving conversion. + +Where possible, prefer *implementing* this trait over `ValueInto`, but prefer *using* `ValueInto` for generic constraints. 
+
+/**
+This extension trait exists to simplify using various conversions.
+
+If there is more than one implementation for a given type/trait pair, a simple call to `*_into` may not be uniquely resolvable. Due to the position of the type parameter (on the trait itself), it is cumbersome to specify the destination type. A similar problem exists for approximation schemes.
+
+See also the [`ConvAsUtil`](./trait.ConvAsUtil.html) trait.
+
+> **Note**: There appears to be a bug in `rustdoc`'s output. This trait is implemented *for all* types, though the methods are only available for types where the appropriate conversions are defined.
+*/
+pub trait ConvUtil {
+    /// Approximate the subject to a given type with the default scheme.
+    fn approx_as<Dst>(self) -> Result<Dst, Self::Err>
+    where Self: Sized + ApproxInto<Dst> {
+        self.approx_into()
+    }
+
+    /// Approximate the subject to a given type with a specific scheme.
+    fn approx_as_by<Dst, Scheme>(self) -> Result<Dst, Self::Err>
+    where
+        Self: Sized + ApproxInto<Dst, Scheme>,
+        Scheme: ApproxScheme,
+    {
+        self.approx_into()
+    }
+
+    /// Convert the subject to a given type.
+    fn into_as<Dst>(self) -> Dst
+    where Self: Sized + Into<Dst> {
+        self.into()
+    }
+
+    /// Attempt to convert the subject to a given type.
+    fn try_as<Dst>(self) -> Result<Dst, Self::Err>
+    where Self: Sized + TryInto<Dst> {
+        self.try_into()
+    }
+
+    /// Attempt a value conversion of the subject to a given type.
+    fn value_as<Dst>(self) -> Result<Dst, Self::Err>
+    where Self: Sized + ValueInto<Dst> {
+        self.value_into()
+    }
+}
+
+impl<T> ConvUtil for T {}
+
+/**
+This extension trait exists to simplify using various conversions.
+
+If there is more than one `ApproxFrom` implementation for a given type, a simple call to `approx_into` may not be uniquely resolvable. Due to the position of the scheme parameter (on the trait itself), it is cumbersome to specify which scheme you wanted.
+
+The destination type is inferred from context.
+
+See also the [`ConvUtil`](./trait.ConvUtil.html) trait.
+
+> **Note**: There appears to be a bug in `rustdoc`'s output. This trait is implemented *for all* types, though the methods are only available for types where the appropriate conversions are defined.
+*/
+pub trait ConvAsUtil<Dst> {
+    /// Approximate the subject with the default scheme.
+    fn approx(self) -> Result<Dst, Self::Err>
+    where Self: Sized + ApproxInto<Dst> {
+        self.approx_into()
+    }
+
+    /// Approximate the subject with a specific scheme.
+    fn approx_by<Scheme>(self) -> Result<Dst, Self::Err>
+    where
+        Self: Sized + ApproxInto<Dst, Scheme>,
+        Scheme: ApproxScheme,
+    {
+        self.approx_into()
+    }
+}
+
+impl<T, Dst> ConvAsUtil<Dst> for T {}
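Where `ConvUtil` names the destination type explicitly, `ConvAsUtil` leaves it to inference. A small sketch: the first assertion is taken from `tests/conv_utils.rs` below, while the second extrapolates from the documented round-to-nearest behaviour and is an assumption on my part.

```rust
extern crate conv;

use conv::prelude::*;

fn main() {
    // The destination type (i32) is inferred from the right-hand side.
    assert_eq!((1.5f32).approx(), Ok(1i32));

    // A scheme can still be named while the destination stays inferred.
    assert_eq!((1.5f32).approx_by::<RoundToNearest>(), Ok(2i32));
}
```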
diff --git a/src/vendor/conv/src/macros.rs b/src/vendor/conv/src/macros.rs
new file mode 100644
index 0000000000..f0609ecb18
--- /dev/null
+++ b/src/vendor/conv/src/macros.rs
@@ -0,0 +1,148 @@
+/*!
+This module provides convenience macros to help with implementing the conversion traits.
+
+# `TryFrom!`
+
+```ignore
+macro_rules! TryFrom {
+    (($target:ty) $enum:item) => { ... };
+}
+```
+
+This macro attempts to derive an implementation of the [`TryFrom`](../trait.TryFrom.html) trait. Specifically, it supports `enum`s consisting entirely of unitary variants, with or without explicit values. The source type can be any integer type which the variants of the enumeration can be explicitly cast to (*i.e.* using `as`).
+
+If a conversion fails (due to there being no matching variant for the specified integer value `src`), then the conversion returns `Err(Unrepresentable(src))` (see [`Unrepresentable`](../errors/struct.Unrepresentable.html)).
+
+It is compatible with the [`custom_derive!`](https://crates.io/crates/custom_derive) macro.
+
+## Example
+
+Using `custom_derive!`:
+
+```
+#[macro_use] extern crate conv;
+#[macro_use] extern crate custom_derive;
+
+custom_derive! {
+    #[derive(Debug, PartialEq, TryFrom(i32))]
+    enum Colours {
+        Red = 0,
+        Green = 5,
+        Blue
+    }
+}
+
+fn main() {
+    use conv::{TryFrom, Unrepresentable};
+
+    assert_eq!(Colours::try_from(0), Ok(Colours::Red));
+    assert_eq!(Colours::try_from(1), Err(Unrepresentable(1)));
+    assert_eq!(Colours::try_from(5), Ok(Colours::Green));
+    assert_eq!(Colours::try_from(6), Ok(Colours::Blue));
+    assert_eq!(Colours::try_from(7), Err(Unrepresentable(7)));
+}
+```
+
+The above is equivalent to the following:
+
+```
+#[macro_use] extern crate conv;
+
+#[derive(Debug, PartialEq)]
+enum Colours {
+    Red = 0,
+    Green = 5,
+    Blue
+}
+
+TryFrom! { (i32) enum Colours {
+    Red = 0,
+    Green = 5,
+    Blue
+} }
+# fn main() {}
+```
+*/
+
+/**
+See the documentation for the [`macros`](./macros/index.html#tryfrom!) module for details.
+*/
+#[macro_export]
+macro_rules! TryFrom {
+    (($prim:ty) $(pub)* enum $name:ident { $($body:tt)* }) => {
+        TryFrom! {
+            @collect_variants ($name, $prim),
+            ($($body)*,) -> ()
+        }
+    };
+
+    (
+        @collect_variants ($name:ident, $prim:ty),
+        ($(,)*) -> ($($var_names:ident,)*)
+    ) => {
+        impl $crate::TryFrom<$prim> for $name {
+            type Err = $crate::errors::Unrepresentable<$prim>;
+            fn try_from(src: $prim) -> Result<$name, Self::Err> {
+                $(
+                    if src == $name::$var_names as $prim {
+                        return Ok($name::$var_names);
+                    }
+                )*
+                Err($crate::errors::Unrepresentable(src))
+            }
+        }
+    };
+
+    (
+        @collect_variants $fixed:tt,
+        (#[$_attr:meta] $($tail:tt)*) -> $var_names:tt
+    ) => {
+        TryFrom! {
+            @skip_meta $fixed,
+            ($($tail)*) -> $var_names
+        }
+    };
+
+    (
+        @collect_variants $fixed:tt,
+        ($var:ident $(= $_val:expr)*, $($tail:tt)*) -> ($($var_names:tt)*)
+    ) => {
+        TryFrom! {
+            @collect_variants $fixed,
+            ($($tail)*) -> ($($var_names)* $var,)
+        }
+    };
+
+    (
+        @collect_variants ($name:ident),
+        ($var:ident $_struct:tt, $($tail:tt)*) -> ($($var_names:tt)*)
+    ) => {
+        const _error: () = concat!(
+            "cannot derive TryFrom for ",
+            stringify!($name),
+            ", due to non-unitary variant ",
+            stringify!($var),
+            "."
+ ); + }; + + ( + @skip_meta $fixed:tt, + (#[$_attr:meta] $($tail:tt)*) -> $var_names:tt + ) => { + TryFrom! { + @skip_meta $fixed, + ($($tail)*) -> $var_names + } + }; + + ( + @skip_meta $fixed:tt, + ($var:ident $($tail:tt)*) -> $var_names:tt + ) => { + TryFrom! { + @collect_variants $fixed, + ($var $($tail)*) -> $var_names + } + }; +} diff --git a/src/vendor/conv/src/misc.rs b/src/vendor/conv/src/misc.rs new file mode 100644 index 0000000000..db80a532c6 --- /dev/null +++ b/src/vendor/conv/src/misc.rs @@ -0,0 +1,71 @@ +/*! +This module defines some additional traits not *directly* tied to conversions. +*/ + +/** +This trait indicates that values of a type can be logically "saturated". + +This is used by the `errors::UnwrapOrSaturate` extension trait. +*/ +pub trait Saturated { + /// Returns the type's saturated, maximum value. + fn saturated_max() -> Self; + + /// Returns the type's saturated, minimum value. + fn saturated_min() -> Self; +} + +item_for_each! { + (i8), (i16), (i32), (i64), (u8), (u16), (u32), (u64), (isize), (usize) => { + ($ity:ident) => { + impl Saturated for $ity { + #[inline] fn saturated_max() -> Self { ::std::$ity::MAX } + #[inline] fn saturated_min() -> Self { ::std::$ity::MIN } + } + }; + } +} + +/** +This trait indicates that a type has an "invalid" sentinel value. + +This is used by the `errors::UnwrapOrInvalid` extension trait. +*/ +pub trait InvalidSentinel { + /// Returns the type's "invalid" sentinel value. + fn invalid_sentinel() -> Self; +} + +item_for_each! { + (f32), (f64) => { + ($ity:ident) => { + impl InvalidSentinel for $ity { + #[inline] fn invalid_sentinel() -> Self { ::std::$ity::NAN } + } + }; + } +} + +/** +This trait indicates that a type has positive and negative "infinity" values. + +This is used by the `errors::UnwrapOrInf` extension trait. +*/ +pub trait SignedInfinity { + /// Returns the type's positive infinity value. + fn neg_infinity() -> Self; + + /// Returns the type's negative infinity value. + fn pos_infinity() -> Self; +} + +item_for_each! { + (f32), (f64) => { + ($ity:ident) => { + impl SignedInfinity for $ity { + #[inline] fn neg_infinity() -> Self { ::std::$ity::NEG_INFINITY } + #[inline] fn pos_infinity() -> Self { ::std::$ity::INFINITY } + } + }; + } +} diff --git a/src/vendor/conv/tests/conv_utils.rs b/src/vendor/conv/tests/conv_utils.rs new file mode 100644 index 0000000000..3444ab3182 --- /dev/null +++ b/src/vendor/conv/tests/conv_utils.rs @@ -0,0 +1,40 @@ +#[macro_use] extern crate conv; + +use conv::prelude::*; + +#[test] +fn test_approx() { + use conv::DefaultApprox; + assert_eq!((1.5f32).approx(), Ok(1i32)); + assert_eq!((1.5f32).approx_by::(), Ok(1)); + assert_eq!((1.5f32).approx_as::(), Ok(1)); + assert_eq!((1.5f32).approx_as_by::(), Ok(1)); +} + +#[test] +fn test_into() { + let v = "ABC".into_as::>(); + assert_eq!(&*v, &[0x41, 0x42, 0x43]); +} + +#[test] +fn test_try() { + #[derive(PartialEq, Debug)] enum ItAintRight { BabeNo, NoNo } + TryFrom! 
{ (u8) enum ItAintRight { BabeNo, NoNo } } + + assert_eq!(0u8.try_as::(), Ok(ItAintRight::BabeNo)); + assert_eq!(1u8.try_as::(), Ok(ItAintRight::NoNo)); + assert_eq!(2u8.try_as::(), Err(conv::Unrepresentable(2))); +} + +#[test] +fn test_value() { + assert_eq!((123u32).value_as::(), Ok(123)); +} + +#[test] +fn test_whizzo() { + use conv::errors::Unrepresentable; + assert_eq!((-1.0f32).approx_as::().saturate(), Ok::<_, Unrepresentable<_>>(0u8)); + assert_eq!((-1i32).value_as::().saturate().unwrap_ok(), 0u8); +} diff --git a/src/vendor/conv/tests/derive_try_from.rs b/src/vendor/conv/tests/derive_try_from.rs new file mode 100644 index 0000000000..f8e0c781f4 --- /dev/null +++ b/src/vendor/conv/tests/derive_try_from.rs @@ -0,0 +1,45 @@ +#[macro_use] extern crate conv; + +use conv::{TryFrom, Unrepresentable}; + +#[derive(Debug, PartialEq)] +enum Get { Up, Down, AllAround } + +TryFrom! { (u8) + enum Get { + Up, + /// And + Down, + /** And */ + AllAround + } +} + +#[derive(Debug, PartialEq)] +enum GottaGo { GetAway, Fast = 9000, Faster = 9001 } + +TryFrom! { (u16) + enum GottaGo { + GetAway, + Fast = 9000, + /// This show was stupid. + Faster = 9001 + } +} + +#[test] +fn test_try_from() { + assert_eq!(Get::try_from(0u8), Ok(Get::Up)); + assert_eq!(Get::try_from(1u8), Ok(Get::Down)); + assert_eq!(Get::try_from(2u8), Ok(Get::AllAround)); + assert_eq!(Get::try_from(3u8), Err(Unrepresentable(3u8))); + + assert_eq!(GottaGo::try_from(0u16), Ok(GottaGo::GetAway)); + assert_eq!(GottaGo::try_from(1u16), Err(Unrepresentable(1u16))); + assert_eq!(GottaGo::try_from(2u16), Err(Unrepresentable(2u16))); + assert_eq!(GottaGo::try_from(3u16), Err(Unrepresentable(3u16))); + assert_eq!(GottaGo::try_from(8999u16), Err(Unrepresentable(8999u16))); + assert_eq!(GottaGo::try_from(9000u16), Ok(GottaGo::Fast)); + assert_eq!(GottaGo::try_from(9001u16), Ok(GottaGo::Faster)); + assert_eq!(GottaGo::try_from(9002u16), Err(Unrepresentable(9002u16))); +} diff --git a/src/vendor/conv/tests/lang_char.rs b/src/vendor/conv/tests/lang_char.rs new file mode 100644 index 0000000000..88a9320743 --- /dev/null +++ b/src/vendor/conv/tests/lang_char.rs @@ -0,0 +1,121 @@ +extern crate conv; + +#[macro_use] mod util; + +use conv::*; + +use conv::PosOverflow as Of; +use conv::Unrepresentable as Ur; + +macro_rules! 
check { + (@ $from:ty, $to:ty=> $(;)*) => {}; + + (@ $from:ty, $to:ty=> try cident; $($tail:tt)*) => { + check!(@ $from, $to=> try v: '\x00';); + check!(@ $from, $to=> try v: '\x01';); + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> try uident; $($tail:tt)*) => { + check!(@ $from, $to=> try v: 0;); + check!(@ $from, $to=> try v: 1;); + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> try v: $src:expr, !$dst:expr; $($tail:tt)*) => { + { + let src: $from = $src; + let dst: Result<$to, _> = src.try_into(); + assert_eq!(dst, Err($dst(src))); + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> try v: $src:expr; $($tail:tt)*) => { + { + let src: $from = $src; + let dst: Result<$to, _> = src.try_into(); + assert_eq!(dst, Ok($src as $to)); + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> qt: *; $($tail:tt)*) => { + { + extern crate quickcheck; + + fn property(v: $from) -> bool { + let dst: Result<$to, _> = v.try_into(); + dst == Ok(v as $to) + } + + let mut qc = quickcheck::QuickCheck::new(); + match qc.quicktest(property as fn($from) -> bool) { + Ok(_) => (), + Err(err) => panic!("qv {:?}", err) + } + } + check!(@ $from, $to=> $($tail)*); + }; + + ($from:ty, $to:ty=> $($tail:tt)*) => { + check! { @ $from, $to=> $($tail)*; } + }; +} + +#[test] +fn test_i_to_c() { + check!(u8, char => try uident; qt: *); + + /* + `char` is a pain because `u8` is the *only* type you can cast directly from. So, the `check!` macro is *basically useless*. + + Also, `char` has a great big hole in the middle, which makes things more interesting. + + Instead, we're just going to make sure that the conversions *exist* and have the expected error type. + */ + macro_rules! check_i_to_c { + ($($ts:ty),* $(,)*) => { + $( + { + let v: $ts = 0; + let r: Result> = TryFrom::try_from(v); + assert_eq!(r, Ok('\x00')); + } + )* + }; + } + check_i_to_c!(i8, i16, i32, i64, isize, u16, u32, u64, usize); +} + +#[test] +fn test_c_to_i() { + check!(char, i8=> try cident; + try v: '\u{80}', !Of; + ); + check!(char, i16=> try cident; + try v: '\u{8000}', !Of; + ); + check!(char, i32=> try cident;); + check!(char, i64=> try cident;); + check!(char, u8=> try cident; + try v: '\u{100}', !Of; + ); + check!(char, u16=> try cident; + try v: '\u{10000}', !Of; + ); + check!(char, u32=> try cident;); + check!(char, u64=> try cident;); + for_bitness! 
{ + 32 { + check!(char, isize=> try cident; + try v: '\u{10ffff}'; + ); + check!(char, usize=> try cident;); + } + 64 { + check!(char, i64=> try cident;); + check!(char, u64=> try cident;); + } + } +} diff --git a/src/vendor/conv/tests/lang_floats.rs b/src/vendor/conv/tests/lang_floats.rs new file mode 100644 index 0000000000..9ec1a416d2 --- /dev/null +++ b/src/vendor/conv/tests/lang_floats.rs @@ -0,0 +1,57 @@ +extern crate conv; + +#[macro_use] mod util; + +use conv::*; + +use conv::FloatError::NegOverflow as FU; +use conv::FloatError::PosOverflow as FO; + +#[test] +fn test_f32() { + check!(f32, f32=> fident; qv: *;); + check!(f32, f64=> fident; qv: *;); +} + +#[test] +fn test_f32_to_int() { + check!(f32, i8=> sidenta; qa: i8=> a: -129.0, !FU; a: 128.0, !FO;); + check!(f32, i16=> sidenta; qa: i16=> a: -32_769.0, !FU; a: 32_768.0, !FO;); + check!(f32, i32=> sidenta; qa: i32=> + a: -2.1474836e9, -2147483648; a: 2.1474835e9, 2147483520; + a: -2_147_500_000.0, !FU; a: 2_147_500_000.0, !FO;); + check!(f32, i64=> sidenta; qa: i64=> + a: -9.223372e18, -9223372036854775808; a: 9.2233715e18, 9223371487098961920; + a: -9_223_373_000_000_000_000.0, !FU; a: 9_223_373_000_000_000_000.0, !FO;); + check!(f32, u8=> uidenta; qa: u8=> a: -1.0, !FU; a: 256.0, !FO;); + check!(f32, u16=> uidenta; qa: u16=> a: -1.0, !FU; a: 65_536.0, !FO;); + check!(f32, u32=> uidenta; qa: u32=> + a: 4.294967e9, 4294967040; + a: -1.0, !FU; a: 4_294_968_000.0, !FO;); + check!(f32, u64=> uidenta; qa: u64=> + a: 1.8446743e19, 18446742974197923840; + a: -1.0, !FU; a: 18_446_746_000_000_000_000.0, !FO;); +} + +#[test] +fn test_f64_to_int() { + check!(f64, i8=> sidenta; qa: i8=> a: -129.0, !FU; a: 128.0, !FO;); + check!(f64, i16=> sidenta; qa: i16=> a: -32_769.0, !FU; a: 32_768.0, !FO;); + check!(f64, i32=> sidenta; qa: i32=> a: -2_147_483_649.0, !FU; a: 2_147_483_648.0, !FO;); + check!(f64, i64=> sidenta; qa: i64=> + a: -9.223372036854776e18, -9223372036854775808; + a: 9.223372036854775e18, 9223372036854774784; + a: -9_223_372_036_854_778_000.0, !FU; a: 9_223_372_036_854_778_000.0, !FO;); + check!(f64, u8=> uidenta; qa: u8=> a: -1.0, !FU; a: 256.0, !FO;); + check!(f64, u16=> uidenta; qa: u16=> a: -1.0, !FU; a: 65_536.0, !FO;); + check!(f64, u32=> uidenta; qa: u32=> a: -1.0, !FU; a: 4_294_967_296.0, !FO;); + check!(f64, u64=> uidenta; qa: u64=> + a: 1.844674407370955e19; + a: -1.0, !FU; a: 18_446_744_073_709_560_000.0, !FO;); +} + +#[test] +fn test_f64() { + check!(f64, f32=> fidenta; qa: *;); + check!(f64, f64=> fident; qv: *;); +} diff --git a/src/vendor/conv/tests/lang_ints.rs b/src/vendor/conv/tests/lang_ints.rs new file mode 100644 index 0000000000..f8f63a7cae --- /dev/null +++ b/src/vendor/conv/tests/lang_ints.rs @@ -0,0 +1,395 @@ +extern crate conv; + +#[macro_use] mod util; + +use conv::*; + +use conv::NegOverflow as Uf; +use conv::PosOverflow as Of; +use conv::RangeError::NegOverflow as RU; +use conv::RangeError::PosOverflow as RO; + +#[test] +fn test_i8() { + check!(i8, i8=> sident; qv: *; qa: *; qaW: *); + check!(i8, i16=> sident; qv: *; qa: *; qaW: *); + check!(i8, i32=> sident; qv: *; qa: *; qaW: *); + check!(i8, i64=> sident; qv: *; qa: *; qaW: *); + check!(i8, u8=> uident; qv: +; qa: +; qaW: *; + v: -1, !Uf; + ); + check!(i8, u16=> uident; qv: +; qa: +; qaW: *; + v: -1, !Uf; + ); + check!(i8, u32=> uident; qv: +; qa: +; qaW: *; + v: -1, !Uf; + ); + check!(i8, u64=> uident; qv: +; qa: +; qaW: *; + v: -1, !Uf; + ); + check!(i8, isize=> sident; qv: *; qa: *; qaW: *); + check!(i8, usize=> uident; qv: +; qa: +; qaW: *; + 
v: -1, !Uf; + ); +} + +#[test] +fn test_i16() { + check!(i16, i8=> sident; qv: i8=> qa: i8=> qaW: *; + v: -129, !RU; v: 128, !RO; + ); + check!(i16, i16=> sident; qv: *; qa: *; qaW: *); + check!(i16, i32=> sident; qv: *; qa: *; qaW: *); + check!(i16, i64=> sident; qv: *; qa: *; qaW: *); + check!(i16, u8=> uident; qv: u8=> qa: +; qaW: *; + v: -1, !RU; + ); + check!(i16, u16=> uident; qv: u16, i16=> qa: +; qaW: *; + v: -1, !Uf; + ); + check!(i16, u32=> uident; qv: +; qa: +; qaW: *; + v: -1, !Uf; + ); + check!(i16, u64=> uident; qv: +; qa: +; qaW: *; + v: -1, !Uf; + ); + check!(i16, isize=> sident; qv: *; qa: *; qaW: *); + check!(i16, usize=> uident; qv: +; qa: +; qaW: *; + v: -1, !Uf; + ); +} + +#[test] +fn test_i32() { + check!(i32, i8=> sident; qv: i8=> qa: i8=> qaW: *; + v: -129, !RU; v: 128, !RO; + ); + check!(i32, i16=> sident; qv: i16=> qa: i16=> qaW: *; + v: -32_769, !RU; v: 32_768, !RO; + ); + check!(i32, i32=> sident; qv: *; qa: *; qaW: *); + check!(i32, i64=> sident; qv: *; qa: *; qaW: *); + check!(i32, u8=> uident; qv: u8=> qa: u8=> qaW: *; + v: -1, !RU; + ); + check!(i32, u16=> uident; qv: u16=> qa: u16=> qaW: *; + v: -1, !RU; + ); + check!(i32, u32=> uident; qv: +; qa: +; qaW: *; + v: -1, !Uf; + ); + check!(i32, u64=> uident; qv: +; qa: +; qaW: *; + v: -1, !Uf; + ); + for_bitness! { + 32 { + check!(i32, isize=> sident; qv: *; qa: *; qaW: *); + check!(i32, usize=> uident; qv: +; qa: +; qaW: *; + v: -1, !Uf; + ); + } + 64 { + check!(i32, isize=> sident; qv: *; qa: *; qaW: *); + check!(i32, usize=> uident; qv: +; qa: +; qaW: *; + v: -1, !Uf; + ); + } + } +} + +#[test] +fn test_i64() { + check!(i64, i8=> sident; qv: i8=> qa: i8=> qaW: *; + v: -129, !RU; v: 128, !RO; + ); + check!(i64, i16=> sident; qv: i16=> qa: i16=> qaW: *; + v: -32_769, !RU; v: 32_768, !RO; + ); + check!(i64, i32=> sident; qv: i32=> qa: i32=> qaW: *; + v: -2_147_483_649, !RU; v: 2_147_483_648, !RO; + ); + check!(i64, i64=> sident; qv: *; qa: *; qaW: *; + ); + check!(i64, u8=> uident; qv: u8=> qa: u8=> qaW: *; + v: -1, !RU; + ); + check!(i64, u16=> uident; qv: u16=> qa: u16=> qaW: *; + v: -1, !RU; + ); + check!(i64, u32=> uident; qv: u32=> qa: u32=> qaW: *; + v: -1, !RU; + ); + check!(i64, u64=> uident; qv: +; qa: +; qaW: *; + v: -1, !Uf; + ); + for_bitness! 
{ + 32 { + check!(i64, isize=> sident; qv: isize=> qa: isize=> qaW: *; + v: -2_147_483_649, !RU; v: 2_147_483_648, !RO; + ); + check!(i64, usize=> uident; qv: usize=> qa: usize=> qaW: *; + v: -1, !RU; v: 4_294_967_296, !RO; + ); + } + 64 { + check!(i64, isize=> sident; qv: *; qa: *; qaW: *; + ); + check!(i64, usize=> uident; qv: +; qa: +; qaW: *; + v: -1, !Uf; + ); + } + } +} + +#[test] +fn test_u8() { + check!(u8, i8=> uident; qv: +i8=> qa: +i8=> qaW: *; + v: 127; v: 128, !Of; + ); + check!(u8, i16=> uident; qv: *; qa: *; qaW: *); + check!(u8, i32=> uident; qv: *; qa: *; qaW: *); + check!(u8, i64=> uident; qv: *; qa: *; qaW: *); + check!(u8, u8=> uident; qv: *; qa: *; qaW: *); + check!(u8, u16=> uident; qv: *; qa: *; qaW: *); + check!(u8, u32=> uident; qv: *; qa: *; qaW: *); + check!(u8, u64=> uident; qv: *; qa: *; qaW: *); + check!(u8, isize=> uident; qv: *; qa: *; qaW: *); + check!(u8, usize=> uident; qv: *; qa: *; qaW: *); +} + +#[test] +fn test_u16() { + check!(u16, i8=> uident; qv: +i8=> qa: +i8=> qaW: *; + v: 128, !Of; + ); + check!(u16, i16=> uident; qv: +i16=> qa: +i16=> qaW: *; + v: 32_768, !Of; + ); + check!(u16, i32=> uident; qv: *; qa: *; qaW: *); + check!(u16, i64=> uident; qv: *; qa: *; qaW: *); + check!(u16, u8=> uident; qv: u8=> qa: u8=> qaW: *; + v: 256, !Of; + ); + check!(u16, u16=> uident; qv: *; qa: *; qaW: *); + check!(u16, u32=> uident; qv: *; qa: *; qaW: *); + check!(u16, u64=> uident; qv: *; qa: *; qaW: *); + check!(u16, isize=> uident; qv: *; qa: *; qaW: *); + check!(u16, usize=> uident; qv: *; qa: *; qaW: *); +} + +#[test] +fn test_u32() { + check!(u32, i8=> uident; qv: +i8=> qa: +i8=> qaW: *; + v: 128, !Of; + ); + check!(u32, i16=> uident; qv: +i16=> qa: +i16=> qaW: *; + v: 32_768, !Of; + ); + check!(u32, i32=> uident; qv: +i32=> qa: +i32=> qaW: *; + v: 2_147_483_648, !Of; + ); + check!(u32, i64=> uident; qv: *; qa: *; qaW: *); + check!(u32, u8=> uident; qv: u8=> qa: u8=> qaW: *; + v: 256, !Of; + ); + check!(u32, u16=> uident; qv: u16=> qa: u16=> qaW: *; + v: 65_536, !Of; + ); + check!(u32, u32=> uident; qv: *; qa: *; qaW: *); + check!(u32, u64=> uident; qv: *; qa: *; qaW: *); + for_bitness! { + 32 { + check!(u32, isize=> uident; qv: +isize=> qa: +isize=> qaW: *; + v: 2_147_483_647; v: 2_147_483_648, !Of; + ); + check!(u32, usize=> uident; qv: *; qa: *; qaW: *); + } + 64 { + check!(u32, isize=> uident; qv: *; qa: *; qaW: *); + check!(u32, usize=> uident; qv: *; qa: *; qaW: *); + } + } +} + +#[test] +fn test_u64() { + check!(u64, i8=> uident; qv: +i8=> qa: +i8=> qaW: *; + v: 128, !Of; + ); + check!(u64, i16=> uident; qv: +i16=> qa: +i16=> qaW: *; + v: 32_768, !Of; + ); + check!(u64, i32=> uident; qv: +i32=> qa: +i32=> qaW: *; + v: 2_147_483_648, !Of; + ); + check!(u64, i64=> uident; qv: +i64=> qa: +i64=> qaW: *; + v: 9_223_372_036_854_775_808, !Of; + ); + check!(u64, u8=> uident; qv: u8=> qa: u8=> qaW: *; + v: 256, !Of; + ); + check!(u64, u16=> uident; qv: u16=> qa: u16=> qaW: *; + v: 65_536, !Of; + ); + check!(u64, u32=> uident; qv: u32=> qa: u32=> qaW: *; + v: 4_294_967_296, !Of; + ); + check!(u64, u64=> uident; qv: *; qa: *; qaW: *); + for_bitness! 
{ + 32 { + check!(u64, isize=> uident; qv: +isize=> qa: +isize=> qaW: *; + v: 2_147_483_648, !Of; + ); + check!(u64, usize=> uident; qv: usize=> qa: usize=> qaW: *; + v: 4_294_967_296, !Of; + ); + } + 64 { + check!(u64, isize=> uident; qv: +i64=> qa: +i64=> qaW: *; + v: 9_223_372_036_854_775_808, !Of; + ); + check!(u64, usize=> uident; qv: *; qa: *; qaW: *); + } + } +} + +#[test] +fn test_isize() { + check!(isize, i8=> sident; qv: i8=> qa: i8=> qaW: *; + v: -129, !RU; v: 128, !RO; + ); + check!(isize, i16=> sident; qv: i16=> qa: i16=> qaW: *; + v: -32_769, !RU; v: 32_768, !RO; + ); + check!(isize, u8=> uident; qv: u8=> qa: u8=> qaW: *; + v: -1, !RU; v: 256, !RO; + ); + check!(isize, u16=> uident; qv: u16=> qa: u16=> qaW: *; + v: -1, !RU; v: 65_536, !RO; + ); + check!(isize, isize=> sident; qv: *; qa: *; qaW: *); + for_bitness! { + 32 { + check!(isize, i32=> sident; qv: *; qa: *; qaW: *); + check!(isize, i64=> sident; qv: *; qa: *; qaW: *); + check!(isize, u32=> uident; qv: +; qa: +; qaW: *; + v: -1, !Uf; + ); + check!(isize, u64=> uident; qv: +; qa: +; qaW: *; + v: -1, !Uf; + ); + check!(isize, usize=> uident; qv: +; qa: +; qaW: *; + v: -1, !Uf; + ); + } + 64 { + check!(isize, i32=> sident; qv: *; qa: *; qaW: *); + check!(isize, i64=> sident; qv: *; qa: *; qaW: *); + check!(isize, u32=> uident; qv: u32=> qa: u32=> qaW: *; + v: -1, !RU; v: 4_294_967_296, !RO; + ); + check!(isize, u64=> uident; qv: +; qa: +; qaW: *; + v: -1, !Uf; + ); + check!(isize, usize=> uident; qv: +; qa: +; qaW: *; + v: -1, !Uf; + ); + } + } +} + +#[test] +fn test_usize() { + check!(usize, i8=> uident; qv: +i8=> qa: +i8=> qaW: *; + v: 128, !Of; + ); + check!(usize, i16=> uident; qv: +i16=> qa: +i16=> qaW: *; + v: 32_768, !Of; + ); + check!(usize, u8=> uident; qv: u8=> qa: u8=> qaW: *; + v: 256, !Of; + ); + check!(usize, u16=> uident; qv: u16=> qa: u16=> qaW: *; + v: 65_536, !Of; + ); + check!(usize, usize=> uident; qv: *; qa: *; qaW: *); + for_bitness! 
{ + 32 { + check!(usize, i32=> uident; qv: +i32=> qa: +i32=> qaW: *); + check!(usize, i64=> uident; qv: *; qa: *; qaW: *); + check!(usize, u32=> uident; qv: *; qa: *; qaW: *); + check!(usize, u64=> uident; qv: *; qa: *; qaW: *); + check!(usize, isize=> uident; qv: +isize=> qa: +isize=> qaW: *); + } + 64 { + check!(usize, i32=> uident; qv: +i32=> qa: +i32=> qaW: *); + check!(usize, i64=> uident; qv: +i64=> qa: +i64=> qaW: *); + check!(usize, u32=> uident; qv: u32=> qa: u32=> qaW: *; + v: 4_294_967_296, !Of; + ); + check!(usize, u64=> uident; qv: *; qa: *; qaW: *); + check!(usize, isize=> uident; qv: +isize=> qa: +isize=> qaW: *); + } + } +} + +#[test] +fn test_i_to_f() { + check!(i8, f32=> sident; qv: *; qa: *); + check!(i16, f32=> sident; qv: *; qa: *); + check!(i32, f32=> sident; qv: (+-16_777_216); qa: *; + v: -16_777_217, !RU; v: 16_777_217, !RO; + ); + check!(i64, f32=> sident; qv: (+-16_777_216); qa: *; + v: -16_777_217, !RU; v: 16_777_217, !RO; + ); + check!(isize, f32=> sident; qv: (+-16_777_216); qa: *; + v: -16_777_217, !RU; v: 16_777_217, !RO; + ); + + check!(u8, f32=> uident; qv: *; qa: *); + check!(u16, f32=> uident; qv: *; qa: *); + check!(u32, f32=> uident; qv: (, 16_777_216); qa: *; + v: 16_777_217, !Of; + ); + check!(u64, f32=> uident; qv: (, 16_777_216); qa: *; + v: 16_777_217, !Of; + ); + check!(usize, f32=> uident; qv: (, 16_777_216); qa: *; + v: 16_777_217, !Of; + ); + + check!(i8, f64=> sident; qv: *; qa: *); + check!(i16, f64=> sident; qv: *; qa: *); + check!(i32, f64=> sident; qv: *; qa: *); + check!(i64, f64=> sident; qv: (+-9_007_199_254_740_992); qa: *; + v: -9_007_199_254_740_993, !RU; v: 9_007_199_254_740_993, !RO; + ); + for_bitness! { + 32 { + check!(isize, f64=> sident; qv: *; qa: *); + } + 64 { + check!(i64, f64=> sident; qv: (+-9_007_199_254_740_992); qa: *; + v: -9_007_199_254_740_993, !RU; v: 9_007_199_254_740_993, !RO; + ); + } + } + + check!(u8, f64=> uident; qv: *; qa: *); + check!(u16, f64=> uident; qv: *; qa: *); + check!(u32, f64=> uident; qv: *; qa: *); + check!(u64, f64=> uident; qv: (, 9_007_199_254_740_992); qa: *; + v: 9_007_199_254_740_993, !Of; + ); + for_bitness! { + 32 { + check!(usize, f64=> uident; qv: *; qa: *); + } + 64 { + check!(u64, f64=> uident; qv: (, 9_007_199_254_740_992); qa: *; + v: 9_007_199_254_740_993, !Of; + ); + } + } +} diff --git a/src/vendor/conv/tests/unwraps.rs b/src/vendor/conv/tests/unwraps.rs new file mode 100644 index 0000000000..921dec7c4e --- /dev/null +++ b/src/vendor/conv/tests/unwraps.rs @@ -0,0 +1,31 @@ +extern crate conv; + +#[macro_use] mod util; + +use conv::*; + +macro_rules! 
cty { + ($e:expr, $t:ty) => { + { let v: $t = $e; v } + }; +} + +#[test] +fn test_unwraps() { + assert_eq!(cty!(0i16.value_into().unwrap(), i32), 0); + assert_eq!(cty!(127i16.value_into().unwrap(), i8), 127); + assert_eq!(cty!(128i16.value_into().unwrap_or_saturate(), i8), 127); + assert_eq!(cty!(128i16.approx().unwrap_or_saturate(), i8), 127); + assert_eq!(cty!(128i16.approx_by::().unwrap_or_saturate(), i8), -128); + + assert_eq!(cty!(16_777_216i32.value_into().unwrap(), f32), 16_777_216.0); + assert_eq!(cty!(16_777_216i32.value_into().unwrap_or_inf(), f32), 16_777_216.0); + assert_eq!(cty!(16_777_217i32.value_into().unwrap_or_inf(), f32), std::f32::INFINITY); + assert_eq!(cty!((-16_777_217i32).value_into().unwrap_or_inf(), f32), std::f32::NEG_INFINITY); + + assert_eq!(cty!(16_777_216i32.value_into().unwrap_or_invalid(), f32), 16_777_216.0); + assert!(cty!(16_777_217i32.value_into().unwrap_or_invalid(), f32).is_nan()); + assert!(cty!((-16_777_217i32).value_into().unwrap_or_invalid(), f32).is_nan()); + + assert_eq!(cty!(0u8.value_into().unwrap_ok(), u16), 0); +} diff --git a/src/vendor/conv/tests/use_in_generics.rs b/src/vendor/conv/tests/use_in_generics.rs new file mode 100644 index 0000000000..9400dacc5c --- /dev/null +++ b/src/vendor/conv/tests/use_in_generics.rs @@ -0,0 +1,14 @@ +//! Are conversions easily usable in generic code? +extern crate conv; + +use conv::prelude::*; + +#[test] +fn test_generic_unwrap() { + fn do_conv(t: T) -> U + where T: ValueInto { + t.value_into().unwrap() + } + + assert_eq!({let x: u8 = do_conv(42i32); x}, 42u8); +} diff --git a/src/vendor/conv/tests/util/mod.rs b/src/vendor/conv/tests/util/mod.rs new file mode 100644 index 0000000000..9bb5abfc58 --- /dev/null +++ b/src/vendor/conv/tests/util/mod.rs @@ -0,0 +1,509 @@ +macro_rules! SL { + ($($tts:tt)*) => { stringify!($($tts)*) }; +} + +macro_rules! as_expr { + ($e:expr) => {$e}; +} + +macro_rules! 
check { + (@ $from:ty, $to:ty=> $(;)*) => {}; + + (@ $from:ty, $to:ty=> cident; $($tail:tt)*) => { + check!(@ $from, $to=> v: '\x00';); + check!(@ $from, $to=> v: '\x01';); + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> uident; $($tail:tt)*) => { + check!(@ $from, $to=> v: 0;); + check!(@ $from, $to=> v: 1;); + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> sident; $($tail:tt)*) => { + check!(@ $from, $to=> v: -1;); + check!(@ $from, $to=> v: 0;); + check!(@ $from, $to=> v: 1;); + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> fident; $($tail:tt)*) => { + check!(@ $from, $to=> v: -1.0;); + check!(@ $from, $to=> v: 0.0;); + check!(@ $from, $to=> v: 1.0;); + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> uidenta; $($tail:tt)*) => { + check!(@ $from, $to=> a: 0.0;); + check!(@ $from, $to=> a: 1.0;); + + check!(@ $from, $to=> aRTN: 0.00, 0;); + check!(@ $from, $to=> aRTN: 0.25, 0;); + check!(@ $from, $to=> aRTN: 0.50, 1;); + check!(@ $from, $to=> aRTN: 0.75, 1;); + check!(@ $from, $to=> aRTN: 1.00, 1;); + + check!(@ $from, $to=> aRNI: 0.00, 0;); + check!(@ $from, $to=> aRNI: 0.25, 0;); + check!(@ $from, $to=> aRNI: 0.50, 0;); + check!(@ $from, $to=> aRNI: 0.75, 0;); + check!(@ $from, $to=> aRNI: 1.00, 1;); + + check!(@ $from, $to=> aRPI: 0.00, 0;); + check!(@ $from, $to=> aRPI: 0.25, 1;); + check!(@ $from, $to=> aRPI: 0.50, 1;); + check!(@ $from, $to=> aRPI: 0.75, 1;); + check!(@ $from, $to=> aRPI: 1.00, 1;); + + check!(@ $from, $to=> aRTZ: 0.00, 0;); + check!(@ $from, $to=> aRTZ: 0.25, 0;); + check!(@ $from, $to=> aRTZ: 0.50, 0;); + check!(@ $from, $to=> aRTZ: 0.75, 0;); + check!(@ $from, $to=> aRTZ: 1.00, 1;); + + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> sidenta; $($tail:tt)*) => { + check!(@ $from, $to=> a: -1.0;); + check!(@ $from, $to=> a: 0.0;); + check!(@ $from, $to=> a: 1.0;); + + check!(@ $from, $to=> aRTN: -1.00, -1;); + check!(@ $from, $to=> aRTN: -0.75, -1;); + check!(@ $from, $to=> aRTN: -0.50, -1;); + check!(@ $from, $to=> aRTN: -0.25, 0;); + check!(@ $from, $to=> aRTN: 0.00, 0;); + check!(@ $from, $to=> aRTN: 0.25, 0;); + check!(@ $from, $to=> aRTN: 0.50, 1;); + check!(@ $from, $to=> aRTN: 0.75, 1;); + check!(@ $from, $to=> aRTN: 1.00, 1;); + + check!(@ $from, $to=> aRNI: -1.00, -1;); + check!(@ $from, $to=> aRNI: -0.75, -1;); + check!(@ $from, $to=> aRNI: -0.50, -1;); + check!(@ $from, $to=> aRNI: -0.25, -1;); + check!(@ $from, $to=> aRNI: 0.00, 0;); + check!(@ $from, $to=> aRNI: 0.25, 0;); + check!(@ $from, $to=> aRNI: 0.50, 0;); + check!(@ $from, $to=> aRNI: 0.75, 0;); + check!(@ $from, $to=> aRNI: 1.00, 1;); + + check!(@ $from, $to=> aRPI: -1.00, -1;); + check!(@ $from, $to=> aRPI: -0.75, 0;); + check!(@ $from, $to=> aRPI: -0.50, 0;); + check!(@ $from, $to=> aRPI: -0.25, 0;); + check!(@ $from, $to=> aRPI: 0.00, 0;); + check!(@ $from, $to=> aRPI: 0.25, 1;); + check!(@ $from, $to=> aRPI: 0.50, 1;); + check!(@ $from, $to=> aRPI: 0.75, 1;); + check!(@ $from, $to=> aRPI: 1.00, 1;); + + check!(@ $from, $to=> aRTZ: -1.00, -1;); + check!(@ $from, $to=> aRTZ: -0.75, 0;); + check!(@ $from, $to=> aRTZ: -0.50, 0;); + check!(@ $from, $to=> aRTZ: -0.25, 0;); + check!(@ $from, $to=> aRTZ: 0.00, 0;); + check!(@ $from, $to=> aRTZ: 0.25, 0;); + check!(@ $from, $to=> aRTZ: 0.50, 0;); + check!(@ $from, $to=> aRTZ: 0.75, 0;); + check!(@ $from, $to=> aRTZ: 1.00, 1;); + + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> fidenta; $($tail:tt)*) => { + check!(@ $from, $to=> a: -1.0;); + 
check!(@ $from, $to=> a: 0.0;); + check!(@ $from, $to=> a: 1.0;); + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> v: $src:expr, !$dst:expr; $($tail:tt)*) => { + { + println!("? {} => {}, v: {}, !{}", SL!($from), SL!($to), SL!($src), SL!($dst)); + let src: $from = $src; + let dst: Result<$to, _> = src.value_into(); + assert_eq!(dst, Err($dst(src))); + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> v: $src:expr; $($tail:tt)*) => { + { + println!("? {} => {}, v: {}", SL!($from), SL!($to), SL!($src)); + let src: $from = $src; + let dst: Result<$to, _> = src.value_into(); + assert_eq!(dst, Ok($src as $to)); + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> qv: *; $($tail:tt)*) => { + { + extern crate quickcheck; + println!("? {} => {}, qv: *", SL!($from), SL!($to)); + + fn property(v: $from) -> bool { + let dst: Result<$to, _> = v.value_into(); + dst == Ok(v as $to) + } + + let mut qc = quickcheck::QuickCheck::new(); + match qc.quicktest(property as fn($from) -> bool) { + Ok(_) => (), + Err(err) => panic!("qv {:?}", err) + } + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> qv: (+-$bound:expr); $($tail:tt)*) => { + { + extern crate quickcheck; + println!("? {} => {}, qv: (+- {})", SL!($from), SL!($to), SL!($bound)); + + fn property(v: $from) -> bool { + let dst: Result<$to, conv::FloatError<_>> = v.value_into().map_err(From::from); + if !(-$bound as $from <= v) { + dst == Err(conv::FloatError::NegOverflow(v)) + } else if !(v <= $bound as $from) { + dst == Err(conv::FloatError::PosOverflow(v)) + } else { + dst == Ok(v as $to) + } + } + + let mut qc = quickcheck::QuickCheck::new(); + match qc.quicktest(property as fn($from) -> bool) { + Ok(_) => (), + Err(err) => panic!("qv {:?}", err) + } + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> qv: (, $bound:expr); $($tail:tt)*) => { + { + extern crate quickcheck; + println!("? {} => {}, qv: (, {})", SL!($from), SL!($to), SL!($bound)); + + fn property(v: $from) -> bool { + let dst: Result<$to, conv::FloatError<_>> = v.value_into().map_err(From::from); + if !(v <= $bound as $from) { + dst == Err(conv::FloatError::PosOverflow(v)) + } else { + dst == Ok(v as $to) + } + } + + let mut qc = quickcheck::QuickCheck::new(); + match qc.quicktest(property as fn($from) -> bool) { + Ok(_) => (), + Err(err) => panic!("qv {:?}", err) + } + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> qv: +; $($tail:tt)*) => { + { + extern crate quickcheck; + println!("? {} => {}, qv: +", SL!($from), SL!($to)); + + fn property(v: $from) -> bool { + let dst: Result<$to, conv::FloatError<_>> = v.value_into().map_err(From::from); + if !(0 <= v) { + dst == Err(conv::FloatError::NegOverflow(v)) + } else { + dst == Ok(v as $to) + } + } + + let mut qc = quickcheck::QuickCheck::new(); + match qc.quicktest(property as fn($from) -> bool) { + Ok(_) => (), + Err(err) => panic!("qv {:?}", err) + } + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> qv: +$max:ty=> $($tail:tt)*) => { + { + extern crate quickcheck; + println!("? 
{} => {}, qv: +{}", SL!($from), SL!($to), SL!($max)); + + fn property(v: $from) -> bool { + let dst: Result<$to, conv::FloatError<_>> = v.value_into().map_err(From::from); + if !(v <= <$max>::max_value() as $from) { + dst == Err(conv::FloatError::PosOverflow(v)) + } else { + dst == Ok(v as $to) + } + } + + let mut qc = quickcheck::QuickCheck::new(); + match qc.quicktest(property as fn($from) -> bool) { + Ok(_) => (), + Err(err) => panic!("qv {:?}", err) + } + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> qv: $bound:ty=> $($tail:tt)*) => { + { + extern crate quickcheck; + println!("? {} => {}, qv: {}", SL!($from), SL!($to), SL!($bound)); + + fn property(v: $from) -> bool { + let dst: Result<$to, conv::FloatError<_>> = v.value_into().map_err(From::from); + if !(<$bound>::min_value() as $from <= v) { + dst == Err(conv::FloatError::NegOverflow(v)) + } else if !(v <= <$bound>::max_value() as $from) { + dst == Err(conv::FloatError::PosOverflow(v)) + } else { + dst == Ok(v as $to) + } + } + + let mut qc = quickcheck::QuickCheck::new(); + match qc.quicktest(property as fn($from) -> bool) { + Ok(_) => (), + Err(err) => panic!("qv {:?}", err) + } + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> qv: $min:ty, $max:ty=> $($tail:tt)*) => { + { + extern crate quickcheck; + println!("? {} => {}, qv: {}, {}", SL!($from), SL!($to), SL!($min), SL!($max)); + + fn property(v: $from) -> bool { + let dst: Result<$to, conv::FloatError<_>> = v.value_into().map_err(From::from); + if !(<$min>::min_value() as $from <= v) { + dst == Err(conv::FloatError::NegOverflow(v)) + } else if !(v <= <$max>::max_value() as $from) { + dst == Err(conv::FloatError::PosOverflow(v)) + } else { + dst == Ok(v as $to) + } + } + + let mut qc = quickcheck::QuickCheck::new(); + match qc.quicktest(property as fn($from) -> bool) { + Ok(_) => (), + Err(err) => panic!("qv {:?}", err) + } + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> a: $src:expr, !$dst:expr; $($tail:tt)*) => { + { + println!("? {} => {}, a: {}, !{}", SL!($from), SL!($to), SL!($src), SL!($dst)); + let src: $from = $src; + let dst: Result<$to, _> = src.approx_as(); + assert_eq!(dst, Err($dst(src))); + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> a: $src:expr, $dst:expr; $($tail:tt)*) => { + { + println!("? {} => {}, a: {}, {}", SL!($from), SL!($to), SL!($src), SL!($dst)); + let src: $from = $src; + let dst: Result<$to, _> = src.approx_as(); + assert_eq!(dst, Ok($dst)); + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> a: $src:expr; $($tail:tt)*) => { + { + println!("? {} => {}, a: {}", SL!($from), SL!($to), SL!($src)); + let src: $from = $src; + let dst: Result<$to, _> = src.approx_as(); + assert_eq!(dst, Ok($src as $to)); + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> qa: *; $($tail:tt)*) => { + { + println!("? {} => {}, qa: *", SL!($from), SL!($to)); + extern crate quickcheck; + + fn property(v: $from) -> bool { + let dst: Result<$to, _> = v.approx_as(); + dst == Ok(v as $to) + } + + let mut qc = quickcheck::QuickCheck::new(); + match qc.quicktest(property as fn($from) -> bool) { + Ok(_) => (), + Err(err) => panic!("qa {:?}", err) + } + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> qa: +; $($tail:tt)*) => { + { + extern crate quickcheck; + println!("? 
{} => {}, qa: +", SL!($from), SL!($to)); + + fn property(v: $from) -> bool { + let dst: Result<$to, conv::FloatError<_>> = v.approx_as().map_err(From::from); + if !(0 <= v) { + dst == Err(conv::FloatError::NegOverflow(v)) + } else { + dst == Ok(v as $to) + } + } + + let mut qc = quickcheck::QuickCheck::new(); + match qc.quicktest(property as fn($from) -> bool) { + Ok(_) => (), + Err(err) => panic!("qa {:?}", err) + } + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> qa: +$max:ty=> $($tail:tt)*) => { + { + extern crate quickcheck; + println!("? {} => {}, qa: +{}", SL!($from), SL!($to), SL!($max)); + + fn property(v: $from) -> bool { + let dst: Result<$to, conv::FloatError<_>> = v.approx_as().map_err(From::from); + if !(v <= <$max>::max_value() as $from) { + dst == Err(conv::FloatError::PosOverflow(v)) + } else { + dst == Ok(v as $to) + } + } + + let mut qc = quickcheck::QuickCheck::new(); + match qc.quicktest(property as fn($from) -> bool) { + Ok(_) => (), + Err(err) => panic!("qa {:?}", err) + } + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> qa: $bound:ty=> $($tail:tt)*) => { + { + extern crate quickcheck; + println!("? {} => {}, qa: {}", SL!($from), SL!($to), SL!($bound)); + + fn property(v: $from) -> bool { + let dst: Result<$to, conv::FloatError<_>> = v.approx_as().map_err(From::from); + if !(<$bound>::min_value() as $from <= v) { + dst == Err(conv::FloatError::NegOverflow(v)) + } else if !(v <= <$bound>::max_value() as $from) { + dst == Err(conv::FloatError::PosOverflow(v)) + } else { + dst == Ok(v as $to) + } + } + + let mut qc = quickcheck::QuickCheck::new(); + match qc.quicktest(property as fn($from) -> bool) { + Ok(_) => (), + Err(err) => panic!("qa {:?}", err) + } + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> qaW: *; $($tail:tt)*) => { + { + extern crate quickcheck; + println!("? {} => {}, qaW: *", SL!($from), SL!($to)); + + fn property(v: $from) -> bool { + let dst: Result<$to, _> = v.approx_as_by::<_, Wrapping>(); + dst == Ok(v as $to) + } + + let mut qc = quickcheck::QuickCheck::new(); + match qc.quicktest(property as fn($from) -> bool) { + Ok(_) => (), + Err(err) => panic!("qaW {:?}", err) + } + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> aRTN: $src:expr, $dst:expr; $($tail:tt)*) => { + { + println!("? {} => {}, aRTN: {}, {}", SL!($from), SL!($to), SL!($src), SL!($dst)); + let src: $from = $src; + let dst: Result<$to, _> = src.approx_by::(); + assert_eq!(dst, Ok($dst)); + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> aRNI: $src:expr, $dst:expr; $($tail:tt)*) => { + { + println!("? {} => {}, aRNI: {}, {}", SL!($from), SL!($to), SL!($src), SL!($dst)); + let src: $from = $src; + let dst: Result<$to, _> = src.approx_by::(); + assert_eq!(dst, Ok($dst)); + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> aRPI: $src:expr, $dst:expr; $($tail:tt)*) => { + { + println!("? {} => {}, aRPI: {}, {}", SL!($from), SL!($to), SL!($src), SL!($dst)); + let src: $from = $src; + let dst: Result<$to, _> = src.approx_by::(); + assert_eq!(dst, Ok($dst)); + } + check!(@ $from, $to=> $($tail)*); + }; + + (@ $from:ty, $to:ty=> aRTZ: $src:expr, $dst:expr; $($tail:tt)*) => { + { + println!("? {} => {}, aRTZ: {}, {}", SL!($from), SL!($to), SL!($src), SL!($dst)); + let src: $from = $src; + let dst: Result<$to, _> = src.approx_by::(); + assert_eq!(dst, Ok($dst)); + } + check!(@ $from, $to=> $($tail)*); + }; + + ($from:ty, $to:ty=> $($tail:tt)*) => { + check! 
{ @ $from, $to=> $($tail)*; } + }; +} + +macro_rules! for_bitness { + (32 {$($bits32:tt)*} 64 {$($bits64:tt)*}) => { + as_expr!( + { + #[cfg(target_pointer_width="32")] + fn for_bitness() { + $($bits32)* + } + + #[cfg(target_pointer_width="64")] + fn for_bitness() { + $($bits64)* + } + + for_bitness() + } + ) + }; +} diff --git a/src/vendor/cssparser-macros/.cargo-checksum.json b/src/vendor/cssparser-macros/.cargo-checksum.json new file mode 100644 index 0000000000..da4ef0d78d --- /dev/null +++ b/src/vendor/cssparser-macros/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","Cargo.toml":"3080a72e897022e23ede1dd38cf28049e74a077518cc25d91f559c3b575aa3e3","lib.rs":"17331a3a0b0be3c05fe360e08538baf26b343ae61feec9062429a34a1c1eb1e2"},"package":"079adec4af52bb5275eadd004292028c79eb3c5f5b4ee8086a36d4197032f6df"} \ No newline at end of file diff --git a/src/vendor/cssparser-macros/.cargo-ok b/src/vendor/cssparser-macros/.cargo-ok new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/vendor/cssparser-macros/Cargo.toml b/src/vendor/cssparser-macros/Cargo.toml new file mode 100644 index 0000000000..4770b4b11f --- /dev/null +++ b/src/vendor/cssparser-macros/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "cssparser-macros" +version = "0.3.0" +authors = ["Simon Sapin "] +description = "Procedural macros for cssparser" +documentation = "https://docs.rs/cssparser-macros/" +repository = "https://github.com/servo/rust-cssparser" +license = "MPL-2.0" + +[lib] +path = "lib.rs" +proc-macro = true + +[dependencies] +procedural-masquerade = {path = "../procedural-masquerade", version = "0.1"} +phf_codegen = "0.7" +quote = "0.3.14" +syn = {version = "0.11.8", features = ["full"]} diff --git a/src/vendor/cssparser-macros/lib.rs b/src/vendor/cssparser-macros/lib.rs new file mode 100644 index 0000000000..80d3e54574 --- /dev/null +++ b/src/vendor/cssparser-macros/lib.rs @@ -0,0 +1,97 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#[macro_use] extern crate procedural_masquerade; +extern crate phf_codegen; +extern crate proc_macro; +#[macro_use] extern crate quote; +extern crate syn; + +use std::ascii::AsciiExt; + +define_proc_macros! { + /// Input: the arms of a `match` expression. + /// + /// Output: a `MAX_LENGTH` constant with the length of the longest string pattern. + /// + /// Panic if the arms contain non-string patterns, + /// or string patterns that contains ASCII uppercase letters. + #[allow(non_snake_case)] + pub fn cssparser_internal__assert_ascii_lowercase__max_len(input: &str) -> String { + let expr = syn::parse_expr(&format!("match x {{ {} }}", input)).unwrap(); + let arms = match expr { + syn::Expr { node: syn::ExprKind::Match(_, ref arms), .. } => arms, + _ => panic!("expected a match expression, got {:?}", expr) + }; + max_len(arms.iter().flat_map(|arm| &arm.pats).filter_map(|pattern| { + let expr = match *pattern { + syn::Pat::Lit(ref expr) => expr, + syn::Pat::Wild => return None, + _ => panic!("expected string or wildcard pattern, got {:?}", pattern) + }; + match **expr { + syn::Expr { node: syn::ExprKind::Lit(syn::Lit::Str(ref string, _)), .. 
} => { + assert_eq!(*string, string.to_ascii_lowercase(), + "string patterns must be given in ASCII lowercase"); + Some(string.len()) + } + _ => panic!("expected string pattern, got {:?}", expr) + } + })) + } + + /// Input: string literals with no separator + /// + /// Output: a `MAX_LENGTH` constant with the length of the longest string. + #[allow(non_snake_case)] + pub fn cssparser_internal__max_len(input: &str) -> String { + max_len(syn::parse_token_trees(input).unwrap().iter().map(|tt| string_literal(tt).len())) + } + + /// Input: parsed as token trees. The first TT is a type. (Can be wrapped in parens.) + /// following TTs are grouped in pairs, each pair being a key as a string literal + /// and the corresponding value as a const expression. + /// + /// Output: a rust-phf map, with keys ASCII-lowercased: + /// ```text + /// static MAP: &'static ::cssparser::phf::Map<&'static str, $ValueType> = …; + /// ``` + #[allow(non_snake_case)] + pub fn cssparser_internal__phf_map(input: &str) -> String { + let token_trees = syn::parse_token_trees(input).unwrap(); + let value_type = &token_trees[0]; + let pairs: Vec<_> = token_trees[1..].chunks(2).map(|chunk| { + let key = string_literal(&chunk[0]); + let value = &chunk[1]; + (key.to_ascii_lowercase(), quote!(#value).to_string()) + }).collect(); + + let mut map = phf_codegen::Map::new(); + map.phf_path("::cssparser::_internal__phf"); + for &(ref key, ref value) in &pairs { + map.entry(&**key, &**value); + } + + let mut tokens = quote! { + static MAP: ::cssparser::_internal__phf::Map<&'static str, #value_type> = + }; + let mut initializer_bytes = Vec::new(); + map.build(&mut initializer_bytes).unwrap(); + tokens.append(::std::str::from_utf8(&initializer_bytes).unwrap()); + tokens.append(";"); + tokens.into_string() + } +} + +fn max_len>(lengths: I) -> String { + let max_length = lengths.max().expect("expected at least one string"); + quote!( const MAX_LENGTH: usize = #max_length; ).into_string() +} + +fn string_literal(token: &syn::TokenTree) -> &str { + match *token { + syn::TokenTree::Token(syn::Token::Literal(syn::Lit::Str(ref string, _))) => string, + _ => panic!("expected string literal, got {:?}", token) + } +} diff --git a/src/vendor/cssparser/.cargo-checksum.json b/src/vendor/cssparser/.cargo-checksum.json new file mode 100644 index 0000000000..0d95bb8b9e --- /dev/null +++ b/src/vendor/cssparser/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"0b944a5f5edfe3dca4796907e63f537a0e919630df66dca7945d51ba4e3c8051",".travis.yml":"f1fb4b65964c81bc1240544267ea334f554ca38ae7a74d57066f4d47d2b5d568","Cargo.toml":"c6aa6d96263fb5247a4b3d3bf5be16644373aa4b956e166647e23f4527aac7b8","Cargo.toml.orig":"bc6ad4db67d62cf6101527073857fa4abd86cb76f4cbe7e91727aff917955246","LICENSE":"fab3dd6bdab226f1c08630b1dd917e11fcb4ec5e1e020e2c16f83a0a13863e85","README.md":"9afe084d70a5d9396674a2624012d6ac749df35f81e322d2d75b042bf208f523","build.rs":"950bcc47a196f07f99f59637c28cc65e02a885130011f90a2b2608248b4724a2","build/match_byte.rs":"89e8b941af74df2c204abf808672d3ff278bdec75abc918c41a843260b924677","docs/.nojekyll":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","docs/404.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","docs/index.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","src/big-data-url.css":"04a8f6197ea1181123bca48bd1ebd016268e1da40f01b8f21055814e44bf62b8","src/color.rs":"33927641dc64d3bafba20f65d17d63d39c565013a07393c86cdf29f93dfc536b","src/css-parsing-tests/An+B.json":"d24559c1dad55d3da9d1fca29383edefdfc6046988435d6388a9bc0f28850257","src/css-parsing-tests/LICENSE":"5f9019a92f4aa8917aadc8e035aa673c2c1bf08d5ca2e535a0564106599f44eb","src/css-parsing-tests/README.rst":"775c5f957dd1d46d3ce954aaad219c821d2b64b4a9fb93c42e9737a11131ca44","src/css-parsing-tests/color3.json":"814179b0a6a9d3a935e635f485f1eaec9880129c5a0d09c0cda4efcea69649fb","src/css-parsing-tests/color3_hsl.json":"61c3b7d8d5ae02f94769d64458e0dd29e4c22c5068e5ea44040d14b88f9616b0","src/css-parsing-tests/color3_keywords.json":"95609bf9fe762c316878a30f371fa375a2e51c21a6fda24fa188a95cd9118f5c","src/css-parsing-tests/component_value_list.json":"516f9495fe089fa669321660bc431d7884839da8fb73e45edcbcd98625cb09dc","src/css-parsing-tests/declaration_list.json":"0b85cc3f19e945f838432acbfb9edb003abea13debc4ea27bcdcef25d117eac5","src/css-parsing-tests/make_color3_hsl.py":"3df7dd908ee719f78fd097ab24622f75edd10fcb67514230c172cbfc842c8ab7","src/css-parsing-tests/make_color3_keywords.py":"66bccab3f1dea18698fcfd854be79b1fd1cd724dd487e25b1f057b522163aad2","src/css-parsing-tests/one_component_value.json":"8798017709002e14cf11e203c9d716f82d308ce6ba0f6e64ee4eea331b8485c6","src/css-parsing-tests/one_declaration.json":"a34c9da56edfff9e2e21615f059e141b0e878e90f794dc8fa58d65b47cd193ed","src/css-parsing-tests/one_rule.json":"88f7b1b6049be88e1e2827673b75fc9261986b216e8ee6bf09621fecbe274e3c","src/css-parsing-tests/rule_list.json":"97c45e80fb83abef149a4016c5625a74f053e7ad70a2ce5a95c02fce1c195686","src/css-parsing-tests/stylesheet.json":"05f1e10fc486bfbda2c059c313a74ff78c0063c0768b99737cab41969c0c87ce","src/css-parsing-tests/stylesheet_bytes.json":"890fd856a596e61f82cf7ed77920ffe95df89209fdb5ee0afe0b26bdfdb80a42","src/css-parsing-tests/urange.json":"62720b143ddf52508baad42921473dd69519aad6c1cd49f37f3f264dc29e1c13","src/from_bytes.rs":"331fe63af2123ae3675b61928a69461b5ac77799fff3ce9978c55cf2c558f4ff","src/lib.rs":"ccc0f04541147d4fb90d3fe70591bacfb0c7030706c9be8fa60b80533e522bbc","src/macros.rs":"adb9773c157890381556ea83d7942dcc676f99eea71abbb6afeffee1e3f28960","src/nth.rs":"0a5e68bd8a597403e184ebf34e69230ae1e955f92b16b99b3f67cf8730a180a9","src/parser.rs":"a41b1d885389d34b4d81176f844ae3c4100e621628dd50d7348c42b08cdd13ae","src/rules_and_declarations.rs":"6b66a986e411a56998546ab0e64de5285df3368d7c4018c7230a1b6cf6bcc532","src/serializer.rs":"13bf417e747b742576
402543e25d7bdf65aa5dd6971455eb9d355169d2182f89","src/tests.rs":"80e4fec507258fe4e63a590f842f3213b44418cd69d755f78f938894966037db","src/tokenizer.rs":"4ad65f6f96fe7162c10fc6a1451c2ce8670b47b6a520e51382a952246515a217","src/unicode_range.rs":"f1f1dee3cc4efb6647557c6166aa5a93354fc0ee9beba53c1d63427beffe5a8d"},"package":"ef6124306e5ebc5ab11891d063aeafdd0cdc308079b708c8b566125f3680292b"} \ No newline at end of file diff --git a/src/vendor/cssparser/.cargo-ok b/src/vendor/cssparser/.cargo-ok new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/vendor/cssparser/.gitignore b/src/vendor/cssparser/.gitignore new file mode 100644 index 0000000000..0b837eae33 --- /dev/null +++ b/src/vendor/cssparser/.gitignore @@ -0,0 +1,3 @@ +target +/Cargo.lock +/.cargo/config diff --git a/src/vendor/cssparser/.travis.yml b/src/vendor/cssparser/.travis.yml new file mode 100644 index 0000000000..2f0758b38d --- /dev/null +++ b/src/vendor/cssparser/.travis.yml @@ -0,0 +1,17 @@ +language: rust +rust: + - nightly + - beta + - stable + +script: + - cargo build --verbose + - cargo test --verbose + - cargo doc --verbose + - cargo test --features heapsize + - cargo test --features dummy_match_byte + - if [ "$TRAVIS_RUST_VERSION" == "nightly" ]; then cargo test --features bench; fi + - if [ "$TRAVIS_RUST_VERSION" == "nightly" ]; then cargo test --features "bench dummy_match_byte"; fi + +notifications: + webhooks: http://build.servo.org:54856/travis diff --git a/src/vendor/cssparser/Cargo.toml b/src/vendor/cssparser/Cargo.toml new file mode 100644 index 0000000000..f81b0dd387 --- /dev/null +++ b/src/vendor/cssparser/Cargo.toml @@ -0,0 +1,60 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "cssparser" +version = "0.13.7" +authors = ["Simon Sapin "] +build = "build.rs" +exclude = ["src/css-parsing-tests"] +description = "Rust implementation of CSS Syntax Level 3" +documentation = "https://docs.rs/cssparser/" +readme = "README.md" +keywords = ["css", "syntax", "parser"] +license = "MPL-2.0" +repository = "https://github.com/servo/rust-cssparser" +[dependencies.serde] +version = "0.9" +optional = true + +[dependencies.cssparser-macros] +version = "0.3" + +[dependencies.heapsize] +version = ">= 0.3, < 0.5" +optional = true + +[dependencies.procedural-masquerade] +version = "0.1" + +[dependencies.matches] +version = "0.1" + +[dependencies.phf] +version = "0.7" +[dev-dependencies.encoding_rs] +version = "0.5" + +[dev-dependencies.difference] +version = "1.0" + +[dev-dependencies.rustc-serialize] +version = "0.3" +[build-dependencies.quote] +version = "0.3" + +[build-dependencies.syn] +version = "0.11" + +[features] +dummy_match_byte = [] +bench = [] diff --git a/src/vendor/cssparser/Cargo.toml.orig b/src/vendor/cssparser/Cargo.toml.orig new file mode 100644 index 0000000000..0d88b8639f --- /dev/null +++ b/src/vendor/cssparser/Cargo.toml.orig @@ -0,0 +1,39 @@ +[package] + +name = "cssparser" +version = "0.13.7" +authors = [ "Simon Sapin " ] + +description = "Rust implementation of CSS Syntax Level 3" +documentation = "https://docs.rs/cssparser/" +repository = "https://github.com/servo/rust-cssparser" +readme = "README.md" +keywords = ["css", "syntax", "parser"] +license = "MPL-2.0" +build = "build.rs" + +exclude = ["src/css-parsing-tests"] + +[dev-dependencies] +rustc-serialize = "0.3" +difference = "1.0" +encoding_rs = "0.5" + +[dependencies] +cssparser-macros = {path = "./macros", version = "0.3"} +heapsize = {version = ">= 0.3, < 0.5", optional = true} +matches = "0.1" +phf = "0.7" +procedural-masquerade = {path = "./procedural-masquerade", version = "0.1"} +serde = {version = "0.9", optional = true} + +[build-dependencies] +syn = "0.11" +quote = "0.3" + +[features] +bench = [] +dummy_match_byte = [] + +[workspace] +members = [".", "./macros", "./procedural-masquerade"] diff --git a/src/vendor/cssparser/LICENSE b/src/vendor/cssparser/LICENSE new file mode 100644 index 0000000000..14e2f777f6 --- /dev/null +++ b/src/vendor/cssparser/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. 
"Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. 
Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. 
+ +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/src/vendor/cssparser/README.md b/src/vendor/cssparser/README.md new file mode 100644 index 0000000000..d926c038df --- /dev/null +++ b/src/vendor/cssparser/README.md @@ -0,0 +1,64 @@ +rust-cssparser +============== + +[![Build Status](https://travis-ci.org/servo/rust-cssparser.svg?branch=travis)](https://travis-ci.org/servo/rust-cssparser) + +[Documentation](https://docs.rs/cssparser/) + +Rust implementation of +[CSS Syntax Module Level 3](https://drafts.csswg.org/css-syntax/) + + +Overview +-------- + +Parsing CSS involves a series of steps: + +* When parsing from bytes, + (e.g. reading a file or fetching an URL from the network,) + detect the character encoding + (based on a `Content-Type` HTTP header, an `@charset` rule, a BOM, etc.) + and decode to Unicode text. + + rust-cssparser does not do this yet and just assumes UTF-8. + + This step is skipped when parsing from Unicode, e.g. in an HTML ` + + +# Usage + +The macro should be used to wrap an entire *single* `enum` or `struct` declaration, including its attributes (both `derive` and others). All derivation attributes which the macro does *not* recognise will be assumed to be custom, and treated accordingly. + +`custom_derive!` assumes that custom derivations are implemented as macros (of the same name). For example, here is a simple derivation macro: + +```rust +#[macro_use] extern crate custom_derive; + +trait TypeName { + fn type_name() -> &'static str; +} + +trait ReprType { + type Repr; +} + +macro_rules! TypeName { + (() $(pub)* enum $name:ident $($tail:tt)*) => { TypeName! { @impl $name } }; + (() $(pub)* struct $name:ident $($tail:tt)*) => { TypeName! { @impl $name } }; + + (@impl $name:ident) => { + impl TypeName for $name { + fn type_name() -> &'static str { stringify!($name) } + } + }; +} + +macro_rules! TryFrom { + (($repr:ty) $(pub)* enum $name:ident $($tail:tt)*) => { + impl ReprType for $name { + type Repr = $repr; + } + }; +} + +custom_derive! 
{ + #[allow(dead_code)] + #[repr(u8)] + #[derive(Clone, Copy, Debug, TryFrom(u8), TypeName)] + enum Foo { A, B } +} + +fn main() { + let foo = Foo::B; + let v = foo as ::Repr; + let msg = format!("{}: {:?} ({:?})", Foo::type_name(), foo, v); + assert_eq!(msg, "Foo: B (1)"); +} +``` + +First, note that `custom_derive!` passes any arguments on the derivation attribute to the macro. In the case of attributes *without* any arguments, `()` is passed instead. + +Secondly, the macro is passed the entire item, *sans* attributes. It is the derivation macro's job to parse the item correctly. + +Third, each derivation macro is expected to result in zero or more items, not including the item itself. As a result, it is *not* possible to mutate the item in any way, or attach additional attributes to it. + +Finally, `@impl` is merely a trick to pack multiple, different functions into a single macro. The sequence has no special meaning; it is simply *distinct* from the usual invocation syntax. +*/ +#![cfg_attr(not(feature = "std"), no_std)] + +#[doc(hidden)] +#[macro_export] +macro_rules! custom_derive { + /* + + > **Convention**: a capture named `$fixed` is used for any part of a recursive rule that is needed in the terminal case, but is not actually being *used* for the recursive part. This avoids having to constantly repeat the full capture pattern (and makes changing it easier). + + # Primary Invocation Forms + + These need to catch any valid form of struct or enum. + + */ + ( + $(#[$($attrs:tt)*])* + enum $($it:tt)* + ) => { + custom_derive! { + @split_attrs + ($(#[$($attrs)*],)*), (), (), + (enum $($it)*) + } + }; + + ( + $(#[$($attrs:tt)*])* + pub $($it:tt)* + ) => { + custom_derive! { + @split_attrs + ($(#[$($attrs)*],)*), (), (), + (pub $($it)*) + } + }; + + ( + $(#[$($attrs:tt)*])* + struct $($it:tt)* + ) => { + custom_derive! { + @split_attrs + ($(#[$($attrs)*],)*), (), (), + (struct $($it)*) + } + }; + + /* + + # `@split_attrs` + + This is responsible for dividing all attributes on an item into two groups: + + - `#[derive(...)]` + - Everything else. + + As part of this, it also explodes `#[derive(A, B(..), C, ...)]` into `A, B(..), C, ...`. This is to simplify the next stage. + + */ + ( + @split_attrs + (), + $non_derives:tt, + $derives:tt, + $it:tt + ) => { + custom_derive! { + @split_derive_attrs + { $non_derives, $it }, + $derives, + (), + () + } + }; + + ( + @split_attrs + (#[derive($($new_drv:ident $(($($new_drv_args:tt)*))*),* $(,)*)], $(#[$($attrs:tt)*],)*), + $non_derives:tt, + ($($derives:ident,)*), + $it:tt + ) => { + custom_derive! { + @split_attrs + ($(#[$($attrs)*],)*), + $non_derives, + ($($derives,)* $($new_drv $(($($new_drv_args)*))*,)*), + $it + } + }; + + ( + @split_attrs + (#[$new_attr:meta], $(#[$($attrs:tt)*],)*), + ($($non_derives:tt)*), + $derives:tt, + $it:tt + ) => { + custom_derive! { + @split_attrs + ($(#[$($attrs)*],)*), + ($($non_derives)* #[$new_attr],), + $derives, + $it + } + }; + + /* + + # `@split_derive_attrs` + + This is responsible for taking the list of derivation attributes and splitting them into "built-in" and "custom" groups. + + The list of built-in derives currently supported is: Clone, Hash, RustcEncodable, RustcDecodable, PartialEq, Eq, PartialOrd, Ord, Debug, Default, Send, Sync, Copy. + + Anything not on that list is considered "custom". + + And yes, as far as I can see, we *have* to have a separate rule for each of those. What I wouldn't give for an alternation pattern... 
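+
+    To make the flow concrete (using the hypothetical `TypeName` derive from the
+    usage example at the top of this file, so the names are illustrative only):
+    given `#[derive(Clone, TypeName)]` on `enum Foo { A, B }`, the `@split_attrs`
+    stage explodes the attribute into the derive list `(Clone, TypeName,)`;
+    `@split_derive_attrs` then sorts `Clone` into the built-in group, which is
+    re-emitted as a plain `#[derive(Clone)]` on the item, while `TypeName` lands
+    in the user group and is finally expanded by `@expand_user_drvs` as the
+    invocation `TypeName! { () enum Foo { A, B } }`.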
+ */ + + (@split_derive_attrs + { ($(#[$($non_derives:tt)*],)*), ($($it:tt)*) }, + (), (), ($($user_drvs:tt)*) + ) => { + custom_derive! { + @as_item + $(#[$($non_derives)*])* + $($it)* + } + + custom_derive! { + @expand_user_drvs + ($($user_drvs)*), ($($it)*) + } + }; + + (@split_derive_attrs + { ($(#[$($non_derives:tt)*],)*), ($($it:tt)*) }, + (), ($($bi_drvs:ident,)+), ($($user_drvs:tt)*) + ) => { + custom_derive! { + @as_item + #[derive($($bi_drvs,)+)] + $(#[$($non_derives)*])* + $($it)* + } + + custom_derive! { + @expand_user_drvs + ($($user_drvs)*), ($($it)*) + } + }; + + (@split_derive_attrs + $fixed:tt, + (Hash, $($tail:tt)*), ($($bi_drvs:ident,)*), $user_drvs:tt + ) => { + custom_derive! { + @split_derive_attrs + $fixed, + ($($tail)*), ($($bi_drvs,)* Hash,), $user_drvs + } + }; + + (@split_derive_attrs + $fixed:tt, + (Clone, $($tail:tt)*), ($($bi_drvs:ident,)*), $user_drvs:tt + ) => { + custom_derive! { + @split_derive_attrs + $fixed, + ($($tail)*), ($($bi_drvs,)* Clone,), $user_drvs + } + }; + + (@split_derive_attrs + $fixed:tt, + (RustcEncodable, $($tail:tt)*), ($($bi_drvs:ident,)*), $user_drvs:tt + ) => { + custom_derive! { + @split_derive_attrs + $fixed, + ($($tail)*), ($($bi_drvs,)* RustcEncodable,), $user_drvs + } + }; + + (@split_derive_attrs + $fixed:tt, + (RustcDecodable, $($tail:tt)*), ($($bi_drvs:ident,)*), $user_drvs:tt + ) => { + custom_derive! { + @split_derive_attrs + $fixed, + ($($tail)*), ($($bi_drvs,)* RustcDecodable,), $user_drvs + } + }; + + (@split_derive_attrs + $fixed:tt, + (PartialEq, $($tail:tt)*), ($($bi_drvs:ident,)*), $user_drvs:tt + ) => { + custom_derive! { + @split_derive_attrs + $fixed, + ($($tail)*), ($($bi_drvs,)* PartialEq,), $user_drvs + } + }; + + (@split_derive_attrs + $fixed:tt, + (Eq, $($tail:tt)*), ($($bi_drvs:ident,)*), $user_drvs:tt + ) => { + custom_derive! { + @split_derive_attrs + $fixed, + ($($tail)*), ($($bi_drvs,)* Eq,), $user_drvs + } + }; + + (@split_derive_attrs + $fixed:tt, + (PartialOrd, $($tail:tt)*), ($($bi_drvs:ident,)*), $user_drvs:tt + ) => { + custom_derive! { + @split_derive_attrs + $fixed, + ($($tail)*), ($($bi_drvs,)* PartialOrd,), $user_drvs + } + }; + + (@split_derive_attrs + $fixed:tt, + (Ord, $($tail:tt)*), ($($bi_drvs:ident,)*), $user_drvs:tt + ) => { + custom_derive! { + @split_derive_attrs + $fixed, + ($($tail)*), ($($bi_drvs,)* Ord,), $user_drvs + } + }; + + (@split_derive_attrs + $fixed:tt, + (Debug, $($tail:tt)*), ($($bi_drvs:ident,)*), $user_drvs:tt + ) => { + custom_derive! { + @split_derive_attrs + $fixed, + ($($tail)*), ($($bi_drvs,)* Debug,), $user_drvs + } + }; + + (@split_derive_attrs + $fixed:tt, + (Default, $($tail:tt)*), ($($bi_drvs:ident,)*), $user_drvs:tt + ) => { + custom_derive! { + @split_derive_attrs + $fixed, + ($($tail)*), ($($bi_drvs,)* Default,), $user_drvs + } + }; + + (@split_derive_attrs + $fixed:tt, + (Send ,$($tail:tt)*), ($($bi_drvs:ident,)*), $user_drvs:tt + ) => { + custom_derive! { + @split_derive_attrs + $fixed, + ($($tail)*), ($($bi_drvs,)* Send,), $user_drvs + } + }; + + (@split_derive_attrs + $fixed:tt, + (Sync, $($tail:tt)*), ($($bi_drvs:ident,)*), $user_drvs:tt + ) => { + custom_derive! { + @split_derive_attrs + $fixed, + ($($tail)*), ($($bi_drvs,)* Sync,), $user_drvs + } + }; + + (@split_derive_attrs + $fixed:tt, + (Copy, $($tail:tt)*), ($($bi_drvs:ident,)*), $user_drvs:tt + ) => { + custom_derive! { + @split_derive_attrs + $fixed, + ($($tail)*), ($($bi_drvs,)* Copy,), $user_drvs + } + }; + + /* + + ## Custom Derivations + + Now we can handle the custom derivations. 
There are two forms we care about: those *with* an argument, and those *without*. + + The *reason* we care is that, in order to simplify the derivation macros, we want to detect the argument-less case and generate an empty pair of parens. + + */ + (@split_derive_attrs + $fixed:tt, + ($new_user:ident, $($tail:tt)*), $bi_drvs:tt, ($($user_drvs:tt)*) + ) => { + custom_derive! { + @split_derive_attrs + $fixed, ($($tail)*), $bi_drvs, ($($user_drvs)* $new_user(),) + } + }; + + (@split_derive_attrs + $fixed:tt, + ($new_user:ident ($($new_user_args:tt)*), $($tail:tt)*), $bi_drvs:tt, ($($user_drvs:tt)*) + ) => { + custom_derive! { + @split_derive_attrs + $fixed, ($($tail)*), $bi_drvs, ($($user_drvs)* $new_user($($new_user_args)*),) + } + }; + + /* + + # `@expand_user_drvs` + + Finally, we have a recursive rule for expanding user derivations. This is basically just using the derivation name as a macro identifier. + + This *has* to be recursive because we need to expand two independent repetition sequences simultaneously, and this causes `macro_rules!` to throw a wobbly. Don't want that. So, recursive it is. + + */ + (@expand_user_drvs + (), ($($it:tt)*) + ) => {}; + + (@expand_user_drvs + ($user_drv:ident $arg:tt, $($tail:tt)*), ($($it:tt)*) + ) => { + $user_drv! { $arg $($it)* } + custom_derive! { + @expand_user_drvs + ($($tail)*), ($($it)*) + } + }; + + /* + + # Miscellaneous Rules + + */ + (@as_item $($i:item)*) => {$($i)*}; +} diff --git a/src/vendor/custom_derive/tests/empty_bi_derives.rs b/src/vendor/custom_derive/tests/empty_bi_derives.rs new file mode 100644 index 0000000000..6af19ab329 --- /dev/null +++ b/src/vendor/custom_derive/tests/empty_bi_derives.rs @@ -0,0 +1,24 @@ +/* +Copyright ⓒ 2015 rust-custom-derive contributors. + +Licensed under the MIT license (see LICENSE or ) or the Apache License, Version 2.0 (see LICENSE of +), at your option. All +files in the project carrying such notice may not be copied, modified, +or distributed except according to those terms. +*/ +#[macro_use] extern crate custom_derive; + +macro_rules! Dummy { + ($($tts:tt)*) => {}; +} + +custom_derive! { + #[derive(Dummy)] + enum Foo { Bar } +} + +#[test] +fn test_empty_bi_derives() { + let _ = Foo::Bar; +} diff --git a/src/vendor/custom_derive/tests/enum_iterator.rs b/src/vendor/custom_derive/tests/enum_iterator.rs new file mode 100644 index 0000000000..65cef29c00 --- /dev/null +++ b/src/vendor/custom_derive/tests/enum_iterator.rs @@ -0,0 +1,73 @@ +/* +Copyright ⓒ 2015 rust-custom-derive contributors. + +Licensed under the MIT license (see LICENSE or ) or the Apache License, Version 2.0 (see LICENSE of +), at your option. All +files in the project carrying such notice may not be copied, modified, +or distributed except according to those terms. +*/ +#[macro_use] extern crate custom_derive; + +macro_rules! EnumIterator { + (() $(pub)* enum $name:ident { $($body:tt)* }) => { + EnumIterator! 
{ + @collect_variants ($name), + ($($body)*,) -> () + } + }; + + ( + @collect_variants ($name:ident), + ($(,)*) -> ($($var_names:ident,)*) + ) => { + type NameIter = ::std::vec::IntoIter<&'static str>; + type VariantIter = ::std::vec::IntoIter<$name>; + + impl $name { + #[allow(dead_code)] + pub fn iter_variants() -> VariantIter { + vec![$($name::$var_names),*].into_iter() + } + + #[allow(dead_code)] + pub fn iter_variant_names() -> NameIter { + vec![$(stringify!($var_names)),*].into_iter() + } + } + }; + + ( + @collect_variants $fixed:tt, + ($var:ident $(= $_val:expr)*, $($tail:tt)*) -> ($($var_names:tt)*) + ) => { + EnumIterator! { + @collect_variants $fixed, + ($($tail)*) -> ($($var_names)* $var,) + } + }; + + ( + @collect_variants ($name:ident), + ($var:ident $_struct:tt, $($tail:tt)*) -> ($($var_names:tt)*) + ) => { + const _error: () = concat!( + "cannot derive EnumIterator for ", + stringify!($name), + ", due to non-unitary variant ", + stringify!($var), + "." + ); + }; +} + +custom_derive! { + #[derive(Debug, PartialEq, EnumIterator)] + enum Get { Up, Down, AllAround } +} + +#[test] +fn test_enum_iterator() { + let vs: Vec<_> = Get::iter_variant_names().zip(Get::iter_variants()).collect(); + assert_eq!(&*vs, &[("Up", Get::Up), ("Down", Get::Down), ("AllAround", Get::AllAround)]); +} diff --git a/src/vendor/custom_derive/tests/enum_try_from.rs b/src/vendor/custom_derive/tests/enum_try_from.rs new file mode 100644 index 0000000000..41ad004552 --- /dev/null +++ b/src/vendor/custom_derive/tests/enum_try_from.rs @@ -0,0 +1,77 @@ +/* +Copyright ⓒ 2015 rust-custom-derive contributors. + +Licensed under the MIT license (see LICENSE or ) or the Apache License, Version 2.0 (see LICENSE of +), at your option. All +files in the project carrying such notice may not be copied, modified, +or distributed except according to those terms. +*/ +#[macro_use] extern crate custom_derive; + +trait TryFrom: Sized { + type Err; + fn try_from(src: Src) -> Result; +} + +macro_rules! TryFrom { + (($prim:ty) $(pub)* enum $name:ident { $($body:tt)* }) => { + TryFrom! { + @collect_variants ($name, $prim), + ($($body)*,) -> () + } + }; + + ( + @collect_variants ($name:ident, $prim:ty), + ($(,)*) -> ($($var_names:ident,)*) + ) => { + impl TryFrom<$prim> for $name { + type Err = $prim; + fn try_from(src: $prim) -> Result<$name, $prim> { + $( + if src == $name::$var_names as $prim { + return Ok($name::$var_names); + } + )* + Err(src) + } + } + }; + + ( + @collect_variants $fixed:tt, + ($var:ident $(= $_val:expr)*, $($tail:tt)*) -> ($($var_names:tt)*) + ) => { + TryFrom! { + @collect_variants $fixed, + ($($tail)*) -> ($($var_names)* $var,) + } + }; + + ( + @collect_variants ($name:ident), + ($var:ident $_struct:tt, $($tail:tt)*) -> ($($var_names:tt)*) + ) => { + const _error: () = concat!( + "cannot derive TryFrom for ", + stringify!($name), + ", due to non-unitary variant ", + stringify!($var), + "." + ); + }; +} + +custom_derive! 
{ + #[derive(Debug, PartialEq, TryFrom(u8))] + enum Get { Up, Down, AllAround } +} + +#[test] +fn test_try_from() { + assert_eq!(Get::try_from(0u8), Ok(Get::Up)); + assert_eq!(Get::try_from(1u8), Ok(Get::Down)); + assert_eq!(Get::try_from(2u8), Ok(Get::AllAround)); + assert_eq!(Get::try_from(3u8), Err(3u8)); +} diff --git a/src/vendor/custom_derive/tests/passthru_derive.rs b/src/vendor/custom_derive/tests/passthru_derive.rs new file mode 100644 index 0000000000..3ff6242c11 --- /dev/null +++ b/src/vendor/custom_derive/tests/passthru_derive.rs @@ -0,0 +1,19 @@ +/* +Copyright ⓒ 2015 rust-custom-derive contributors. + +Licensed under the MIT license (see LICENSE or ) or the Apache License, Version 2.0 (see LICENSE of +), at your option. All +files in the project carrying such notice may not be copied, modified, +or distributed except according to those terms. +*/ +#[macro_use] extern crate custom_derive; +extern crate rustc_serialize; + +custom_derive! { + #[derive(Clone, Hash, RustcEncodable, RustcDecodable, PartialEq, Eq, PartialOrd, Ord, Debug, Default, Copy)] + pub struct Dummy(u32); +} + +#[test] +fn test_passthru_derive() {} diff --git a/src/vendor/custom_derive/tests/stable_encodable.rs b/src/vendor/custom_derive/tests/stable_encodable.rs new file mode 100644 index 0000000000..fddefca0a5 --- /dev/null +++ b/src/vendor/custom_derive/tests/stable_encodable.rs @@ -0,0 +1,378 @@ +/* +Copyright ⓒ 2015 rust-custom-derive contributors. + +Licensed under the MIT license (see LICENSE or ) or the Apache License, Version 2.0 (see LICENSE of +), at your option. All +files in the project carrying such notice may not be copied, modified, +or distributed except according to those terms. +*/ +#[macro_use] extern crate custom_derive; +extern crate rustc_serialize; + +macro_rules! StableEncodable { + ( + () $(pub)* enum $name:ident < $($tail:tt)* + ) => { + StableEncodable! { + @extract_gen_args (enum $name), + ($($tail)*) + -> bounds(), ty_clss(where) + } + }; + + ( + () $(pub)* enum $name:ident { $($body:tt)* } + ) => { + StableEncodable! { + @impl enum $name, + bounds(), + ty_clss(), + { $($body)* } + } + }; + + ( + () $(pub)* struct $name:ident { $($body:tt)* } + ) => { + StableEncodable! { + @impl struct $name, + bounds(), + ty_clss(), + { $($body)* } + } + }; + + ( + () $(pub)* struct $name:ident < $($tail:tt)* + ) => { + StableEncodable! { + @extract_gen_args (struct $name), + ($($tail)*) + -> bounds(), ty_clss(where) + } + }; + + ( + @impl enum $name:ident, + bounds($($bounds:tt)*), + ty_clss($($ty_clss:tt)*), + { $($body:tt)* } + ) => { + StableEncodable! { + @parse_variants (enum $name, bounds($($bounds)*), ty_clss($($ty_clss)*)), + 0usize, ($($body)*,) -> () + } + }; + + ( + @impl struct $name:ident, + bounds($($bounds:tt)*), + ty_clss($($ty_clss:tt)*), + { $($fnames:ident: $_ftys:ty),* $(,)* } + ) => { + StableEncodable! 
{ + @as_item + impl<$($bounds)*> rustc_serialize::Encodable for $name<$($bounds)*> + $($ty_clss)* { + fn encode( + &self, + s: &mut StableEncodableEncoder + ) -> Result<(), StableEncodableEncoder::Error> + where StableEncodableEncoder: rustc_serialize::Encoder { + const NUM_FIELDS: usize = StableEncodable!(@count_tts $($fnames)*); + try!(s.emit_struct(stringify!($name), NUM_FIELDS, |s| { + // Poor man's enumerate!($($fnames)): + let mut idx = 0; + $( + try!(s.emit_struct_field(stringify!($fnames), idx, |s| { + self.$fnames.encode(s) + })); + idx += 1; + )* + let _ = idx; + Ok(()) + })); + Ok(()) + } + } + } + }; + + (@as_item $i:item) => {$i}; + + ( + @extract_gen_args ($kind:ident $name:ident), + (> { $($tail:tt)* }) + -> bounds($($bounds:tt)*), ty_clss($($ty_clss:tt)*) + ) => { + StableEncodable! { + @impl $kind $name, + bounds($($bounds)*), + ty_clss($($ty_clss)*), + { $($tail)* } + } + }; + + ( + @extract_gen_args $fixed:tt, + ($ty_name:ident: $($tail)*) + -> bounds($($bounds:tt)*), ty_clss($($ty_clss:tt)*) + ) => { + StableEncodable! { + @skip_inline_bound $fixed, + ($($tail)*) + -> bounds($($bounds)* $ty_name:), + ty_clss($($ty_clss)* $ty_name: ::rustc_serialize::Encodable,) + } + }; + + ( + @extract_gen_args $fixed:tt, + ($ty_name:ident $($tail:tt)*) + -> bounds($($bounds:tt)*), ty_clss($($ty_clss:tt)*) + ) => { + StableEncodable! { + @extract_gen_args $fixed, + ($($tail)*) + -> bounds($($bounds)* $ty_name), + ty_clss($($ty_clss)* $ty_name: ::rustc_serialize::Encodable,) + } + }; + + ( + @extract_gen_args $fixed:tt, + (, $($tail:tt)*) + -> bounds($($bounds:tt)*), ty_clss($($ty_clss:tt)*) + ) => { + StableEncodable! { + @extract_gen_args $fixed, + ($($tail)*) + -> bounds($($bounds)* ,), ty_clss($($ty_clss)*) + } + }; + + ( + @extract_gen_args $fixed:tt, + ($lt:tt $($tail:tt)*) + -> bounds($($bounds:tt)*), ty_clss($($ty_clss:tt)*) + ) => { + StableEncodable! { + @extract_gen_args $fixed, + ($($tail)*) + -> bounds($($bounds)* $lt), ty_clss($($ty_clss)*) + } + }; + + ( + @skip_inline_bound $fixed:tt, + (, $($tail:tt)*) + -> bounds($($bounds:tt)*), ty_clss($($ty_clss:tt)*) + ) => { + StableEncodable! { + @extract_gen_args $fixed, + ($($tail)*) + -> bounds($($bounds)* ,), ty_clss($($ty_clss)*) + } + }; + + ( + @skip_inline_bound $fixed:tt, + (> { $($tail:tt)* }) + -> bounds($($bounds:tt)*), ty_clss($($ty_clss:tt)*) + ) => { + StableEncodable! { + @impl $fixed, + bounds($($bounds)*), + ty_clss($($ty_clss)*), + { $($tail)* } + } + }; + + ( + @parse_variants (enum $name:ident, bounds($($bounds:tt)*), ty_clss($($ty_clss:tt)*)), + $_id:expr, ($(,)*) -> ($($variants:tt)*) + ) => { + StableEncodable! { + @as_item + impl<$($bounds)*> rustc_serialize::Encodable for $name<$($bounds)*> + $($ty_clss)* { + fn encode( + &self, + s: &mut StableEncodableEncoder) + -> Result<(), StableEncodableEncoder::Error> + where StableEncodableEncoder: rustc_serialize::Encoder { + s.emit_enum(stringify!($name), |s| { + $( + StableEncodable!(@encode_variant $name, $variants, self, s); + )* + unreachable!(); + }) + } + } + } + }; + + ( + @parse_variants $fixed:tt, + $id:expr, ($var_name:ident, $($tail:tt)*) -> ($($variants:tt)*) + ) => { + StableEncodable! { + @parse_variants $fixed, + ($id + 1usize), ($($tail)*) -> ($($variants)* ($var_name, $id)) + } + }; + + ( + @parse_variants $fixed:tt, + $id:expr, ($var_name:ident($(,)*), $($tail:tt)*) -> ($($variants:tt)*) + ) => { + StableEncodable! 
{ + @parse_variants $fixed, + ($id + 1usize), ($($tail)*) -> ($($variants)* + ($var_name, $id)) + } + }; + + ( + @parse_variants $fixed:tt, + $id:expr, ($var_name:ident($_vta:ty), $($tail:tt)*) -> ($($variants:tt)*) + ) => { + StableEncodable! { + @parse_variants $fixed, + ($id + 1usize), ($($tail)*) -> ($($variants)* + ($var_name, $id, (a))) + } + }; + + ( + @parse_variants $fixed:tt, + $id:expr, ($var_name:ident($_vta:ty, $_vtb:ty), $($tail:tt)*) -> ($($variants:tt)*) + ) => { + StableEncodable! { + @parse_variants $fixed, + ($id + 1usize), ($($tail)*) -> ($($variants)* + ($var_name, $id, (a, b))) + } + }; + + ( + @parse_variants $fixed:tt, + $id:expr, ($var_name:ident($_vta:ty, $_vtb:ty, $_vtc:ty), $($tail:tt)*) -> ($($variants:tt)*) + ) => { + StableEncodable! { + @parse_variants $fixed, + ($id + 1usize), ($($tail)*) -> ($($variants)* + ($var_name, $id, (a, b, c))) + } + }; + + ( + @parse_variants $fixed:tt, + $id:expr, ($var_name:ident { $($vfn:ident: $_vft:ty),* $(,)* }, $($tail:tt)*) -> ($($variants:tt)*) + ) => { + StableEncodable! { + @parse_variants $fixed, + ($id + 1usize), ($($tail)*) -> ($($variants)* + ($var_name, $id, {$($vfn),*})) + } + }; + + ( + @encode_variant $name:ident, + ($var_name:ident, $var_id:expr), + $self_:expr, $s:ident + ) => { + { + if let $name::$var_name = *$self_ { + return $s.emit_enum_variant(stringify!($var_name), $var_id, 0, |_| Ok(())); + } + } + }; + + ( + @encode_variant $name:ident, + ($var_name:ident, $var_id:expr, ($($tup_elems:ident),*)), + $self_:expr, $s:ident + ) => { + { + if let $name::$var_name($(ref $tup_elems),*) = *$self_ { + return $s.emit_enum_variant( + stringify!($var_name), + $var_id, + StableEncodable!(@count_tts $($tup_elems)*), + |s| { + let mut idx = 0; + $( + try!(s.emit_enum_variant_arg(idx, |s| $tup_elems.encode(s))); + idx += 1; + )* + let _ = idx; + Ok(()) + } + ); + } + } + }; + + ( + @encode_variant $name:ident, + ($var_name:ident, $var_id:expr, {$($str_fields:ident),*}), + $self_:expr, $s:ident + ) => { + { + if let $name::$var_name { $(ref $str_fields),* } = *$self_ { + return $s.emit_enum_struct_variant( + stringify!($var_name), + $var_id, + StableEncodable!(@count_tts $($str_fields)*), + |s| { + let mut idx = 0; + $( + try!(s.emit_enum_struct_variant_field( + stringify!($str_fields), + idx, + |s| $str_fields.encode(s) + )); + idx += 1; + )* + let _ = idx; + Ok(()) + } + ); + } + } + }; + + (@count_tts) => {0usize}; + (@count_tts $_tt:tt $($tail:tt)*) => {1usize + StableEncodable!(@count_tts $($tail)*)}; +} + +custom_derive! { + #[derive(Debug, StableEncodable)] + struct LazyEg { a: A, b: i32, c: (u8, u8, u8) } +} + +custom_derive! { + #[derive(Clone, StableEncodable)] + enum Wonky { Flim, Flam, Flom(i32), Bees { say: S } } +} + +#[test] +fn test_stable_encodable() { + macro_rules! 
json { + ($e:expr) => (rustc_serialize::json::encode(&$e).unwrap()); + } + + let lazy_eg = LazyEg { + a: String::from("Oh hai!"), + b: 42, + c: (1, 3, 0), + }; + assert_eq!(&*json!(lazy_eg), r#"{"a":"Oh hai!","b":42,"c":[1,3,0]}"#); + + assert_eq!(&*json!(Wonky::Flim::<()>), r#""Flim""#); + assert_eq!(&*json!(Wonky::Flam::<()>), r#""Flam""#); + assert_eq!(&*json!(Wonky::Flom::<()>(42)), r#"{"variant":"Flom","fields":[42]}"#); + assert_eq!(&*json!(Wonky::Bees{say:"aaaaah!"}), r#"{"variant":"Bees","fields":["aaaaah!"]}"#); +} diff --git a/src/vendor/custom_derive/tests/trailing_comma.rs b/src/vendor/custom_derive/tests/trailing_comma.rs new file mode 100644 index 0000000000..ba5d09727d --- /dev/null +++ b/src/vendor/custom_derive/tests/trailing_comma.rs @@ -0,0 +1,24 @@ +/* +Copyright ⓒ 2015 rust-custom-derive contributors. + +Licensed under the MIT license (see LICENSE or ) or the Apache License, Version 2.0 (see LICENSE of +), at your option. All +files in the project carrying such notice may not be copied, modified, +or distributed except according to those terms. +*/ +#[macro_use] extern crate custom_derive; + +macro_rules! Dummy { + ($($tts:tt)*) => {}; +} + +custom_derive! { + #[derive(Dummy,)] + enum Foo { Bar } +} + +#[test] +fn test_trailing_comma() { + let _ = Foo::Bar; +} diff --git a/src/vendor/debug_unreachable/.cargo-checksum.json b/src/vendor/debug_unreachable/.cargo-checksum.json new file mode 100644 index 0000000000..f87ee75109 --- /dev/null +++ b/src/vendor/debug_unreachable/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f58cbb29ee4ff8a030c1e32d3f4ac2b19753d7fdf8f72d050d4bda1353364fda",".travis.yml":"d91d0d75087934c2d0503a8c04439fea459a19182021bb0b699644d442f5b6fc","Cargo.toml":"f6be96c7687e8e39cffc3f22aff16d12d669a31e932c1b98ba590dbaa181ab22","README.md":"0311a4ecf6dd300c555ef0f2316e4ba919476782b6aed1c35b01d8cc2f958c72","examples/simple.rs":"c05b124bdad67bfe9e48998bff6a7c6a8789e7f7c9fb3f318f8028a68ef944ed","src/lib.rs":"ef4e6d5732fcad9da5a500f401ccd0b538b489ae50771d15d5a2ec0f501cd0f9","tests/check.rs":"ac8691f78269e1cb0cd010150e707f5ea5df14055883f0ee5a5b55a686c5b8de"},"package":"9a032eac705ca39214d169f83e3d3da290af06d8d1d344d1baad2fd002dca4b3"} \ No newline at end of file diff --git a/src/vendor/debug_unreachable/.cargo-ok b/src/vendor/debug_unreachable/.cargo-ok new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/vendor/debug_unreachable/.gitignore b/src/vendor/debug_unreachable/.gitignore new file mode 100644 index 0000000000..8ac5ebd272 --- /dev/null +++ b/src/vendor/debug_unreachable/.gitignore @@ -0,0 +1,19 @@ +.DS_Store +*~ +*# +*.o +*.so +*.swp +*.dylib +*.dSYM +*.dll +*.rlib +*.dummy +*.exe +*-test +/doc/ +/target/ +/examples/* +!/examples/*.rs +Cargo.lock + diff --git a/src/vendor/debug_unreachable/.travis.yml b/src/vendor/debug_unreachable/.travis.yml new file mode 100644 index 0000000000..22761ba7ee --- /dev/null +++ b/src/vendor/debug_unreachable/.travis.yml @@ -0,0 +1 @@ +language: rust diff --git a/src/vendor/debug_unreachable/Cargo.toml b/src/vendor/debug_unreachable/Cargo.toml new file mode 100644 index 0000000000..758118ef12 --- /dev/null +++ b/src/vendor/debug_unreachable/Cargo.toml @@ -0,0 +1,18 @@ +[package] + +name = "debug_unreachable" +version = "0.1.1" +authors = ["Jonathan Reem "] +repository = "https://github.com/reem/rust-debug-unreachable.git" +description = "unreachable!() in debug, std::intrinsics::unreachable() in release." 
+documentation = "https://crates.fyi/crates/debug_unreachable/0.1.1" +readme = "README.md" +license = "MIT" + +[lib] +name = "debug_unreachable" +path = "src/lib.rs" + +[dependencies] +unreachable = "0.1" + diff --git a/src/vendor/debug_unreachable/README.md b/src/vendor/debug_unreachable/README.md new file mode 100644 index 0000000000..41fc29c97e --- /dev/null +++ b/src/vendor/debug_unreachable/README.md @@ -0,0 +1,24 @@ +# debug_unreachable + +> unreachable!() in debug, std::intrinsics::unreachable() in release. + +## [Documentation](https://crates.fyi/crates/debug_unreachable/0.1.1) + +## Usage + +Use the crates.io repository; add this to your `Cargo.toml` along +with the rest of your dependencies: + +```toml +[dependencies] +debug_unreachable = "0.1" +``` + +## Author + +[Jonathan Reem](https://medium.com/@jreem) is the primary author and maintainer of debug-unreachable. + +## License + +MIT + diff --git a/src/vendor/debug_unreachable/examples/simple.rs b/src/vendor/debug_unreachable/examples/simple.rs new file mode 100644 index 0000000000..3da34c149e --- /dev/null +++ b/src/vendor/debug_unreachable/examples/simple.rs @@ -0,0 +1,11 @@ +#[macro_use] +extern crate debug_unreachable; + +fn main() { + if 0 > 100 { + // Can't happen! + unsafe { debug_unreachable!() } + } else { + println!("Good, 0 <= 100."); + } +} diff --git a/src/vendor/debug_unreachable/src/lib.rs b/src/vendor/debug_unreachable/src/lib.rs new file mode 100644 index 0000000000..d5738d6996 --- /dev/null +++ b/src/vendor/debug_unreachable/src/lib.rs @@ -0,0 +1,22 @@ +#![deny(missing_docs, warnings)] + +//! `panic!()` in debug builds, optimization hint in release. + +extern crate unreachable; + +#[doc(hidden)] +pub use unreachable::unreachable as __unreachable; + +#[macro_export] +/// `panic!()` in debug builds, optimization hint in release. +macro_rules! 
debug_unreachable { + () => { debug_unreachable!("entered unreachable code") }; + ($e:expr) => { + if cfg!(ndebug) { + $crate::__unreachable() + } else { + panic!($e); + } + } +} + diff --git a/src/vendor/debug_unreachable/tests/check.rs b/src/vendor/debug_unreachable/tests/check.rs new file mode 100644 index 0000000000..f47ee2da50 --- /dev/null +++ b/src/vendor/debug_unreachable/tests/check.rs @@ -0,0 +1,9 @@ +#[macro_use] +extern crate debug_unreachable; + +#[test] +#[should_panic] +fn explodes_in_debug() { + unsafe { debug_unreachable!() } +} + diff --git a/src/vendor/dtoa/.cargo-checksum.json b/src/vendor/dtoa/.cargo-checksum.json index 8268bdd0bb..315dbfa1b0 100644 --- a/src/vendor/dtoa/.cargo-checksum.json +++ b/src/vendor/dtoa/.cargo-checksum.json @@ -1 +1 @@ -{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"a2b867b2e28af9bde20a669a6ff0f366ecc5150b89314cd7ec97ed95bb427547","Cargo.toml":"5755612ec9d7adc4ec1a68e3b096bfa45af8ae7dfd8237515c9f85786c9a9356","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"e18259ab3aa7f39a194795bdad8039b3c5fd544f6dd922526c9326c44842b76d","README.md":"8de1d7a3224bfae275197dc75d128f73e2970c26f6790b575a8346074f7783c6","benches/bench.rs":"ac713ab4e1c668dea70416504955563fcd6bd2982ae1cfa3a1c0043e09dd893f","performance.png":"5909ebd7b98691502c6f019c126758da40edc7031b9da32bce45df34273b1b87","src/diyfp.rs":"81754c3d1b8ff2347a506187ef43a666f09e20ae0e53436226c969d7e3f737dc","src/dtoa.rs":"f5cdd96d6ac9d3c50289a090a6d6801d36cb121c2a5e6d8acd1aa41013fded76","src/lib.rs":"037eaaf26de236c916332fb76bc72b7a8d588df8c90a8dab5636140976559adb","tests/test.rs":"296f3c322e08508fd372e436434fdd209bb911cab2124ea654d5f78d90f3eeea"},"package":"80c8b71fd71146990a9742fc06dcbbde19161a267e0ad4e572c35162f4578c90"} \ No newline at end of file +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"a2b867b2e28af9bde20a669a6ff0f366ecc5150b89314cd7ec97ed95bb427547","Cargo.toml":"f7341a8bad6e4c144f5d39e3a2254443738555214c26e1e92de45bbab02bb424","Cargo.toml.orig":"244efa56d5d6250b1c17f5142c29394ed0aba91008fdfdbffb50e6e6c712e8f0","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"e18259ab3aa7f39a194795bdad8039b3c5fd544f6dd922526c9326c44842b76d","README.md":"2a26b3c04192f933e2a28f1f7069e0df4b0b24fbd3cef53ab9a2318a53a65553","benches/bench.rs":"ac713ab4e1c668dea70416504955563fcd6bd2982ae1cfa3a1c0043e09dd893f","src/diyfp.rs":"81754c3d1b8ff2347a506187ef43a666f09e20ae0e53436226c969d7e3f737dc","src/dtoa.rs":"f5cdd96d6ac9d3c50289a090a6d6801d36cb121c2a5e6d8acd1aa41013fded76","src/lib.rs":"141f175d15e6c5d0f0833f6c758305aa2b101bacfa9100f5d4cd98601df00e11","tests/test.rs":"296f3c322e08508fd372e436434fdd209bb911cab2124ea654d5f78d90f3eeea"},"package":"09c3753c3db574d215cba4ea76018483895d7bff25a31b49ba45db21c48e50ab"} \ No newline at end of file diff --git a/src/vendor/dtoa/Cargo.toml b/src/vendor/dtoa/Cargo.toml index edf6630f1e..d61e1defd9 100644 --- a/src/vendor/dtoa/Cargo.toml +++ b/src/vendor/dtoa/Cargo.toml @@ -1,9 +1,23 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` 
dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + [package] name = "dtoa" -version = "0.4.1" +version = "0.4.2" authors = ["David Tolnay "] -license = "MIT/Apache-2.0" +exclude = ["performance.png"] description = "Fast functions for printing floating-point primitives to an io::Write" -repository = "https://github.com/dtolnay/dtoa" documentation = "https://github.com/dtolnay/dtoa" +readme = "README.md" categories = ["value-formatting"] +license = "MIT/Apache-2.0" +repository = "https://github.com/dtolnay/dtoa" diff --git a/src/vendor/dtoa/Cargo.toml.orig b/src/vendor/dtoa/Cargo.toml.orig new file mode 100644 index 0000000000..b3d892ee2e --- /dev/null +++ b/src/vendor/dtoa/Cargo.toml.orig @@ -0,0 +1,11 @@ +[package] +name = "dtoa" +version = "0.4.2" # remember to update html_root_url +authors = ["David Tolnay "] +license = "MIT/Apache-2.0" +description = "Fast functions for printing floating-point primitives to an io::Write" +repository = "https://github.com/dtolnay/dtoa" +documentation = "https://github.com/dtolnay/dtoa" +categories = ["value-formatting"] +readme = "README.md" +exclude = ["performance.png"] diff --git a/src/vendor/dtoa/README.md b/src/vendor/dtoa/README.md index 43129d4011..2f26b68a59 100644 --- a/src/vendor/dtoa/README.md +++ b/src/vendor/dtoa/README.md @@ -14,7 +14,7 @@ The original C++ code of each function is included in comments. See also [`itoa`](https://github.com/dtolnay/itoa) for printing integer primitives. -## Performance +## Performance (lower is better) ![performance](https://raw.githubusercontent.com/dtolnay/dtoa/master/performance.png) diff --git a/src/vendor/dtoa/performance.png b/src/vendor/dtoa/performance.png deleted file mode 100644 index 69d04bdb7d720d8720e5ff5060d38915c835a56f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 70571 zcmeFZX*gGH`#-uw5oL@-ku;J*q0C80DP&3_N#>z4W~``Gk|`A`LguLqnJH8%naeC> zp6B`hS@qoa^V|FF-pAf=evW6j@8WB%Yn|75ex_^rsGd?>Ps>3|5XAc9O7iLivAUQb zD1&L%;7=lc?T*C%QC*Weu0ew@SDH(A@$YMGmCjxx2(A+HzZBNr?WOUDo9vG1*qyPt zZ0Bfr)tGQ}blhiVZE?-W(AIdL%~g}oVMz{x*h(CiKce9jHrV2O7EtLIQ894&dXS%AL`;9mUZtj4{o09xr8}ycWkrt|vrw%z zR(xdd`LvCD@%O>b#LqGBm)<2OTO8^++#_l7_)JUB;kI&FzF=j1=s#akcjPl#D7O9k z71>|H7}^{LY7p(aC9)7Y!IZSD)}&%~TOVrCXsGG2U(x)UE8YfxD5&_wp1MVFV~A9>2F zUAVr!-u#yJ{{8!dD?Ap*6!+@qu|IzNSSRh`>iOxR``^E-l7EuxFeGPYwr6^{CE;yL z#L+u~&f}NZ+1cCgaJv_ccD!fWxbe7-`}NieqEG$ z+}lhEwU$+@R(&t@@W{^1<&~7w>lvb@rM+NcawjiOa&~Uc@JFg)Nlt32;N;*>mK{5G z+`V^CZee9_9R&tPqBt!$Wa=MHsVem;dpkh;I)RKI5PyKB^1T3WM{HK$qYGfb;i)x@Y!5|%&H zqd8no|M!E|6^k5l9^Xz}m>uolYw+U`p(Sj8eOT+=?e6YQcpka6CN7LPliBk0fQhQU zzP|C&j{nx+G5h$Pvz_IBLKi>KY~Q)FwI&(r)vyRt&@zaXZy_1xDo$ zX@@SUt7BL1+qaJ+i+}%qMFoX5mX?;!S1$cq;CGc0#6_4TrKGmtH{X5mz^kxOratN1 z9kEs?O7aOGKklQxP+O}($Oi6I(HK6RnHLpBb^5s^)8h&`Tl=Q7Iyy!8+Ya{s-kQc# z{|BW}1z!%s<;6L6QA_s53=3+0e*O>Td3xx|Huw3yUAXnFyLT(`P-Ug19XWN1UiiQPO>6!_y@KSd@bE2ia&ooS zyZn_N1qQx)8!l;KVKLcPqxjnAKxb#?rknrnK(%oH;9&1N4BU}!3)8wy^>d7yW1qwd zZ`-y?O-+rq!phQ;k(JeZa?&oi>CSc54rqeA8$gPAOboMZC3WIA!?m{(U;49c_otZ&%Z|va&VU#=!%N2L^|R zI=Z_xqZhXS`{CC&g&n%`>s=O;=202_J!xraH0#z;sS0h=Ykg~0Ll7q4!qnnN1cH^X zO!le~MOY8kdX=z4Ot>W)dV02D^bteqnvaDOazq@AA)z!6ob<2s6eWlnGRk3QkC&Lb@8J>T0`K#dd zNmHe0^#|NC`aH62Z%h~-Za%!qU&(9Jfy){lXPN)$giKX$`sEJ?8Mx1_ZgZVjubXA< z`&i&yNp&^z*w`5Q#Z3*qV-vr>rrESfvv1l{xFy$dr0fyT(NxC~eZ4~WE&11{x 
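As a side note on the `dtoa` 0.4.1 → 0.4.2 update above: the crate's purpose is summed up by its description ("Fast functions for printing floating-point primitives to an io::Write"). A minimal usage sketch follows, illustrative only and not part of this patch; the exact output noted in the comment is an assumption about the shortest round-trip form.

```rust
extern crate dtoa;

fn main() {
    // Format a float into any io::Write sink; dtoa emits a shortest
    // round-trip representation without going through std::fmt.
    let mut buf = Vec::new();
    dtoa::write(&mut buf, 2.71828f64).unwrap();
    println!("{}", String::from_utf8(buf).unwrap()); // expected: 2.71828
}
```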
z**#PHs63{Bniz|QP9gGAfx%Us1_&3t-Iq%$N8^#eaX@jFohKhf2^&u1nu9|_6Y!3P z3;?Vz_T0{0{^|{67bL%aDCE-?gKQ*`Zz3QC0a#7i-LnAR$^m%8jqdOqp5^glZtqWs zU-{*=os_ch|;kE=shsgN>o zmBdS6(J0EsNJC1zvZ0~DG)aAN^WV!LJfCUX$c=&#?SY$5z5t=SqeCs}a4Cg7W z-0AAo^sz%iLTS~`dJO@+n4a>C9>7jRN6Bqy&aFB(V)B>^vy7gF zB1DXWtPOP?KV}2*)x|=Ln&nSX>x?=lFH?gDmU@{Z=s{(Xx+@Kk+NEE=MKq@u*sLV2JW%L7F2gF!bH= z;8GahCs){BrsjzwXH*;0{GdyhtJkj&N7>#m_Z3O}L7E1bD%QNV{zi%ncqW5ZHyJ*D z`YYM#=t*h+v`mF4RaCHN#f@PsrQQvQhmx@4U!pF3fn*t|kyPSv4u7ycNA3pL(p)%_ z7fDH3;oSn>J@5Se<(cmrv*bSNgmxcBgZk2->_*1$oVL_tRv-vRY1uzPNmRAS?5Czb z+cAIjEJ)To>LaBJihOp=yv0>(NiC|z_qXGygZ+|TsuxA(9jRJ)lOh1aQ zzn%1t1cMQ@l&_Ay9eO9|TUp9NTm56QPr6xV^Lkqe9d;I$MnR=FcD^f^bv4xT16p|> zNb)CFwt;ZOkBFlTsm_wRI_1c3DoBh{e#2{NMS$%(BFXwSErs{e>lB?iplr#s-DP|R z@hSulmn0wyqJ(qz>BPil1X%%{c(*YuX!MWAU!@nDnoqG$I$lz7VJ1cD`o%9_CM<6e z5bf%L5mLh@P3E4uig4eIooctE=ci@QD!zVNZ7UtrEWhi&s&2*F8h4}|f&PDg{r{ec zZdv{Rd_gqRsw*;|-yVWa3f2b=&1hz&rgQcQ>I<`10k8mdR*yWIdZq2DrDb zqR+a1w^92a7r6u2-Kc~S<`4)(f@ZMy&6}@mHWoI?T4v=ChQy@7tZBt;h0ZO#ouT^l z)0g&4qLf@8)Ut5HpccdZ{Px2hdUoEjF(kxeb(cna`c-HFaBC6NujE48ozID8hfS;N zUL9rla{$R~sS7r4RL12Pt1g~De~ku*p%Y`313@@9(cP_$A`F+!4iZwqr(<%`had4x zufG7HMZnW7lV@{@Hs^$sFgu3LT_NhaxQBi zs9dGvwDh|ljJJX7gMB7+$6-o{QVN$djXd?sBGnEeX+(Jrtdy#xDMD=6H+{jUgGw(- z%cvh-PoOqo-fO**GWNpRhL1*!52@P|><9VRM)Xgp7NX$EoGs4LGLf4#ku<1o8-Fan zx%*wI5aZBgs~jjIFB&#L{;%R;ISb@+bOg`X=t8wiG znNOgU2~`&2yV|YGKX~G4B73|_-E-Eb;tJ*qo=l-)q~gj=X5F1|pdIpDjT1&N{F#~L zk>V=on2u~-6MH7%Y@1ednP)aq2{Hw3o$?e#??vCP%AbFM1Dv=S@z@ z2S1Bw5a?@zZQ`gydoP_0181Wb>FoNu0AP*c2Dy8WTeS+|r8A34%C@g>)&)Hebr};g z`zZbM^q|jfDhJeb+Wq6L|L0YhV?_j&VnVUSrqxmgJppK8I93Hl7$O4{tU2iXq2E3P zecQ)ig}G1e5C9^<3MnNbcMokOj#yc@`*u8d!Nw0|PA2*_O(e6~&J6RDcfjRk4B~!5Yb;=-c;CY=Cy7+w}%) z8>o1NyCWd|&F8<@{yaG(^uC4>styV686$*<8vFI%#vK97DXThv9r8fRA7&`o@4q8T z7_8xA_cDiZdxTNNN?(KW$K5RRorcZ}Y(W_0Xpjm4wSmt}RWJvF-K8);dgk=$CbR6? 
zgjo8&h^BnLQRnCmcJ{0L_ak^D&({Tg+0*OO;|Acu?A8n2kK?oQ=m3v!OhBPg*{7>P zhn*`rpclcAsV(N-|I_9i9il4*-SB{bbU|GKwm^3&|6(@9o=!MDsa2}eyG8i$$4E(E z2HWH`dv26wFwT`%)sj%JnIjp_czKXK2O^6@l8lYSCsp>(>*QlfJ=^ zw2GP1I2YZT@aQxB_N#1X>VH9@%yFnTMss1JCMjw9)ZQKSe;v`{psHF61A{mTL5wh7 z=h3+(SP&a;P}wO}%;iM%)_-a6&y4yMLhDOc3c#H>I_)%7V+x9~u~V3_}uQm2rm-%8F!Tk6CRji#lq>ifW#dCzq3; zM2*hJ?gG>c!YLO;aF_`Wm@sojjp!2pb~%u*Md`vQ%bz2g$y1pS94HPts0%J!P{G|m zu7sg8g(C-iiXl~u!cg}Wa1Z0SQF6b;el?#~`N)uJffVPe&SRp>KTcua0TLb__E^2( zIS@ern?PRcj~+dGFN67Y&V;m1vo$ zzrqs2;SVMc+)Eb$*px1h~RLm2vm(Ba@E) zyON~o(x74|?r+d&)c}6jVR)l?f2@4g_?KS_X1k|ab=RIAFlO$9AK8~pzSVBt{nwc2 z6xD@CPs(hE4$VbHRI>Bim*on5rQfG1q0bx5rm_%3yqh|K7IUx$A|U97`oVkq)~!C4 zFk}FTJ0}JzxlZCV>zHNvV*?9ZTTaP4jcf2f=(cFlkH*Kx5tY(07lhv{`+`k`%azK z+x=c#Z`+dl73(O33QM}I!pskMg%*IQj;Eow)r@v`&ml@vdD+9w5&yA~*d$AJ=gzT3 zmW(UqOb>i(U}0&QduD1xZ8-k&PfYFj@$+X<1qr1fF%y})s`LAnLu_s1{H8IW|4aS) zy8%Stt+XR9r2bgoR4outwB+UOypN@9-G56;a!};VzPIp0*~8);mg<#_f73sq&WKYI zh~vdDzayxzn8k+8Z%D9nPn$V_7c68Y-vE4O1WqPF)hgiK`o)Xw4i7%v@zvgbuep{z z>xRC`(`)%9lJ!t6Cpl|UB4YmKpq%7HxD>)V%EjLt{DUpX-R&;^^<%vq{5!tt)ks<^PpZ2i5=B~n|m z84-fSkPM`vl3PH)<3ot;q;Z`+>p_Q^S|M~A1{S5 z%49rvg9l{kKL8)8A1<3)J(AlwQDR z|Gs~3sy1a4vO}DKYm{CH+n>bk?g^Uc{ z0I=;sWCdq*MNk+!`zr<4w0DL2O2Pg(A*dbF5{(vp(#&)G=0IRC$Ul2(zbr$K&Rc$| zTQ`E2gYNk9q>_iZ6DWyWwd%{!1ntzm7%bLp zJTUZQ$;zuVUU^cvUjJlR{t!=eynN@*ow=hs*EPE^v9rn{bd}Gla}AFS{j*G7vADHb zT9!7xI%md=n3LZ6=U#9{LI~88NHIHhOeomY4o(#lK)29a6J{7}jCJjLDv-6yFI_jB zQ!sv&ZN>UnPE49j3NvccCKf`+^UAvB76<>X^}DTc?UN=&2DLw1_xMHsB}ZhOO_fbl z2zp~K(P{nqUsXk*e0Cd`-VV0w_ZoJdlzsbQw*D82c2PK6v$-@m-jsDG&C;<_T0(`z zj(<6;Wi_@!UD{V`*euMCZ7E|&<)41aR zs3HuySN3X-tZ9I_7$g_ZLx%ObPT}G^yaH{F*l|cz?G?xrr({_(6~yf5AAkR&*_n)a z{&03!+N4XHcaiO4LBgcz6d5bs^7qnP5k1*Aw|3M>q9$PD$Thf(X*Ob?gDeiU!VoqN z{xKm+k>qh!(sj|Lp!-ZqPtQ;9qQ49O-z+RfVQDFC0f~C!h7G&$HTGuJpWTe=ASyEQ zJe44gp?YLT?~u#ZuZtUY+qtugmxf|2nd7lC-{<>~oP4K->3f;`UBl?in1A%4K{+_G zL+IMeG}j0~q%tKzI@Ye;)obIQZbK55-6vg312b~uW}J%?p-7FfBh@y6P#d#Ae|@o@ z45F@Fv0}ZGHuSe&f9->XFtq4`EeEKXHZEVjoL;qIg9dxZtvNGO*yeRkUfpitk=*On z0gvV2)i&WD-o4Y}-tr6nFVsDfUs15-es5DRQ)o4<^wH`=!5;K2F67EmBz{@7P8u2- zvhpKezxE@fHCU9_plkJ$jt&m*5ZajVY}vO_SIB}>IE7;1^Eg+ zi-Kh4cU0LZePgZm5n|CKE#4Ejf8JZnRVE|wG57{iKDBJ4{wrUMMwEem-$+G6n6tyB zU}aiF#ECZQ2e}@!IN+^$j2knlXBxF{pAV@iKV2OcxqFx^+T6Y5P1!4~o8$p{kVNrf z^PxkB=&3e_hbPd#ozKfV;`)|`&Zwd1=IE9*)<>pX??>ARm3aAG5Y}vmL^})fuBPMVntw0qSXSir5nJaruLW^@RFYkS6-@JKqEe|pi z+hSy0+qf4IlCfiAsXnDchJyfeh^2GNLNE`WaPjKZBc2t{{6!xSawCfe~V?d2^QkWZQoZ$tvj=>T1id9 zPc`Z=a$K@y+751&k+ozd*;EdwNu&+Q@6b?v9)!d=gy$qIKNU9cl=O@f4~+EA5P5|B zIg*A%PZW!;2Pgg|HTH2RB5xE)GkT$l!)A!BDw$n*0q|USjX2HlpL|;^YK-Ka!b(08 zkC%o_MDR{JKN2N2BevcQR?xN_R{!@9z{8|RC7)jK?TUu%diOc-Xbv8zQp`DA2dFLe za8`dq`m#V+X(v^X{<=i~$U8>hS{yEh4X!ufaq1b)bHI48=M`t>I)xl1?cxVUP{ z-8FhRC)jW%X#}JE=?lW)cS0^G8#fNKjQ{z_58I-y(MHKW>St|j4)rNW5+(ZK0L${{ zs_29zd#<{A>Hv~aotibBP|(22PyAl5DC|43bum64SyD^?E77yMJkiHszpT$V6Cx+rF)ixCmM#3Y#^yWNlKPTkOWFf*V2W`a zL01gA6!r6KyAreh?zo~NSD%_7LLF9?j!pnC83L-{Qk2Wu=Um+a`zP&ah zBSVh&V8Iw8ph1aPN)ZlT3X`@QUWXV6H=A{H$RYSgwR_hh#ecu+irSt%)P)f=!(IzU zBEE4!-=$=2`LNw&pQ2ohkYr0sM)>5by~Sd-{7(i)BE_RQibKm!FYAEOARSnQ4!9!< zAX-WW7O@Mrbf(siezbD9!FQhJrlDA}8+02gf?lkBeqre|UqJqoD$3kECDFEj$C#YD z1rrVkx@$9Z=m;(dIi}0DVfs`bu>*TzCpH8F_Mr^Q@UX$D^3(zPp77`BGw zbE1yd;_yQ4WXx*feAEQOpkRMe@scbtMhQ!(KrR*9)?I6y#BeSi(W4|I1{_5&$v(Td zIs-_J20U6&G_p^?tF&hlxZFa&L#XevZOtxBIB=eE%S79gQw3C{*N09sQ3-d*6u|YL z&jkboc%xq z&7c7TRGL?*kJj2$eytf3_{me?!HcN)sPte`hkKNW!9>85P?w8V9~ftK2g{K?L7i}M z`w`a!9fQh1>qd(?`0M#lP|!uiy`D!=-!U%7wP-H&p$w*6%**@GxS%3PM5Oy3{QW6u z!AM28ihJLjtXl$(r~R@uo1uEU&7Jz}jHl5SxfsBL$kG5i}3j 
zsozn-*}ezsQ}T3uW9-k6L9g?OWsf5Q4#DYvEbX2+GXDORO<-q-k z90i!77z|rzTa%ji^oiliOd32Jm1;P!sq~W){*896B_^AVxo+6BX$;zNY8DrZXZv;r zc4h|SQ_3zB=&Tk7jJE?gI2u(&ahTwDj_K9E0Smc9xF$b?4)k^&nv zXpM0nv~8O9D8>5}y5ay4b3+JykH_ZEY5e4N5aolSw~L9cF@SPQ4K5Kw&7vON%6pq# z{w~J5?J%ei`UkWJm55v6M=o?v!eA!*FTT7y)daU9;+7^OWB!(i`NS7Zq$q!TeAuF#9^9=aacBF5czUCkC+=B3&tk zmI%e-waR^gGxmuB$2EB{woRTfv-Ss-XDmS)1{?nz(1Qre`hCt*7pg*nZ!!KGvLR(x z(d=G0d2*L_rMDZnrxOj4NWckG#XJ&cpd{Nh?Wa8x9C2n64aj7b1B_41WwTW-$#bBw zvgc8UL&9{M)4KeU`&g}qYl2de75ikzH`c69><9u!Z?^c=vCq>)#q{;-*R!d(K^4<{ zCxvdE;j>@W;};cU30tL_#|>84E|BT#Xi=3a284kD+W#>b^&OKcn-grZ8K0#Z>8{GA zpc6WZxCbWTghIq@&e6!9UG_F^iBQ{-gwBT=s~{x$Mah&)JN!!J?!YH0pq|*>XbMj_NO^ zik@@`$Sbdp{Z{JYwrLLys9|@%KBB6;Smz`NG(g3E!%XvN5yLV9sAv!9 zw#s4LxN&Kf<{p4XjOAh23?4jghPTy(8+DIvrfe3jA%AYH_wQSVNYP5Mw~tH~!{9RM ze?0|CB$%#yeKo`-e6l&<~di5Hw1AffT4SL1@n0$3Ck{=KvYgi@#yn_u4PW zQ{iGdZT+)H+lTrs+*G_U{QmYWd1Vh4PKDYXUG1(Md)IUBwfwt}gF;Jcet7?W6C6}Y zfRLNcX|J*ydsVGc#c5O?<1DkBuZoXoMQx{b^K@?v?4Q{ENW9$)(&;YqUGF{Nb<$T48obI%K4b* zVPE&bgI9pzBps@Nbh6KcjknQopw`=B?)5YTF4SuU0ds>0z5lmPKJ>17rNGAQ8xl z*I0s-!L3Ce{p9Q^ShdeD<=F9Ipk$6SW*ktzV8k|?%#5Tm2~~QSn_$2EyRC*ED+ z_@+2>XC4Ve1N5sx)xWP3fJ7WnW$yY4NG#{CRjcCaE(%K-)yx0I=9F49oG-dI|6E$y zY>bKb@@DgXf0~0?a7N^+-V1UJ7X<+h3o}mZr3JPHD>4TC!}}E4V*`wfh_>9OrA7Un z5Ai1wLB<*qC_l77u)qfn=TwQlvSa`p1%9Dw0>_r85~$h^l zQKChoUzrW(gi}&q^u{7V5KN3g&Jj%qFJH1m+PAGtdWv0P8D#^b2RWSa6s>DYX(^(L zIWiP+40Ar~D{vE;i@RwKhP(eZb*V4S>cVD-F01}Sw!o=UIBn^r+ zsV^?Js*p>$o)OEOZH+N?A2+pKPz<@=CdtJU zL}sg{+FQNR$tdK)Z0qTPnRkEPR>AKmyvL*{4{?V#K;yj1df}A@o#{f8!|8nUZ>7NN z(V02$b`YZ`6}8j92ZchF^UEU6y*KXMIlHusKq|s=!1gKB#yK-;lm~rjf*uM=&Jez=A-Ax_&BhJZp?_e5O%9mrKVtL|dLCI_2<_THrM>WV zulR7qz*ZGasiGkHd!wZ2*3mkkg23scmg&6E?b{ce`pi6tleKxY?f0M52C&X31`n&7 zMlb%Kzhx(Cu;$ncaqRw~7n|1}R!SYZ)H1lDb?uYzj3QPFjhq~3PPhN4{)hUYIet3M z9-h%J=T@lKgI&$&p*0(6wqR=vTo$J<`R^lK8ZC0`(1^jFi)V8pOuyfYeKE!HrToS{ zMF+xvNZYmJK1L9E1pBzSMsa|{S$FRq%m1M7TkN|rxE=>0#KcX=s#RHB3B4(t>je<1 zW^vY2{jFQgC1!%14tGx(X}6{G((jjGL5p?tfz`$Q*odzYhCk~c?ESrJ>$WF3Dhce= zfnVYCT|V#7|Ht6cXUqEeG2luDc5V1oCb6LIp}fN&H%(%1=ChrA&UW}8vk5x}K66lVq_>bQN!9_-t0M@1ug zVLIisV%gY`m6Jb}mWuu1H+CJo@i@b)7cLAh@208KVwkfS*lsU|=DS^hg?{@4G3Pe_= z%;kup2suqw)nS#~Qg_uox_`etG=6lVYdn?m_f<6#$?c71!4hkThwv(bFw40Hf)I)o zJ!Ku-vHAGFm&{BU2CkSwc_+$;g4E&Ioo4Xgrj~S%?%VB`MPl;I(#vsP)i7vf#XEDL z7!Ogxr#&Yb9Bi?>7Ec(Zv~J)C!^%xcO4R-R{`n^&;@7eVb%L%*LZFy?c{kIIkDq_J zR~6rfyjYB+fK!gAd+Ym{U>k%4gEh3ZCbPx8%7OMnM$fhkX#>f5B-@HKu#5gk8gz5> zoadV1wtkPBm0wh!1jJhwEC!qOCUafRQkiG~y<1J5{LJ_R3t*!XUHynzq0tH7AClJF z6LA!P{0Pf^FVdGUKj-=3-*@l$L3|e-)zL5i=Lc!6bCc*1O~RpxE|z5&>kyz@w{N@1 z4^E42`?2IaEwM^2U{t;oL9k^`CeHuG~M3gGXlWgg8m>t@; zBfy5Wlj}a`PSl_l^ZI9>$!#klWKr)R*IW4E&YUh$`ix*dlzuXYsda<=J#>`P7;z<2 z&W&Ez1jK@z56?%>BR!g^$+s6XUk8T_gO^s3l?Z~$eSD9TT{*GN(5{iPpUIkRGC~9im@Abi_{{r%x&AEfX zTLVmZ@@F4x+ysoEATqR=d3wb`{hLsftZvmR6)zEG`?hWO6_l_=RM~iWlHWvcQr&YP?UXK=H$Vy1a= zsY_GPl@eA6qo*vHd+0`ymKUwjKP|0J5z5X|VqrQ=RQO;>w8QGh=j%ybFKQ@JFYy-b zIJ$%l-8HdU%Aey~eMCV8EW70AJizUD47wL_CQ2T?Ai#4g8Qy?V40v(i+(crWQ-$iL zC|V9_)I5ImDp7n3@)xDiK;VcH1F#TFLtad>kf~$r6|8_|OJYlxJbF55WVJveu{?W) zb%fiU7Ufso$Iu~n#snlfp;d2FW6ChlyBwFXqBn%gxB+q%$7a6D9vyENw#xPN`W^T6 zw`}i@%a0r*t8$>O4SkDcaTh$H;Fkw$Hg4V=EeEmD!J9j@^=@0yDk)N2hY#OSQXJ>C zPfm3d>5PWsdD~|)!U`5>9^XqlZQ0WMb*$d4r}5u|Nncs)(_uU?@${rVp0meB-<{b1 z#6VSa-^HuK;Tl4cyf0%m+yw_>9S?1e$sgE)!HCx0X4d`u;04f^6#*sz$h~$>kn#Mr zEv#Z8I+|9vdlI6x0|CCG<{W>eci)c% z&En1L27K8|2ua7xJG0{Zerqi*ki&+#K(-M@6SDrS)ik@oXbtnJ76=P$D1D{XWBczC zGbUnc7+OmTl!h%^CLtk><$w!~)LBxtJJ&B?(!tv{!woV*7R%3LOJyw$kBCvI{k-6T z1kqU&*Wd1s4prvhcqBqGX1hm=fr1d}%%vGW>y1~^R=i1BIm5p{$31eArEqx&j)*3k 
zGBX#Zy`hc~g{lDm`ww$D{DW3NI6kIO!qeia%LeR;_N?i(+@TGTS2!tTiK3f9^Y``M znwKwx4hO#}Bm#=_3g-b%o@k-pokh{mEOh?N{6%77rh@!Ut}4e8d*h1*-oA^kfhjv3 z@X;RygbeVn$*p}mcEh?JdidCN)w`d?wdN3gfvn8IkV$h+HhTyXE?!&)T)b;jp^!|h!JBd9pKR>dtu+UH> ze=~XI-1Qu;P##O8FaKAx1%Rn*z9x117}c*|Kcxx{M%u$(p0l9SVh>nqCoQSC67NiA z>}@rHWhE|9@bLvB386+R#K^-L-VdFR3GJw{5u*;boEG>$zaoluI!w7YmQuV>V;vf) zbgl%nm&nk3I7%3TJZXO~TsK!WHXDILm|aN)IO4`Sg@M<&VtqhV+_ynVV2+2ko><8d zKa?g-q5&qT{-=Nu%T#tU2bK#K3X>4R+yb#l)FL=^x{lp%LHN$!rEUHF`iiR1I#|8G z=>eW0puQ9W?5^0qkID!gbz|r&PFD&ld3ERZbBE!pz6%he|A11A!!|=D+A7jGHNY*C z#2J2mGA%lrLuy>d?*6NE@%NT4e8TkUNrV@=^)Z&Xsv{icf7FJ1keN~BU7#F7i=`2- zO!5MSV}82tO-=%BD&2`7di%ho}V?mrs^4R0K~`-Rt|FULPVF8ZlPF zH(7_DQwYtuqYhIRBEpcWf2q0Dx2@Op+$|@Tp*lt!GH|1w{Enz?J7kDfrs+8!oDpK8 zgV^&6z^lH34CoyPJ-0J5iYmw_(2JcObWL>O!SU8aX%q{>uTWy9AOnKK9zA0XH1mRm z3qx5IjL+6zr0+r6O{ViL#bAh**f*0tQ*2lG^r9<0{^75-KfSth?6jd=YToz8Gw4;% zdu6M(Cl@bTq*O5)l9&7nZMP>#6je#K1)(B zea>)BbZV+j1kRchWeC*2+d1q$_X4YW6G1hRVjxt+)bW4mbTP0(!mbjMfpIto_O(_S z8yh90ND$<99@D0~EV@ zzDevV*yF$mc-qC5L~RlIgpOD?GAg7zccvQ5fv}J!Sukjpi;)V_Ky*7DTmEEz{#xCm zL&)nXB64cndyB=ar>k+3HM7c~WRr)LnD5_jMttT0_WR{yJDTRE;k^B3I zTL3Z4t#AzOV%VNN9(JL%4dOP`{o>Fj4Wy+TnC{BAK%{_x-M)?IUms+by=a2w8dNu8SM)I@~$A{TwI}v1G4>1qr|pqQ-3{ z%iqrY2BqF))c3HzqtgrJTpOE}yx@Ici%rxONN}_RzfTCDT5ij!b8cak4SrWvIf9C! z)A&W^3h{On=|uD99p=pWV3J5t*y(%!Tyh>f0`|m|H6sP@LAAyHdUW_8F3(Bz8S8}(R%*RNboc23Kl}u;m z7g@jb99%8%>7}MB2dwd{ozR!$1^bCA#ugTz(sNOLIRRa?pEDH+M|yE$M(mz0+L`_X zeFMnKHX00FC!c;_aWSJCS`S?`2YQ{2D|22*%X#!`QuXSJ-~i6<7yt|(5`HK2>W3B< zGy4-4rN9^BeGT2KmL?~Q^1d5}XF7*7GH;O#W1)Rm{olvzMlCI}0|tMDbNZ>p+&(b8 zVeW4k`XmR)NMo}{QxPleeD9z5(>pIQ zTwcDIoJkB6S)z2DG~b6!>!fTZ>LP*?GcJ_|S62}wM%ji@Yz?F0J9y;C@fW3Z3jZ$F zuZD>2z*a@KChyD<%?s7M7i|eG5IT}aVlm?uw%qmN`nq2r-z4=dEyntb4(0g6QmT>G z6>1;*a|9tT`fVA6M~1kUB{eO}DTWuH5EPAZ6nFHFeVQY}E|?D=Be< z;(^P&e3syPpD7n#rCJ0USxLqj6#8=!~Ez*A5-4!+n8 z^Y6nkDe^p)geO$2`(Oi(>eT!5$=zuy`q2ouR5MbSe5Cs9YJ5` zvD$~S_d9R9eAu)`N^q-}_OTAGuH-F2zS^TKqfIrRH(!4%-O?xweJJO6T4ADBqB^1+ zyOrMI=K2S%=7JLld?9)YsuY%b@>(EP>7sC!+eaBw2Nir%cl{d=4u}A5JNjz_P0eY` z{{=!3a|OABX494WClV6a0O8KjxT%t+A&^S1U>yjgTy0$ud}vL89~6I_5La0)2~ za%g-h3zAR#L4})>;*UYxL@1b>(G_@JK^3Y$#H;O)#oW5(>^qmEX5xrO=NSm)G%a+p z-cadc54zZUye+jGz^l}5N{o&NNJ}5Ly%;Kfv)a-aK3nZU@e zEOpjPBF80rcbWa%o>eJC0v!g@qtC|iAovHwREBx&t)>QDCk`I=8jYb!#$n71g2xu2 z8YLg>Vg1i0JpZ@6moF5ZOp14?In5iVcqDMl4W*h!nJ_x-V76kkMi2p#0Jv_>;LK=4 z7OTgE2}%_U@pzFr)e`%{F%8C7_vcs64N{55S$To;jL3E)L|nOhKn+zkZ$s?&FwwEr zXgR(^AiHm0p7j>?6zM95fPhl-1Y4D={gH}HsFa~O zO{Ca$@LXKbv8y>zSuKTv&X$HTUh)zVT;?K`G$W#hATw*F|EH!ypuFjJ0pV=?UO&Kceb)RX;e5Ys5AswLbTbj zkNuFvh_n$_uNAx$<3M6m{P$KEZEphhz|e}1t}T69pu2P4UsY?F4h-kI!|vb@6z|o- zw?Ye`(lDO#$vd`oN;pM%7H6mQ?AdB$dN)-ucnf9;zbJzUa>3_-IkY6%VNhoaJJ}uFT zdVliEAMa8n7a7+|X>P?ih`K-80Oac<=suJxz@EI6bJ$eoaGVTvxKOK>S=Jo~R#aRO zxqoaK;_~9dLL>G|%wBK%uxqvTYf0gE<2zjP|DmbBv((YuWX?<+0ss|%u2bg-dfH0= zVFzoZ>xKg7u^l%3(R|7GTM6E_sa30wf=(ctKyWdDih5u{1&k4F(mB#_)GPyu~g**iFJHh354 zS|q_P1WJ^T)FoZVjjT3?bD8Ai0ZYsLfR4-~2|NrK20X0h{8spDgb3pIDXKVTz^>M5 zt-nJXsXA$`U65D<-^gr{h~|I&^@QbQP#wv8nBuPhQ^ zB4PHW+r;5@#Q3|^l?>q! 
zRVhlubO!Xrc>*!K7}P;7=tQ>kJNywrJ~kq`CFG+#c_^#|$+9Vs5d{JIZ(AQpfV9y-;LJuxo&_Q=fI%G1`gvdg9(IV5CSmtzFmt+f zp$@5>F#?Vtgyg-bd}0!5Icv1f;(23M=$G34ATZlvTT`5@ul)RoR!M|fk*l^$1nV@u za}JY9FjLL!H$QHRZYeokU8GcryD%{U-im@vE%QGW^vSQx9SiR;@#V|1mOAfwoN8cUh zeh*Qes_|mAwhl!k6A@cEnM3KthEK=l4PCZlVFCE{l+!Efm9j95RP3zZrViIXy^(1d z8E(f-eOArPq*ZKNIANWiwoa|p`5$otyhb}C6Vj6Ckw8busg1P85SOS$jFA!J40<@R zX?MQ}^8fh%V9{S`ttosEv<{neC@tGU@+?q@E&#fWGT3;q!GqLKO&cvjzT<<4UkHD( zQsAtNc8t}sKpH-^9GfMH z89FIta zVTvU>`bpD_uV6_sCkf7ttE#L^^1KiAX_$K$BcUK(7eT_wMfNyX%0w#X0XaQ;q)(6C zG7o;JcJ}PqF&|p+R9IdiN-;~cBw6{LYI~AH2$f}HAAPzgEl4Z#pO zyVyISPD);_rDDmL`@RLKJD`m8p`QxT9uN9v5VyY>eor<*WyNE4BBWaXaHwIl$Uejc zMttX>@qhLaIk8;32#_#XPsqC$P~5TXiCNHfk9AgiNd;j(SIkt1;RMR&G}YZy#kr-n zX{dbhqI6|`zpVEiMoXmy>pM&je-!kTr4%z@h9}dZeXTy;U!3Dqujpsg38&$0=&H9; z+}682_7EFbOl%t7{fHsM{7kl)CX#4kBD#%_RB#jqtwaOG$XWYt?-6q9Y5+!=Kre30 z^gy&FqEk;O67Y#lpPhS2Y^x?_+s#)PQ+q%`qsE+mwCeA-~=4Lr1-E2_wMUmzcmoMjfYTd{)jHP5#Ncp<|tcci=@u0B>&Za7n@W+oj zBteDJTA)RZBoJnoM!(%b7~d2g-YVSnA{Jo*#R*50D(UqXFSbs5!#O`H@vw<1Mi9s| z_;D->JFBKUQB>Yi6Z2@b9U%{p*;m&Y1)fkq@Ho;M=cKEB+`=5p7z%NwC_}?HZZq%= zsD|tknq=|t&^PMjBOxm&E(De&bMS_y7i-41?$&(KamKO)v>v*nXH4Fh`F5NFw^jWD z4-kRe66akd70yf;vAQ#zFm1|EwqJnAqgU^Xj2!F`TI+*Z<_@S=VdiD7-Yf(8>`=)A z)WtS7KYv@S8%V7wBQ?dP-=b1yAmrY!uCAEoKm#VOAFQeoo~xcRD2}g0c4;O9XEeBx zER6cg@~pYQ5BMuckmjh_7*2ZhZddql=a8n{wp!vVSgMSz=VUm8W!y6k|o= z{S5L0I@kq_Gp~PIw9&NgRl*|CfXi4JV?6d$R|MxSGamLQb&r5+JpQ!Iv~dfUp#KD7=Qn0rS8ql|dgaoSB0anrBIQJ@Lj%dnHvlLG!< zT+kttapiiEzG{_i+OfNy`!2@0f>HZeJ-8F83bA>{0o1@hV?X_=Ou(mBWUsMl<672SNnK;+1<#Y03Z?AioS?A*PKQXz6g`ea(5F zCj#)9BTFHDomlZQ={7&z1&c0l38{aX*va>N7Pj;jvaHaxVI|!@-y30!xHQB`y-jma zO=cx(q?PI)r=lq4V2gX~!K8l;=cAe{rJHWY;}PvVoMrZz_gQ$T#+Ce9NWjf1sd?`5 z<@I%YB5gn~aE}ox3{c$42_zVaA|ij~>=vb%&+!fV!>GXzB~>tI^WUXI)V<0L~nR(z#kXl6eyy z5CouCDRg(&*v8d;QMrk(9&Ar{FbWjhP}@B=8QL7shA-PW4Y`le??}`8`ks9l4%()yB2={77KW=wDCL8bML>G zR2L{-pMi_z*dw4)af<@Hak*KHS#vhi$6|&iQ;3vtg7s4VsG$Wog(S!62?zCo* zMl#Dx^^N|``0^uDYJHS|@82hOYJ|yN%)i0SyX_08wqa!dBymgUbU)doNq zmu^uAnB5-#fZ_)|f<`I6|4n+Mn2TIhc4~NU?W(#9sHtf1Ganv}ZC2D9L4p9VaxyxY zBytkbeB|~HkG=>Gh1z2`!fZPcUeJ!q!3b1=|EBM#=wy?qtbRcSgw{e7hggfO=dr7g z(!NU@4ywmM^=|}spXsk{rQg$BOgM8n0efF^_VEW+)Z_w~a^~T?t+|xLH%j&SH?=Q_ zZw9b%5_>@WbMiChB#CDvwCE?LTo8HHaA3`|s1m~q!j$M-1t4!PKW z5OzqMzJ(AZ0TF4pTz)!p7R;wwf(hkY!`!w;MuGu4`t zbm%{?(9CKlW*BwtQGMEhkrXSv+hct|d(F-aOKGW}{8cD&PVTR}pcG>a%+J_M0@#Nd zWFcf{aVC3aH7Yiz>=!pSqF3yH-0n|=Wb6obbHWU==r~v%?tW(IRa%YGr`x6rOgpN> zrkNfz=qp#R7JHG$^hvGF_xN{;Z2OV!hVw-P^Os`Gq}wKjA;=vz8j)Bs>LACHPzi-7 z%ZPHFb}RKeUVT%@Av!WC<~CfI7~5$F`VgdR1Ms1Vugi7Tc>v2kTdj@ek?4t(oJkq|_f52+W-6(0 z1-hqz5Vt-cUYuHbRtRkrDh&!b`YWY#;OqCuGoA4>SBf?cWYmxIjYaRxUnfx17_%(M z8=|Rxa|ZPnI=1)Es(Ox}yM>_=v4Yk_Li9x}XyxypYk%~GvVwTWsYjTI>@$J)znPFD z#b;?gyDd38@Bu(DMBr+j5Q`GCTjDsaF&b*SS{NHL= zY+5VD4AI^Ap7^tCFe3;&vrs2xgyKn%XpA892)<8pcH-z)4MLfJ9i0Br%Z|S@jyL&y zdatmI{|P6sr3?lCjl7sKcw>0|53gURJ{;pY>tgN451&pRTjC_1d*JDKVIIYGP8{V2uB7UuIr>y7A@mZ9CC= zD>L7mHIQIU>KZfur8(6zw($5GvAm-G{z5A_SYQp~g}d66e_U{BT>R(Lv~9@mpU(84 zm(@h_OG1S?zuJJ^p~T7hKg}p^aH`*HuCa`>wglm(IDO1eo&Z(ML1S+VFM&dd1Md$e z>Z53|?H!7LdACY0M)75goWW}o89h7%uq9^_%if0R>c6xEZ!ay)(>@A0Nc@~;v5%hS zoj89P!=gm!JolUeR*Q{B{5~eC^bev)&Rr7ahi(vxy+^M1A_)L9hoPqjXp2H6%DL-~ zBDQ4A4e{eXl0Mqnk(Fot^wUp>2DdanDB$67SK15W!eLYWwfE`x6!;1bezc&s(0u9# zfBz_~-nX5ZJ=Vpg4}I7BYpy`g1^TtDM!S=&jyt5m1JQROmw8M?l)iiY*UaB%+VyNAQ0q1bx=LdAZ_hbWuQL1qn`HZ5G8I;Cy?fa(=? 
z6VHC5o@Ive2!WAq-VzzZu&&z@E}O&gNa_{RIOM|P$jB*X+Z`5R2wF&ZB$F-|lMxxH z!s7#OMc%6zNiZfbxxXGSxKEA{#0?o3rvko+<{L`8$c=S(f0g&S2K6>(pj(!HF{E8N ztt^{d=VmzF*H;V5uHCKZ)}-e9JXW@7DBhtT?9s(Up$K1U?Z2p-;o{#^^p$dYYrzY% z7j3AwY12ICoH!1Qa$?TpMQ$lJs;QHVbQ+5dj%e2<|v$Zrw__X3Fu@~_4Ow>wrK1J^c>p?k-r=H`}ql}Mu%Q*rwM>TdnGVn+aeH(X;KbdC%EUC=E66{0%WC{5K#-^()dRaIri8`D#mNK{ zPSjLY0-8!eMhVDAhR+yWy_ZZ8Pz+~VBv~LurD@ulpzClV;-(lU&o3!b=cd`sDk?5^ z9yKZk_^Nvs14VLMGM$VE5}GjrRjQ!dNJQ7gmq7YjmKxRA%oz^Jjcd0*IcQBYBb6L` zt1+^Dx11;!?<43KC^7?BvRT)z(1G!NGB`e@m1^dk-k;YM?Yrvb>pOd78^x4$B9-$v zTT{Sr8Q|tS_40{j%c=q$(_;vMC>l%#4(G1$Yy50|{i6b{v+zXkScW0A6YvLiPWRA& zN1ow4%po|4%#t^%^aAPc+-%MFYZ#Z}>g?!vQTKg)+xy$0oCAa;XBU^9Z%%I3 z%tl_P!c{oW;$`!ng0Cb3XQeZ7q~>%|vGD{96^)6vkB?`Hs|*r}hbPmQlyI_|uojB* zIHhX-zFQPC0zy^GS;jKx;qD$TO$NGy!<&6NHJN?Gu3KRc{@0(f3?v|2i^9i9j=QMT$F6-50)87<%Z#8JCT*psqF&1nj*02%fq)R{)--gRd}jZ_TolZV_IYkv~ysveRuGR(Zr+n`hy9yM(RLg_WezT%P1e zu-+E5OP3?BH5E7;Qsv{Q4;-WYVDWkzT{UdIPK(sQKc79z3I@(ibTN@(k*|mY_~l0KABgV4@P)jL{Ha6!an;Z zAG)QzdGmQu6NO^-NcL*Nh&buN=U2Lmq0MC+ZYCZ2w7pm2xXxRe*DIGxNj!0McfT_) zAto{L@bSdx=qX*FzItUkefmGEG_@4LS1z_PnoF4sB(79ZLuCU-U3{Y}(Sl8|5Fah2pukJe1pL$d&IXv9hpHsCQ4_yZ34AdFYnRXan+L zC{@f6iiq4@rcO-=;G_ed9L=T>YGPr&df=Y8)~5#WabcmMDlu3XQ2w_1n8?Wm$VYdo z$8g-eRHG`^%;mHAb2sPe^ABy^+Dy*fp*6NA;m6G)j%{6t-)ua%q@0(v@qzn#Yn#{b zXM;63KN`*`=in`+=p~%RrKh_NTHE|OHK)jnyXHk=pJ@9UDb$^&zgLR56B{3|6)2c( zdDsG@7+Z65^Y=-%Ha71l$cn{;P%MrE|GIgmdt{ZCD5(j4vW2-UUZ2Io+q%qrZC6d0 z2Hj~jXn4e_fjQ|!5Wm^X(VIZe=DblvxTC_jagQOg`IUNMnk4QwGXAPoi`JtA8Z|mG zaVYr_?o&%@71^nj*rZFr8!01V#0#|Nsr@x(oWC1{^h@9aYMY)By2TcJgA<_(1)68( z{R#RxA!n}!eLaMcj_J0`PmAU%u1yn%DRKiMof{%Y=P;YW_`=$wLrhG4__N4Up^xeI zzMj$ri2L6R7(@}Vq*ySi)tnj0K^mOv>deweTw7J~(gF3P(hKya+3yqj$IX(tAn~d~ z0-m2a{|GT%sp1f7!!0j{Go2)&F(6dI}SbqqAo;* zhZH@V19utybT_P}7<~mbsSsvl5@cNgxrPM@-*js6Mjl99HLhmouUl9|R#~@V#kJ#K zOUwop=)^_AL**DS-7qFXyJFsn$G+bbgF82=Da;|@s0@_RkcH2fM*ogRSgC@C_xgQ| zqhsqrty# z{=!5lB1-EXJ_XqXk8gJ2GFA|lugoR$mU$I8VXh7LUTV`cB%7&MFH z^phNml$4a%goKo!OT8%~p-$!KuGwcAxCdXk!}Ks3_7FRlP98@HEZ^c(=x32QD6T% zOy8oEAb^B{s7Q6qU80SSWnX8<+*ynHAqw&9L}}mNc{EDfw~%yb_{G{r6N#e8321tQ za(??>bSvV2p%JP>!mV^)*u=yjYIuFUysq9iF~wD^U*=b8!3d=Y{;*1BXIVT-r{dyi zdqK{_pF5Hp#G(r!%E#5u?*5<%c5MFdy9*p29CPg07;0=8VsTr2+xPIKu~*Tbio{(G z-4WDh?FwBZ9GoTNz9b}#s04H-7S1m2Ig=`xs)!uaX4iQ%V!0|KZqNm&7()J-P`OXW zeyCaTWtPI4q9^WAL}i*2BD1M(o?RNWylwt-pFMt@JaU%I(SU(tBlgp`db(_68AVTP zs}UNF>E)(to_+K9B(wmPn@apFO(*qKOvw}ZF4rrRbO7~-Zp3}^S=_Ruc-d`l=FybD)3R_`_O{Q8~hoea6yYAD|M&z}#?s?=wY zqoboJedPVlskJ=R2 zClRp^L;e3g3Gsr6K8i^1U@%#NJEwGC+thuVb&J)vk}DrA>{ejM$Y7-7j@G(m7h*>|-xfbDBu}R@s7M;>pgfY8qNPF`qkP;J{_X ztQ4G(<+Es{K?{jx;)=;OY=|%$o-Ez6^XN;hdYts|@OXmonzYLbyA-&P1MQ#(y{AmM z1~=o|+u#c!F#x*Vh~q3Td_7>Qi^`TElt(HDrkAnKFpA3wxclKI8CRS+RSsOC8L#Jg zCt57Q>curs<_;K|kTat`&!tk6{vV;3s~pEH#sX*04s6_JnTf^3wyA(Ofp1ogm9u(U zE_nSX0}c-y?_Ij%F4A=AGbK?oIsiKD{r=ZW*agVcrM}$@ww3vPLZ_^LXjoBe z%UMD|12gImX#8k>eWp3)J#D%WE!O+DC%5(FJj2-7*lb+ZII}rFUcso}F8P5$1N2dY zQdTMnVd})e}MyH@$j{};p}O(O-`C+yy)!if6KFDqCw57!b@`I%nn&4 z#AjKhz>!*lW%GtQtRUF`B&RM?YNb&2zk>Q&#S6`d2+r8@VxU0V8GH2TXr1JN8Xu4UzsDPWyd{;c^|G2Xa*pV}>4YbQ z)VsK`Wi||Gq{z&*bY?^n&@p>}Giq}F#R$%mPLfyULWL`bstVxosB)j>^(p1ips^rE zaWKJgX%q30aXGiBL0WwIz;s01A5CgfATFU*cu>?%ajmfFnt`8prcwG%4g9xic#R6X z#9-4Xic>khHqPUO3N$io(%>dMgPuel6i@d23Rd^^xlP9J$cg9=6p6J)T>tOAm$ ziRfD0H#hNHOZImjvBT-!o2wa?HepWlx(rnh`ue1(Nym2_m6B8MT3;fWnTg2_w9HH} zU`HmNGX~RA`%05|CI1aEp?|rbO8!#T#}s}Z}k@LQ@ zSwihjT`z2U$4YBbI^N~nbB(WNX!5E0%~S2^MmdyM&ak2s5kUZ<%K!8c-K|BOqWR4A zNy^+y1=97I*gXM30K}jcAI@H2JIgGfEDc)FnGO@cLc0>;bs-Src>D5!{MX-Tw~K^R zMjyvclL-(+x|H6`uh?<9a`|%N-@${OrN%&th$lKf<_V`)OyV5)2cY|TxOAj&WR}#@!j}kH%h3Sz 
zV=@UZU5Ugptp z4uaY9M!=zb>}5s6c4dngds&LH%uTYF zLD|Vt8ZC&HF@_mSaa%&A5DlpYH4!OeXiOrll9UN;NZJgg-|M{Y%=dYIzvK7c^T%@^ z-{X5Ux$mp%zCNGNxxCN!`95*`l2rTpzIc5{Nj$Oz07;N6wS}6t3BfN|1tvp?06h#< zQQh-=tHW&_M@;p($#w46%>Ms$Tk3m7evNXhmhavqvt++c;v8jFF42?o96dbzhi%7jO#nWq5w<; zBFps6#Cc->``<#)Ku-e>!>b~!j$-S>e2Cy4u?+-+VAuea{E9FUwDXfv926t z-axTm!2{$_@Nkw5wrhL+J6#9Ih;M5ZeZ}2iiH%W8$8G`ahzGD*e_Ma$7K=kPg1~Bd z{`3zeann_nN3zK;hCi}T+tKQzLx9d4$2Q<#QZ6}(hVdn1c=CQn@29FpKpv-kE1mJ= zN7JC2wSPR^fp3oC4fU*}7)CH&l0NCGmDxd8uU2&r96M>8uGnNca+alK5QVlroD?#c zvuDoCARSWMCIQ|F3PL>_DdsZc8`U4nIMasXbszNDFSH%9r~Rxh+}`_zzEb}~&YMKf z);XHO1tbj#--({2@_}7WXn($rlAqXHH>cO0Va**S2?4JPIg~>_l_<6>D#}=+y>ByH zsIt)MCDp>|Xafl>Qq$9=J=Q#-sS+wu)f!?hAutoxF(K^&F4WMXpQCahM;?k2-V*?6`DLE4JK)_r-VYXsV+ipJ%d;UCXU}IxR1m*Sc`_oo?AitAj zS^78t{P)%vyH#CcsOOD~bxb!mYu0R$N!zNt135Hsr2>L5jl|{riQ|5`W*Q@s{JaoB3+X2PT0UL!`)32HUThApShU2^?#0L3dkts8vG82xMI%?wD)2LPJ*;NuQ6!(u{!8&xz4 z-Q&TYNTH2qi%Mzz&s~QEW!>3AH|48WNqKpMiBdX_Emwes!~s13h6TIla<*63?gq!M zoCVugt%#ozzU!$=WfUbKc}Xwb&oAfBnR5`h22(3V8i=OBP6O% zwx);8J%1u2oopzG70Br~;@@tp#8u~OhnThf2{sqy-FwCZ=mW&X9MnTa=+}(HAZ%Y> zsDsm*uB)!)=z-%(Pz$V52G$sl>h$(6?WSW)?_Ih@el+DZiLbFiUku16amUr0&e6l7 zN@u>9Ke8f%2Tcvt7Lk?)C*u*FsI@^qlbZBIxyYotV49Hi39QO!GAYDIO(#1EhLWCG z@bOOSQhj&H`M#lJrfno6DyA@gB3%?PrE^270wp|+As?THbj6J>`J<(<^@--BfxDWS zXEOdX_#{{D^YN-n;2cE(^M)gMDeP&GNtwzbGn+_;EaSM{uN^ixsb5U7|BEhdVKz$K zL@FnYbl{2Sg|aVf{@w6+hx-!43H5=aMl#^wM6ua;h_mjYCN`0l9@#opkP7i0&jslq5M16-+S35beWsN&}eyv*X*xMXsfvmxu zfy*SHhdkzsXIkq&YvIB$;l5&HfoBft>4YpTQNWvnTM7QVo|1U{<8u*6S_*j-TOYh| z0qrr2E}NM}?Fy$PB;#^Y=b=8Reg%+Jh%jFif-O!N%e{@hf`ZejWoi5M!X^wsJr$RnSw|N?JnuAWH-iD^Vcq5fO!<^5A_H8kq5?wP(gxkDaOp zny~eRu^KtzEchCu8z<8Ugm`N^D>BoQI^6%5l=Ez--tajy5 z@B8>A^&pQN5{-l+$+6&n=xJ!wmR6R+!a^y*1AUx4ITTQ|O=UY6L!!?G38u}YvmDYx zyI1PSBwd0mNvi5$9?JWo!3jpLcUDJ`s#SIGSzie#t+np48Za7t4RB|OXL?%N!-f8!U_(8>l77V$Vc!>_?KJptFCJLZnVDJ1$)*3-C0PNTOpIzz$RU@CNHy&I zXXv?7TFl3p^?!Ar#g#4`ZA!4^fVdr4*&jZhg#6t^9mh7i{d}Rx%F84*tY>jyqyA>l z4bgv_Z5*1MJd)%3? 
zlPzu%k~*Ql0Km@p#;XXXE`2ni7epPBI_EL$myeam(SRID<})lkFE7skFzf$uF};|) zlV-xC=r#>5>7kD!^u_z4wo8W5HV{)qQsOLl6}a^%GC)c~qRk(1YsFnlaG@%fQC~?|R z?ICEhIu}XCn=^Z2Zq3V+`RQT)c9))fA{+*%EMy0JapIubi{Q|@Vb61O9qv2SHQhV} zX(+4>aI7kNZ{q&JC~S|XXPRh&MP@gBQo^lvRc8da&8Lvp#H_JmnwO+M`L*!(rN0#c z%!@@p3@iIz|FS~|i5Uc8&<0XSSw(eAm|Ni>?jlz~v=S2P3LIXJM+4_cO8~<>s!*P@irs=Zw$g~k9t;39G_1&E z9qcbbgqK=48Pa{cN|;EcGGtQF>vPb-2&6omZ4rK0y>{&Z$JIAd{^>Ai*-yt@3j2UO zyxEGjMPjPs&x5a@JaGcW>0p|Lr09Z$tuzXep%XGRk}XBbCv)>Ce9waAB^ZccR5XS1 zMyrcXf_yfMM^uyGfbv@kuaOU%alN=`?|pIcmICHO`5rm*=`I;l3% z1RB^T&tm(wZTXCWJ*ZM`6K;v(3#l%e;g*QQu^l1X%IH$9)U9%_4(5o54`+v^`hO_v zC72GSMV0xq>~;$@TQ3=tI$*o@w5S^Aij~l&ocPmjeZtW6Z?vGoN3GVqbD`X08=869 zq1Vbwbucm4lapt`l1=}*>1ZjJ(to7(a|n%0uufuH_TG<9JnjK0@XGWxC@+L~GSOhN z7F)OMV#bT6n?+4CaPXM77gljRXMy8^2pXAl=515^L zUkS)M6A~ijcamr698w+p=@ky_WKy>vR+gtLb9pJ;ROA}hQDQS@Y>hllb}xE44+=LX zmlJqH&Ukol3B1I04u0T9#6WYbtfopDXDr7c%l^g}g0jjE=8Dn8o}r!%l$Vn!HjMe_ zX-i0O?}xg(yJxMB?=KUE^a?l}!lUcyld!^nKcjaRx^q^~uFJl5_lqWqX~RFh>ut+u zTk_QJdxRKOyCg3tNU1k8bKZt`4+@GtSOK8H`?~k8sT%oJ^hVg%Yjnk7P!{LwP2FHg z2^kAI@tliw7idL%ioP;`k(H)L$)~k+z*BOoxC~ zL=uWk(wQE^+77UO^uqTwdsS?=(1S%zl*)y0Jnh?XJ7`wSK*O$jcd>iA<7gTUGU{oL zy2Y+>F?}90vijYT$uhTF$mDqklC!h@bRXn{F?v)5AGA^jHpBqGoT(ykn_ zar%n=kdF}P&E4%2#_iTTvA;EJH+{NEXG$N=yGVcaiqfOP`qWZ>{%u&gs2pGk+Umo~ zHQk&}{6bViEno3geLwCm!Wep&zXc(>HCY|Wtl^SzY}hU=^kST%DkR0Fq%VQ0Z^d~& zFXO+ofl!yy6VfERQXA}&lZBgvL4AK1*N7GEtr29%_qA00I_41~h-NY291*(=Zo?^@ z%kYJTWCLW1#?=A%FX-D5PfA}V9NH_rjg(6hmo7aSIP1!_`3~=bc29H9N*i@dzkyFvzYfZHM~YQ4 z98cUR;U`~g4!GSlDuWj8BV%+nIG+{rmhL_nm3_%oi9Dr@3v$)EUAz3l!{PAdETX|i z#4G#zB&03r$IfObyd_==^WRG&gT2W=ct8YFLDW5X+=UVyL&>^X)3#3jprgaA@x1vv z4ML|={dl<);7`Q;i9dD5izIFNkl#k#F2j=T)5GmO$g8J#q&XdFQH8HQ1v4e-xi4*; z5trKVW!PpECah;-4`KBNgVFS*p;OdhtGgYpxr>AFcYAlndegJkymKmqaAEcfP1Yy7 zY9hWUx_2?(DYi8Q;za{uajKK)8#Y%}sidhHgoB}EhpBYp792?u`ofO8=0*rqs|3m8 zGsvQy+Hg}BC1ikoPig#B;Ns+mJT}LfBJG}^L;@hW&D>DRxh3!aMGER z(*%b>kZG)T$m*2dme-K5G2Ui4YikUsS=*~=ylL9#@{~ydKDWSJ;=)1bKQXZa>{_fN_ z4`FkVrd$;M1w!}l{ey^bc*(5ew$ezL&K6pg-3_;R_guV+KgHhsj?$h++HHcbL?TF- z3Ll6V>q5rYaWz(e1o#jthk&i8$pUvQ|HPx>k3|D*P*xT-N@3K2WzaggnwHOm&g(a2 zIi;_na3QTZB9w+>MR2w^9e-pCdttJDmQD26si6w@+>I~ja>Jx0$(jUp=NjyRLm)nc zg7mJV%@yOVuq60&xY0_8GSXldIIHR1r`-T&2l`7@cN;{?E7V*dgk6n5C6hy)b^_nuflAYrOu4B@yNsWU*dE7i2wUX*xbghYH?T;A5S%(cTP zN2vUnWkcS?oKoz^Nb>>+OK%1o={~1}XY*{4S1?%_?=V!C(`BV(gvh@sDbC%v7Zj@!9-v z6%2nUyl|K%my9QR3)cwTV-`ubGldgsDA2nh_s!n7Zy)&Sq3erm9BhdIrIlKfL@eEx z6<#?WikNZry<7N6vgemuU+pJuXE*O^{TU(X9`V581-FAfOo6g7C<>m-Wbb8X!e-8$ zYhoE^GY86ws<(OJLXz7{G83)L=$S6XO!QvvUAhLELua119*YyVy@7{wcSPntyc6Qt z+_?`LWUU0*k!AJmA?RQtB_5XsP3HA3Jym1xbilA6Zrb)KXV*LJZbLXLx=4_3!j0W% zC6e}pD7a=r=_uY|z!TEqP^o0*C49}NKnTnbu zO1FTV;`;=j5%_GwUId2<2WQecRHBJ^& zCF_VE(1yeDLh9(RuQiC5kH4jX^}nifPN* ztHaiOc&}~^;Pc&0erSA3WP*}2`f6`FDfk#Ci%`no=FOAt?i&NCJ0}?~wI`{WJwS7el zw?8=njv)1Tf)Qc7`F850cr^K0Eia8&2h`$1r`v#y(f4gqpr068cUAJc;?<#(32kz+ zmU6ySZo%?;(|Zo=Ojr~Kl8^uBNfv>nlOZQMZ4MbqFKuuHqB13neZIcr3OmI0U(ikl zBqIr_NkaTWdXL@*EruFahAa)k>1PlDP*D>RQbr0cUg%CeJil$s+aImBluQsX5gNY~ zj{Z~8>SaV>O3HlW=uBVh+;KKl476WQ0X%W43NFMOuCeeK-k3mbX!-&e&6l5GJ8gBy zuQCGBtDp7gI*&>%A@s{w7#VS6WZ%A%wXvcG+;dcnSVacVD7@rgB zSx+(N*>#h4a9gKGh;w~-c<)~E?n)!qb7#*Mc0UEw>7W+5Jfdl{JZEff7dfF(+tNrKO2e&C?!+k8g`|6<@tHxW@4%+0h$$pD@OhDeg zxRl&`vT?*oGMeh{|3UPK)-*@&RvGr|r_TSNiBavxnK@G{5E`dV%oq(*^sqr_1_>c+ z_(n^PQ2IOI%h-~lC2~9!Ux=99It*Oof&46^1JBVxFRUg4USQz;da`YaK}7TAG2%zU z=)*1Hgx|Tp%-Uq1QvRf-B)wNJBj=VE8rL6p`RU$fC$`_%HYGTqdlQ>af9c(M^lZm- z;TD;9%Z4uvYoFP^eelhl6PnMPKY#M6Hq*zNH=o{YYvb>GcH6eMo>s|7i1x{vEP|`|ZaWsdc1kr=nbXrd3JZVO|GIZMf`98o6VYB-w 
z`Ew{)Nfu^Z(IGhXZ1I-7k&@EDL4ITxDFW?U?fU!C?FfuI>yPm8xJ>Uwl+}TN_Z^$; z81pW_$Hk>eqwX9HX_3x+nFf(8D!Pj;0Trk4rh#u0W$QFr9%yX5Bjw4k)+?N4*OVEu z3p$|5rdI%Gpl6U+ZjX5N+?#q^ZEW^s<`H?@rH8tei6>h@4G1a&IqB_H@=rNQ57gSU z)i4^pXd=^SJAC;iV|KEi89|XRr-CzC;I2@cF-ray5~gBDgncJ}7bKD0>I5XV<>dqN z6ejH0NQWqj)^_suiYe6u=o;Fxz9tk3TY{?onkWmM>qD zF%Y1|wEo*=pu#nWcZ+OK|i zRA(=}+ePJWFC(}{3ZFi?ZCibgY&4CeGQK*dM_r|vV{O@~Q8S-}o*LY6VM;lsN^0@o z($7jZikDcEea-d5Zuw(B4;(4GX52I?Sa#R`S&^L$a(w5rdP*JkclUxVjbD|!EN?e4 z=vF_vs{|89%`6z~7sM$wn<7r54(jtYjg_zyZhvVt!IzhMyV44;SwEs9+$x)(I8nd3 ztkOizAPIyBNnaT<45ow0+v`oY1mEUxALaP+WJ@>QQpBy1ZU>9Y|)|&%wpkn z{&4-_>H;lj4$hQ@-tn@?%xCr^p4yL4Id>F>fcjt|Jy}{MPp7Vq&61V&_;Dx;x0>{v zN>WkEVQBUunJz^^pbqzX5;`^7q&mj@LvU=Qcd#<#?W=R3gg{Y+6w#BZ_lJO`zwOga zsqb~@P#YEykLc5>f#PAU^O9E1XakYk+_ChCyLD?d$e}>A&=EMdTP959s2@FglzSDq zO**sZ_V3@H$1Eg+W3Cn70*|NP@n!+@%3Xwu{U7rQ$xx1X@lkf12z86d8u(gZ>HF`I z>6Uk8fK;=Ou)f&K@sQe;{U8y4o?zJh}ny~+kLHdK2&Vr~DhYITy1@umT zQc*6Otb2S~(gT7IjrQ96tm4&lHd=M)LdBKc6%5!c9Z}fn)JKobK~Qo3qk%{8UD~qV zA6!4*IL3Gjm={|8d*{uYwmlMSne8Um$Uvpugzre)3MAv>;gBUjb7L zM=8lLP?je#NNmi={CI z?BtqJovsh0n8fvnmt}jaoni8`^!`dqm{tVZicsPG=dX@%{KYK^5g28KPxu;2^z67V zhs$pC9tf_f$+O&s-L$)x%m&6e?|NR}W6_2u-fmDuuGOuIAsETpFlzZ<2d3Ob+NXy& z6z{m)%fI*Gd}Fb=Oukk`!Uld0%S;ppuX(-L@MC&WDn9h7qcQV)PIvoy``d1Plm)J^ z6ZAB|*PV{x-`;eBJF#&6vCnd|>>@ihiEC|!&3V0eq)<&AHHc;jJWWYQ#wUPXiWqrV z$C!>`>A^77q~<5JF}(sZCxMo_%D%!W>uYRnZ95&?VC;7-H!Uq~%H7iJyi3%h?Q2io z1?fK$TgAB)*$w8%PbYF0X(T7-qb5=4uPO+0n$0yelCR6{`Kobv-VFvPta` zFA<+xg2aimK3^*(+LaDLoWJxqQIjy|zE_q6mpPSSARaL{@FeKbyK7_c6m)R7O6aIpuy0c5`%o}PZZUl>RCQ8G>IV(_4d)27{^+-d z;ee|DZX2`r#*^vvW0RKQ2?s>=gm*3M8R^%-R%3^y%|83>t>NUheQpdio7_|}Ev_Yn zcxbS{=PZi!?~$YwG5@O3qV5ux`1{BylUM!zs!Nd{O%v5PNq*$l735mH0=zR&f|SgS zb&$bdE8^-XIcL?K!$$@zjm#7Y3mzhouwtLOR6Ql~$^U-2-@dNnQA7NBf@T`lfd4_h z4UUMI!xqPJkzI=f1#ei^ZhOXe1dxgGv**k?6BQK|SL$o*RQPFN9i{6KOrA%e3_HP= zNR5tSnscr0qk*3bL;+K2maY|F z>Sokwz=DK-GGqUD$N41P9%)|3aVDy5zY&DYh2Q0hdfqDUxNrl{{01Q$0XriGjkX4{zh(X zEq$wVSQw`A?VgHxFQ4`Qq{x{pN69Xck?fQ*`T}|l;({M0p%KqR;C~swOtZI9Gx&3+D&2lfb(K>iQyQoX%CGZa{`B z<|=)G!=Bq`xTDeBQ9wgHx@#OP#-2UdUZw-TflWvC(h*9KxMeUPX*VaG@-+_<2Z#ug z%xDyeAV{ZlBPV!%LD^l!-gzv+X4JemJR*C<64GgH28NB5+M`zh0LP#Nzb2iGMSCml zX*h=>OT7m-m<#Vc$yCkEFb5usz!!uSBz;=Mcn>eG^rD2(J1DtNSo#`gLbgyB)-16e z4D1)!U8PYs;+4don)i@~Vb_wIZQ(Wy8Hw(W<>*>J<8lPf5%Z)|UjRaseY$vs>Skmi zV;2hG;{C+=$}^&~mMxb$OTbnCdulBPI6YFP6_EA1F2f1L))*n7EVORwsquD^@lWjK zXc4)<&Jw-@c@Q3jGK2JqigQ{D>m{IVdm1D8%{phwBI7<4TVY8a4k1f~b*;l9Z>%BZ zgqenD3%8)f5EdUjeaRQndVp79-wWaZm?_+N z>MNpS!KROSdWl_0?jdO~#vPUB%ZCOm43$y!&6_{6(3zyrp0qNJOG8CJB`K*pxn*4E z&1Ucdic&6dSMZ5?lK|1%pMpG50Q^VV7cmW4_t=H`1TjR61-BC2i zMN$^74M?C0;=~pU04T#yv?63$oj=KBkP#-#GE#Ln?WM?QY1vCd4(RChT=W+8U_71+ zI-ENdqmLH-zOkYn+rhM}UeBJgwY7oBTJGmy*_WR@U#FQC zU&@S}v)EV?$On}@PXu$$VHWB8)$|s7l(UY25Yq0@k@SW}f0B7zY4j}=KyN*i-D{GB z2{Gz%fny=OtCKuHy)*OQ?_^u>ij86GbzLSc{{&((XZ-`>nMEYMb7#SXyv7|d~cSWH=*U4 zHNlX%)i5y_Dukpq!djwGz$5fcE||T1`6&qFLP>f3`nCBU0|SFu;DLN5^vWw^6G5Oi zfb@CPv?R}^V++%E1fQxXWAEBU%CttI$He9CbNrBZpJp=~<}HDycz1HA9w!MDY=ct* z>#z9;mzc68K?O1_f6H>Xk|q6%1SdH8lRkD)Kj+De_$^d7^p7dZt)a=v44bm#}`UFHT8YEA_*$YxP_bRYh(T{)Xe5Ky8))>y( zQmm;5v(&%Q0yc8o{hNgSj8ZXF1(|}QnMs8YqWoA`o2J9nQ-z1@3dVd;4_j+h5yc)E zUG@5gszxdYpXOB{JGXnJqrcWR2y``NCG-kFDHd)HS=>$p#o^T(%2>w~7_f#x$pz`V zv7%qrn%}yA2;~|uf2311ZznMq{TfYPWvd!MK~uSvw&M@$W(9dn--nJfXxd{Huy|m; zhX!6@$Ix-g1j!{LRp9>>*>>N5(Mo{WZ0SS7oyk%l%>(e<(5psU2JXf zF}=~!LapjOAdc22GBfK`33*Z)lNqX;(}@*uvv$%`{X~(rP?wc?A4N9`k?cuUjv6 zR=PHmUKHuf2akEtyBnGzk5x}A-)6>voJNfaPm@EZRsJ<7zsbdu+X5}#8(r+7`-)x1 z&)C{y+4zaQ@{eYw9qH5q?WNZSb6(ZBuZYvV!L-qmX|I(L3`TnFUgIi&gI#3iixIki 
zRGJOyeZ5+o6(I@Iwj%$H^*=>zwGeD!k3?+htE=9xop$=8ow|l7jebv=oqATf^r`DytCgCE1KZrNImu zw(fO(&&%mm0car55-G!naB|M~FB9C#sQUwYBC@%AW9@s@o6i~G{?9G~CWOs#g>Md) zQvdf-<&YaY4yk=(qdo_6+@{Z)*H=T&@RA*Qy~uG?{1;;xyI$uN71Lji*RTA6s97@; zounOkM47>~qHC}=PXFaiPp%(!qx+>#L6ZKZ#qHp}%p`Fq5qhe-m|>eComTF0Vg@HpjjuVH%bT%u#CiL~f-9 zdBL>SL8pr^aCCcFi45Ce7z5eK560db_U=0 z^(gjx>%WRDHm~t}IKSwQd5a6auS$cDD`n=ko-Ho+*fxaxqByrsz?ar;|16hQM86N=ZS^m8 z5#bH(0q^vZbp``o>}Z2>yHA4iglld|;>J7uwrw|TOW0Js2sf(@q`>6we)vJnTw;B_ zRm@@-^m&hfjM|Og^}zJ4@5e%V5RMy&e)Wr$CRg$wL2;B?%M`7M<+e=vcmVPjjcjG| z4?UD2Kw7f~M3-p=ri70UJ~!LIGITaIBSv2b0Jb0Q)Ux(ZZ0&&DnHe=>$)iYR!byEX ztgcvSTY}p0#3^nRj`|kQClkH6yy(r#EGB^5*)}VgHZ)OJh&r`iH@62z{y9R&$ zwM0tz@dOq@_v*Go8C8r%IN?lgGD5w^E`! z{m0e`cpv4`^b2>m6xOd7cC62-{dBWeJ=5WoH`Wjh@ZW<*eyWu9mHrd3UlBk0@yGMm zK2f56r;C~3^W)(F diff --git a/src/vendor/dtoa/src/lib.rs b/src/vendor/dtoa/src/lib.rs index 0970fe3316..a0f9454cd1 100644 --- a/src/vendor/dtoa/src/lib.rs +++ b/src/vendor/dtoa/src/lib.rs @@ -6,6 +6,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#![doc(html_root_url = "https://docs.rs/dtoa/0.4.2")] + #[macro_use] mod diyfp; #[macro_use] mod dtoa; diff --git a/src/vendor/error-chain-0.10.0/.cargo-checksum.json b/src/vendor/error-chain-0.10.0/.cargo-checksum.json deleted file mode 100644 index e1498dadba..0000000000 --- a/src/vendor/error-chain-0.10.0/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"ed8bb3864902ddf6322e6b1d6358bcaec9b51382a5216b9526ad0987ae430b0d",".travis.yml":"d56246d6c8796c638b5012c2d7a91d9b6ec101b6a47128e2d4bfa957c1c784e8","CHANGELOG.md":"4f602de0b17e0d0121371482dfcf3caf2265b70bf92e8b5db1cba5dd8f391469","Cargo.toml":"8e4d1f0b25be862107a6938190c9817cd7ea516db50e688de1d0fe87519105ee","README.md":"6771ca940645b2f7e7a018c8cd25b25f8bf35786e229b54fa2fded1f2d0ae411","examples/all.rs":"6f073ea0e3db541a4eefb41436fc03a121a1f932fd6a2798b485a72d64bd1a3c","examples/doc.rs":"574948eb776c3d363f5cff9a48015bab6c17828c7306dc3eb8818afa90a31a83","examples/quickstart.rs":"c3142d5139d89c3861b119507a372fba47ac3d7df61aa90b068d518dea8fd6f6","examples/size.rs":"7922acd891dfd06f1d36308a3ccdf03def2646b2f39bfd1b15cf2896247bad8f","src/error_chain.rs":"d0cb3e4a93f9c358e4bd18ab8443573e57ace15442f4697ad95963d10408f882","src/example_generated.rs":"7d5220020aada7def70d3b3e396dadb0b139ed104b1253d06ac53f48517ec668","src/lib.rs":"0d1c972252dd1df3117ddf0a71a4734cdb350b41376e09cbe4b868afb0e2762b","src/quick_error.rs":"1889b9ca1f7a5e9124275fd5da81e709d0d6bd3b06915bf320c23d4c4f083301","src/quick_main.rs":"106a0cf44a6a2fbb9fb1d8932d234f43cd7af230fc6685b28f6b9dfaca2a3210","tests/quick_main.rs":"1d6a726856b954d4cffddab00602583921972ceeeb2bf7ba9ebbac6a51584b53","tests/tests.rs":"67b6acf87f4986fa013f018195e3becd6dd63d8101a7af07a417e8e526cf50ad"},"package":"d9435d864e017c3c6afeac1654189b06cdb491cf2ff73dbf0d73b0f292f42ff8"} \ No newline at end of file diff --git a/src/vendor/error-chain-0.10.0/CHANGELOG.md b/src/vendor/error-chain-0.10.0/CHANGELOG.md deleted file mode 100644 index 0a533e7aca..0000000000 --- a/src/vendor/error-chain-0.10.0/CHANGELOG.md +++ /dev/null @@ -1,100 +0,0 @@ -# Unreleased - -# 0.10.0 - -- [Add a new constructor for `Error`: `with_chain`.](https://github.com/brson/error-chain/pull/126) -- [Add the `ensure!` macro.](https://github.com/brson/error-chain/pull/135) - -# 0.9.0 - -- Revert [Add a `Sync` bound to errors](https://github.com/brson/error-chain/pull/110) - -# 0.8.1 - -- 
Add crates.io categorie. - -# 0.8.0 - -- [Add a `Sync` bound to errors](https://github.com/brson/error-chain/pull/110) -- [Add `ChainedError::display` to format error chains](https://github.com/brson/error-chain/pull/113) - -# 0.7.2 - -- Add `quick_main!` (#88). -- `allow(unused)` for the `Result` wrapper. -- Minimum rust version supported is now 1.10 on some conditions (#103). - -# 0.7.1 - -- [Add the `bail!` macro](https://github.com/brson/error-chain/pull/76) - -# 0.7.0 - -- [Rollback several design changes to fix regressions](https://github.com/brson/error-chain/pull/75) -- New `Variant(Error) #[attrs]` for `links` and `foreign_links`. -- Hide implementation details from the doc. -- Always generate `Error::backtrace`. - -# 0.6.2 - -- Allow dead code. - -# 0.6.1 - -- Fix wrong trait constraint in ResultExt implementation (#66). - -# 0.6.0 - -- Conditional compilation for error variants. -- Backtrace generation is now a feature. -- More standard trait implementations for extra convenience. -- Remove ChainErr. -- Remove need to specify `ErrorKind` in `links {}`. -- Add ResultExt trait. -- Error.1 is a struct instead of a tuple. -- Error is now a struct. -- The declarations order is more flexible. -- Way better error reporting when there is a syntax error in the macro call. -- `Result` generation can be disabled. -- At most one declaration of each type can be present. - -# 0.5.0 - -- [Only generate backtraces with RUST_BACKTRACE set](https://github.com/brson/error-chain/pull/27) -- [Fixup matching, disallow repeating "types" section](https://github.com/brson/error-chain/pull/26) -- [Fix tests on stable/beta](https://github.com/brson/error-chain/pull/28) -- [Only deploy docs when tagged](https://github.com/brson/error-chain/pull/30) - -Contributors: benaryorg, Brian Anderson, Georg Brandl - -# 0.4.2 - -- [Fix the resolution of the ErrorKind description method](https://github.com/brson/error-chain/pull/24) - -Contributors: Brian Anderson - -# 0.4.1 (yanked) - -- [Fix a problem with resolving methods of the standard Error type](https://github.com/brson/error-chain/pull/22) - -Contributors: Brian Anderson - -# 0.4.0 (yanked) - -- [Remove the foreign link description and forward to the foreign error](https://github.com/brson/error-chain/pull/19) -- [Allow missing sections](https://github.com/brson/error-chain/pull/17) - -Contributors: Brian Anderson, Taylor Cramer - -# 0.3.0 - -- [Forward Display implementation for foreign errors](https://github.com/brson/error-chain/pull/13) - -Contributors: Brian Anderson, Taylor Cramer - -# 0.2.2 - -- [Don't require `types` section in macro invocation](https://github.com/brson/error-chain/pull/8) -- [Add "quick start" to README](https://github.com/brson/error-chain/pull/9) - -Contributors: Brian Anderson, Jake Shadle, Nate Mara diff --git a/src/vendor/error-chain/.cargo-checksum.json b/src/vendor/error-chain/.cargo-checksum.json new file mode 100644 index 0000000000..4416baf4ee --- /dev/null +++ b/src/vendor/error-chain/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"ed8bb3864902ddf6322e6b1d6358bcaec9b51382a5216b9526ad0987ae430b0d",".travis.yml":"6dcf8fe7a4ef9f228cabc1a9179b3084c8be6dd09b9b838dbf4019d113e1bbcf","CHANGELOG.md":"010cba4f5cdf9607eb744e83a666a3642d6c1aae5d467c67978e97a7f7e86bd8","Cargo.toml":"d8d10d260671883629d2b7e6c5daca5eb2b49b8b1c88345e530462ea1a2e6913","Cargo.toml.orig":"a0c128e6c71607dc81b4bb0ee8336a98a10fd1f86fc2d018b172a4aaee8a7744","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c43864d39cedab9a1b2aa3d5e480cb58f74cac0b07756670a30c683ce34976a1","README.md":"d63912fd7a5a1a2b1d21edde3cee58cbe422c29b9fcdcc7b8ba2264bf15825b5","examples/all.rs":"cf8422ea6fdb61dbe9ddbb2db51daba48768e3b1d81c5f9e454371c258954cfb","examples/chain_err.rs":"fbb6f90397d5e621e4982d224caf3d419a3c1aaa50f07e5c9c617a9352cfb81f","examples/doc.rs":"790ad6877c91e4e5b2d78829c58702100dcccf3eac2279940257691420cdff03","examples/quickstart.rs":"ca471b3c310d40e5f5dc07db2bfbfcedb71dfc1e25021a6383a9810b24e8fc40","examples/size.rs":"a67ba47b254fb899cb0ecf809e95f75649bb0e401feece9485a2064e223602ab","src/bin/has_backtrace.rs":"eedf028ff206938760a53e91d13534b6ad6780b2b6635f405b7896125484a869","src/error_chain.rs":"5fc674d965746f3ea1a6ea65f82352c40b83439004480bf5a338748a90e476cc","src/example_generated.rs":"95a1e191917740885286f199186674ed575d807077b57dffe6388a4fe2e1ba98","src/impl_error_chain_kind.rs":"538c6f7a2382d555f809c4d7f33e739dff7aa75b2fb3c1629ca2afaa38ff4279","src/lib.rs":"0adc37e316f45d57d56d76245c76942d2a894643c4d2da744639d33c3cd99099","src/quick_main.rs":"472f0b90b11d346cbceec5a95da78fabda0fb55e7e019dc62ac8ff0c206841ea","tests/quick_main.rs":"39a1113cc0d30e5b265e4139cda36f910f8c534a4409e99a9a506a0e88e58f19","tests/tests.rs":"2b76317571651999c294c639088ec7d764c27721d802ea6bc624cfdf31619623"},"package":"ff511d5dc435d703f4971bc399647c9bc38e20cb41452e3b9feb4765419ed3f3"} \ No newline at end of file diff --git a/src/vendor/error-chain/.cargo-ok b/src/vendor/error-chain/.cargo-ok new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/vendor/error-chain-0.10.0/.gitignore b/src/vendor/error-chain/.gitignore similarity index 100% rename from src/vendor/error-chain-0.10.0/.gitignore rename to src/vendor/error-chain/.gitignore diff --git a/src/vendor/error-chain-0.10.0/.travis.yml b/src/vendor/error-chain/.travis.yml similarity index 97% rename from src/vendor/error-chain-0.10.0/.travis.yml rename to src/vendor/error-chain/.travis.yml index 31d9297034..6bbf37eb3e 100644 --- a/src/vendor/error-chain-0.10.0/.travis.yml +++ b/src/vendor/error-chain/.travis.yml @@ -5,7 +5,7 @@ rust: - nightly # Oldest supported version for all features. # Use of https://github.com/rust-lang/rfcs/pull/16 -- 1.13.0 +- 1.14.0 # Oldest supported version as dependency, with no features, tests, or examples. 
- 1.10.0 @@ -35,6 +35,7 @@ env: global: - secure: ncxJbvJM1vCZfcEftjsFKJMxxhKLgWKaR8Go9AMo0VB5fB2XVW/6NYO5bQEEYpOf1Nc/+2FbI2+Dkz0S/mJpUcNSfBgablCHgwU2sHse7KsoaqfHj2mf1E3exjzSHoP96hPGicC5zAjSXFjCgJPOUSGqqRaJ7z5AsJLhJT6LuK7QpvwPBZzklUN8T+n1sVmws8TNmRIbaniq/q6wYHANHcy6Dl59dx4sKwniUGiZdUhCiddVpoxbECSxc0A8mN2pk7/aW+WGxK3goBs5ZF7+JXF318F62pDcXQmR5CX6WdpenIcJ25g1Vg1WhQ4Ifpe17CN0bfxV8ShuzrQUThCDMffZCo9XySBtODdEowwK1UIpjnFLfIxjOs45Cd8o3tM2j0CfvtnjOz6BCdUU0qiwNPPNx0wFkx3ZiOfSh+FhBhvyPM12HN2tdN0esgVBItFmEci+sSIIXqjVL6DNiu5zTjbu0bs6COwlUWdmL6vmsZtq5tl7Cno9+C3szxRVAkShGydd04l9NYjqNEzTa1EPG50OsnVRKGdRiFzSxhc3BWExNKvcQ4v867t6/PpPkW6s4oXmYI3+De+8O7ExWc6a4alcrDXKlMs5fCb5Pcd4Ju9kowcjkoJo5yf2wW3Ox5R8SJpaEEpvyhx5O/qtIxjhHNzeo8Wsr/6gdNDv20r91TI= - TRAVIS_CARGO_NIGHTLY_FEATURE="" + - RUSTFLAGS="-D warnings" matrix: - FEATURES=--features=backtrace - FEATURES=--no-default-features diff --git a/src/vendor/error-chain/CHANGELOG.md b/src/vendor/error-chain/CHANGELOG.md new file mode 100644 index 0000000000..ed0f12e3a1 --- /dev/null +++ b/src/vendor/error-chain/CHANGELOG.md @@ -0,0 +1,120 @@ +# Unreleased + +# 0.11.0 + +- Change last rust version supported to 1.14 +- [Cache whether RUST_BACKTRACE is enabled in a relaxed atomic static.](https://github.com/rust-lang-nursery/error-chain/pull/210) +- [Mask the `quick_error` macro from the doc](https://github.com/rust-lang-nursery/error-chain/pull/210) +- [Make generated `ErrorKind` enums non-exhaustive](https://github.com/rust-lang-nursery/error-chain/pull/193) +- All 0.11.0-rc.2 changes + +# 0.11.0-rc.2 + +- [Make `ErrorChainIter`'s field private](https://github.com/rust-lang-nursery/error-chain/issues/178) +- [Rename `ErrorChainIter` to `Iter`](https://github.com/rust-lang-nursery/error-chain/issues/168) +- [Implement `Debug` for `ErrorChainIter`](https://github.com/rust-lang-nursery/error-chain/issues/169) +- [Rename `ChainedError::display` to `display_chain`](https://github.com/rust-lang-nursery/error-chain/issues/180) +- [Add a new method for `Error`: `chain_err`.](https://github.com/rust-lang-nursery/error-chain/pull/141) +- [Allow `chain_err` to be used on `Option`](https://github.com/rust-lang-nursery/error-chain/pull/156) +- [Add support for creating an error chain on boxed trait errors (`Box`)](https://github.com/rust-lang-nursery/error-chain/pull/156) +- [Remove lint for unused doc comment.](https://github.com/rust-lang-nursery/error-chain/pull/199) +- [Hide error_chain_processed macro from documentation.](https://github.com/rust-lang-nursery/error-chain/pull/212) + +# 0.10.0 + +- [Add a new constructor for `Error`: `with_chain`.](https://github.com/rust-lang-nursery/error-chain/pull/126) +- [Add the `ensure!` macro.](https://github.com/rust-lang-nursery/error-chain/pull/135) + +# 0.9.0 + +- Revert [Add a `Sync` bound to errors](https://github.com/rust-lang-nursery/error-chain/pull/110) + +# 0.8.1 + +- Add crates.io category. + +# 0.8.0 + +- [Add a `Sync` bound to errors](https://github.com/rust-lang-nursery/error-chain/pull/110) +- [Add `ChainedError::display` to format error chains](https://github.com/rust-lang-nursery/error-chain/pull/113) + +# 0.7.2 + +- Add `quick_main!` (#88). +- `allow(unused)` for the `Result` wrapper. +- Minimum rust version supported is now 1.10 on some conditions (#103). 
+ +# 0.7.1 + +- [Add the `bail!` macro](https://github.com/rust-lang-nursery/error-chain/pull/76) + +# 0.7.0 + +- [Rollback several design changes to fix regressions](https://github.com/rust-lang-nursery/error-chain/pull/75) +- New `Variant(Error) #[attrs]` for `links` and `foreign_links`. +- Hide implementation details from the doc. +- Always generate `Error::backtrace`. + +# 0.6.2 + +- Allow dead code. + +# 0.6.1 + +- Fix wrong trait constraint in ResultExt implementation (#66). + +# 0.6.0 + +- Conditional compilation for error variants. +- Backtrace generation is now a feature. +- More standard trait implementations for extra convenience. +- Remove ChainErr. +- Remove need to specify `ErrorKind` in `links {}`. +- Add ResultExt trait. +- Error.1 is a struct instead of a tuple. +- Error is now a struct. +- The declarations order is more flexible. +- Way better error reporting when there is a syntax error in the macro call. +- `Result` generation can be disabled. +- At most one declaration of each type can be present. + +# 0.5.0 + +- [Only generate backtraces with RUST_BACKTRACE set](https://github.com/rust-lang-nursery/error-chain/pull/27) +- [Fixup matching, disallow repeating "types" section](https://github.com/rust-lang-nursery/error-chain/pull/26) +- [Fix tests on stable/beta](https://github.com/rust-lang-nursery/error-chain/pull/28) +- [Only deploy docs when tagged](https://github.com/rust-lang-nursery/error-chain/pull/30) + +Contributors: benaryorg, Brian Anderson, Georg Brandl + +# 0.4.2 + +- [Fix the resolution of the ErrorKind description method](https://github.com/rust-lang-nursery/error-chain/pull/24) + +Contributors: Brian Anderson + +# 0.4.1 (yanked) + +- [Fix a problem with resolving methods of the standard Error type](https://github.com/rust-lang-nursery/error-chain/pull/22) + +Contributors: Brian Anderson + +# 0.4.0 (yanked) + +- [Remove the foreign link description and forward to the foreign error](https://github.com/rust-lang-nursery/error-chain/pull/19) +- [Allow missing sections](https://github.com/rust-lang-nursery/error-chain/pull/17) + +Contributors: Brian Anderson, Taylor Cramer + +# 0.3.0 + +- [Forward Display implementation for foreign errors](https://github.com/rust-lang-nursery/error-chain/pull/13) + +Contributors: Brian Anderson, Taylor Cramer + +# 0.2.2 + +- [Don't require `types` section in macro invocation](https://github.com/rust-lang-nursery/error-chain/pull/8) +- [Add "quick start" to README](https://github.com/rust-lang-nursery/error-chain/pull/9) + +Contributors: Brian Anderson, Jake Shadle, Nate Mara diff --git a/src/vendor/error-chain/Cargo.toml b/src/vendor/error-chain/Cargo.toml new file mode 100644 index 0000000000..6c4e0e4986 --- /dev/null +++ b/src/vendor/error-chain/Cargo.toml @@ -0,0 +1,32 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "error-chain" +version = "0.11.0" +authors = ["Brian Anderson ", "Paul Colomiets ", "Colin Kiegel ", "Yamakaky "] +description = "Yet another error boilerplate library." 
+documentation = "https://docs.rs/error-chain" +readme = "README.md" +keywords = ["error"] +categories = ["rust-patterns"] +license = "MIT/Apache-2.0" +repository = "https://github.com/rust-lang-nursery/error-chain" +[dependencies.backtrace] +version = "0.3" +optional = true + +[features] +example_generated = [] +default = ["backtrace", "example_generated"] +[badges.travis-ci] +repository = "rust-lang-nursery/error-chain" diff --git a/src/vendor/error-chain-0.10.0/Cargo.toml b/src/vendor/error-chain/Cargo.toml.orig similarity index 71% rename from src/vendor/error-chain-0.10.0/Cargo.toml rename to src/vendor/error-chain/Cargo.toml.orig index a6993e5ec0..25b8cad0b2 100644 --- a/src/vendor/error-chain-0.10.0/Cargo.toml +++ b/src/vendor/error-chain/Cargo.toml.orig @@ -1,22 +1,22 @@ [package] - name = "error-chain" -version = "0.10.0" +version = "0.11.0" # remember to update html_root_url authors = [ "Brian Anderson ", "Paul Colomiets ", "Colin Kiegel ", "Yamakaky "] description = "Yet another error boilerplate library." +keywords = ["error"] categories = ["rust-patterns"] documentation = "https://docs.rs/error-chain" -homepage = "https://github.com/brson/error-chain" -repository = "https://github.com/brson/error-chain" +repository = "https://github.com/rust-lang-nursery/error-chain" +readme = "README.md" license = "MIT/Apache-2.0" [badges] -travis-ci = { repository = "brson/error-chain" } +travis-ci = { repository = "rust-lang-nursery/error-chain" } [features] default = ["backtrace", "example_generated"] diff --git a/src/vendor/error-chain/LICENSE-APACHE b/src/vendor/error-chain/LICENSE-APACHE new file mode 100644 index 0000000000..16fe87b06e --- /dev/null +++ b/src/vendor/error-chain/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/src/vendor/error-chain/LICENSE-MIT b/src/vendor/error-chain/LICENSE-MIT new file mode 100644 index 0000000000..5f28864c84 --- /dev/null +++ b/src/vendor/error-chain/LICENSE-MIT @@ -0,0 +1,26 @@ +Copyright (c) 2017 The Error-Chain Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + diff --git a/src/vendor/error-chain-0.10.0/README.md b/src/vendor/error-chain/README.md similarity index 68% rename from src/vendor/error-chain-0.10.0/README.md rename to src/vendor/error-chain/README.md index 1e9bbf4caa..51e50533ed 100644 --- a/src/vendor/error-chain-0.10.0/README.md +++ b/src/vendor/error-chain/README.md @@ -1,8 +1,8 @@ # error-chain - Consistent error handling for Rust -[![Build Status](https://api.travis-ci.org/brson/error-chain.svg?branch=master)](https://travis-ci.org/brson/error-chain) +[![Build Status](https://api.travis-ci.org/rust-lang-nursery/error-chain.svg?branch=master)](https://travis-ci.org/rust-lang-nursery/error-chain) [![Latest Version](https://img.shields.io/crates/v/error-chain.svg)](https://crates.io/crates/error-chain) -[![License](https://img.shields.io/github/license/brson/error-chain.svg)](https://github.com/brson/error-chain) +[![License](https://img.shields.io/github/license/rust-lang-nursery/error-chain.svg)](https://github.com/rust-lang-nursery/error-chain) `error-chain` makes it easy to take full advantage of Rust's error handling features without the overhead of maintaining boilerplate @@ -12,7 +12,7 @@ error types. [Documentation (crates.io)](https://docs.rs/error-chain). -[Documentation (master)](https://brson.github.io/error-chain). +[Documentation (master)](https://rust-lang-nursery.github.io/error-chain). ## Quick start @@ -20,7 +20,7 @@ If you just want to set up your new project with error-chain, follow the [quickstart.rs] template, and read this [intro] to error-chain. -[quickstart.rs]: https://github.com/brson/error-chain/blob/master/examples/quickstart.rs +[quickstart.rs]: https://github.com/rust-lang-nursery/error-chain/blob/master/examples/quickstart.rs [intro]: http://brson.github.io/2016/11/30/starting-with-error-chain ## Supported Rust version diff --git a/src/vendor/error-chain-0.10.0/examples/all.rs b/src/vendor/error-chain/examples/all.rs similarity index 95% rename from src/vendor/error-chain-0.10.0/examples/all.rs rename to src/vendor/error-chain/examples/all.rs index 840ca95e27..ccc3ab7032 100644 --- a/src/vendor/error-chain-0.10.0/examples/all.rs +++ b/src/vendor/error-chain/examples/all.rs @@ -2,12 +2,12 @@ extern crate error_chain; pub mod inner { - error_chain! {} + error_chain!{} } #[cfg(feature = "a_feature")] pub mod feature { - error_chain! {} + error_chain!{} } error_chain! { diff --git a/src/vendor/error-chain/examples/chain_err.rs b/src/vendor/error-chain/examples/chain_err.rs new file mode 100644 index 0000000000..bd8effdaf6 --- /dev/null +++ b/src/vendor/error-chain/examples/chain_err.rs @@ -0,0 +1,69 @@ +//! Demonstrates usage of `Error::caused` method. This method enables chaining errors +//! like `ResultExt::chain_err` but doesn't require the presence of a `Result` wrapper. + +#[macro_use] +extern crate error_chain; + +use std::fs::File; + +mod errors { + use std::io; + use super::LaunchStage; + + error_chain! 
{ + foreign_links { + Io(io::Error) #[doc = "Error during IO"]; + } + + errors { + Launch(phase: LaunchStage) { + description("An error occurred during startup") + display("Startup aborted: {:?} did not complete successfully", phase) + } + + ConfigLoad(path: String) { + description("Config file not found") + display("Unable to read file `{}`", path) + } + } + } + + impl From for ErrorKind { + fn from(v: LaunchStage) -> Self { + ErrorKind::Launch(v) + } + } +} + +pub use errors::*; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum LaunchStage { + ConfigLoad, + ConfigParse, + ConfigResolve, +} + +/// Read the service config from the file specified. +fn load_config(rel_path: &str) -> Result<()> { + File::open(rel_path) + .map(|_| ()) + .chain_err(|| ErrorKind::ConfigLoad(rel_path.to_string())) +} + +/// Launch the service. +fn launch(rel_path: &str) -> Result<()> { + load_config(rel_path).map_err(|e| match e { + e @ Error(ErrorKind::ConfigLoad(_), _) => { + e.chain_err(|| LaunchStage::ConfigLoad) + } + e => e.chain_err(|| "Unknown failure"), + }) +} + +fn main() { + let chain = launch("does_not_exist.json").unwrap_err(); + for err in chain.iter() { + println!("{}", err); + } +} diff --git a/src/vendor/error-chain-0.10.0/examples/doc.rs b/src/vendor/error-chain/examples/doc.rs similarity index 94% rename from src/vendor/error-chain-0.10.0/examples/doc.rs rename to src/vendor/error-chain/examples/doc.rs index 103040458c..999ac9cef0 100644 --- a/src/vendor/error-chain-0.10.0/examples/doc.rs +++ b/src/vendor/error-chain/examples/doc.rs @@ -7,8 +7,7 @@ extern crate error_chain; /// Inner module. pub mod inner { - error_chain! { - } + error_chain!{} } error_chain! { diff --git a/src/vendor/error-chain-0.10.0/examples/quickstart.rs b/src/vendor/error-chain/examples/quickstart.rs similarity index 84% rename from src/vendor/error-chain-0.10.0/examples/quickstart.rs rename to src/vendor/error-chain/examples/quickstart.rs index 8f0c5f0775..2e3e2b5d39 100644 --- a/src/vendor/error-chain-0.10.0/examples/quickstart.rs +++ b/src/vendor/error-chain/examples/quickstart.rs @@ -14,14 +14,17 @@ extern crate error_chain; // `error_chain!` creates. mod errors { // Create the Error, ErrorKind, ResultExt, and Result types - error_chain! { } + error_chain!{} } +// This only gives access within this module. Make this `pub use errors::*;` +// instead if the types must be accessible from other modules (e.g., within +// a `links` section). use errors::*; fn main() { if let Err(ref e) = run() { - use ::std::io::Write; + use std::io::Write; let stderr = &mut ::std::io::stderr(); let errmsg = "Error writing to stderr"; @@ -43,24 +46,24 @@ fn main() { // The above main gives you maximum control over how the error is // formatted. If you don't care (i.e. you want to display the full -// error during an assert) you can just call the `display` method +// error during an assert) you can just call the `display_chain` method // on the error object #[allow(dead_code)] fn alternative_main() { if let Err(ref e) = run() { use std::io::Write; - use error_chain::ChainedError; // trait which holds `display` + use error_chain::ChainedError; // trait which holds `display_chain` let stderr = &mut ::std::io::stderr(); let errmsg = "Error writing to stderr"; - writeln!(stderr, "{}", e.display()).expect(errmsg); + writeln!(stderr, "{}", e.display_chain()).expect(errmsg); ::std::process::exit(1); } } // Use this macro to auto-generate the main above. You may want to // set the `RUST_BACKTRACE` env variable to see a backtrace. 
-//quick_main!(run); +// quick_main!(run); // Most functions will return the `Result` type, imported from the diff --git a/src/vendor/error-chain-0.10.0/examples/size.rs b/src/vendor/error-chain/examples/size.rs similarity index 93% rename from src/vendor/error-chain-0.10.0/examples/size.rs rename to src/vendor/error-chain/examples/size.rs index 01f677b491..ae360d66e9 100644 --- a/src/vendor/error-chain-0.10.0/examples/size.rs +++ b/src/vendor/error-chain/examples/size.rs @@ -32,9 +32,7 @@ fn main() { } #[cfg(not(feature = "backtrace"))] { - let state = error_chain::State { - next_error: None, - }; + let state = error_chain::State { next_error: None }; println!(" State.next_error: {}", size_of_val(&state.next_error)); } } diff --git a/src/vendor/error-chain/src/bin/has_backtrace.rs b/src/vendor/error-chain/src/bin/has_backtrace.rs new file mode 100644 index 0000000000..c5dac058a8 --- /dev/null +++ b/src/vendor/error-chain/src/bin/has_backtrace.rs @@ -0,0 +1,18 @@ +//! Exits with exit code 0 if backtraces are disabled and 1 if they are enabled. +//! Used by tests to make sure backtraces are available when they should be. Should not be used +//! outside of the tests. + +#[macro_use] +extern crate error_chain; + +error_chain! { + errors { + MyError + } +} + +fn main() { + let err = Error::from(ErrorKind::MyError); + let has_backtrace = err.backtrace().is_some(); + ::std::process::exit(has_backtrace as i32); +} diff --git a/src/vendor/error-chain-0.10.0/src/error_chain.rs b/src/vendor/error-chain/src/error_chain.rs similarity index 85% rename from src/vendor/error-chain-0.10.0/src/error_chain.rs rename to src/vendor/error-chain/src/error_chain.rs index a8c58964ce..cbd42cd67e 100644 --- a/src/vendor/error-chain-0.10.0/src/error_chain.rs +++ b/src/vendor/error-chain/src/error_chain.rs @@ -1,12 +1,13 @@ /// Prefer to use `error_chain` instead of this macro. +#[doc(hidden)] #[macro_export] -macro_rules! error_chain_processed { +macro_rules! impl_error_chain_processed { // Default values for `types`. ( types {} $( $rest: tt )* ) => { - error_chain_processed! { + impl_error_chain_processed! { types { Error, ErrorKind, ResultExt, Result; } @@ -21,7 +22,7 @@ macro_rules! error_chain_processed { } $( $rest: tt )* ) => { - error_chain_processed! { + impl_error_chain_processed! { types { $error_name, $error_kind_name, $result_ext_name; @@ -96,8 +97,14 @@ macro_rules! error_chain_processed { self.kind() } - fn iter(&self) -> $crate::ErrorChainIter { - $crate::ErrorChainIter(Some(self)) + fn iter(&self) -> $crate::Iter { + $crate::Iter::new(Some(self)) + } + + fn chain_err(self, error: F) -> Self + where F: FnOnce() -> EK, + EK: Into<$error_kind_name> { + self.chain_err(error) } fn backtrace(&self) -> Option<&$crate::Backtrace> { @@ -124,10 +131,18 @@ macro_rules! error_chain_processed { -> $error_name where E: ::std::error::Error + Send + 'static, K: Into<$error_kind_name> + { + $error_name::with_boxed_chain(Box::new(error), kind) + } + + /// Construct a chained error from another boxed error and a kind, and generates a backtrace + pub fn with_boxed_chain(error: Box<::std::error::Error + Send>, kind: K) + -> $error_name + where K: Into<$error_kind_name> { $error_name( kind.into(), - $crate::State::new::<$error_name>(Box::new(error), ), + $crate::State::new::<$error_name>(error, ), ) } @@ -137,7 +152,7 @@ macro_rules! error_chain_processed { } /// Iterates over the error chain. 
- pub fn iter(&self) -> $crate::ErrorChainIter { + pub fn iter(&self) -> $crate::Iter { $crate::ChainedError::iter(self) } @@ -145,6 +160,12 @@ macro_rules! error_chain_processed { pub fn backtrace(&self) -> Option<&$crate::Backtrace> { self.1.backtrace() } + + /// Extends the error chain with a new entry. + pub fn chain_err(self, error: F) -> $error_name + where F: FnOnce() -> EK, EK: Into<$error_kind_name> { + $error_name::with_chain(self, Self::from_kind(error().into())) + } } impl ::std::error::Error for $error_name { @@ -152,6 +173,7 @@ macro_rules! error_chain_processed { self.0.description() } + #[allow(unknown_lints, unused_doc_comment)] fn cause(&self) -> Option<&::std::error::Error> { match self.1.next_error { Some(ref c) => Some(&**c), @@ -229,7 +251,7 @@ macro_rules! error_chain_processed { // The ErrorKind type // -------------- - quick_error! { + impl_error_chain_kind! { /// The kind of an error. #[derive(Debug)] pub enum $error_kind_name { @@ -290,7 +312,7 @@ macro_rules! error_chain_processed { // The ResultExt trait defines the `chain_err` method. /// Additional methods for `Result`, for easy interaction with this crate. - pub trait $result_ext_name { + pub trait $result_ext_name { /// If the `Result` is an `Err` then `chain_err` evaluates the closure, /// which returns *some type that can be converted to `ErrorKind`*, boxes /// the original error to store as the cause, then returns a new error @@ -300,7 +322,7 @@ macro_rules! error_chain_processed { EK: Into<$error_kind_name>; } - impl $result_ext_name for ::std::result::Result where E: ::std::error::Error + Send + 'static { + impl $result_ext_name for ::std::result::Result where E: ::std::error::Error + Send + 'static { fn chain_err(self, callback: F) -> ::std::result::Result where F: FnOnce() -> EK, EK: Into<$error_kind_name> { @@ -311,6 +333,16 @@ macro_rules! error_chain_processed { } } + impl $result_ext_name for ::std::option::Option { + fn chain_err(self, callback: F) -> ::std::result::Result + where F: FnOnce() -> EK, + EK: Into<$error_kind_name> { + self.ok_or_else(move || { + $crate::ChainedError::from_kind(callback().into()) + }) + } + } + }; } @@ -360,7 +392,7 @@ macro_rules! error_chain_processing { } }; ( ($a:tt, $b:tt, $c:tt, $d:tt) ) => { - error_chain_processed! { + impl_error_chain_processed! { types $a links $b foreign_links $c @@ -369,8 +401,7 @@ macro_rules! error_chain_processing { }; } -/// This macro is used for handling of duplicated and out-of-order fields. For -/// the exact rules, see `error_chain_processed`. +/// Macro for generating error types and traits. See crate level documentation for details. #[macro_export] macro_rules! error_chain { ( $( $block_name:ident { $( $block_content:tt )* } )* ) => { @@ -393,6 +424,7 @@ macro_rules! 
impl_extract_backtrace { ($error_name: ident $error_kind_name: ident $([$link_error_path: path, $(#[$meta_links: meta])*])*) => { + #[allow(unknown_lints, unused_doc_comment)] fn extract_backtrace(e: &(::std::error::Error + Send + 'static)) -> Option<::std::sync::Arc<$crate::Backtrace>> { if let Some(e) = e.downcast_ref::<$error_name>() { diff --git a/src/vendor/error-chain-0.10.0/src/example_generated.rs b/src/vendor/error-chain/src/example_generated.rs similarity index 98% rename from src/vendor/error-chain-0.10.0/src/example_generated.rs rename to src/vendor/error-chain/src/example_generated.rs index b7e6b51127..413407cae9 100644 --- a/src/vendor/error-chain-0.10.0/src/example_generated.rs +++ b/src/vendor/error-chain/src/example_generated.rs @@ -21,7 +21,7 @@ /// Another code generated by the macro. pub mod inner { - error_chain! {} + error_chain!{} } error_chain! { diff --git a/src/vendor/error-chain-0.10.0/src/quick_error.rs b/src/vendor/error-chain/src/impl_error_chain_kind.rs similarity index 83% rename from src/vendor/error-chain-0.10.0/src/quick_error.rs rename to src/vendor/error-chain/src/impl_error_chain_kind.rs index 46ed8fe21a..d6c05c8a88 100644 --- a/src/vendor/error-chain-0.10.0/src/quick_error.rs +++ b/src/vendor/error-chain/src/impl_error_chain_kind.rs @@ -4,18 +4,19 @@ // - $imeta #[macro_export] -macro_rules! quick_error { +#[doc(hidden)] +macro_rules! impl_error_chain_kind { ( $(#[$meta:meta])* pub enum $name:ident { $($chunks:tt)* } ) => { - quick_error!(SORT [pub enum $name $(#[$meta])* ] + impl_error_chain_kind!(SORT [pub enum $name $(#[$meta])* ] items [] buf [] queue [ $($chunks)* ]); }; ( $(#[$meta:meta])* enum $name:ident { $($chunks:tt)* } ) => { - quick_error!(SORT [enum $name $(#[$meta])* ] + impl_error_chain_kind!(SORT [enum $name $(#[$meta])* ] items [] buf [] queue [ $($chunks)* ]); }; @@ -27,16 +28,16 @@ macro_rules! quick_error { buf [ ] queue [ ] ) => { - quick_error!(ENUM_DEFINITION [enum $name $( #[$meta] )*] + impl_error_chain_kind!(ENUM_DEFINITION [enum $name $( #[$meta] )*] body [] queue [$($( #[$imeta] )* => $iitem: $imode [$( $ivar: $ityp ),*] )*] ); - quick_error!(IMPLEMENTATIONS $name {$( + impl_error_chain_kind!(IMPLEMENTATIONS $name {$( $iitem: $imode [$(#[$imeta])*] [$( $ivar: $ityp ),*] {$( $ifuncs )*} )*}); $( - quick_error!(ERROR_CHECK $imode $($ifuncs)*); + impl_error_chain_kind!(ERROR_CHECK $imode $($ifuncs)*); )* }; (SORT [pub enum $name:ident $( #[$meta:meta] )*] @@ -46,16 +47,16 @@ macro_rules! quick_error { buf [ ] queue [ ] ) => { - quick_error!(ENUM_DEFINITION [pub enum $name $( #[$meta] )*] + impl_error_chain_kind!(ENUM_DEFINITION [pub enum $name $( #[$meta] )*] body [] queue [$($( #[$imeta] )* => $iitem: $imode [$( $ivar: $ityp ),*] )*] ); - quick_error!(IMPLEMENTATIONS $name {$( + impl_error_chain_kind!(IMPLEMENTATIONS $name {$( $iitem: $imode [$(#[$imeta])*] [$( $ivar: $ityp ),*] {$( $ifuncs )*} )*}); $( - quick_error!(ERROR_CHECK $imode $($ifuncs)*); + impl_error_chain_kind!(ERROR_CHECK $imode $($ifuncs)*); )* }; // Add meta to buffer @@ -66,7 +67,7 @@ macro_rules! quick_error { buf [$( #[$bmeta:meta] )*] queue [ #[$qmeta:meta] $( $tail:tt )*] ) => { - quick_error!(SORT [$( $def )*] + impl_error_chain_kind!(SORT [$( $def )*] items [$( $(#[$imeta])* => $iitem: $imode [$( $ivar:$ityp ),*] {$( $ifuncs )*} )*] buf [$( #[$bmeta] )* #[$qmeta] ] queue [$( $tail )*]); @@ -79,7 +80,7 @@ macro_rules! 
quick_error { buf [$( #[$bmeta:meta] )*] queue [ $qitem:ident $( $tail:tt )*] ) => { - quick_error!(SORT [$( $def )*] + impl_error_chain_kind!(SORT [$( $def )*] items [$( $(#[$imeta])* => $iitem: $imode [$( $ivar:$ityp ),*] {$( $ifuncs )*} )*] buf [$(#[$bmeta])* => $qitem : UNIT [ ] ] @@ -94,7 +95,7 @@ macro_rules! quick_error { => $bitem:ident: $bmode:tt [$( $bvar:ident: $btyp:ty ),*] ] queue [ #[$qmeta:meta] $( $tail:tt )*] ) => { - quick_error!(SORT [$( $def )*] + impl_error_chain_kind!(SORT [$( $def )*] enum [$( $(#[$emeta])* => $eitem $(( $($etyp),* ))* )* $(#[$bmeta])* => $bitem: $bmode $(( $($btyp),* ))*] items [$($( #[$imeta:meta] )* @@ -111,7 +112,7 @@ macro_rules! quick_error { buf [$( #[$bmeta:meta] )* => $bitem:ident: UNIT [ ] ] queue [($( $qvar:ident: $qtyp:ty ),+) $( $tail:tt )*] ) => { - quick_error!(SORT [$( $def )*] + impl_error_chain_kind!(SORT [$( $def )*] items [$( $(#[$imeta])* => $iitem: $imode [$( $ivar:$ityp ),*] {$( $ifuncs )*} )*] buf [$( #[$bmeta] )* => $bitem: TUPLE [$( $qvar:$qtyp ),*] ] queue [$( $tail )*] @@ -125,7 +126,7 @@ macro_rules! quick_error { buf [$( #[$bmeta:meta] )* => $bitem:ident: UNIT [ ] ] queue [{ $( $qvar:ident: $qtyp:ty ),+} $( $tail:tt )*] ) => { - quick_error!(SORT [$( $def )*] + impl_error_chain_kind!(SORT [$( $def )*] items [$( $(#[$imeta])* => $iitem: $imode [$( $ivar:$ityp ),*] {$( $ifuncs )*} )*] buf [$( #[$bmeta] )* => $bitem: STRUCT [$( $qvar:$qtyp ),*] ] queue [$( $tail )*]); @@ -138,7 +139,7 @@ macro_rules! quick_error { buf [$( #[$bmeta:meta] )* => $bitem:ident: UNIT [ ] ] queue [{$( $qvar:ident: $qtyp:ty ),+ ,} $( $tail:tt )*] ) => { - quick_error!(SORT [$( $def )*] + impl_error_chain_kind!(SORT [$( $def )*] items [$( $(#[$imeta])* => $iitem: $imode [$( $ivar:$ityp ),*] {$( $ifuncs )*} )*] buf [$( #[$bmeta] )* => $bitem: STRUCT [$( $qvar:$qtyp ),*] ] queue [$( $tail )*]); @@ -152,7 +153,7 @@ macro_rules! quick_error { => $bitem:ident: $bmode:tt [$( $bvar:ident: $btyp:ty ),*] ] queue [ {$( $qfuncs:tt )*} $( $tail:tt )*] ) => { - quick_error!(SORT [$( $def )*] + impl_error_chain_kind!(SORT [$( $def )*] items [$( $(#[$imeta])* => $iitem: $imode [$( $ivar:$ityp ),*] {$( $ifuncs )*} )* $(#[$bmeta])* => $bitem: $bmode [$( $bvar:$btyp ),*] {$( $qfuncs )*} ] buf [ ] @@ -167,7 +168,7 @@ macro_rules! quick_error { => $bitem:ident: $bmode:tt [$( $bvar:ident: $btyp:ty ),*] ] queue [ $qitem:ident $( $tail:tt )*] ) => { - quick_error!(SORT [$( $def )*] + impl_error_chain_kind!(SORT [$( $def )*] items [$( $(#[$imeta])* => $iitem: $imode [$( $ivar:$ityp ),*] {$( $ifuncs )*} )* $(#[$bmeta])* => $bitem: $bmode [$( $bvar:$btyp ),*] {} ] buf [ => $qitem : UNIT [ ] ] @@ -182,7 +183,7 @@ macro_rules! quick_error { => $bitem:ident: $bmode:tt [$( $bvar:ident: $btyp:ty ),*] ] queue [ ] ) => { - quick_error!(SORT [$( $def )*] + impl_error_chain_kind!(SORT [$( $def )*] items [$( $(#[$imeta])* => $iitem: $imode [$( $ivar:$ityp ),*] {$( $ifuncs )*} )* $(#[$bmeta])* => $bitem: $bmode [$( $bvar:$btyp ),*] {} ] buf [ ] @@ -200,6 +201,9 @@ macro_rules! quick_error { $(#[$imeta])* $iitem $(($( $ttyp ),*))* $({$( $svar: $styp ),*})*, )* + + #[doc(hidden)] + __Nonexhaustive {} } }; // Private enum (Queue Empty) @@ -223,7 +227,7 @@ macro_rules! 
quick_error { queue [$( #[$qmeta:meta] )* => $qitem:ident: UNIT [ ] $( $queue:tt )*] ) => { - quick_error!(ENUM_DEFINITION [ $($def)* ] + impl_error_chain_kind!(ENUM_DEFINITION [ $($def)* ] body [$($( #[$imeta] )* => $iitem ($(($( $ttyp ),+))*) {$({$( $svar: $styp ),*})*} )* $( #[$qmeta] )* => $qitem () {} ] queue [ $($queue)* ] @@ -236,7 +240,7 @@ macro_rules! quick_error { queue [$( #[$qmeta:meta] )* => $qitem:ident: TUPLE [$( $qvar:ident: $qtyp:ty ),+] $( $queue:tt )*] ) => { - quick_error!(ENUM_DEFINITION [ $($def)* ] + impl_error_chain_kind!(ENUM_DEFINITION [ $($def)* ] body [$($( #[$imeta] )* => $iitem ($(($( $ttyp ),+))*) {$({$( $svar: $styp ),*})*} )* $( #[$qmeta] )* => $qitem (($( $qtyp ),*)) {} ] queue [ $($queue)* ] @@ -249,7 +253,7 @@ macro_rules! quick_error { queue [$( #[$qmeta:meta] )* => $qitem:ident: STRUCT [$( $qvar:ident: $qtyp:ty ),*] $( $queue:tt )*] ) => { - quick_error!(ENUM_DEFINITION [ $($def)* ] + impl_error_chain_kind!(ENUM_DEFINITION [ $($def)* ] body [$($( #[$imeta] )* => $iitem ($(($( $ttyp ),+))*) {$({$( $svar: $styp ),*})*} )* $( #[$qmeta] )* => $qitem () {{$( $qvar: $qtyp ),*}} ] queue [ $($queue)* ] @@ -260,7 +264,7 @@ macro_rules! quick_error { $item:ident: $imode:tt [$(#[$imeta:meta])*] [$( $var:ident: $typ:ty ),*] {$( $funcs:tt )*} )*} ) => { - #[allow(unused)] + #[allow(unknown_lints, unused, unused_doc_comment)] impl ::std::fmt::Display for $name { fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result @@ -268,16 +272,18 @@ macro_rules! quick_error { match *self { $( $(#[$imeta])* - quick_error!(ITEM_PATTERN + impl_error_chain_kind!(ITEM_PATTERN $name $item: $imode [$( ref $var ),*] ) => { - let display_fn = quick_error!(FIND_DISPLAY_IMPL + let display_fn = impl_error_chain_kind!(FIND_DISPLAY_IMPL $name $item: $imode {$( $funcs )*}); display_fn(self, fmt) } )* + + _ => Ok(()) } } } @@ -286,10 +292,10 @@ macro_rules! quick_error { fn description(&self) -> &str { match *self { $( - quick_error!(ITEM_PATTERN + impl_error_chain_kind!(ITEM_PATTERN $name $item: $imode [$( ref $var ),*] ) => { - quick_error!(FIND_DESCRIPTION_IMPL + impl_error_chain_kind!(FIND_DESCRIPTION_IMPL $item: $imode self fmt [$( $var ),*] {$( $funcs )*}) } @@ -299,10 +305,10 @@ macro_rules! quick_error { fn cause(&self) -> Option<&::std::error::Error> { match *self { $( - quick_error!(ITEM_PATTERN + impl_error_chain_kind!(ITEM_PATTERN $name $item: $imode [$( ref $var ),*] ) => { - quick_error!(FIND_CAUSE_IMPL + impl_error_chain_kind!(FIND_CAUSE_IMPL $item: $imode [$( $var ),*] {$( $funcs )*}) } @@ -310,26 +316,28 @@ macro_rules! quick_error { } } }*/ - #[allow(unused)] + #[allow(unknown_lints, unused, unused_doc_comment)] impl $name { /// A string describing the error kind. pub fn description(&self) -> &str { match *self { $( $(#[$imeta])* - quick_error!(ITEM_PATTERN + impl_error_chain_kind!(ITEM_PATTERN $name $item: $imode [$( ref $var ),*] ) => { - quick_error!(FIND_DESCRIPTION_IMPL + impl_error_chain_kind!(FIND_DESCRIPTION_IMPL $item: $imode self fmt [$( $var ),*] {$( $funcs )*}) } )* + + _ => "", } } } $( - quick_error!(FIND_FROM_IMPL + impl_error_chain_kind!(FIND_FROM_IMPL $name $item: $imode [$( $var:$typ ),*] {$( $funcs )*}); )* @@ -337,7 +345,7 @@ macro_rules! 
quick_error { (FIND_DISPLAY_IMPL $name:ident $item:ident: $imode:tt { display($self_:tt) -> ($( $exprs:tt )*) $( $tail:tt )*} ) => { - |quick_error!(IDENT $self_): &$name, f: &mut ::std::fmt::Formatter| { + |impl_error_chain_kind!(IDENT $self_): &$name, f: &mut ::std::fmt::Formatter| { write!(f, $( $exprs )*) } }; @@ -354,7 +362,7 @@ macro_rules! quick_error { (FIND_DISPLAY_IMPL $name:ident $item:ident: $imode:tt { $t:tt $( $tail:tt )*} ) => { - quick_error!(FIND_DISPLAY_IMPL + impl_error_chain_kind!(FIND_DISPLAY_IMPL $name $item: $imode {$( $tail )*}) }; @@ -375,7 +383,7 @@ macro_rules! quick_error { [$( $var:ident ),*] { $t:tt $( $tail:tt )*} ) => { - quick_error!(FIND_DESCRIPTION_IMPL + impl_error_chain_kind!(FIND_DESCRIPTION_IMPL $item: $imode $me $fmt [$( $var ),*] {$( $tail )*}) }; @@ -395,7 +403,7 @@ macro_rules! quick_error { [$( $var:ident ),*] { $t:tt $( $tail:tt )*} ) => { - quick_error!(FIND_CAUSE_IMPL + impl_error_chain_kind!(FIND_CAUSE_IMPL $item: $imode [$( $var ),*] { $($tail)* }) }; @@ -416,7 +424,7 @@ macro_rules! quick_error { } } )* - quick_error!(FIND_FROM_IMPL + impl_error_chain_kind!(FIND_FROM_IMPL $name $item: $imode [$( $var:$typ ),*] {$( $tail )*}); }; @@ -429,7 +437,7 @@ macro_rules! quick_error { $name::$item } } - quick_error!(FIND_FROM_IMPL + impl_error_chain_kind!(FIND_FROM_IMPL $name $item: UNIT [ ] {$( $tail )*}); }; @@ -442,7 +450,7 @@ macro_rules! quick_error { $name::$item($( $texpr ),*) } } - quick_error!(FIND_FROM_IMPL + impl_error_chain_kind!(FIND_FROM_IMPL $name $item: TUPLE [$( $var:$typ ),*] { $($tail)* }); }; @@ -457,7 +465,7 @@ macro_rules! quick_error { } } } - quick_error!(FIND_FROM_IMPL + impl_error_chain_kind!(FIND_FROM_IMPL $name $item: STRUCT [$( $var:$typ ),*] { $($tail)* }); }; @@ -465,7 +473,7 @@ macro_rules! quick_error { [$( $var:ident: $typ:ty ),*] { $t:tt $( $tail:tt )*} ) => { - quick_error!(FIND_FROM_IMPL + impl_error_chain_kind!(FIND_FROM_IMPL $name $item: $imode [$( $var:$typ ),*] {$( $tail )*} ); @@ -506,24 +514,28 @@ macro_rules! 
quick_error { // This is to contrast FIND_* clauses which just find stuff they need and // skip everything else completely (ERROR_CHECK $imode:tt display($self_:tt) -> ($( $exprs:tt )*) $( $tail:tt )*) - => { quick_error!(ERROR_CHECK $imode $($tail)*); }; + => { impl_error_chain_kind!(ERROR_CHECK_COMMA $imode $($tail)*); }; (ERROR_CHECK $imode:tt display($pattern: expr) $( $tail:tt )*) - => { quick_error!(ERROR_CHECK $imode $($tail)*); }; + => { impl_error_chain_kind!(ERROR_CHECK_COMMA $imode $($tail)*); }; (ERROR_CHECK $imode:tt display($pattern: expr, $( $exprs:tt )*) $( $tail:tt )*) - => { quick_error!(ERROR_CHECK $imode $($tail)*); }; + => { impl_error_chain_kind!(ERROR_CHECK_COMMA $imode $($tail)*); }; (ERROR_CHECK $imode:tt description($expr:expr) $( $tail:tt )*) - => { quick_error!(ERROR_CHECK $imode $($tail)*); }; + => { impl_error_chain_kind!(ERROR_CHECK_COMMA $imode $($tail)*); }; (ERROR_CHECK $imode:tt cause($expr:expr) $($tail:tt)*) - => { quick_error!(ERROR_CHECK $imode $($tail)*); }; + => { impl_error_chain_kind!(ERROR_CHECK_COMMA $imode $($tail)*); }; (ERROR_CHECK $imode:tt from() $($tail:tt)*) - => { quick_error!(ERROR_CHECK $imode $($tail)*); }; + => { impl_error_chain_kind!(ERROR_CHECK_COMMA $imode $($tail)*); }; (ERROR_CHECK $imode:tt from($ftyp:ty) $($tail:tt)*) - => { quick_error!(ERROR_CHECK $imode $($tail)*); }; + => { impl_error_chain_kind!(ERROR_CHECK_COMMA $imode $($tail)*); }; (ERROR_CHECK TUPLE from($fvar:ident: $ftyp:ty) -> ($( $e:expr ),*) $( $tail:tt )*) - => { quick_error!(ERROR_CHECK TUPLE $($tail)*); }; + => { impl_error_chain_kind!(ERROR_CHECK_COMMA TUPLE $($tail)*); }; (ERROR_CHECK STRUCT from($fvar:ident: $ftyp:ty) -> {$( $v:ident: $e:expr ),*} $( $tail:tt )*) - => { quick_error!(ERROR_CHECK STRUCT $($tail)*); }; + => { impl_error_chain_kind!(ERROR_CHECK_COMMA STRUCT $($tail)*); }; (ERROR_CHECK $imode:tt ) => {}; + (ERROR_CHECK_COMMA $imode:tt , $( $tail:tt )*) + => { impl_error_chain_kind!(ERROR_CHECK $imode $($tail)*); }; + (ERROR_CHECK_COMMA $imode:tt $( $tail:tt )*) + => { impl_error_chain_kind!(ERROR_CHECK $imode $($tail)*); }; // Utility functions (IDENT $ident:ident) => { $ident } } diff --git a/src/vendor/error-chain-0.10.0/src/lib.rs b/src/vendor/error-chain/src/lib.rs similarity index 57% rename from src/vendor/error-chain-0.10.0/src/lib.rs rename to src/vendor/error-chain/src/lib.rs index f88095497a..d0881fcef1 100644 --- a/src/vendor/error-chain-0.10.0/src/lib.rs +++ b/src/vendor/error-chain/src/lib.rs @@ -1,4 +1,6 @@ #![deny(missing_docs)] +#![allow(unknown_lints)] // to be removed when unused_doc_comments lints is merged +#![doc(html_root_url = "https://docs.rs/error-chain/0.11.0")] //! A library for consistent and reliable error handling //! @@ -14,7 +16,7 @@ //! follow the [quickstart.rs] template, and read this [intro] //! to error-chain. //! -//! [quickstart.rs]: https://github.com/brson/error-chain/blob/master/examples/quickstart.rs +//! [quickstart.rs]: https://github.com/rust-lang-nursery/error-chain/blob/master/examples/quickstart.rs //! [intro]: http://brson.github.io/2016/11/30/starting-with-error-chain //! //! ## Why error chain? @@ -22,7 +24,7 @@ //! * error-chain is easy to configure. Handle errors robustly with minimal //! effort. //! * Basic error handling requires no maintenance of custom error types -//! nor the `From` conversions that make `?` work. +//! nor the [`From`] conversions that make `?` work. //! * error-chain scales from simple error handling strategies to more //! rigorous. 
Return formatted strings for simple errors, only //! introducing error variants and their strong typing as needed for @@ -38,56 +40,56 @@ //! error-chain is based on the following principles: //! //! * No error should ever be discarded. This library primarily -//! makes it easy to "chain" errors with the `chain_err` method. +//! makes it easy to "chain" errors with the [`chain_err`] method. //! * Introducing new errors is trivial. Simple errors can be introduced //! at the error site with just a string. //! * Handling errors is possible with pattern matching. //! * Conversions between error types are done in an automatic and -//! consistent way - `From` conversion behavior is never specified +//! consistent way - [`From`] conversion behavior is never specified //! explicitly. -//! * Errors implement Send. +//! * Errors implement [`Send`]. //! * Errors can carry backtraces. //! //! Similar to other libraries like [error-type] and [quick-error], //! this library introduces the error chaining mechanism originally -//! employed by Cargo. The `error_chain!` macro declares the types +//! employed by Cargo. The [`error_chain!`] macro declares the types //! and implementation boilerplate necessary for fulfilling a //! particular error-handling strategy. Most importantly it defines a -//! custom error type (called `Error` by convention) and the `From` -//! conversions that let the `try!` macro and `?` operator work. +//! custom error type (called [`Error`] by convention) and the [`From`] +//! conversions that let the `?` operator work. //! //! This library differs in a few ways from previous error libs: //! -//! * Instead of defining the custom `Error` type as an enum, it is a -//! struct containing an `ErrorKind` (which defines the -//! `description` and `display` methods for the error), an opaque, -//! optional, boxed `std::error::Error + Send + 'static` object -//! (which defines the `cause`, and establishes the links in the -//! error chain), and a `Backtrace`. -//! * The macro also defines a `ResultExt` trait that defines a -//! `chain_err` method. This method on all `std::error::Error + Send + 'static` +//! * Instead of defining the custom [`Error`] type as an enum, it is a +//! struct containing an [`ErrorKind`][] (which defines the +//! [`description`] and [`display_chain`] methods for the error), an opaque, +//! optional, boxed [`std::error::Error`]` + `[`Send`]` + 'static` object +//! (which defines the [`cause`], and establishes the links in the +//! error chain), and a [`Backtrace`]. +//! * The macro also defines a [`ResultExt`] trait that defines a +//! [`chain_err`] method. This method on all [`std::error::Error`]` + `[`Send`]` + 'static` //! types extends the error chain by boxing the current //! error into an opaque object and putting it inside a new concrete //! error. -//! * It provides automatic `From` conversions between other error types -//! defined by the `error_chain!` that preserve type information, +//! * It provides automatic [`From`] conversions between other error types +//! defined by the [`error_chain!`] that preserve type information, //! and facilitate seamless error composition and matching of composed //! errors. -//! * It provides automatic `From` conversions between any other error -//! type that hides the type of the other error in the `cause` box. +//! * It provides automatic [`From`] conversions between any other error +//! type that hides the type of the other error in the [`cause`] box. //! 
* If `RUST_BACKTRACE` is enabled, it collects a single backtrace at //! the earliest opportunity and propagates it down the stack through -//! `From` and `ResultExt` conversions. +//! [`From`] and [`ResultExt`] conversions. //! //! To accomplish its goals it makes some tradeoffs: //! -//! * The split between the `Error` and `ErrorKind` types can make it +//! * The split between the [`Error`] and [`ErrorKind`] types can make it //! slightly more cumbersome to instantiate new (unchained) errors, -//! requiring an `Into` or `From` conversion; as well as slightly +//! requiring an [`Into`] or [`From`] conversion; as well as slightly //! more cumbersome to match on errors with another layer of types //! to match. -//! * Because the error type contains `std::error::Error + Send + 'static` objects, -//! it can't implement `PartialEq` for easy comparisons. +//! * Because the error type contains [`std::error::Error`]` + `[`Send`]` + 'static` objects, +//! it can't implement [`PartialEq`] for easy comparisons. //! //! ## Declaring error types //! @@ -96,7 +98,7 @@ //! basis, such as per module. //! //! Assuming you are using crate-level error types, typically you will -//! define an `errors` module and inside it call `error_chain!`: +//! define an `errors` module and inside it call [`error_chain!`]: //! //! ``` //! # #[macro_use] extern crate error_chain; @@ -147,14 +149,20 @@ //! Io(::std::io::Error) #[cfg(unix)]; //! } //! -//! // Define additional `ErrorKind` variants. The syntax here is -//! // the same as `quick_error!`, but the `from()` and `cause()` -//! // syntax is not supported. +//! // Define additional `ErrorKind` variants. Define custom responses with the +//! // `description` and `display` calls. //! errors { //! InvalidToolchainName(t: String) { //! description("invalid toolchain name") //! display("invalid toolchain name: '{}'", t) //! } +//! +//! // You can also add commas after description/display. +//! // This may work better with some editor auto-indentation modes: +//! UnknownToolchainVersion(v: String) { +//! description("unknown toolchain version"), // note the , +//! display("unknown toolchain version: '{}'", v), // trailing comma is allowed +//! } //! } //! } //! @@ -165,9 +173,9 @@ //! be omitted if it is empty. //! //! This populates the module with a number of definitions, -//! the most important of which are the `Error` type -//! and the `ErrorKind` type. An example of generated code can be found in the -//! [example_generated](example_generated) module. +//! the most important of which are the [`Error`] type +//! and the [`ErrorKind`] type. An example of generated code can be found in the +//! [example_generated](example_generated/index.html) module. //! //! ## Returning new errors //! @@ -182,7 +190,7 @@ //! } //! ``` //! -//! Introducing new error chains, with an `ErrorKind`: +//! Introducing new error chains, with an [`ErrorKind`]: //! //! ``` //! # #[macro_use] extern crate error_chain; @@ -196,37 +204,37 @@ //! } //! ``` //! -//! Note that the return type is the typedef `Result`, which is +//! Note that the return type is the typedef [`Result`], which is //! defined by the macro as `pub type Result = //! ::std::result::Result`. Note that in both cases -//! `.into()` is called to convert a type into the `Error` type; both -//! strings and `ErrorKind` have `From` conversions to turn them into -//! `Error`. +//! [`.into()`] is called to convert a type into the [`Error`] type; both +//! strings and [`ErrorKind`] have [`From`] conversions to turn them into +//! 
[`Error`]. //! -//! When the error is emitted inside a `try!` macro or behind the -//! `?` operator, the explicit conversion isn't needed; `try!` will -//! automatically convert `Err(ErrorKind)` to `Err(Error)`. So the -//! below is equivalent to the previous: +//! When the error is emitted behind the `?` operator, the explicit conversion +//! isn't needed; `Err(ErrorKind)` will automatically be converted to `Err(Error)`. +//! So the below is equivalent to the previous: //! //! ``` //! # #[macro_use] extern crate error_chain; //! # fn main() {} //! # error_chain! { errors { FooError } } //! fn foo() -> Result<()> { -//! Ok(try!(Err(ErrorKind::FooError))) +//! Ok(Err(ErrorKind::FooError)?) //! } //! //! fn bar() -> Result<()> { -//! Ok(try!(Err("bogus!"))) +//! Ok(Err("bogus!")?) //! } //! ``` //! //! ## The `bail!` macro //! //! The above method of introducing new errors works but is a little -//! verbose. Instead we can use the `bail!` macro, which, much like `try!` -//! and `?`, performs an early return with conversions. With `bail!` the -//! previous examples look like: +//! verbose. Instead, we can use the [`bail!`] macro, which performs an early return +//! with conversions done automatically. +//! +//! With [`bail!`] the previous examples look like: //! //! ``` //! # #[macro_use] extern crate error_chain; @@ -250,6 +258,8 @@ //! ``` //! //! ## Chaining errors +//! error-chain supports extending an error chain by appending new errors. +//! This can be done on a Result or on an existing Error. //! //! To extend the error chain: //! @@ -264,17 +274,63 @@ //! # } //! ``` //! -//! `chain_err` can be called on any `Result` type where the contained -//! error type implements `std::error::Error + Send + 'static`. If -//! the `Result` is an `Err` then `chain_err` evaluates the closure, -//! which returns *some type that can be converted to `ErrorKind`*, +//! [`chain_err`] can be called on any [`Result`] type where the contained +//! error type implements [`std::error::Error`]` + `[`Send`]` + 'static`, as long as +//! the [`Result`] type's corresponding [`ResultExt`] trait is in scope. If +//! the [`Result`] is an `Err` then [`chain_err`] evaluates the closure, +//! which returns *some type that can be converted to [`ErrorKind`]*, //! boxes the original error to store as the cause, then returns a new //! error containing the original error. //! +//! Calling [`chain_err`][Error_chain_err] on an existing [`Error`] instance has +//! the same signature and produces the same outcome as being called on a +//! [`Result`] matching the properties described above. This is most useful when +//! partially handling errors using the [`map_err`] function. +//! +//! To chain an error directly, use [`with_chain`]: +//! +//! ``` +//! # #[macro_use] extern crate error_chain; +//! # fn main() {} +//! # error_chain! {} +//! # fn do_something() -> Result<()> { unimplemented!() } +//! # fn test() -> Result<()> { +//! let res: Result<()> = +//! do_something().map_err(|e| Error::with_chain(e, "something went wrong")); +//! # Ok(()) +//! # } +//! ``` +//! +//! ## Linking errors +//! +//! To convert an error from another error chain to this error chain: +//! +//! ``` +//! # #[macro_use] extern crate error_chain; +//! # fn main() {} +//! # mod other { error_chain! {} } +//! error_chain! { +//! links { +//! OtherError(other::Error, other::ErrorKind); +//! } +//! } +//! +//! fn do_other_thing() -> other::Result<()> { unimplemented!() } +//! +//! # fn test() -> Result<()> { +//! 
let res: Result<()> = do_other_thing().map_err(|e| e.into()); +//! # Ok(()) +//! # } +//! ``` +//! +//! The [`Error`] and [`ErrorKind`] types implements [`From`] for the corresponding +//! types of all linked error chains. Linked errors do not introduce a new +//! cause to the error chain. +//! //! ## Matching errors //! //! error-chain error variants are matched with simple patterns. -//! `Error` is a tuple struct and its first field is the `ErrorKind`, +//! [`Error`] is a tuple struct and its first field is the [`ErrorKind`], //! making dispatching on error kinds relatively compact: //! //! ``` @@ -292,6 +348,7 @@ //! match Error::from("error!") { //! Error(ErrorKind::InvalidToolchainName(_), _) => { } //! Error(ErrorKind::Msg(_), _) => { } +//! _ => { } //! } //! # } //! ``` @@ -327,22 +384,69 @@ //! # } //! ``` //! +//! ## Inspecting errors +//! +//! An error-chain error contains information about the error itself, a backtrace, and the chain +//! of causing errors. For reporting purposes, this information can be accessed as follows. +//! +//! ``` +//! # #[macro_use] extern crate error_chain; +//! use error_chain::ChainedError; // for e.display_chain() +//! +//! error_chain! { +//! errors { +//! InvalidToolchainName(t: String) { +//! description("invalid toolchain name") +//! display("invalid toolchain name: '{}'", t) +//! } +//! } +//! } +//! +//! # fn main() { +//! // Generate an example error to inspect: +//! let e = "xyzzy".parse::() +//! .chain_err(|| ErrorKind::InvalidToolchainName("xyzzy".to_string())) +//! .unwrap_err(); +//! +//! // Get the brief description of the error: +//! assert_eq!(e.description(), "invalid toolchain name"); +//! +//! // Get the display version of the error: +//! assert_eq!(e.to_string(), "invalid toolchain name: 'xyzzy'"); +//! +//! // Get the full cause and backtrace: +//! println!("{}", e.display_chain().to_string()); +//! // Error: invalid toolchain name: 'xyzzy' +//! // Caused by: invalid digit found in string +//! // stack backtrace: +//! // 0: 0x7fa9f684fc94 - backtrace::backtrace::libunwind::trace +//! // at src/backtrace/libunwind.rs:53 +//! // - backtrace::backtrace::trace +//! // at src/backtrace/mod.rs:42 +//! // 1: 0x7fa9f6850b0e - backtrace::capture::{{impl}}::new +//! // at out/capture.rs:79 +//! // [..] +//! # } +//! ``` +//! +//! The [`Error`] and [`ErrorKind`] types also allow programmatic access to these elements. +//! //! ## Foreign links //! //! Errors that do not conform to the same conventions as this library //! can still be included in the error chain. They are considered "foreign //! errors", and are declared using the `foreign_links` block of the -//! `error_chain!` macro. `Error`s are automatically created from -//! foreign errors by the `try!` macro. +//! [`error_chain!`] macro. [`Error`]s are automatically created from +//! foreign errors by the `?` operator. //! //! Foreign links and regular links have one crucial difference: -//! `From` conversions for regular links *do not introduce a new error +//! [`From`] conversions for regular links *do not introduce a new error //! into the error chain*, while conversions for foreign links *always //! introduce a new error into the error chain*. So for the example -//! above all errors deriving from the `temp::Error` type will be -//! presented to the user as a new `ErrorKind::Temp` variant, and the -//! cause will be the original `temp::Error` error. In contrast, when -//! `rustup_utils::Error` is converted to `Error` the two `ErrorKind`s +//! 
above all errors deriving from the [`std::fmt::Error`] type will be +//! presented to the user as a new [`ErrorKind`] variant, and the +//! cause will be the original [`std::fmt::Error`] error. In contrast, when +//! `other_error::Error` is converted to `Error` the two `ErrorKind`s //! are converted between each other to create a new `Error` but the //! old error is discarded; there is no "cause" created from the //! original error. @@ -351,19 +455,87 @@ //! //! If the `RUST_BACKTRACE` environment variable is set to anything //! but ``0``, the earliest non-foreign error to be generated creates -//! a single backtrace, which is passed through all `From` conversions -//! and `chain_err` invocations of compatible types. To read the -//! backtrace just call the `backtrace()` method. +//! a single backtrace, which is passed through all [`From`] conversions +//! and [`chain_err`] invocations of compatible types. To read the +//! backtrace just call the [`backtrace`] method. //! //! Backtrace generation can be disabled by turning off the `backtrace` feature. //! +//! The Backtrace contains a Vec of [`BacktraceFrame`]s that can be operated +//! on directly. For example, to only see the files and line numbers of code +//! within your own project. +//! +//! ``` +//! # #[macro_use] +//! # extern crate error_chain; +//! # mod errors { +//! # error_chain! { +//! # foreign_links { +//! # Io(::std::io::Error); +//! # } +//! # } +//! # } +//! # use errors::*; +//! # #[cfg(feature="backtrace")] +//! # fn main() { +//! if let Err(ref e) = open_file() { +//! if let Some(backtrace) = e.backtrace() { +//! let frames = backtrace.frames(); +//! for frame in frames.iter() { +//! for symbol in frame.symbols().iter() { +//! if let (Some(file), Some(lineno)) = (symbol.filename(), symbol.lineno()) { +//! if file.display().to_string()[0..3] == "src".to_string(){ +//! println!("{}:{}", file.display().to_string(), lineno); +//! } +//! } +//! } +//! } +//! } +//! }; +//! # } +//! # #[cfg(not(feature="backtrace"))] +//! # fn main() { } +//! +//! fn open_file() -> Result<()> { +//! std::fs::File::open("does_not_exist")?; +//! Ok(()) +//! } +//! ``` +//! //! ## Iteration //! -//! The `iter` method returns an iterator over the chain of error boxes. +//! The [`iter`] method returns an iterator over the chain of error boxes. //! //! [error-type]: https://github.com/DanielKeep/rust-error-type //! [quick-error]: https://github.com/tailhook/quick-error +//! [`display_chain`]: trait.ChainedError.html#method.display_chain +//! [`error_chain!`]: macro.error_chain.html +//! [`bail!`]: macro.bail.html +//! [`Backtrace`]: struct.Backtrace.html + +//! [`Error`]: example_generated/struct.Error.html +//! [`with_chain`]: example_generated/struct.Error.html#method.with_chain +//! [Error_chain_err]: example_generated/struct.Error.html#method.chain_err +//! [`cause`]: example_generated/struct.Error.html#method.cause +//! [`backtrace`]: example_generated/struct.Error.html#method.backtrace +//! [`iter`]: example_generated/struct.Error.html#method.iter +//! [`ErrorKind`]: example_generated/enum.ErrorKind.html +//! [`description`]: example_generated/enum.ErrorKind.html#method.description +//! [`Result`]: example_generated/type.Result.html +//! [`ResultExt`]: example_generated/trait.ResultExt.html +//! [`chain_err`]: example_generated/trait.ResultExt.html#tymethod.chain_err + +//! [`std::error::Error`]: https://doc.rust-lang.org/std/error/trait.Error.html +//! [`Send`]: https://doc.rust-lang.org/std/marker/trait.Send.html +//! 
[`Into`]: https://doc.rust-lang.org/std/convert/trait.Into.html +//! [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html +//! [`PartialEq`]: https://doc.rust-lang.org/std/cmp/trait.PartialEq.html +//! [`std::fmt::Error`]: https://doc.rust-lang.org/std/fmt/struct.Error.html +//! [`.into()`]: https://doc.rust-lang.org/std/convert/trait.Into.html#tymethod.into +//! [`map_err`]: https://doc.rust-lang.org/std/result/enum.Result.html#method.map_err +//! [`BacktraceFrame`]: https://docs.rs/backtrace/0.3.2/backtrace/struct.BacktraceFrame.html + #[cfg(feature = "backtrace")] extern crate backtrace; @@ -381,7 +553,7 @@ pub use backtrace::Backtrace; pub type Backtrace = (); #[macro_use] -mod quick_error; +mod impl_error_chain_kind; #[macro_use] mod error_chain; #[macro_use] @@ -390,10 +562,18 @@ pub use quick_main::ExitCode; #[cfg(feature = "example_generated")] pub mod example_generated; +#[derive(Debug)] /// Iterator over the error chain using the `Error::cause()` method. -pub struct ErrorChainIter<'a>(pub Option<&'a error::Error>); +pub struct Iter<'a>(Option<&'a error::Error>); -impl<'a> Iterator for ErrorChainIter<'a> { +impl<'a> Iter<'a> { + /// Returns a new iterator over the error chain using `Error::cause()`. + pub fn new(err: Option<&'a error::Error>) -> Iter<'a> { + Iter(err) + } +} + +impl<'a> Iterator for Iter<'a> { type Item = &'a error::Error; fn next<'b>(&'b mut self) -> Option<&'a error::Error> { @@ -413,9 +593,29 @@ impl<'a> Iterator for ErrorChainIter<'a> { #[cfg(feature = "backtrace")] #[doc(hidden)] pub fn make_backtrace() -> Option> { - match std::env::var_os("RUST_BACKTRACE") { - Some(ref val) if val != "0" => Some(Arc::new(Backtrace::new())), - _ => None, + use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize, Ordering}; + + // The lowest bit indicates whether the value was computed, + // while the second lowest bit is the actual "enabled" bit. + static BACKTRACE_ENABLED_CACHE: AtomicUsize = ATOMIC_USIZE_INIT; + + let enabled = match BACKTRACE_ENABLED_CACHE.load(Ordering::Relaxed) { + 0 => { + let enabled = match std::env::var_os("RUST_BACKTRACE") { + Some(ref val) if val != "0" => true, + _ => false + }; + let encoded = ((enabled as usize) << 1) | 1; + BACKTRACE_ENABLED_CACHE.store(encoded, Ordering::Relaxed); + enabled + } + encoded => (encoded >> 1) != 0 + }; + + if enabled { + Some(Arc::new(Backtrace::new())) + } else { + None } } @@ -438,7 +638,7 @@ pub trait ChainedError: error::Error + Send + 'static { fn kind(&self) -> &Self::ErrorKind; /// Iterates over the error chain. - fn iter(&self) -> ErrorChainIter; + fn iter(&self) -> Iter; /// Returns the backtrace associated with this error. fn backtrace(&self) -> Option<&Backtrace>; @@ -447,10 +647,15 @@ pub trait ChainedError: error::Error + Send + 'static { /// context of this error. /// /// The full cause chain and backtrace, if present, will be printed. - fn display<'a>(&'a self) -> Display<'a, Self> { - Display(self) + fn display_chain<'a>(&'a self) -> DisplayChain<'a, Self> { + DisplayChain(self) } + /// Extends the error chain with a new entry. + fn chain_err(self, error: F) -> Self + where F: FnOnce() -> EK, + EK: Into; + /// Creates an error from its parts. #[doc(hidden)] fn new(kind: Self::ErrorKind, state: State) -> Self where Self: Sized; @@ -465,12 +670,13 @@ pub trait ChainedError: error::Error + Send + 'static { /// A struct which formats an error for output. 
#[derive(Debug)] -pub struct Display<'a, T: 'a + ?Sized>(&'a T); +pub struct DisplayChain<'a, T: 'a + ?Sized>(&'a T); -impl<'a, T> fmt::Display for Display<'a, T> +impl<'a, T> fmt::Display for DisplayChain<'a, T> where T: ChainedError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + // Keep `try!` for 1.10 support try!(writeln!(fmt, "Error: {}", self.0)); for e in self.0.iter().skip(1) { diff --git a/src/vendor/error-chain-0.10.0/src/quick_main.rs b/src/vendor/error-chain/src/quick_main.rs similarity index 88% rename from src/vendor/error-chain-0.10.0/src/quick_main.rs rename to src/vendor/error-chain/src/quick_main.rs index d400353a94..f81e7d704a 100644 --- a/src/vendor/error-chain-0.10.0/src/quick_main.rs +++ b/src/vendor/error-chain/src/quick_main.rs @@ -1,4 +1,4 @@ -/// Convenient wrapper to be able to use `try!` and such in the main. You can +/// Convenient wrapper to be able to use `?` and such in the main. You can /// use it with a separated function: /// /// ``` @@ -47,7 +47,7 @@ macro_rules! quick_main { ::std::process::exit(match $main() { Ok(ret) => $crate::ExitCode::code(ret), Err(ref e) => { - write!(&mut ::std::io::stderr(), "{}", $crate::ChainedError::display(e)) + write!(&mut ::std::io::stderr(), "{}", $crate::ChainedError::display_chain(e)) .expect("Error writing to stderr"); 1 @@ -65,9 +65,13 @@ pub trait ExitCode { } impl ExitCode for i32 { - fn code(self) -> i32 { self } + fn code(self) -> i32 { + self + } } impl ExitCode for () { - fn code(self) -> i32 { 0 } + fn code(self) -> i32 { + 0 + } } diff --git a/src/vendor/error-chain-0.10.0/tests/quick_main.rs b/src/vendor/error-chain/tests/quick_main.rs similarity index 84% rename from src/vendor/error-chain-0.10.0/tests/quick_main.rs rename to src/vendor/error-chain/tests/quick_main.rs index 3acda0f2a3..4ada3b4e03 100644 --- a/src/vendor/error-chain-0.10.0/tests/quick_main.rs +++ b/src/vendor/error-chain/tests/quick_main.rs @@ -24,7 +24,5 @@ mod i32 { mod closure { use super::*; - quick_main!(|| -> Result<()> { - Ok(()) - }); + quick_main!(|| -> Result<()> { Ok(()) }); } diff --git a/src/vendor/error-chain-0.10.0/tests/tests.rs b/src/vendor/error-chain/tests/tests.rs similarity index 76% rename from src/vendor/error-chain-0.10.0/tests/tests.rs rename to src/vendor/error-chain/tests/tests.rs index 154ede5be8..e603e02af8 100644 --- a/src/vendor/error-chain-0.10.0/tests/tests.rs +++ b/src/vendor/error-chain/tests/tests.rs @@ -1,7 +1,4 @@ #![allow(dead_code)] -//#![feature(trace_macros)] -// -//trace_macros!(true); #[macro_use] extern crate error_chain; @@ -197,41 +194,35 @@ fn order_test_8() { #[test] fn empty() { - error_chain! { }; + error_chain!{}; } #[test] #[cfg(feature = "backtrace")] fn has_backtrace_depending_on_env() { - use std::env; + use std::process::Command; + use std::path::Path; - error_chain! 
{ - types {} - links {} - foreign_links {} - errors { - MyError - } - } - - let original_value = env::var_os("RUST_BACKTRACE"); + let cmd_path = if cfg!(windows) { + Path::new("./target/debug/has_backtrace.exe") + } else { + Path::new("./target/debug/has_backtrace") + }; + let mut cmd = Command::new(cmd_path); // missing RUST_BACKTRACE and RUST_BACKTRACE=0 - env::remove_var("RUST_BACKTRACE"); - let err = Error::from(ErrorKind::MyError); - assert!(err.backtrace().is_none()); - env::set_var("RUST_BACKTRACE", "0"); - let err = Error::from(ErrorKind::MyError); - assert!(err.backtrace().is_none()); + cmd.env_remove("RUST_BACKTRACE"); + assert_eq!(cmd.status().unwrap().code().unwrap(), 0); + + cmd.env("RUST_BACKTRACE", "0"); + assert_eq!(cmd.status().unwrap().code().unwrap(), 0); // RUST_BACKTRACE set to anything but 0 - env::set_var("RUST_BACKTRACE", "yes"); - let err = Error::from(ErrorKind::MyError); - assert!(err.backtrace().is_some()); + cmd.env("RUST_BACKTRACE", "yes"); + assert_eq!(cmd.status().unwrap().code().unwrap(), 1); - if let Some(var) = original_value { - env::set_var("RUST_BACKTRACE", var); - } + cmd.env("RUST_BACKTRACE", "1"); + assert_eq!(cmd.status().unwrap().code().unwrap(), 1); } #[test] @@ -251,10 +242,30 @@ fn chain_err() { let _: Result<()> = Err(Error::from_kind(ErrorKind::Test)).chain_err(|| ""); } +/// Verify that an error chain is extended one by `Error::chain_err`, with +/// the new error added to the end. +#[test] +fn error_chain_err() { + error_chain! { + errors { + Test + } + } + + let base = Error::from(ErrorKind::Test); + let ext = base.chain_err(|| "Test passes"); + + if let Error(ErrorKind::Msg(_), _) = ext { + // pass + } else { + panic!("The error should be wrapped. {:?}", ext); + } +} + #[test] fn links() { mod test { - error_chain! {} + error_chain!{} } error_chain! 
{ @@ -273,7 +284,7 @@ mod foreign_link_test { // signature of the public foreign_link_error_path #[derive(Debug)] pub struct ForeignError { - cause: ForeignErrorCause + cause: ForeignErrorCause, } impl ::std::error::Error for ForeignError { @@ -281,7 +292,9 @@ mod foreign_link_test { "Foreign error description" } - fn cause(&self) -> Option<&::std::error::Error> { Some(&self.cause) } + fn cause(&self) -> Option<&::std::error::Error> { + Some(&self.cause) + } } impl fmt::Display for ForeignError { @@ -298,7 +311,9 @@ mod foreign_link_test { "Foreign error cause description" } - fn cause(&self) -> Option<&::std::error::Error> { None } + fn cause(&self) -> Option<&::std::error::Error> { + None + } } impl fmt::Display for ForeignErrorCause { @@ -322,43 +337,32 @@ mod foreign_link_test { #[test] fn display_underlying_error() { let chained_error = try_foreign_error().err().unwrap(); - assert_eq!( - format!("{}", ForeignError{ cause: ForeignErrorCause{} }), - format!("{}", chained_error) - ); + assert_eq!(format!("{}", ForeignError { cause: ForeignErrorCause {} }), + format!("{}", chained_error)); } #[test] fn finds_cause() { let chained_error = try_foreign_error().err().unwrap(); - assert_eq!( - format!("{}", ForeignErrorCause{}), - format!("{}", ::std::error::Error::cause(&chained_error).unwrap()) - ); + assert_eq!(format!("{}", ForeignErrorCause {}), + format!("{}", ::std::error::Error::cause(&chained_error).unwrap())); } #[test] fn iterates() { let chained_error = try_foreign_error().err().unwrap(); let mut error_iter = chained_error.iter(); - assert_eq!( - format!("{}", ForeignError{ cause: ForeignErrorCause{} }), - format!("{}", error_iter.next().unwrap()) - ); - assert_eq!( - format!("{}", ForeignErrorCause{}), - format!("{}", error_iter.next().unwrap()) - ); - assert_eq!( - format!("{:?}", None as Option<&::std::error::Error>), - format!("{:?}", error_iter.next()) - ); + assert!(!format!("{:?}", error_iter).is_empty()); + assert_eq!(format!("{}", ForeignError { cause: ForeignErrorCause {} }), + format!("{}", error_iter.next().unwrap())); + assert_eq!(format!("{}", ForeignErrorCause {}), + format!("{}", error_iter.next().unwrap())); + assert_eq!(format!("{:?}", None as Option<&::std::error::Error>), + format!("{:?}", error_iter.next())); } fn try_foreign_error() -> Result<()> { - try!(Err(ForeignError{ - cause: ForeignErrorCause{} - })); + Err(ForeignError { cause: ForeignErrorCause {} })?; Ok(()) } } @@ -370,9 +374,7 @@ mod attributes_test { #[cfg(not(test))] mod inner { - error_chain! { - - } + error_chain!{} } error_chain! { @@ -420,7 +422,7 @@ fn without_result() { #[test] fn documentation() { mod inner { - error_chain! {} + error_chain!{} } error_chain! { @@ -444,13 +446,13 @@ mod multiple_error_same_mod { MyError, MyErrorKind, MyResultExt, MyResult; } } - error_chain! {} + error_chain!{} } #[doc(test)] #[deny(dead_code)] mod allow_dead_code { - error_chain! {} + error_chain!{} } // Make sure links actually work! @@ -483,8 +485,8 @@ fn error_patterns() { // Tuples look nice when matching errors match Error::from("Test") { - Error(ErrorKind::Msg(_), _) => { - } + Error(ErrorKind::Msg(_), _) => {}, + _ => {}, } } @@ -559,7 +561,7 @@ fn types_declarations() { #[test] /// Calling chain_err over a `Result` containing an error to get a chained error -//// and constructing a MyError directly, passing it an error should be equivalent. +/// and constructing a MyError directly, passing it an error should be equivalent. 
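// Aside — an illustrative sketch, not taken from this patch, of how the
// `error_chain!`, `ResultExt::chain_err` and `quick_main!` pieces exercised by
// these tests are typically combined in an application crate. The crate
// layout, the `Config` error variant and the "app.toml" path are assumptions
// made up for the example.
#[macro_use]
extern crate error_chain;

mod errors {
    // Generates `Error`, `ErrorKind`, `Result` and `ResultExt` for the crate.
    error_chain! {
        errors {
            Config(path: String) {
                description("invalid configuration file")
                display("invalid configuration file: '{}'", path)
            }
        }
    }
}

use errors::*;

fn run() -> Result<()> {
    // `chain_err` wraps the underlying `io::Error`, keeping it as the cause so
    // that `display_chain` (used by `quick_main!`) can print the whole chain.
    ::std::fs::File::open("app.toml")
        .chain_err(|| ErrorKind::Config("app.toml".into()))?;
    Ok(())
}

// Expands to a `main` that writes the chain to stderr and exits with 1 on error.
quick_main!(run);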
fn rewrapping() { use std::env::VarError::{self, NotPresent, NotUnicode}; @@ -587,9 +589,40 @@ fn rewrapping() { NotUnicode(_) => Err(e).chain_err(|| "env var was bork文字化ã"), }); - assert_eq!( - format!("{}", our_error_a.unwrap_err()), - format!("{}", our_error_b.unwrap_err()) - ); + assert_eq!(format!("{}", our_error_a.unwrap_err()), + format!("{}", our_error_b.unwrap_err())); + +} +#[test] +fn comma_in_errors_impl() { + error_chain! { + links { } + + foreign_links { } + + errors { + HttpStatus(e: u32) { + description("http request returned an unsuccessful status code"), + display("http request returned an unsuccessful status code: {}", e) + } + } + }; +} + + +#[test] +fn trailing_comma_in_errors_impl() { + error_chain! { + links { } + + foreign_links { } + + errors { + HttpStatus(e: u32) { + description("http request returned an unsuccessful status code"), + display("http request returned an unsuccessful status code: {}", e), + } + } + }; } diff --git a/src/vendor/filetime/.cargo-checksum.json b/src/vendor/filetime/.cargo-checksum.json index 674ae31b29..5c4fe3fe7b 100644 --- a/src/vendor/filetime/.cargo-checksum.json +++ b/src/vendor/filetime/.cargo-checksum.json @@ -1 +1 @@ -{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"c8cfe2c700e7b1d6500d0ad8084694be7009095e9572aaf54bf695c1fe7822d6","Cargo.toml":"4e414fe72ef2afcae81fb5a89f39e59ec40844272b589381746623f612333305","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"fef1998633eb2f460e6b12bc1133a21f5674e0b53ae5914ba1e53f1b63a185c3","appveyor.yml":"da991211b72fa6f231af7adb84c9fb72f5a9131d1c0a3d47b8ceffe5a82c8542","src/lib.rs":"8fa03e69ab113e5a30c742f60b6beddc0b77ef41a1eb45e82f9df867c9265815"},"package":"5363ab8e4139b8568a6237db5248646e5a8a2f89bd5ccb02092182b11fd3e922"} \ No newline at end of file +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"fed0342a81e321ed9d4189905a9987d60d5b83b3c097dd3dc62d81461c8b65d8","Cargo.toml":"34c5b16ec63d0079f74203650c1766d928384f16801472f0e19912dee8e3fd9e","Cargo.toml.orig":"6ed10b4b57d08fa891d9c8d8579509107b2046965db9c76145b6ede345c9a570","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"fef1998633eb2f460e6b12bc1133a21f5674e0b53ae5914ba1e53f1b63a185c3","appveyor.yml":"3a74394c3e3ef3b2c7d9458f526e47587977e98d026b63f00be29479075ff597","src/lib.rs":"12dff581c45905398c5eab919cda969d811d1e56a486aee13b12027d7840a5f7","src/redox.rs":"7b6376069ea06e6593058699af40fa780c3d621c010d31bfa6c1bfae18de28f4","src/unix.rs":"943587ee42bfcb684cb0abf8567d478dc21006e01f96fb86c76e5d0e2b7932dd","src/windows.rs":"37dd08863698a5c32c103374ad1ed776405b75038c88bc48b7db28ad9c5bf86b"},"package":"6ab199bf38537c6f38792669e081e0bb278b9b7405bba2642e4e5d15bf732c0e"} \ No newline at end of file diff --git a/src/vendor/filetime/.travis.yml b/src/vendor/filetime/.travis.yml index 001cdd259e..ba7721842c 100644 --- a/src/vendor/filetime/.travis.yml +++ b/src/vendor/filetime/.travis.yml @@ -4,23 +4,23 @@ rust: - beta - nightly sudo: false +before_script: + - pip install 'travis-cargo<0.2' --user && export 
PATH=$HOME/.local/bin:$PATH script: - cargo build --verbose - cargo test --verbose - cargo doc --no-deps -after_success: | - [ $TRAVIS_BRANCH = master ] && - [ $TRAVIS_PULL_REQUEST = false ] && - echo '' > target/doc/index.html && - pip install ghp-import --user $USER && - $HOME/.local/bin/ghp-import -n target/doc && - git push -qf https://${TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages +after_success: + - travis-cargo --only nightly doc-upload +env: + global: + secure: "MIaQyJIgy7VUoYHY7KbYtxpN/t+a2eWk8PihYctz+F+1PANgD6KbgbF9JW4ip5MYaehk7UWs++kPkHiAvJFKWo2BnQswc4digTUa9Vc2n8/C0d0Ec2zn1EUXH4fkeaMvIffn8QQUPwuNdE6khifSfG8Y42siqJF4zkuPKwaLrUo=" + + + notifications: email: on_success: never -env: - global: - secure: dsIj09BQvGF872zKmqzG+WwCl7gfqwsnxcm3GZlAMgyLYm4juvHOwCRhIERCN3BCxPvdlSRKhe9Rwmp1RkiKuqTK3ITUTAy29Maf2vuL1T+zcdpZE0t6JSCU1gbEwzCA2foB1jzgy7Q47EzeJusmGNwibscjYmXKlH6JCFwTobM= os: - linux - osx diff --git a/src/vendor/filetime/Cargo.toml b/src/vendor/filetime/Cargo.toml index 971eaf6014..2cb3dd0cc3 100644 --- a/src/vendor/filetime/Cargo.toml +++ b/src/vendor/filetime/Cargo.toml @@ -1,19 +1,29 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + [package] name = "filetime" +version = "0.1.12" authors = ["Alex Crichton "] -version = "0.1.10" -license = "MIT/Apache-2.0" +description = "Platform-agnostic accessors of timestamps in File metadata\n" +homepage = "https://github.com/alexcrichton/filetime" +documentation = "http://alexcrichton.com/filetime" readme = "README.md" keywords = ["timestamp", "mtime"] +license = "MIT/Apache-2.0" repository = "https://github.com/alexcrichton/filetime" -homepage = "https://github.com/alexcrichton/filetime" -documentation = "http://alexcrichton.com/filetime" -description = """ -Platform-agnostic accessors of timestamps in File metadata -""" - -[dependencies] -libc = "0.2" - -[dev-dependencies] -tempdir = "0.3" +[dev-dependencies.tempdir] +version = "0.3" +[target."cfg(target_os = \"redox\")".dependencies.redox_syscall] +version = "0.1" +[target."cfg(unix)".dependencies.libc] +version = "0.2" diff --git a/src/vendor/filetime/Cargo.toml.orig b/src/vendor/filetime/Cargo.toml.orig new file mode 100644 index 0000000000..33a92ed312 --- /dev/null +++ b/src/vendor/filetime/Cargo.toml.orig @@ -0,0 +1,22 @@ +[package] +name = "filetime" +authors = ["Alex Crichton "] +version = "0.1.12" +license = "MIT/Apache-2.0" +readme = "README.md" +keywords = ["timestamp", "mtime"] +repository = "https://github.com/alexcrichton/filetime" +homepage = "https://github.com/alexcrichton/filetime" +documentation = "http://alexcrichton.com/filetime" +description = """ +Platform-agnostic accessors of timestamps in File metadata +""" + +[target.'cfg(unix)'.dependencies] +libc = "0.2" + +[target.'cfg(target_os = "redox")'.dependencies] +redox_syscall = "0.1" + +[dev-dependencies] +tempdir = "0.3" diff --git a/src/vendor/filetime/appveyor.yml b/src/vendor/filetime/appveyor.yml index 6a1b8dc19c..4a6104291e 100644 --- a/src/vendor/filetime/appveyor.yml +++ b/src/vendor/filetime/appveyor.yml @@ -14,4 +14,4 
@@ install: build: false test_script: - - cargo test --verbose + - cargo test --verbose --target %TARGET% diff --git a/src/vendor/filetime/src/lib.rs b/src/vendor/filetime/src/lib.rs index aa6bec1dfe..dff3a2de98 100644 --- a/src/vendor/filetime/src/lib.rs +++ b/src/vendor/filetime/src/lib.rs @@ -5,7 +5,7 @@ //! //! # Installation //! -//! Add this to you `Cargo.toml`: +//! Add this to your `Cargo.toml`: //! //! ```toml //! [dependencies] @@ -34,16 +34,23 @@ //! println!("{}", mtime.seconds()); //! ``` -extern crate libc; - -#[cfg(unix)] use std::os::unix::prelude::*; -#[cfg(windows)] use std::os::windows::prelude::*; - use std::fmt; use std::fs; use std::io; use std::path::Path; +#[cfg(all(unix, not(target_os = "redox")))] +#[path = "unix.rs"] +mod imp; + +#[cfg(target_os = "redox")] +#[path = "redox.rs"] +mod imp; + +#[cfg(target_os = "windows")] +#[path = "windows.rs"] +mod imp; + /// A helper structure to represent a timestamp for a file. /// /// The actual value contined within is platform-specific and does not have the @@ -83,15 +90,7 @@ impl FileTime { /// The returned value corresponds to the `mtime` field of `stat` on Unix /// platforms and the `ftLastWriteTime` field on Windows platforms. pub fn from_last_modification_time(meta: &fs::Metadata) -> FileTime { - #[cfg(unix)] - fn imp(meta: &fs::Metadata) -> FileTime { - FileTime::from_os_repr(meta.mtime() as u64, meta.mtime_nsec() as u32) - } - #[cfg(windows)] - fn imp(meta: &fs::Metadata) -> FileTime { - FileTime::from_os_repr(meta.last_write_time()) - } - imp(meta) + imp::from_last_modification_time(meta) } /// Creates a new timestamp from the last access time listed in the @@ -100,15 +99,7 @@ impl FileTime { /// The returned value corresponds to the `atime` field of `stat` on Unix /// platforms and the `ftLastAccessTime` field on Windows platforms. pub fn from_last_access_time(meta: &fs::Metadata) -> FileTime { - #[cfg(unix)] - fn imp(meta: &fs::Metadata) -> FileTime { - FileTime::from_os_repr(meta.atime() as u64, meta.atime_nsec() as u32) - } - #[cfg(windows)] - fn imp(meta: &fs::Metadata) -> FileTime { - FileTime::from_os_repr(meta.last_access_time()) - } - imp(meta) + imp::from_last_access_time(meta) } /// Creates a new timestamp from the creation time listed in the specified @@ -119,55 +110,7 @@ impl FileTime { /// that not all Unix platforms have this field available and may return /// `None` in some circumstances. pub fn from_creation_time(meta: &fs::Metadata) -> Option { - macro_rules! birthtim { - ($(($e:expr, $i:ident)),*) => { - #[cfg(any($(target_os = $e),*))] - fn imp(meta: &fs::Metadata) -> Option { - $( - #[cfg(target_os = $e)] - use std::os::$i::fs::MetadataExt; - )* - let raw = meta.as_raw_stat(); - Some(FileTime::from_os_repr(raw.st_birthtime as u64, - raw.st_birthtime_nsec as u32)) - } - - #[cfg(all(not(windows), - $(not(target_os = $e)),*))] - fn imp(_meta: &fs::Metadata) -> Option { - None - } - } - } - - birthtim! { - ("bitrig", bitrig), - ("freebsd", freebsd), - ("ios", ios), - ("macos", macos), - ("openbsd", openbsd) - } - - #[cfg(windows)] - fn imp(meta: &fs::Metadata) -> Option { - Some(FileTime::from_os_repr(meta.last_access_time())) - } - imp(meta) - } - - #[cfg(windows)] - fn from_os_repr(time: u64) -> FileTime { - // Windows write times are in 100ns intervals, so do a little math to - // get it into the right representation. 
- FileTime { - seconds: time / (1_000_000_000 / 100), - nanos: ((time % (1_000_000_000 / 100)) * 100) as u32, - } - } - - #[cfg(unix)] - fn from_os_repr(seconds: u64, nanos: u32) -> FileTime { - FileTime { seconds: seconds, nanos: nanos } + imp::from_creation_time(meta) } /// Returns the whole number of seconds represented by this timestamp. @@ -205,84 +148,51 @@ impl fmt::Display for FileTime { /// This function will set the `atime` and `mtime` metadata fields for a file /// on the local filesystem, returning any error encountered. pub fn set_file_times

(p: P, atime: FileTime, mtime: FileTime) - -> io::Result<()> where P: AsRef { - set_file_times_(p.as_ref(), atime, mtime) + -> io::Result<()> + where P: AsRef +{ + imp::set_file_times(p.as_ref(), atime, mtime) } -#[cfg(unix)] -fn set_file_times_(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { - use std::ffi::CString; - use libc::{timeval, time_t, suseconds_t, utimes}; - - let times = [to_timeval(&atime), to_timeval(&mtime)]; - let p = try!(CString::new(p.as_os_str().as_bytes())); - return unsafe { - if utimes(p.as_ptr() as *const _, times.as_ptr()) == 0 { - Ok(()) - } else { - Err(io::Error::last_os_error()) - } - }; - - fn to_timeval(ft: &FileTime) -> timeval { - timeval { - tv_sec: ft.seconds() as time_t, - tv_usec: (ft.nanoseconds() / 1000) as suseconds_t, - } - } -} - -#[cfg(windows)] -#[allow(bad_style)] -fn set_file_times_(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { - use std::fs::OpenOptions; - - type BOOL = i32; - type HANDLE = *mut u8; - type DWORD = u32; - #[repr(C)] - struct FILETIME { - dwLowDateTime: u32, - dwHighDateTime: u32, - } - extern "system" { - fn SetFileTime(hFile: HANDLE, - lpCreationTime: *const FILETIME, - lpLastAccessTime: *const FILETIME, - lpLastWriteTime: *const FILETIME) -> BOOL; - } - - let f = try!(OpenOptions::new().write(true).open(p)); - let atime = to_filetime(&atime); - let mtime = to_filetime(&mtime); - return unsafe { - let ret = SetFileTime(f.as_raw_handle() as *mut _, - 0 as *const _, - &atime, &mtime); - if ret != 0 { - Ok(()) - } else { - Err(io::Error::last_os_error()) - } - }; - - fn to_filetime(ft: &FileTime) -> FILETIME { - let intervals = ft.seconds() * (1_000_000_000 / 100) + - ((ft.nanoseconds() as u64) / 100); - FILETIME { - dwLowDateTime: intervals as DWORD, - dwHighDateTime: (intervals >> 32) as DWORD, - } - } +/// Set the last access and modification times for a file on the filesystem. +/// This function does not follow symlink. +/// +/// This function will set the `atime` and `mtime` metadata fields for a file +/// on the local filesystem, returning any error encountered. +pub fn set_symlink_file_times

(p: P, atime: FileTime, mtime: FileTime) + -> io::Result<()> + where P: AsRef +{ + imp::set_symlink_file_times(p.as_ref(), atime, mtime) } #[cfg(test)] mod tests { extern crate tempdir; + use std::io; + use std::path::Path; use std::fs::{self, File}; use self::tempdir::TempDir; - use super::{FileTime, set_file_times}; + use super::{FileTime, set_file_times, set_symlink_file_times}; + + #[cfg(unix)] + fn make_symlink(src: P, dst: Q) -> io::Result<()> + where P: AsRef, + Q: AsRef, + { + use std::os::unix::fs::symlink; + symlink(src, dst) + } + + #[cfg(windows)] + fn make_symlink(src: P, dst: Q) -> io::Result<()> + where P: AsRef, + Q: AsRef, + { + use std::os::windows::fs::symlink_file; + symlink_file(src, dst) + } #[test] fn set_file_times_test() { @@ -301,5 +211,72 @@ mod tests { let metadata = fs::metadata(&path).unwrap(); let mtime = FileTime::from_last_modification_time(&metadata); assert_eq!(mtime, new_mtime); + + let spath = td.path().join("bar.txt"); + make_symlink(&path, &spath).unwrap(); + let metadata = fs::symlink_metadata(&spath).unwrap(); + let smtime = FileTime::from_last_modification_time(&metadata); + + set_file_times(&spath, atime, mtime).unwrap(); + + let metadata = fs::metadata(&path).unwrap(); + let cur_mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, cur_mtime); + + let metadata = fs::symlink_metadata(&spath).unwrap(); + let cur_mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(smtime, cur_mtime); + + set_file_times(&spath, atime, new_mtime).unwrap(); + + let metadata = fs::metadata(&path).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_mtime); + + let metadata = fs::symlink_metadata(&spath).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, smtime); + } + + #[test] + fn set_symlink_file_times_test() { + let td = TempDir::new("filetime").unwrap(); + let path = td.path().join("foo.txt"); + File::create(&path).unwrap(); + + let metadata = fs::metadata(&path).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + let atime = FileTime::from_last_access_time(&metadata); + set_symlink_file_times(&path, atime, mtime).unwrap(); + + let new_mtime = FileTime::from_seconds_since_1970(10_000, 0); + set_symlink_file_times(&path, atime, new_mtime).unwrap(); + + let metadata = fs::metadata(&path).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_mtime); + + let spath = td.path().join("bar.txt"); + make_symlink(&path, &spath).unwrap(); + + let metadata = fs::symlink_metadata(&spath).unwrap(); + let smtime = FileTime::from_last_modification_time(&metadata); + let satime = FileTime::from_last_access_time(&metadata); + set_symlink_file_times(&spath, smtime, satime).unwrap(); + + let metadata = fs::metadata(&path).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_mtime); + + let new_smtime = FileTime::from_seconds_since_1970(20_000, 0); + set_symlink_file_times(&spath, atime, new_smtime).unwrap(); + + let metadata = fs::metadata(&spath).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_mtime); + + let metadata = fs::symlink_metadata(&spath).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_smtime); } } diff --git a/src/vendor/filetime/src/redox.rs b/src/vendor/filetime/src/redox.rs new file mode 100644 index 0000000000..d4d16d51e1 --- 
/dev/null +++ b/src/vendor/filetime/src/redox.rs @@ -0,0 +1,57 @@ +extern crate syscall; + +use std::fs; +use std::io; +use std::os::unix::prelude::*; +use std::path::Path; + +use FileTime; + +pub fn set_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + let fd = syscall::open(p.as_os_str().as_bytes(), 0) + .map_err(|err| io::Error::from_raw_os_error(err.errno))?; + set_file_times_redox(fd, atime, mtime) +} + +pub fn set_symlink_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + let fd = syscall::open(p.as_os_str().as_bytes(), syscall::O_NOFOLLOW) + .map_err(|err| io::Error::from_raw_os_error(err.errno))?; + set_file_times_redox(fd, atime, mtime) +} + +fn set_file_times_redox(fd: usize, atime: FileTime, mtime: FileTime) -> io::Result<()> { + use self::syscall::TimeSpec; + + fn to_timespec(ft: &FileTime) -> TimeSpec { + TimeSpec { + tv_sec: ft.seconds() as i64, + tv_nsec: ft.nanoseconds() as i32 + } + } + + let times = [to_timespec(&atime), to_timespec(&mtime)]; + let res = syscall::futimens(fd, ×); + let _ = syscall::close(fd); + match res { + Ok(_) => Ok(()), + Err(err) => Err(io::Error::from_raw_os_error(err.errno)) + } +} + +pub fn from_last_modification_time(meta: &fs::Metadata) -> FileTime { + FileTime { + seconds: meta.mtime() as u64, + nanos: meta.mtime_nsec() as u32, + } +} + +pub fn from_last_access_time(meta: &fs::Metadata) -> FileTime { + FileTime { + seconds: meta.atime() as u64, + nanos: meta.atime_nsec() as u32, + } +} + +pub fn from_creation_time(_meta: &fs::Metadata) -> Option { + None +} diff --git a/src/vendor/filetime/src/unix.rs b/src/vendor/filetime/src/unix.rs new file mode 100644 index 0000000000..41e9ccac15 --- /dev/null +++ b/src/vendor/filetime/src/unix.rs @@ -0,0 +1,94 @@ +extern crate libc; + +use std::ffi::CString; +use std::fs; +use std::io; +use std::os::unix::prelude::*; +use std::path::Path; + +use self::libc::{c_int, c_char, timeval, time_t, suseconds_t}; + +use FileTime; + +pub fn set_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + set_file_times_u(p, atime, mtime, libc::utimes) +} + +#[cfg(target_os = "android")] +pub fn set_symlink_file_times(_p: &Path, _atime: FileTime, _mtime: FileTime) -> io::Result<()> { + Err(io::Error::new(io::ErrorKind::Other, "not supported on Android")) +} + +#[cfg(not(target_os = "android"))] +pub fn set_symlink_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + set_file_times_u(p, atime, mtime, libc::lutimes) +} + +fn set_file_times_u(p: &Path, + atime: FileTime, + mtime: FileTime, + utimes: unsafe extern fn(*const c_char, *const timeval) -> c_int) + -> io::Result<()> +{ + let times = [to_timeval(&atime), to_timeval(&mtime)]; + let p = try!(CString::new(p.as_os_str().as_bytes())); + return if unsafe { utimes(p.as_ptr() as *const _, times.as_ptr()) == 0 } { + Ok(()) + } else { + Err(io::Error::last_os_error()) + }; + + fn to_timeval(ft: &FileTime) -> timeval { + timeval { + tv_sec: ft.seconds() as time_t, + tv_usec: (ft.nanoseconds() / 1000) as suseconds_t, + } + } +} + +pub fn from_last_modification_time(meta: &fs::Metadata) -> FileTime { + FileTime { + seconds: meta.mtime() as u64, + nanos: meta.mtime_nsec() as u32, + } +} + +pub fn from_last_access_time(meta: &fs::Metadata) -> FileTime { + FileTime { + seconds: meta.atime() as u64, + nanos: meta.atime_nsec() as u32, + } +} + +pub fn from_creation_time(meta: &fs::Metadata) -> Option { + macro_rules! 
birthtim { + ($(($e:expr, $i:ident)),*) => { + #[cfg(any($(target_os = $e),*))] + fn imp(meta: &fs::Metadata) -> Option { + $( + #[cfg(target_os = $e)] + use std::os::$i::fs::MetadataExt; + )* + Some(FileTime { + seconds: meta.st_birthtime() as u64, + nanos: meta.st_birthtime_nsec() as u32, + }) + } + + #[cfg(all($(not(target_os = $e)),*))] + fn imp(_meta: &fs::Metadata) -> Option { + None + } + } + } + + birthtim! { + ("bitrig", bitrig), + ("freebsd", freebsd), + ("ios", ios), + ("macos", macos), + ("openbsd", openbsd) + } + + imp(meta) +} diff --git a/src/vendor/filetime/src/windows.rs b/src/vendor/filetime/src/windows.rs new file mode 100644 index 0000000000..f3eb2bf449 --- /dev/null +++ b/src/vendor/filetime/src/windows.rs @@ -0,0 +1,87 @@ +#![allow(bad_style)] + +use std::fs::{self, OpenOptions}; +use std::io; +use std::os::windows::prelude::*; +use std::path::Path; + +use FileTime; + +pub fn set_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + set_file_times_w(p, atime, mtime, OpenOptions::new()) +} + +pub fn set_symlink_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + use std::os::windows::fs::OpenOptionsExt; + const FILE_FLAG_OPEN_REPARSE_POINT: u32 = 0x00200000; + + let mut options = OpenOptions::new(); + options.custom_flags(FILE_FLAG_OPEN_REPARSE_POINT); + set_file_times_w(p, atime, mtime, options) +} + +pub fn set_file_times_w(p: &Path, + atime: FileTime, + mtime: FileTime, + mut options: OpenOptions) -> io::Result<()> { + type BOOL = i32; + type HANDLE = *mut u8; + type DWORD = u32; + + #[repr(C)] + struct FILETIME { + dwLowDateTime: u32, + dwHighDateTime: u32, + } + + extern "system" { + fn SetFileTime(hFile: HANDLE, + lpCreationTime: *const FILETIME, + lpLastAccessTime: *const FILETIME, + lpLastWriteTime: *const FILETIME) -> BOOL; + } + + let f = try!(options.write(true).open(p)); + let atime = to_filetime(&atime); + let mtime = to_filetime(&mtime); + return unsafe { + let ret = SetFileTime(f.as_raw_handle() as *mut _, + 0 as *const _, + &atime, &mtime); + if ret != 0 { + Ok(()) + } else { + Err(io::Error::last_os_error()) + } + }; + + fn to_filetime(ft: &FileTime) -> FILETIME { + let intervals = ft.seconds() * (1_000_000_000 / 100) + + ((ft.nanoseconds() as u64) / 100); + FILETIME { + dwLowDateTime: intervals as DWORD, + dwHighDateTime: (intervals >> 32) as DWORD, + } + } +} + +pub fn from_last_modification_time(meta: &fs::Metadata) -> FileTime { + from_intervals(meta.last_write_time()) +} + +pub fn from_last_access_time(meta: &fs::Metadata) -> FileTime { + from_intervals(meta.last_access_time()) +} + +pub fn from_creation_time(meta: &fs::Metadata) -> Option { + Some(from_intervals(meta.creation_time())) +} + +fn from_intervals(ticks: u64) -> FileTime { + // Windows write times are in 100ns intervals, so do a little math to + // get it into the right representation. 
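    // Aside (not in the vendored source): 1_000_000_000 / 100 is the number of
    // 100-nanosecond ticks per second, i.e. 10_000_000. Dividing by it yields
    // whole seconds; the remainder is still in ticks, so multiplying it by 100
    // converts it back to nanoseconds for the `nanos` field.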
+ FileTime { + seconds: ticks / (1_000_000_000 / 100), + nanos: ((ticks % (1_000_000_000 / 100)) * 100) as u32, + } +} diff --git a/src/vendor/flate2/.cargo-checksum.json b/src/vendor/flate2/.cargo-checksum.json index 876c967457..4cac1510d4 100644 --- a/src/vendor/flate2/.cargo-checksum.json +++ b/src/vendor/flate2/.cargo-checksum.json @@ -1 +1 @@ -{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"e33073c310c4bc45c4017b793e710c689fbf4fcbd618b3dee92a6343364ece0f","Cargo.toml":"7cccd970edc30578b4fa9fb27762ec77fbd31df0281d0c1d0a69ea16896216eb","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"b875d1d0ca491813f3a2469ea69d37dd667ad0c70f895e6bbda540c99e291154","appveyor.yml":"a39fd7664bf538147bfb494b9f9c96d6f898d9896a5d5e1806019f399cc44bff","src/bufreader.rs":"0a1213858056c8154066f45df7b261c052c6a2c55ec88bc21f56ad3f2748d8c4","src/crc.rs":"869b4cc79a61b6dda02253430c4ca09fd1efee51509a7552bdc0c8dc5bd2b6bb","src/deflate.rs":"4783d36a1a323c0004f2ebd3af7f6ac9c949cd1e7dc5fc54eacdc5d944098271","src/ffi.rs":"7bd962e85dc05b18c878dc3d5abb054f2c41f7b20a793d2ddbba755b7661fcfa","src/gz.rs":"d4fae4fbcf17b7ad7b233eee2027cfbeb7ed138b1efdf64068fdbcc7e1ade26e","src/lib.rs":"572a609457d29bc93340d65f561454c07d3638b4fb741906d1949232d410dc73","src/mem.rs":"51fa61421f1b235ace54596b3a0e4dcd2a67324dcd16d6552d7d6a7368935e50","src/zio.rs":"1d1fc161e40939b1f84da98a433d3b0adbf9e29baee578042c2919b123d0b66d","src/zlib.rs":"ecc52ab6b8034f0fdb89d3a01b75cc181160e44225160800448d183fed4ef2ee","tests/corrupt-file.gz":"083dd284aa1621916a2d0f66ea048c8d3ba7a722b22d0d618722633f51e7d39c","tests/good-file.gz":"87296963e53024a74752179ce7e54087565d358a85d3e65c3b37ef36eaa3d4a6","tests/good-file.txt":"bc4e03658a441fe2ad2df7cd2197144b87e41696f01e327b380e869cd9b485a0","tests/gunzip.rs":"3d2e0a80756474dc2b08f20071685117083765c4f1763456c676f1feeaff35e9","tests/multi.gz":"efa3341da052f95056314cc6920e02a3da15bdef30234b2552fb407812db5cc6","tests/multi.txt":"dbea9325179efe46ea2add94f7b6b745ca983fabb208dc6d34aa064623d7ee23","tests/tokio.rs":"d19defd6c369877f406ed8bd74683a257fde7de51f4161004cea4260faf4464d"},"package":"36df0166e856739905cd3d7e0b210fe818592211a008862599845e012d8d304c"} \ No newline at end of file 
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"98cf8c8df11438fb438cfe8226d1123bce6680832a4accf91f83614a10363466","Cargo.toml":"b143aa0bd29dbdd83e67e0d28f91f398e0a006d41a899a088c1b560af2bad5a8","Cargo.toml.orig":"edbd2097e392206d1175a453fc0932ea4431179b3ec78565a4c9c49e668c985b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"2accc2c811f3cfa17753c9cbe1cb9856f6a5f52705dafc355e1d5494d2a81e2f","appveyor.yml":"7293e690b6b1fb75cbab1f72ffc5077dd145c806282969b595cc1a82ce1a2e62","examples/deflatedecoder-bufread.rs":"9066719a7b91c57792c1e305a808c12727c93938e936ac2ae16b0e174b82138f","examples/deflatedecoder-read.rs":"39ee43782a5bee7f808da7d32e1b5cd8b51e8be5e344ac11ad9be8a300bc7c85","examples/deflatedecoder-write.rs":"fd084e864a6f6ea6597b8baa2c6f2ab59c2d07da1c3aeb094ec4391c0266d0ad","examples/deflateencoder-bufread.rs":"9c1bd6d215095e338c5a6fc2b3c3b4685f836cae8f102a229d8571c17e2ece6a","examples/deflateencoder-read.rs":"805b677b584c3209ba46e7b4e9706e93d15ebe5f26e086760931fdecef14aeff","examples/deflateencoder-write.rs":"fa402135fa3d009aba4d492ff1287dc2a7cfd745c6fff37d3eb12fef924bf7f4","examples/flatereadext.rs":"656b9bc5352d30a4e6ba7d33a187bc4c1adc28ccb4bd7bffd67fa74c75da3a95","examples/gzbuilder.rs":"2bccaf7f3fb81d1251f410323a3f9a98d838754ce3d370e16ed15c97be5d91bc","examples/gzdecoder-bufread.rs":"76b208873eb2461d213cdf8b20c4062c8fc5dd17a874df12f120415b43d81da3","examples/gzdecoder-read.rs":"e90f4b05312341bdd3b9995b919903efd5be75a7815473d472c1a494446acac2","examples/gzencoder-bufread.rs":"d403cb34700a0c654e11d111e4f862b7a0ae88d05df39a2c8ecd163fe3bcf2f0","examples/gzencoder-read.rs":"c97c87b059da9c46e0d0ddf06ea4db7c22a3dac9e699ab81dbb2d4a8b8681d3a","examples/gzencoder-write.rs":"5ff4f9ea1f9facad50d08ef1687bb785b2f657d6897b7253cd1af32e912e6284","examples/gzmultidecoder-bufread.rs":"23a92037081675fbab811b1d1ac1037130f4f8a4f2f0642c0bf063da835c0c94","examples/gzmultidecoder-read.rs":"d34e50a1fcd5d6c8c25cb203561fd44f698679ba624fe1ecfbfe7b67adff40ba","examples/hello_world.txt":"d2a84f4b8b650937ec8f73cd8be2c74add5a911ba64df27458ed8229da804a26","examples/zlibdecoder-bufread.rs":"b0d5646607d29a3b6d1add6ce002fab781a55463e9bb749ffff22492e2be0618","examples/zlibdecoder-read.rs":"5f5497f7534750229c182a90e17f2522f46ded904ebf942169fcc86fc2503abd","examples/zlibdecoder-write.rs":"755f6beec5d25874611101d6785b4245f405364e21e45f54ff6915d11a563f53","examples/zlibencoder-bufread.rs":"e7ab4059b98c583382a9d8ec21210329dd150c534af864d23e6beed58bd94ac1","examples/zlibencoder-read.rs":"61853ef3c41e94f45eecedbb90964175ce13390e1a5a01c13326af474714e962","examples/zlibencoder-write.rs":"ec2b7bdacc5b517029c1bd7a3ce6fd399a8f760503a98e9d31dea50f69296e0a","src/bufreader.rs":"ebfbaf104b292a64e8b46076c1bbb680c2941753c02928e85cb5215897891d4a","src/crc.rs":"fc9147392c4b87255950e9bb65e75f1ddebe3d45bfc985980a3937e80f345aab","src/deflate/bufread.rs":"9bbf8b1111f112db2a6bed9b797b2acf055611f605294be29010b8fb7cb67ada","src/deflate/mod.rs":"1f8280ea4ab35c91f886b0373eacbc89da89d53876d4473aa4ab32a07dd098aa","src/deflate/read.rs":"88c2ac0e393fbd8604639b4730f377eeb8797366dd729d52e14236c964af2b02","src/deflate/write.rs":"db6a4782fb4d46fbc87344e59fe8b4155522a76f8ea9d9bdea8bb1398b639b49","src/ffi.rs":"0cab9ad8d3c0c2558cc2b3e9d0faa9c7fc94a0518896fb1a130e573f72aad05e","src/gz/bufread.rs":"b9f038
d1ace807e97c4408ea0d1e5bc615697b7fc87fe4dc4183d99b6270b9c2","src/gz/mod.rs":"8a59e00a571f59bab43722000de3b128d443ff869c25e8d7a7f0eaa180055be9","src/gz/read.rs":"4e0c3560de39d7481484f4df6a6f012979421e257eebce247dab5f54a49407e7","src/gz/write.rs":"f5558769630a7f8d8d61a29f8625570ee50fa898b1aa3f3b7d24fcf50ecb2736","src/lib.rs":"aca4a46ac726cce6c67bf06719722b96006faf5330a8596099a9135fa54cfde9","src/mem.rs":"d8bf64e13e0a0bce49d4e3ee2522283f2d4541e2049aa928774d964398358ea6","src/zio.rs":"1076ceab3cee8a455ef8d199f66c0d0f19b1ae512421a776aaa50c9f6bacace5","src/zlib/bufread.rs":"464083100ca54e20643c7acfb6cd6ea96c928dca42667593997aa29d9d0de823","src/zlib/mod.rs":"6c959d5f7ba78ce42a2337327e1f730b0c529b853a13fbd8a59e9ba150763fa2","src/zlib/read.rs":"e963274f6013cb81b52510e7074f5c6554954a44b0391491cb831fff8c190290","src/zlib/write.rs":"0ce320af2aad71fe7ac91ba258ca348641dd7627bb51756dfb30e2e9ca9d9ad8","tests/corrupt-file.gz":"083dd284aa1621916a2d0f66ea048c8d3ba7a722b22d0d618722633f51e7d39c","tests/early-flush.rs":"6ff08e3e16722a61d141fc503a438bbb84c5beb3bdd28bbac9dfa03d3562cc3d","tests/good-file.gz":"87296963e53024a74752179ce7e54087565d358a85d3e65c3b37ef36eaa3d4a6","tests/good-file.txt":"bc4e03658a441fe2ad2df7cd2197144b87e41696f01e327b380e869cd9b485a0","tests/gunzip.rs":"3d2e0a80756474dc2b08f20071685117083765c4f1763456c676f1feeaff35e9","tests/multi.gz":"efa3341da052f95056314cc6920e02a3da15bdef30234b2552fb407812db5cc6","tests/multi.txt":"dbea9325179efe46ea2add94f7b6b745ca983fabb208dc6d34aa064623d7ee23","tests/tokio.rs":"d19defd6c369877f406ed8bd74683a257fde7de51f4161004cea4260faf4464d","tests/zero-write.rs":"027330824a7141963657efa26d99d7503a16492ed8779d7e316e22bba0e7f190"},"package":"e6234dd4468ae5d1e2dbb06fe2b058696fdc50a339c68a393aefbf00bc81e423"} \ No newline at end of file diff --git a/src/vendor/flate2/.travis.yml b/src/vendor/flate2/.travis.yml index 5b3a7ba31a..eb722100d5 100644 --- a/src/vendor/flate2/.travis.yml +++ b/src/vendor/flate2/.travis.yml @@ -9,13 +9,13 @@ before_script: script: - export CARGO_TARGET_DIR=`pwd`/target - cargo build --verbose + - rustdoc --test README.md -L target/debug/deps --extern flate2=target/debug/libflate2.rlib - cargo test --verbose - cargo test --verbose --features zlib - cargo test --verbose --features tokio - cargo test --verbose --features 'tokio zlib' - cargo test --verbose --features zlib --no-default-features - cargo clean && cargo build - - rustdoc --test README.md -L target/debug -L target/debug/deps - cargo doc --no-deps - cargo doc --no-deps --manifest-path=miniz-sys/Cargo.toml after_success: diff --git a/src/vendor/flate2/Cargo.toml b/src/vendor/flate2/Cargo.toml index 7b4bf47268..d1d100bb67 100644 --- a/src/vendor/flate2/Cargo.toml +++ b/src/vendor/flate2/Cargo.toml @@ -1,36 +1,61 @@ -[package] +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) +[package] name = "flate2" +version = "0.2.20" authors = ["Alex Crichton "] -version = "0.2.19" -license = "MIT/Apache-2.0" +description = "Bindings to miniz.c for DEFLATE compression and decompression exposed as\nReader/Writer streams. 
Contains bindings for zlib, deflate, and gzip-based\nstreams.\n" +homepage = "https://github.com/alexcrichton/flate2-rs" +documentation = "https://docs.rs/flate2" readme = "README.md" keywords = ["gzip", "flate", "zlib", "encoding"] categories = ["compression", "api-bindings"] +license = "MIT/Apache-2.0" repository = "https://github.com/alexcrichton/flate2-rs" -homepage = "https://github.com/alexcrichton/flate2-rs" -documentation = "https://docs.rs/flate2" -description = """ -Bindings to miniz.c for DEFLATE compression and decompression exposed as -Reader/Writer streams. Contains bindings for zlib, deflate, and gzip-based -streams. -""" - -[workspace] - -[dependencies] -libc = "0.2" -miniz-sys = { path = "miniz-sys", version = "0.1.7", optional = true } -libz-sys = { version = "1.0", optional = true } -tokio-io = { version = "0.1", optional = true } -futures = { version = "0.1", optional = true } - -[dev-dependencies] -rand = "0.3" -quickcheck = { version = "0.4", default-features = false } -tokio-core = "0.1" +[dependencies.libc] +version = "0.2" + +[dependencies.libz-sys] +version = "1.0" +optional = true + +[dependencies.miniz-sys] +version = "0.1.7" +optional = true + +[dependencies.futures] +version = "0.1" +optional = true + +[dependencies.tokio-io] +version = "0.1" +optional = true +[dev-dependencies.rand] +version = "0.3" + +[dev-dependencies.quickcheck] +version = "0.4" +default-features = false + +[dev-dependencies.tokio-core] +version = "0.1" [features] default = ["miniz-sys"] zlib = ["libz-sys"] tokio = ["tokio-io", "futures"] +[badges.travis-ci] +repository = "alexcrichton/flate2-rs" + +[badges.appveyor] +repository = "alexcrichton/flate2-rs" diff --git a/src/vendor/flate2/Cargo.toml.orig b/src/vendor/flate2/Cargo.toml.orig new file mode 100644 index 0000000000..48731b468c --- /dev/null +++ b/src/vendor/flate2/Cargo.toml.orig @@ -0,0 +1,40 @@ +[package] + +name = "flate2" +authors = ["Alex Crichton "] +version = "0.2.20" +license = "MIT/Apache-2.0" +readme = "README.md" +keywords = ["gzip", "flate", "zlib", "encoding"] +categories = ["compression", "api-bindings"] +repository = "https://github.com/alexcrichton/flate2-rs" +homepage = "https://github.com/alexcrichton/flate2-rs" +documentation = "https://docs.rs/flate2" +description = """ +Bindings to miniz.c for DEFLATE compression and decompression exposed as +Reader/Writer streams. Contains bindings for zlib, deflate, and gzip-based +streams. 
+""" + +[workspace] + +[dependencies] +libc = "0.2" +miniz-sys = { path = "miniz-sys", version = "0.1.7", optional = true } +libz-sys = { version = "1.0", optional = true } +tokio-io = { version = "0.1", optional = true } +futures = { version = "0.1", optional = true } + +[dev-dependencies] +rand = "0.3" +quickcheck = { version = "0.4", default-features = false } +tokio-core = "0.1" + +[features] +default = ["miniz-sys"] +zlib = ["libz-sys"] +tokio = ["tokio-io", "futures"] + +[badges] +travis-ci = { repository = "alexcrichton/flate2-rs" } +appveyor = { repository = "alexcrichton/flate2-rs" } diff --git a/src/vendor/flate2/README.md b/src/vendor/flate2/README.md index 887f7b5d97..c8937912f6 100644 --- a/src/vendor/flate2/README.md +++ b/src/vendor/flate2/README.md @@ -2,8 +2,8 @@ [![Build Status](https://travis-ci.org/alexcrichton/flate2-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/flate2-rs) [![Build status](https://ci.appveyor.com/api/projects/status/9tatexq47i3ee13k?svg=true)](https://ci.appveyor.com/project/alexcrichton/flate2-rs) - -[Documentation](https://docs.rs/flate2) +[![Crates.io](https://img.shields.io/crates/v/flate2.svg?maxAge=2592000)](https://crates.io/crates/flate2) +[![Documentation](https://docs.rs/flate2/badge.svg)](https://docs.rs/flate2) A streaming compression/decompression library for Rust. The underlying implementation by default uses [`miniz`](https://code.google.com/p/miniz/) but diff --git a/src/vendor/flate2/appveyor.yml b/src/vendor/flate2/appveyor.yml index f5d21540d3..0a140f7bc3 100644 --- a/src/vendor/flate2/appveyor.yml +++ b/src/vendor/flate2/appveyor.yml @@ -1,13 +1,19 @@ environment: matrix: - TARGET: x86_64-pc-windows-msvc + - TARGET: x86_64-pc-windows-gnu - TARGET: i686-pc-windows-msvc - TARGET: i686-pc-windows-gnu install: + - ps: >- + If ($Env:TARGET -eq 'x86_64-pc-windows-gnu') { + $Env:PATH += ';C:\msys64\mingw64\bin' + } ElseIf ($Env:TARGET -eq 'i686-pc-windows-gnu') { + $Env:PATH += ';C:\MinGW\bin' + } - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe" - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" - SET PATH=%PATH%;C:\Program Files (x86)\Rust\bin - - SET PATH=%PATH%;C:\MinGW\bin - rustc -V - cargo -V diff --git a/src/vendor/flate2/examples/deflatedecoder-bufread.rs b/src/vendor/flate2/examples/deflatedecoder-bufread.rs new file mode 100644 index 0000000000..117297feba --- /dev/null +++ b/src/vendor/flate2/examples/deflatedecoder-bufread.rs @@ -0,0 +1,24 @@ +extern crate flate2; + +use std::io::prelude::*; +use std::io; +use flate2::Compression; +use flate2::write::DeflateEncoder; +use flate2::bufread::DeflateDecoder; + +// Compress a sample string and print it after transformation. 
+fn main() { + let mut e = DeflateEncoder::new(Vec::new(), Compression::Default); + e.write(b"Hello World").unwrap(); + let bytes = e.finish().unwrap(); + println!("{}", decode_reader(bytes).unwrap()); +} + +// Uncompresses a Deflate Encoded vector of bytes and returns a string or error +// Here &[u8] implements Read +fn decode_reader(bytes: Vec) -> io::Result { + let mut deflater = DeflateDecoder::new(&bytes[..]); + let mut s = String::new(); + deflater.read_to_string(&mut s)?; + Ok(s) +} diff --git a/src/vendor/flate2/examples/deflatedecoder-read.rs b/src/vendor/flate2/examples/deflatedecoder-read.rs new file mode 100644 index 0000000000..2d26857802 --- /dev/null +++ b/src/vendor/flate2/examples/deflatedecoder-read.rs @@ -0,0 +1,24 @@ +extern crate flate2; + +use std::io::prelude::*; +use std::io; +use flate2::Compression; +use flate2::write::DeflateEncoder; +use flate2::read::DeflateDecoder; + +// Compress a sample string and print it after transformation. +fn main() { + let mut e = DeflateEncoder::new(Vec::new(), Compression::Default); + e.write(b"Hello World").unwrap(); + let bytes = e.finish().unwrap(); + println!("{}", decode_reader(bytes).unwrap()); +} + +// Uncompresses a Deflate Encoded vector of bytes and returns a string or error +// Here &[u8] implements Read +fn decode_reader(bytes: Vec) -> io::Result { + let mut deflater = DeflateDecoder::new(&bytes[..]); + let mut s = String::new(); + deflater.read_to_string(&mut s)?; + Ok(s) +} diff --git a/src/vendor/flate2/examples/deflatedecoder-write.rs b/src/vendor/flate2/examples/deflatedecoder-write.rs new file mode 100644 index 0000000000..46791e623b --- /dev/null +++ b/src/vendor/flate2/examples/deflatedecoder-write.rs @@ -0,0 +1,26 @@ +extern crate flate2; + +use std::io::prelude::*; +use std::io; +use flate2::Compression; +use flate2::write::DeflateEncoder; +use flate2::write::DeflateDecoder; + +// Compress a sample string and print it after transformation. 
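// Aside, not part of the vendored examples: `Write::write` (used in these
// examples) may legally perform a short write; `write_all` is the more
// defensive call once the input is more than a few bytes. A minimal sketch
// under that assumption, using the same `DeflateEncoder` API shown above:
extern crate flate2;

use std::io::prelude::*;
use std::io;
use flate2::Compression;
use flate2::write::DeflateEncoder;

// Compresses an arbitrary buffer, propagating any I/O error.
fn compress_all(data: &[u8]) -> io::Result<Vec<u8>> {
    let mut e = DeflateEncoder::new(Vec::new(), Compression::Default);
    e.write_all(data)?; // unlike `write`, guarantees the whole buffer is consumed
    e.finish()
}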
+fn main() { + let mut e = DeflateEncoder::new(Vec::new(), Compression::Default); + e.write(b"Hello World").unwrap(); + let bytes = e.finish().unwrap(); + println!("{}", decode_reader(bytes).unwrap()); +} + +// Uncompresses a Deflate Encoded vector of bytes and returns a string or error +// Here Vec implements Write +fn decode_reader(bytes: Vec) -> io::Result { + let mut writer = Vec::new(); + let mut deflater = DeflateDecoder::new(writer); + deflater.write(&bytes[..])?; + writer = deflater.finish()?; + let return_string = String::from_utf8(writer).expect("String parsing error"); + Ok(return_string) +} diff --git a/src/vendor/flate2/examples/deflateencoder-bufread.rs b/src/vendor/flate2/examples/deflateencoder-bufread.rs new file mode 100644 index 0000000000..4ec2ed1c72 --- /dev/null +++ b/src/vendor/flate2/examples/deflateencoder-bufread.rs @@ -0,0 +1,24 @@ +extern crate flate2; + +use std::io::prelude::*; +use std::io; +use flate2::Compression; +use flate2::bufread::DeflateEncoder; +use std::fs::File; +use std::io::BufReader; + +// Open file and debug print the contents compressed with Deflate +fn main() { + println!("{:?}", open_hello_world().unwrap()); +} + +// Opens sample file, compresses the contents and returns a Vector or error +// File wrapped in a BufReader implements Bufread +fn open_hello_world() -> io::Result> { + let f = File::open("examples/hello_world.txt")?; + let b = BufReader::new(f); + let mut deflater = DeflateEncoder::new(b, Compression::Fast); + let mut buffer = Vec::new(); + deflater.read_to_end(&mut buffer)?; + Ok(buffer) +} diff --git a/src/vendor/flate2/examples/deflateencoder-read.rs b/src/vendor/flate2/examples/deflateencoder-read.rs new file mode 100644 index 0000000000..bf1bcc6a66 --- /dev/null +++ b/src/vendor/flate2/examples/deflateencoder-read.rs @@ -0,0 +1,20 @@ +extern crate flate2; + +use std::io::prelude::*; +use std::io; +use flate2::Compression; +use flate2::read::DeflateEncoder; + +// Print the Deflate compressed representation of hello world +fn main() { + println!("{:?}", deflateencoder_read_hello_world().unwrap()); +} + +// Return a vector containing the Defalte compressed version of hello world +fn deflateencoder_read_hello_world() -> io::Result> { + let mut ret_vec = [0;100]; + let c = b"hello world"; + let mut deflater = DeflateEncoder::new(&c[..], Compression::Fast); + let count = deflater.read(&mut ret_vec)?; + Ok(ret_vec[0..count].to_vec()) +} diff --git a/src/vendor/flate2/examples/deflateencoder-write.rs b/src/vendor/flate2/examples/deflateencoder-write.rs new file mode 100644 index 0000000000..f511aec4b9 --- /dev/null +++ b/src/vendor/flate2/examples/deflateencoder-write.rs @@ -0,0 +1,12 @@ +extern crate flate2; + +use std::io::prelude::*; +use flate2::Compression; +use flate2::write::DeflateEncoder; + +// Vec implements Write to print the compressed bytes of sample string +fn main() { + let mut e = DeflateEncoder::new(Vec::new(), Compression::Default); + e.write(b"Hello World").unwrap(); + println!("{:?}", e.finish().unwrap()); +} diff --git a/src/vendor/flate2/examples/flatereadext.rs b/src/vendor/flate2/examples/flatereadext.rs new file mode 100644 index 0000000000..a3a9b04449 --- /dev/null +++ b/src/vendor/flate2/examples/flatereadext.rs @@ -0,0 +1,22 @@ +extern crate flate2; + +use flate2::{FlateReadExt, Compression}; +use std::io::prelude::*; +use std::io; +use std::fs::File; + +fn main() { + println!("{}", run().unwrap()); +} + +fn run() -> io::Result { + let f = File::open("examples/hello_world.txt")?; + + //gz_encode method 
comes from FlateReadExt and applies to a std::fs::File + let data = f.gz_encode(Compression::Default); + let mut buffer = String::new(); + + //gz_decode method comes from FlateReadExt and applies to a &[u8] + &data.gz_decode()?.read_to_string(&mut buffer)?; + Ok(buffer) +} diff --git a/src/vendor/flate2/examples/gzbuilder.rs b/src/vendor/flate2/examples/gzbuilder.rs new file mode 100644 index 0000000000..a61a03f4f1 --- /dev/null +++ b/src/vendor/flate2/examples/gzbuilder.rs @@ -0,0 +1,24 @@ +extern crate flate2; + +use std::io::prelude::*; +use std::io; +use std::fs::File; +use flate2::GzBuilder; +use flate2::Compression; + +// Open file and debug print the contents compressed with gzip +fn main() { + sample_builder().unwrap(); +} + +// GzBuilder opens a file and writes a sample string using Builder pattern +fn sample_builder() -> Result<(), io::Error> { + let f = File::create("examples/hello_world.gz")?; + let mut gz = GzBuilder::new() + .filename("hello_world.txt") + .comment("test file, please delete") + .write(f, Compression::Default); + gz.write(b"hello world")?; + gz.finish()?; + Ok(()) +} diff --git a/src/vendor/flate2/examples/gzdecoder-bufread.rs b/src/vendor/flate2/examples/gzdecoder-bufread.rs new file mode 100644 index 0000000000..77f3251948 --- /dev/null +++ b/src/vendor/flate2/examples/gzdecoder-bufread.rs @@ -0,0 +1,24 @@ +extern crate flate2; + +use std::io::prelude::*; +use std::io; +use flate2::Compression; +use flate2::write::GzEncoder; +use flate2::bufread::GzDecoder; + +// Compress a sample string and print it after transformation. +fn main() { + let mut e = GzEncoder::new(Vec::new(), Compression::Default); + e.write(b"Hello World").unwrap(); + let bytes = e.finish().unwrap(); + println!("{}", decode_reader(bytes).unwrap()); +} + +// Uncompresses a Gz Encoded vector of bytes and returns a string or error +// Here &[u8] implements BufRead +fn decode_reader(bytes: Vec) -> io::Result { + let mut gz = GzDecoder::new(&bytes[..])?; + let mut s = String::new(); + gz.read_to_string(&mut s)?; + Ok(s) +} diff --git a/src/vendor/flate2/examples/gzdecoder-read.rs b/src/vendor/flate2/examples/gzdecoder-read.rs new file mode 100644 index 0000000000..843d7bd6ad --- /dev/null +++ b/src/vendor/flate2/examples/gzdecoder-read.rs @@ -0,0 +1,24 @@ +extern crate flate2; + +use std::io::prelude::*; +use std::io; +use flate2::Compression; +use flate2::write::GzEncoder; +use flate2::read::GzDecoder; + +// Compress a sample string and print it after transformation. 
+fn main() { + let mut e = GzEncoder::new(Vec::new(), Compression::Default); + e.write(b"Hello World").unwrap(); + let bytes = e.finish().unwrap(); + println!("{}", decode_reader(bytes).unwrap()); +} + +// Uncompresses a Gz Encoded vector of bytes and returns a string or error +// Here &[u8] implements Read +fn decode_reader(bytes: Vec) -> io::Result { + let mut gz = GzDecoder::new(&bytes[..])?; + let mut s = String::new(); + gz.read_to_string(&mut s)?; + Ok(s) +} diff --git a/src/vendor/flate2/examples/gzencoder-bufread.rs b/src/vendor/flate2/examples/gzencoder-bufread.rs new file mode 100644 index 0000000000..8b320d8137 --- /dev/null +++ b/src/vendor/flate2/examples/gzencoder-bufread.rs @@ -0,0 +1,24 @@ +extern crate flate2; + +use std::io::prelude::*; +use std::io; +use flate2::Compression; +use flate2::bufread::GzEncoder; +use std::fs::File; +use std::io::BufReader; + +// Open file and debug print the contents compressed with gzip +fn main() { + println!("{:?}", open_hello_world().unwrap()); +} + +// Opens sample file, compresses the contents and returns a Vector or error +// File wrapped in a BufReader implements Bufread +fn open_hello_world() -> io::Result> { + let f = File::open("examples/hello_world.txt")?; + let b = BufReader::new(f); + let mut gz = GzEncoder::new(b, Compression::Fast); + let mut buffer = Vec::new(); + gz.read_to_end(&mut buffer)?; + Ok(buffer) +} diff --git a/src/vendor/flate2/examples/gzencoder-read.rs b/src/vendor/flate2/examples/gzencoder-read.rs new file mode 100644 index 0000000000..65dd26bc49 --- /dev/null +++ b/src/vendor/flate2/examples/gzencoder-read.rs @@ -0,0 +1,20 @@ +extern crate flate2; + +use std::io::prelude::*; +use std::io; +use flate2::Compression; +use flate2::read::GzEncoder; + +// Print the GZ compressed representation of hello world +fn main() { + println!("{:?}", gzencoder_read_hello_world().unwrap()); +} + +// Return a vector containing the GZ compressed version of hello world +fn gzencoder_read_hello_world() -> io::Result> { + let mut ret_vec = [0;100]; + let c = b"hello world"; + let mut z = GzEncoder::new(&c[..], Compression::Fast); + let count = z.read(&mut ret_vec)?; + Ok(ret_vec[0..count].to_vec()) +} diff --git a/src/vendor/flate2/examples/gzencoder-write.rs b/src/vendor/flate2/examples/gzencoder-write.rs new file mode 100644 index 0000000000..b423b52414 --- /dev/null +++ b/src/vendor/flate2/examples/gzencoder-write.rs @@ -0,0 +1,12 @@ +extern crate flate2; + +use std::io::prelude::*; +use flate2::Compression; +use flate2::write::GzEncoder; + +// Vec implements Write to print the compressed bytes of sample string +fn main() { + let mut e = GzEncoder::new(Vec::new(), Compression::Default); + e.write(b"Hello World").unwrap(); + println!("{:?}", e.finish().unwrap()); +} diff --git a/src/vendor/flate2/examples/gzmultidecoder-bufread.rs b/src/vendor/flate2/examples/gzmultidecoder-bufread.rs new file mode 100644 index 0000000000..d5db3673e4 --- /dev/null +++ b/src/vendor/flate2/examples/gzmultidecoder-bufread.rs @@ -0,0 +1,24 @@ +extern crate flate2; + +use std::io::prelude::*; +use std::io; +use flate2::Compression; +use flate2::write::GzEncoder; +use flate2::bufread::MultiGzDecoder; + +// Compress a sample string and print it after transformation. 
+fn main() { + let mut e = GzEncoder::new(Vec::new(), Compression::Default); + e.write(b"Hello World").unwrap(); + let bytes = e.finish().unwrap(); + println!("{}", decode_reader(bytes).unwrap()); +} + +// Uncompresses a Gz Encoded vector of bytes and returns a string or error +// Here &[u8] implements BufRead +fn decode_reader(bytes: Vec) -> io::Result { + let mut gz = MultiGzDecoder::new(&bytes[..])?; + let mut s = String::new(); + gz.read_to_string(&mut s)?; + Ok(s) +} diff --git a/src/vendor/flate2/examples/gzmultidecoder-read.rs b/src/vendor/flate2/examples/gzmultidecoder-read.rs new file mode 100644 index 0000000000..9263292c92 --- /dev/null +++ b/src/vendor/flate2/examples/gzmultidecoder-read.rs @@ -0,0 +1,24 @@ +extern crate flate2; + +use std::io::prelude::*; +use std::io; +use flate2::Compression; +use flate2::write::GzEncoder; +use flate2::read::MultiGzDecoder; + +// Compress a sample string and print it after transformation. +fn main() { + let mut e = GzEncoder::new(Vec::new(), Compression::Default); + e.write(b"Hello World").unwrap(); + let bytes = e.finish().unwrap(); + println!("{}", decode_reader(bytes).unwrap()); +} + +// Uncompresses a Gz Encoded vector of bytes and returns a string or error +// Here &[u8] implements Read +fn decode_reader(bytes: Vec) -> io::Result { + let mut gz = MultiGzDecoder::new(&bytes[..])?; + let mut s = String::new(); + gz.read_to_string(&mut s)?; + Ok(s) +} diff --git a/src/vendor/flate2/examples/hello_world.txt b/src/vendor/flate2/examples/hello_world.txt new file mode 100644 index 0000000000..557db03de9 --- /dev/null +++ b/src/vendor/flate2/examples/hello_world.txt @@ -0,0 +1 @@ +Hello World diff --git a/src/vendor/flate2/examples/zlibdecoder-bufread.rs b/src/vendor/flate2/examples/zlibdecoder-bufread.rs new file mode 100644 index 0000000000..7216f801e8 --- /dev/null +++ b/src/vendor/flate2/examples/zlibdecoder-bufread.rs @@ -0,0 +1,24 @@ +extern crate flate2; + +use std::io::prelude::*; +use std::io; +use flate2::Compression; +use flate2::write::ZlibEncoder; +use flate2::bufread::ZlibDecoder; + +// Compress a sample string and print it after transformation. +fn main() { + let mut e = ZlibEncoder::new(Vec::new(), Compression::Default); + e.write(b"Hello World").unwrap(); + let bytes = e.finish().unwrap(); + println!("{}", decode_bufreader(bytes).unwrap()); +} + +// Uncompresses a Zlib Encoded vector of bytes and returns a string or error +// Here &[u8] implements BufRead +fn decode_bufreader(bytes: Vec) -> io::Result { + let mut z = ZlibDecoder::new(&bytes[..]); + let mut s = String::new(); + z.read_to_string(&mut s)?; + Ok(s) +} diff --git a/src/vendor/flate2/examples/zlibdecoder-read.rs b/src/vendor/flate2/examples/zlibdecoder-read.rs new file mode 100644 index 0000000000..ab82413fbd --- /dev/null +++ b/src/vendor/flate2/examples/zlibdecoder-read.rs @@ -0,0 +1,24 @@ +extern crate flate2; + +use std::io::prelude::*; +use std::io; +use flate2::Compression; +use flate2::write::ZlibEncoder; +use flate2::read::ZlibDecoder; + +// Compress a sample string and print it after transformation. 
+fn main() { + let mut e = ZlibEncoder::new(Vec::new(), Compression::Default); + e.write(b"Hello World").unwrap(); + let bytes = e.finish().unwrap(); + println!("{}", decode_reader(bytes).unwrap()); +} + +// Uncompresses a Zlib Encoded vector of bytes and returns a string or error +// Here &[u8] implements Read +fn decode_reader(bytes: Vec) -> io::Result { + let mut z = ZlibDecoder::new(&bytes[..]); + let mut s = String::new(); + z.read_to_string(&mut s)?; + Ok(s) +} diff --git a/src/vendor/flate2/examples/zlibdecoder-write.rs b/src/vendor/flate2/examples/zlibdecoder-write.rs new file mode 100644 index 0000000000..c70ded3c1c --- /dev/null +++ b/src/vendor/flate2/examples/zlibdecoder-write.rs @@ -0,0 +1,26 @@ +extern crate flate2; + +use std::io::prelude::*; +use std::io; +use flate2::Compression; +use flate2::write::ZlibEncoder; +use flate2::write::ZlibDecoder; + +// Compress a sample string and print it after transformation. +fn main() { + let mut e = ZlibEncoder::new(Vec::new(), Compression::Default); + e.write(b"Hello World").unwrap(); + let bytes = e.finish().unwrap(); + println!("{}", decode_reader(bytes).unwrap()); +} + +// Uncompresses a Zlib Encoded vector of bytes and returns a string or error +// Here Vec implements Write +fn decode_reader(bytes: Vec) -> io::Result { + let mut writer = Vec::new(); + let mut z = ZlibDecoder::new(writer); + z.write(&bytes[..])?; + writer = z.finish()?; + let return_string = String::from_utf8(writer).expect("String parsing error"); + Ok(return_string) +} diff --git a/src/vendor/flate2/examples/zlibencoder-bufread.rs b/src/vendor/flate2/examples/zlibencoder-bufread.rs new file mode 100644 index 0000000000..39d93611ac --- /dev/null +++ b/src/vendor/flate2/examples/zlibencoder-bufread.rs @@ -0,0 +1,24 @@ +extern crate flate2; + +use std::io::prelude::*; +use std::io; +use flate2::Compression; +use flate2::bufread::ZlibEncoder; +use std::fs::File; +use std::io::BufReader; + +// Open file and debug print the contents compressed with zlib +fn main() { + println!("{:?}", open_hello_world().unwrap()); +} + +// Opens sample file, compresses the contents and returns a Vector or error +// File wrapped in a BufReader implements Bufread +fn open_hello_world() -> io::Result> { + let f = File::open("examples/hello_world.txt")?; + let b = BufReader::new(f); + let mut z = ZlibEncoder::new(b, Compression::Fast); + let mut buffer = Vec::new(); + z.read_to_end(&mut buffer)?; + Ok(buffer) +} diff --git a/src/vendor/flate2/examples/zlibencoder-read.rs b/src/vendor/flate2/examples/zlibencoder-read.rs new file mode 100644 index 0000000000..3aef92ca49 --- /dev/null +++ b/src/vendor/flate2/examples/zlibencoder-read.rs @@ -0,0 +1,21 @@ +extern crate flate2; + +use std::io::prelude::*; +use flate2::Compression; +use flate2::read::ZlibEncoder; +use std::fs::File; + +// Open file and debug print the compressed contents +fn main() { + println!("{:?}", open_hello_world().unwrap()); +} + +// Opens sample file, compresses the contents and returns a Vector or error +// File implements Read +fn open_hello_world() -> std::io::Result> { + let f = File::open("examples/hello_world.txt")?; + let mut z = ZlibEncoder::new(f, Compression::Fast); + let mut buffer = [0;50]; + let byte_count = z.read(&mut buffer)?; + Ok(buffer[0..byte_count].to_vec()) +} diff --git a/src/vendor/flate2/examples/zlibencoder-write.rs b/src/vendor/flate2/examples/zlibencoder-write.rs new file mode 100644 index 0000000000..9152b30eab --- /dev/null +++ b/src/vendor/flate2/examples/zlibencoder-write.rs @@ -0,0 +1,12 @@ 
+extern crate flate2; + +use std::io::prelude::*; +use flate2::Compression; +use flate2::write::ZlibEncoder; + +// Vec implements Write to print the compressed bytes of sample string +fn main() { + let mut e = ZlibEncoder::new(Vec::new(), Compression::Default); + e.write(b"Hello World").unwrap(); + println!("{:?}", e.finish().unwrap()); +} diff --git a/src/vendor/flate2/src/bufreader.rs b/src/vendor/flate2/src/bufreader.rs index fbe0152e34..a4bd8c7962 100644 --- a/src/vendor/flate2/src/bufreader.rs +++ b/src/vendor/flate2/src/bufreader.rs @@ -20,6 +20,15 @@ pub struct BufReader { cap: usize, } +impl ::std::fmt::Debug for BufReader where R: ::std::fmt::Debug { + fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error>{ + fmt.debug_struct("BufReader") + .field("reader", &self.inner) + .field("buffer", &format_args!("{}/{}", self.cap - self.pos, self.buf.len())) + .finish() + } +} + impl BufReader { pub fn new(inner: R) -> BufReader { BufReader::with_buf(vec![0; 32 * 1024], inner) @@ -33,7 +42,9 @@ impl BufReader { cap: 0, } } +} +impl BufReader { pub fn get_ref(&self) -> &R { &self.inner } diff --git a/src/vendor/flate2/src/crc.rs b/src/vendor/flate2/src/crc.rs index 3975222697..c6e93ac1f2 100644 --- a/src/vendor/flate2/src/crc.rs +++ b/src/vendor/flate2/src/crc.rs @@ -6,13 +6,19 @@ use libc; use ffi; -/// The CRC calculated by a CrcReader. +/// The CRC calculated by a [`CrcReader`]. +/// +/// [`CrcReader`]: struct.CrcReader.html +#[derive(Debug)] pub struct Crc { crc: libc::c_ulong, amt: u32, } -/// A wrapper around a `std::io::Read` that calculates the CRC. +/// A wrapper around a [`Read`] that calculates the CRC. +/// +/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html +#[derive(Debug)] pub struct CrcReader { inner: R, crc: Crc, @@ -68,7 +74,9 @@ impl CrcReader { crc: Crc::new(), } } +} +impl CrcReader { /// Get the Crc for this CrcReader. pub fn crc(&self) -> &Crc { &self.crc diff --git a/src/vendor/flate2/src/deflate.rs b/src/vendor/flate2/src/deflate.rs deleted file mode 100644 index 3b4571ab1e..0000000000 --- a/src/vendor/flate2/src/deflate.rs +++ /dev/null @@ -1,857 +0,0 @@ -//! DEFLATE compression and decompression of streams - -use std::io::prelude::*; -use std::io; -use std::mem; - -#[cfg(feature = "tokio")] -use futures::Poll; -#[cfg(feature = "tokio")] -use tokio_io::{AsyncRead, AsyncWrite}; - -use bufreader::BufReader; -use zio; -use {Compress, Decompress}; - -/// A DEFLATE encoder, or compressor. -/// -/// This structure implements a `Write` interface and takes a stream of -/// uncompressed data, writing the compressed data to the wrapped writer. -pub struct EncoderWriter { - inner: zio::Writer, -} - -/// A DEFLATE encoder, or compressor. -/// -/// This structure implements a `Read` interface and will read uncompressed -/// data from an underlying stream and emit a stream of compressed data. -pub struct EncoderReader { - inner: EncoderReaderBuf>, -} - -/// A DEFLATE encoder, or compressor. -/// -/// This structure implements a `BufRead` interface and will read uncompressed -/// data from an underlying stream and emit a stream of compressed data. -pub struct EncoderReaderBuf { - obj: R, - data: Compress, -} - -/// A DEFLATE decoder, or decompressor. -/// -/// This structure implements a `Read` interface and takes a stream of -/// compressed data as input, providing the decompressed data when read from. -pub struct DecoderReader { - inner: DecoderReaderBuf>, -} - -/// A DEFLATE decoder, or decompressor. 
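The patch adds a manual `Debug` impl to the crate's internal `BufReader` that summarizes the buffer as "remaining/capacity" instead of dumping its contents. A minimal std-only stand-in (the struct here is illustrative, not the vendored type) showing the same pattern:

```rust
use std::fmt;

// Stand-in for the vendored BufReader: the wrapped reader is shown via its
// own Debug impl, while the buffer is summarized as "remaining/capacity".
struct BufReader<R> {
    inner: R,
    buf: Vec<u8>,
    pos: usize,
    cap: usize,
}

impl<R: fmt::Debug> fmt::Debug for BufReader<R> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("BufReader")
            .field("reader", &self.inner)
            .field("buffer", &format_args!("{}/{}", self.cap - self.pos, self.buf.len()))
            .finish()
    }
}

fn main() {
    let r = BufReader { inner: "source", buf: vec![0; 8], pos: 2, cap: 6 };
    // Prints: BufReader { reader: "source", buffer: 4/8 }
    println!("{:?}", r);
}
```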
-/// -/// This structure implements a `BufRead` interface and takes a stream of -/// compressed data as input, providing the decompressed data when read from. -pub struct DecoderReaderBuf { - obj: R, - data: Decompress, -} - -/// A DEFLATE decoder, or decompressor. -/// -/// This structure implements a `Write` and will emit a stream of decompressed -/// data when fed a stream of compressed data. -pub struct DecoderWriter { - inner: zio::Writer, -} - -impl EncoderWriter { - /// Creates a new encoder which will write compressed data to the stream - /// given at the given compression level. - /// - /// When this encoder is dropped or unwrapped the final pieces of data will - /// be flushed. - pub fn new(w: W, level: ::Compression) -> EncoderWriter { - EncoderWriter { - inner: zio::Writer::new(w, Compress::new(level, false)), - } - } - - /// Acquires a reference to the underlying writer. - pub fn get_ref(&self) -> &W { - self.inner.get_ref() - } - - /// Acquires a mutable reference to the underlying writer. - /// - /// Note that mutating the output/input state of the stream may corrupt this - /// object, so care must be taken when using this method. - pub fn get_mut(&mut self) -> &mut W { - self.inner.get_mut() - } - - /// Resets the state of this encoder entirely, swapping out the output - /// stream for another. - /// - /// This function will finish encoding the current stream into the current - /// output stream before swapping out the two output streams. If the stream - /// cannot be finished an error is returned. - /// - /// After the current stream has been finished, this will reset the internal - /// state of this encoder and replace the output stream with the one - /// provided, returning the previous output stream. Future data written to - /// this encoder will be the compressed into the stream `w` provided. - pub fn reset(&mut self, w: W) -> io::Result { - try!(self.inner.finish()); - self.inner.data.reset(); - Ok(self.inner.replace(w)) - } - - /// Attempt to finish this output stream, writing out final chunks of data. - /// - /// Note that this function can only be used once data has finished being - /// written to the output stream. After this function is called then further - /// calls to `write` may result in a panic. - /// - /// # Panics - /// - /// Attempts to write data to this stream may result in a panic after this - /// function is called. - pub fn try_finish(&mut self) -> io::Result<()> { - self.inner.finish() - } - - /// Consumes this encoder, flushing the output stream. - /// - /// This will flush the underlying data stream, close off the compressed - /// stream and, if successful, return the contained writer. - /// - /// Note that this function may not be suitable to call in a situation where - /// the underlying stream is an asynchronous I/O stream. To finish a stream - /// the `try_finish` (or `shutdown`) method should be used instead. To - /// re-acquire ownership of a stream it is safe to call this method after - /// `try_finish` or `shutdown` has returned `Ok`. - pub fn finish(mut self) -> io::Result { - try!(self.inner.finish()); - Ok(self.inner.take_inner()) - } - - /// Consumes this encoder, flushing the output stream. - /// - /// This will flush the underlying data stream and then return the contained - /// writer if the flush succeeded. - /// The compressed stream will not closed but only flushed. This - /// means that obtained byte array can by extended by another deflated - /// stream. To close the stream add the two bytes 0x3 and 0x0. 
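The `reset`/`finish` contract described above (finish the current stream, swap in a new output, hand back the old one) survives in the new `write::DeflateEncoder`. A short sketch of that usage, assuming the API shown later in this patch:

```rust
extern crate flate2;

use std::io::prelude::*;
use flate2::Compression;
use flate2::write::DeflateEncoder;

fn main() {
    // `reset` finishes the current stream and swaps in a fresh output,
    // returning the previous writer; `finish` consumes the encoder.
    let mut e = DeflateEncoder::new(Vec::new(), Compression::Default);
    e.write_all(b"first stream").unwrap();
    let first = e.reset(Vec::new()).unwrap();

    e.write_all(b"second stream").unwrap();
    let second = e.finish().unwrap();

    println!("first: {} bytes, second: {} bytes", first.len(), second.len());
}
```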
- pub fn flush_finish(mut self) -> io::Result { - try!(self.inner.flush()); - Ok(self.inner.take_inner()) - } - - /// Returns the number of bytes that have been written to this compresor. - /// - /// Note that not all bytes written to this object may be accounted for, - /// there may still be some active buffering. - pub fn total_in(&self) -> u64 { - self.inner.data.total_in() - } - - /// Returns the number of bytes that the compressor has produced. - /// - /// Note that not all bytes may have been written yet, some may still be - /// buffered. - pub fn total_out(&self) -> u64 { - self.inner.data.total_out() - } -} - -impl Write for EncoderWriter { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.inner.write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.inner.flush() - } -} - -#[cfg(feature = "tokio")] -impl AsyncWrite for EncoderWriter { - fn shutdown(&mut self) -> Poll<(), io::Error> { - try_nb!(self.inner.finish()); - self.inner.get_mut().shutdown() - } -} - -impl Read for EncoderWriter { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.inner.get_mut().read(buf) - } -} - -#[cfg(feature = "tokio")] -impl AsyncRead for EncoderWriter { -} - -impl EncoderReader { - /// Creates a new encoder which will read uncompressed data from the given - /// stream and emit the compressed stream. - pub fn new(r: R, level: ::Compression) -> EncoderReader { - EncoderReader { - inner: EncoderReaderBuf::new(BufReader::new(r), level), - } - } - - /// Resets the state of this encoder entirely, swapping out the input - /// stream for another. - /// - /// This function will reset the internal state of this encoder and replace - /// the input stream with the one provided, returning the previous input - /// stream. Future data read from this encoder will be the compressed - /// version of `r`'s data. - /// - /// Note that there may be currently buffered data when this function is - /// called, and in that case the buffered data is discarded. - pub fn reset(&mut self, r: R) -> R { - self.inner.data.reset(); - self.inner.obj.reset(r) - } - - /// Acquires a reference to the underlying reader - pub fn get_ref(&self) -> &R { - self.inner.get_ref().get_ref() - } - - /// Acquires a mutable reference to the underlying stream - /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - self.inner.get_mut().get_mut() - } - - /// Consumes this encoder, returning the underlying reader. - /// - /// Note that there may be buffered bytes which are not re-acquired as part - /// of this transition. It's recommended to only call this function after - /// EOF has been reached. - pub fn into_inner(self) -> R { - self.inner.into_inner().into_inner() - } - - /// Returns the number of bytes that have been read into this compressor. - /// - /// Note that not all bytes read from the underlying object may be accounted - /// for, there may still be some active buffering. - pub fn total_in(&self) -> u64 { - self.inner.data.total_in() - } - - /// Returns the number of bytes that the compressor has produced. - /// - /// Note that not all bytes may have been read yet, some may still be - /// buffered. 
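A sketch of the `flush_finish` behaviour documented above: the stream is flushed but not terminated, so the returned bytes can be extended by another deflate stream, and (per the doc comment) appending the two bytes `0x3 0x0` closes it. Assumes the `write::DeflateEncoder` API from this patch:

```rust
extern crate flate2;

use std::io::prelude::*;
use flate2::Compression;
use flate2::write::DeflateEncoder;

fn main() {
    // flush_finish flushes the compressed data without writing the final
    // block, handing back the wrapped writer.
    let mut e = DeflateEncoder::new(Vec::new(), Compression::Default);
    e.write_all(b"Hello").unwrap();
    let mut bytes = e.flush_finish().unwrap();

    // Per the doc comment: 0x03 0x00 terminates the flushed deflate stream.
    bytes.extend_from_slice(&[0x03, 0x00]);
    println!("{} bytes including the closing pair", bytes.len());
}
```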
- pub fn total_out(&self) -> u64 { - self.inner.data.total_out() - } -} - -impl Read for EncoderReader { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.inner.read(buf) - } -} - -#[cfg(feature = "tokio")] -impl AsyncRead for EncoderReader { -} - -impl Write for EncoderReader { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.get_mut().write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.get_mut().flush() - } -} - -#[cfg(feature = "tokio")] -impl AsyncWrite for EncoderReader { - fn shutdown(&mut self) -> Poll<(), io::Error> { - self.get_mut().shutdown() - } -} - -impl EncoderReaderBuf { - /// Creates a new encoder which will read uncompressed data from the given - /// stream and emit the compressed stream. - pub fn new(r: R, level: ::Compression) -> EncoderReaderBuf { - EncoderReaderBuf { - obj: r, - data: Compress::new(level, false), - } - } - - /// Resets the state of this encoder entirely, swapping out the input - /// stream for another. - /// - /// This function will reset the internal state of this encoder and replace - /// the input stream with the one provided, returning the previous input - /// stream. Future data read from this encoder will be the compressed - /// version of `r`'s data. - pub fn reset(&mut self, r: R) -> R { - self.data.reset(); - mem::replace(&mut self.obj, r) - } - - /// Acquires a reference to the underlying reader - pub fn get_ref(&self) -> &R { - &self.obj - } - - /// Acquires a mutable reference to the underlying stream - /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - &mut self.obj - } - - /// Consumes this encoder, returning the underlying reader. - pub fn into_inner(self) -> R { - self.obj - } - - /// Returns the number of bytes that have been read into this compressor. - /// - /// Note that not all bytes read from the underlying object may be accounted - /// for, there may still be some active buffering. - pub fn total_in(&self) -> u64 { - self.data.total_in() - } - - /// Returns the number of bytes that the compressor has produced. - /// - /// Note that not all bytes may have been read yet, some may still be - /// buffered. - pub fn total_out(&self) -> u64 { - self.data.total_out() - } -} - -impl Read for EncoderReaderBuf { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - zio::read(&mut self.obj, &mut self.data, buf) - } -} - -#[cfg(feature = "tokio")] -impl AsyncRead for EncoderReaderBuf { -} - -impl Write for EncoderReaderBuf { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.get_mut().write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.get_mut().flush() - } -} - -#[cfg(feature = "tokio")] -impl AsyncWrite for EncoderReaderBuf { - fn shutdown(&mut self) -> Poll<(), io::Error> { - self.get_mut().shutdown() - } -} - -impl DecoderReader { - /// Creates a new decoder which will decompress data read from the given - /// stream. - pub fn new(r: R) -> DecoderReader { - DecoderReader::new_with_buf(r, vec![0; 32 * 1024]) - } - - /// Same as `new`, but the intermediate buffer for data is specified. - /// - /// Note that the capacity of the intermediate buffer is never increased, - /// and it is recommended for it to be large. - pub fn new_with_buf(r: R, buf: Vec) -> DecoderReader { - DecoderReader { - inner: DecoderReaderBuf::new(BufReader::with_buf(buf, r)) - } - } - - /// Resets the state of this decoder entirely, swapping out the input - /// stream for another. 
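The `new_with_buf` constructor mentioned above lets the caller size the intermediate buffer, which is never grown afterwards. A small round-trip sketch using the `read`-flavored types as they appear later in this patch:

```rust
extern crate flate2;

use std::io::prelude::*;
use flate2::Compression;
use flate2::read::{DeflateDecoder, DeflateEncoder};

fn main() {
    // new_with_buf lets the caller choose the intermediate buffer; since it
    // is never resized, a reasonably large buffer is recommended.
    let data = b"some sample data to round-trip";
    let encoder = DeflateEncoder::new(&data[..], Compression::Default);
    let mut decoder = DeflateDecoder::new_with_buf(encoder, vec![0; 64 * 1024]);

    let mut out = Vec::new();
    decoder.read_to_end(&mut out).unwrap();
    assert_eq!(&out[..], &data[..]);
}
```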
- /// - /// This will reset the internal state of this decoder and replace the - /// input stream with the one provided, returning the previous input - /// stream. Future data read from this decoder will be the decompressed - /// version of `r`'s data. - /// - /// Note that there may be currently buffered data when this function is - /// called, and in that case the buffered data is discarded. - pub fn reset(&mut self, r: R) -> R { - self.inner.data = Decompress::new(false); - self.inner.obj.reset(r) - } - - /// Acquires a reference to the underlying stream - pub fn get_ref(&self) -> &R { - self.inner.get_ref().get_ref() - } - - /// Acquires a mutable reference to the underlying stream - /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - self.inner.get_mut().get_mut() - } - - /// Consumes this decoder, returning the underlying reader. - /// - /// Note that there may be buffered bytes which are not re-acquired as part - /// of this transition. It's recommended to only call this function after - /// EOF has been reached. - pub fn into_inner(self) -> R { - self.inner.into_inner().into_inner() - } - - /// Returns the number of bytes that the decompressor has consumed. - /// - /// Note that this will likely be smaller than what the decompressor - /// actually read from the underlying stream due to buffering. - pub fn total_in(&self) -> u64 { - self.inner.total_in() - } - - /// Returns the number of bytes that the decompressor has produced. - pub fn total_out(&self) -> u64 { - self.inner.total_out() - } -} - -impl Read for DecoderReader { - fn read(&mut self, into: &mut [u8]) -> io::Result { - self.inner.read(into) - } -} - -#[cfg(feature = "tokio")] -impl AsyncRead for DecoderReader { -} - -impl Write for DecoderReader { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.get_mut().write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.get_mut().flush() - } -} - -#[cfg(feature = "tokio")] -impl AsyncWrite for DecoderReader { - fn shutdown(&mut self) -> Poll<(), io::Error> { - self.get_mut().shutdown() - } -} - -impl DecoderReaderBuf { - /// Creates a new decoder which will decompress data read from the given - /// stream. - pub fn new(r: R) -> DecoderReaderBuf { - DecoderReaderBuf { - obj: r, - data: Decompress::new(false), - } - } - - /// Resets the state of this decoder entirely, swapping out the input - /// stream for another. - /// - /// This will reset the internal state of this decoder and replace the - /// input stream with the one provided, returning the previous input - /// stream. Future data read from this decoder will be the decompressed - /// version of `r`'s data. - pub fn reset(&mut self, r: R) -> R { - self.data = Decompress::new(false); - mem::replace(&mut self.obj, r) - } - - /// Resets the state of this decoder's data - /// - /// This will reset the internal state of this decoder. It will continue - /// reading from the same stream. - pub fn reset_data(&mut self) { - self.data = Decompress::new(false); - } - - /// Acquires a reference to the underlying stream - pub fn get_ref(&self) -> &R { - &self.obj - } - - /// Acquires a mutable reference to the underlying stream - /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - &mut self.obj - } - - /// Consumes this decoder, returning the underlying reader. 
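A sketch of the decoder `reset` described above, which swaps the input stream and discards any buffered data; it mirrors the `reset_decoder` test further down, using the `read`-flavored API:

```rust
extern crate flate2;

use std::io::prelude::*;
use flate2::Compression;
use flate2::read::{DeflateDecoder, DeflateEncoder};

fn main() {
    // Compress once, then decode the same compressed buffer twice by
    // swapping the input back in with `reset`.
    let mut enc = DeflateEncoder::new(&b"reusable input"[..], Compression::Default);
    let mut compressed = Vec::new();
    enc.read_to_end(&mut compressed).unwrap();

    let mut dec = DeflateDecoder::new(&compressed[..]);
    let (mut a, mut b) = (Vec::new(), Vec::new());
    dec.read_to_end(&mut a).unwrap();
    dec.reset(&compressed[..]);
    dec.read_to_end(&mut b).unwrap();
    assert_eq!(a, b);
}
```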
- pub fn into_inner(self) -> R { - self.obj - } - - /// Returns the number of bytes that the decompressor has consumed. - /// - /// Note that this will likely be smaller than what the decompressor - /// actually read from the underlying stream due to buffering. - pub fn total_in(&self) -> u64 { - self.data.total_in() - } - - /// Returns the number of bytes that the decompressor has produced. - pub fn total_out(&self) -> u64 { - self.data.total_out() - } -} - -impl Read for DecoderReaderBuf { - fn read(&mut self, into: &mut [u8]) -> io::Result { - zio::read(&mut self.obj, &mut self.data, into) - } -} - -#[cfg(feature = "tokio")] -impl AsyncRead for DecoderReaderBuf { -} - -impl Write for DecoderReaderBuf { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.get_mut().write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.get_mut().flush() - } -} - -#[cfg(feature = "tokio")] -impl AsyncWrite for DecoderReaderBuf { - fn shutdown(&mut self) -> Poll<(), io::Error> { - self.get_mut().shutdown() - } -} - -impl DecoderWriter { - /// Creates a new decoder which will write uncompressed data to the stream. - /// - /// When this encoder is dropped or unwrapped the final pieces of data will - /// be flushed. - pub fn new(w: W) -> DecoderWriter { - DecoderWriter { - inner: zio::Writer::new(w, Decompress::new(false)), - } - } - - /// Acquires a reference to the underlying writer. - pub fn get_ref(&self) -> &W { - self.inner.get_ref() - } - - /// Acquires a mutable reference to the underlying writer. - /// - /// Note that mutating the output/input state of the stream may corrupt this - /// object, so care must be taken when using this method. - pub fn get_mut(&mut self) -> &mut W { - self.inner.get_mut() - } - - /// Resets the state of this decoder entirely, swapping out the output - /// stream for another. - /// - /// This function will finish encoding the current stream into the current - /// output stream before swapping out the two output streams. If the stream - /// cannot be finished an error is returned. - /// - /// This will then reset the internal state of this decoder and replace the - /// output stream with the one provided, returning the previous output - /// stream. Future data written to this decoder will be decompressed into - /// the output stream `w`. - pub fn reset(&mut self, w: W) -> io::Result { - try!(self.inner.finish()); - self.inner.data = Decompress::new(false); - Ok(self.inner.replace(w)) - } - - /// Attempt to finish this output stream, writing out final chunks of data. - /// - /// Note that this function can only be used once data has finished being - /// written to the output stream. After this function is called then further - /// calls to `write` may result in a panic. - /// - /// # Panics - /// - /// Attempts to write data to this stream may result in a panic after this - /// function is called. - pub fn try_finish(&mut self) -> io::Result<()> { - self.inner.finish() - } - - /// Consumes this encoder, flushing the output stream. - /// - /// This will flush the underlying data stream and then return the contained - /// writer if the flush succeeded. - /// - /// Note that this function may not be suitable to call in a situation where - /// the underlying stream is an asynchronous I/O stream. To finish a stream - /// the `try_finish` (or `shutdown`) method should be used instead. To - /// re-acquire ownership of a stream it is safe to call this method after - /// `try_finish` or `shutdown` has returned `Ok`. 
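The write-oriented decoder described above is fed compressed bytes and writes the decompressed result into the wrapped writer; `finish` hands the writer back. A minimal sketch using the `write`-flavored types from this patch:

```rust
extern crate flate2;

use std::io::prelude::*;
use flate2::Compression;
use flate2::write::{DeflateDecoder, DeflateEncoder};

fn main() {
    // Compress with a write-oriented encoder...
    let mut enc = DeflateEncoder::new(Vec::new(), Compression::Default);
    enc.write_all(b"Hello World").unwrap();
    let compressed = enc.finish().unwrap();

    // ...then feed the compressed bytes to a write-oriented decoder.
    let mut dec = DeflateDecoder::new(Vec::new());
    dec.write_all(&compressed).unwrap();
    let decompressed = dec.finish().unwrap();
    assert_eq!(decompressed, b"Hello World".to_vec());
}
```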
- pub fn finish(mut self) -> io::Result { - try!(self.inner.finish()); - Ok(self.inner.take_inner()) - } - - /// Returns the number of bytes that the decompressor has consumed for - /// decompression. - /// - /// Note that this will likely be smaller than the number of bytes - /// successfully written to this stream due to internal buffering. - pub fn total_in(&self) -> u64 { - self.inner.data.total_in() - } - - /// Returns the number of bytes that the decompressor has written to its - /// output stream. - pub fn total_out(&self) -> u64 { - self.inner.data.total_out() - } -} - -impl Write for DecoderWriter { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.inner.write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.inner.flush() - } -} - -#[cfg(feature = "tokio")] -impl AsyncWrite for DecoderWriter { - fn shutdown(&mut self) -> Poll<(), io::Error> { - try_nb!(self.inner.finish()); - self.inner.get_mut().shutdown() - } -} - -impl Read for DecoderWriter { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.inner.get_mut().read(buf) - } -} - -#[cfg(feature = "tokio")] -impl AsyncRead for DecoderWriter { -} - -#[cfg(test)] -mod tests { - use std::io::prelude::*; - - use rand::{thread_rng, Rng}; - - use deflate::{EncoderWriter, EncoderReader, DecoderReader, DecoderWriter}; - use Compression::Default; - - #[test] - fn roundtrip() { - let mut real = Vec::new(); - let mut w = EncoderWriter::new(Vec::new(), Default); - let v = thread_rng().gen_iter::().take(1024).collect::>(); - for _ in 0..200 { - let to_write = &v[..thread_rng().gen_range(0, v.len())]; - real.extend(to_write.iter().map(|x| *x)); - w.write_all(to_write).unwrap(); - } - let result = w.finish().unwrap(); - let mut r = DecoderReader::new(&result[..]); - let mut ret = Vec::new(); - r.read_to_end(&mut ret).unwrap(); - assert!(ret == real); - } - - #[test] - fn drop_writes() { - let mut data = Vec::new(); - EncoderWriter::new(&mut data, Default).write_all(b"foo").unwrap(); - let mut r = DecoderReader::new(&data[..]); - let mut ret = Vec::new(); - r.read_to_end(&mut ret).unwrap(); - assert!(ret == b"foo"); - } - - #[test] - fn total_in() { - let mut real = Vec::new(); - let mut w = EncoderWriter::new(Vec::new(), Default); - let v = thread_rng().gen_iter::().take(1024).collect::>(); - for _ in 0..200 { - let to_write = &v[..thread_rng().gen_range(0, v.len())]; - real.extend(to_write.iter().map(|x| *x)); - w.write_all(to_write).unwrap(); - } - let mut result = w.finish().unwrap(); - - let result_len = result.len(); - - for _ in 0..200 { - result.extend(v.iter().map(|x| *x)); - } - - let mut r = DecoderReader::new(&result[..]); - let mut ret = Vec::new(); - r.read_to_end(&mut ret).unwrap(); - assert!(ret == real); - assert_eq!(r.total_in(), result_len as u64); - } - - #[test] - fn roundtrip2() { - let v = thread_rng() - .gen_iter::() - .take(1024 * 1024) - .collect::>(); - let mut r = DecoderReader::new(EncoderReader::new(&v[..], Default)); - let mut ret = Vec::new(); - r.read_to_end(&mut ret).unwrap(); - assert_eq!(ret, v); - } - - #[test] - fn roundtrip3() { - let v = thread_rng() - .gen_iter::() - .take(1024 * 1024) - .collect::>(); - let mut w = EncoderWriter::new(DecoderWriter::new(Vec::new()), Default); - w.write_all(&v).unwrap(); - let w = w.finish().unwrap().finish().unwrap(); - assert!(w == v); - } - - #[test] - fn reset_writer() { - let v = thread_rng() - .gen_iter::() - .take(1024 * 1024) - .collect::>(); - let mut w = EncoderWriter::new(Vec::new(), Default); - w.write_all(&v).unwrap(); - let a = 
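The `drop_writes` test above exercises the documented guarantee that dropping an encoder flushes the final pieces of compressed data. A compact sketch of the same behaviour, using the post-split module paths:

```rust
extern crate flate2;

use std::io::prelude::*;
use flate2::Compression;
use flate2::read::DeflateDecoder;
use flate2::write::DeflateEncoder;

fn main() {
    // The encoder is a temporary here; dropping it at the end of the
    // statement flushes the remaining compressed data into `data`.
    let mut data = Vec::new();
    DeflateEncoder::new(&mut data, Compression::Default)
        .write_all(b"foo")
        .unwrap();

    let mut out = Vec::new();
    DeflateDecoder::new(&data[..]).read_to_end(&mut out).unwrap();
    assert_eq!(out, b"foo".to_vec());
}
```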
w.reset(Vec::new()).unwrap(); - w.write_all(&v).unwrap(); - let b = w.finish().unwrap(); - - let mut w = EncoderWriter::new(Vec::new(), Default); - w.write_all(&v).unwrap(); - let c = w.finish().unwrap(); - assert!(a == b && b == c); - } - - #[test] - fn reset_reader() { - let v = thread_rng() - .gen_iter::() - .take(1024 * 1024) - .collect::>(); - let (mut a, mut b, mut c) = (Vec::new(), Vec::new(), Vec::new()); - let mut r = EncoderReader::new(&v[..], Default); - r.read_to_end(&mut a).unwrap(); - r.reset(&v[..]); - r.read_to_end(&mut b).unwrap(); - - let mut r = EncoderReader::new(&v[..], Default); - r.read_to_end(&mut c).unwrap(); - assert!(a == b && b == c); - } - - #[test] - fn reset_decoder() { - let v = thread_rng() - .gen_iter::() - .take(1024 * 1024) - .collect::>(); - let mut w = EncoderWriter::new(Vec::new(), Default); - w.write_all(&v).unwrap(); - let data = w.finish().unwrap(); - - { - let (mut a, mut b, mut c) = (Vec::new(), Vec::new(), Vec::new()); - let mut r = DecoderReader::new(&data[..]); - r.read_to_end(&mut a).unwrap(); - r.reset(&data); - r.read_to_end(&mut b).unwrap(); - - let mut r = DecoderReader::new(&data[..]); - r.read_to_end(&mut c).unwrap(); - assert!(a == b && b == c && c == v); - } - - { - let mut w = DecoderWriter::new(Vec::new()); - w.write_all(&data).unwrap(); - let a = w.reset(Vec::new()).unwrap(); - w.write_all(&data).unwrap(); - let b = w.finish().unwrap(); - - let mut w = DecoderWriter::new(Vec::new()); - w.write_all(&data).unwrap(); - let c = w.finish().unwrap(); - assert!(a == b && b == c && c == v); - } - } - - #[test] - fn zero_length_read_with_data() { - let m = vec![3u8; 128 * 1024 + 1]; - let mut c = EncoderReader::new(&m[..], ::Compression::Default); - - let mut result = Vec::new(); - c.read_to_end(&mut result).unwrap(); - - let mut d = DecoderReader::new(&result[..]); - let mut data = Vec::new(); - assert!(d.read(&mut data).unwrap() == 0); - } - - #[test] - fn qc_reader() { - ::quickcheck::quickcheck(test as fn(_) -> _); - - fn test(v: Vec) -> bool { - let mut r = DecoderReader::new(EncoderReader::new(&v[..], Default)); - let mut v2 = Vec::new(); - r.read_to_end(&mut v2).unwrap(); - v == v2 - } - } - - #[test] - fn qc_writer() { - ::quickcheck::quickcheck(test as fn(_) -> _); - - fn test(v: Vec) -> bool { - let mut w = EncoderWriter::new(DecoderWriter::new(Vec::new()), Default); - w.write_all(&v).unwrap(); - v == w.finish().unwrap().finish().unwrap() - } - } -} diff --git a/src/vendor/flate2/src/deflate/bufread.rs b/src/vendor/flate2/src/deflate/bufread.rs new file mode 100644 index 0000000000..9a9a47ec61 --- /dev/null +++ b/src/vendor/flate2/src/deflate/bufread.rs @@ -0,0 +1,269 @@ +use std::io::prelude::*; +use std::io; +use std::mem; + +#[cfg(feature = "tokio")] +use futures::Poll; +#[cfg(feature = "tokio")] +use tokio_io::{AsyncRead, AsyncWrite}; + +use zio; +use {Compress, Decompress}; + +/// A DEFLATE encoder, or compressor. +/// +/// This structure implements a [`BufRead`] interface and will read uncompressed +/// data from an underlying stream and emit a stream of compressed data. 
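The `zero_length_read_with_data` test above checks that a read into an empty buffer returns `Ok(0)` even when compressed data is still pending. A standalone sketch of the same check against the post-split API:

```rust
extern crate flate2;

use std::io::prelude::*;
use flate2::Compression;
use flate2::read::{DeflateDecoder, DeflateEncoder};

fn main() {
    // Compress a large-ish input so the decoder definitely has data pending.
    let input = vec![3u8; 128 * 1024 + 1];
    let mut enc = DeflateEncoder::new(&input[..], Compression::Default);
    let mut compressed = Vec::new();
    enc.read_to_end(&mut compressed).unwrap();

    // A zero-length destination buffer yields Ok(0), not an error.
    let mut dec = DeflateDecoder::new(&compressed[..]);
    let mut empty = [0u8; 0];
    assert_eq!(dec.read(&mut empty).unwrap(), 0);
}
```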
+/// +/// [`BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html +/// +/// # Examples +/// +/// ``` +/// use std::io::prelude::*; +/// use std::io; +/// use flate2::Compression; +/// use flate2::bufread::DeflateEncoder; +/// use std::fs::File; +/// use std::io::BufReader; +/// +/// # fn main() { +/// # println!("{:?}", open_hello_world().unwrap()); +/// # } +/// # +/// // Opens sample file, compresses the contents and returns a Vector +/// fn open_hello_world() -> io::Result> { +/// let f = File::open("examples/hello_world.txt")?; +/// let b = BufReader::new(f); +/// let mut deflater = DeflateEncoder::new(b, Compression::Fast); +/// let mut buffer = Vec::new(); +/// deflater.read_to_end(&mut buffer)?; +/// Ok(buffer) +/// } +/// ``` +#[derive(Debug)] +pub struct DeflateEncoder { + obj: R, + data: Compress, +} + + +impl DeflateEncoder { + /// Creates a new encoder which will read uncompressed data from the given + /// stream and emit the compressed stream. + pub fn new(r: R, level: ::Compression) -> DeflateEncoder { + DeflateEncoder { + obj: r, + data: Compress::new(level, false), + } + } +} + +pub fn reset_encoder_data(zlib: &mut DeflateEncoder) { + zlib.data.reset(); +} + +impl DeflateEncoder { + /// Resets the state of this encoder entirely, swapping out the input + /// stream for another. + /// + /// This function will reset the internal state of this encoder and replace + /// the input stream with the one provided, returning the previous input + /// stream. Future data read from this encoder will be the compressed + /// version of `r`'s data. + pub fn reset(&mut self, r: R) -> R { + reset_encoder_data(self); + mem::replace(&mut self.obj, r) + } + + /// Acquires a reference to the underlying reader + pub fn get_ref(&self) -> &R { + &self.obj + } + + /// Acquires a mutable reference to the underlying stream + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + &mut self.obj + } + + /// Consumes this encoder, returning the underlying reader. + pub fn into_inner(self) -> R { + self.obj + } + + /// Returns the number of bytes that have been read into this compressor. + /// + /// Note that not all bytes read from the underlying object may be accounted + /// for, there may still be some active buffering. + pub fn total_in(&self) -> u64 { + self.data.total_in() + } + + /// Returns the number of bytes that the compressor has produced. + /// + /// Note that not all bytes may have been read yet, some may still be + /// buffered. + pub fn total_out(&self) -> u64 { + self.data.total_out() + } +} + +impl Read for DeflateEncoder { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + zio::read(&mut self.obj, &mut self.data, buf) + } +} + +#[cfg(feature = "tokio")] +impl AsyncRead for DeflateEncoder {} + +impl Write for DeflateEncoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.get_mut().write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.get_mut().flush() + } +} + +#[cfg(feature = "tokio")] +impl AsyncWrite for DeflateEncoder { + fn shutdown(&mut self) -> Poll<(), io::Error> { + self.get_mut().shutdown() + } +} + +/// A DEFLATE decoder, or decompressor. +/// +/// This structure implements a [`BufRead`] interface and takes a stream of +/// compressed data as input, providing the decompressed data when read from. 
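The doc example above wraps a `File` in a `BufReader`, but any `BufRead` source works. A file-free sketch of the `bufread`-flavored encoder, assuming only that `&[u8]` implements `BufRead`:

```rust
extern crate flate2;

use std::io::prelude::*;
use flate2::Compression;
use flate2::bufread::DeflateEncoder;

fn main() {
    // &[u8] already implements BufRead, so an in-memory buffer can be
    // compressed without an extra BufReader layer.
    let input = b"hello world hello world hello world";
    let mut enc = DeflateEncoder::new(&input[..], Compression::Fast);

    let mut compressed = Vec::new();
    enc.read_to_end(&mut compressed).unwrap();
    println!("{} -> {} bytes", input.len(), compressed.len());
}
```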
+/// +/// [`BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html +/// +/// # Examples +/// +/// ``` +/// use std::io::prelude::*; +/// use std::io; +/// # use flate2::Compression; +/// # use flate2::write::DeflateEncoder; +/// use flate2::bufread::DeflateDecoder; +/// +/// # fn main() { +/// # let mut e = DeflateEncoder::new(Vec::new(), Compression::Default); +/// # e.write(b"Hello World").unwrap(); +/// # let bytes = e.finish().unwrap(); +/// # println!("{}", decode_reader(bytes).unwrap()); +/// # } +/// // Uncompresses a Deflate Encoded vector of bytes and returns a string or error +/// // Here &[u8] implements Read +/// fn decode_reader(bytes: Vec) -> io::Result { +/// let mut deflater = DeflateDecoder::new(&bytes[..]); +/// let mut s = String::new(); +/// deflater.read_to_string(&mut s)?; +/// Ok(s) +/// } +/// ``` +#[derive(Debug)] +pub struct DeflateDecoder { + obj: R, + data: Decompress, +} + +pub fn reset_decoder_data(zlib: &mut DeflateDecoder) { + zlib.data = Decompress::new(false); +} + +impl DeflateDecoder { + /// Creates a new decoder which will decompress data read from the given + /// stream. + pub fn new(r: R) -> DeflateDecoder { + DeflateDecoder { + obj: r, + data: Decompress::new(false), + } + } +} + +impl DeflateDecoder { + /// Resets the state of this decoder entirely, swapping out the input + /// stream for another. + /// + /// This will reset the internal state of this decoder and replace the + /// input stream with the one provided, returning the previous input + /// stream. Future data read from this decoder will be the decompressed + /// version of `r`'s data. + pub fn reset(&mut self, r: R) -> R { + reset_decoder_data(self); + mem::replace(&mut self.obj, r) + } + + /// Resets the state of this decoder's data + /// + /// This will reset the internal state of this decoder. It will continue + /// reading from the same stream. + pub fn reset_data(&mut self) { + reset_decoder_data(self); + } + + /// Acquires a reference to the underlying stream + pub fn get_ref(&self) -> &R { + &self.obj + } + + /// Acquires a mutable reference to the underlying stream + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + &mut self.obj + } + + /// Consumes this decoder, returning the underlying reader. + pub fn into_inner(self) -> R { + self.obj + } + + /// Returns the number of bytes that the decompressor has consumed. + /// + /// Note that this will likely be smaller than what the decompressor + /// actually read from the underlying stream due to buffering. + pub fn total_in(&self) -> u64 { + self.data.total_in() + } + + /// Returns the number of bytes that the decompressor has produced. 
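A reconstructed, self-contained version of the `bufread::DeflateDecoder` doc example above, with the type parameters that appear stripped in this rendering restored (`Vec<u8>`, `io::Result<String>`); the helper name is illustrative:

```rust
extern crate flate2;

use std::io;
use std::io::prelude::*;
use flate2::Compression;
use flate2::bufread::DeflateDecoder;
use flate2::write::DeflateEncoder;

fn main() {
    let mut enc = DeflateEncoder::new(Vec::new(), Compression::Default);
    enc.write_all(b"Hello World").unwrap();
    let bytes = enc.finish().unwrap();
    println!("{}", decode_bufreader(bytes).unwrap());
}

// &[u8] implements BufRead, so the compressed bytes can be handed straight
// to the bufread-flavored decoder.
fn decode_bufreader(bytes: Vec<u8>) -> io::Result<String> {
    let mut dec = DeflateDecoder::new(&bytes[..]);
    let mut s = String::new();
    dec.read_to_string(&mut s)?;
    Ok(s)
}
```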
+ pub fn total_out(&self) -> u64 { + self.data.total_out() + } +} + +impl Read for DeflateDecoder { + fn read(&mut self, into: &mut [u8]) -> io::Result { + zio::read(&mut self.obj, &mut self.data, into) + } +} + +#[cfg(feature = "tokio")] +impl AsyncRead for DeflateDecoder {} + +impl Write for DeflateDecoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.get_mut().write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.get_mut().flush() + } +} + +#[cfg(feature = "tokio")] +impl AsyncWrite for DeflateDecoder { + fn shutdown(&mut self) -> Poll<(), io::Error> { + self.get_mut().shutdown() + } +} diff --git a/src/vendor/flate2/src/deflate/mod.rs b/src/vendor/flate2/src/deflate/mod.rs new file mode 100644 index 0000000000..7bacfa7c8a --- /dev/null +++ b/src/vendor/flate2/src/deflate/mod.rs @@ -0,0 +1,198 @@ +pub mod bufread; +pub mod read; +pub mod write; + +#[cfg(test)] +mod tests { + use std::io::prelude::*; + + use rand::{thread_rng, Rng}; + + use super::{read, write}; + use Compression::Default; + + #[test] + fn roundtrip() { + let mut real = Vec::new(); + let mut w = write::DeflateEncoder::new(Vec::new(), Default); + let v = thread_rng().gen_iter::().take(1024).collect::>(); + for _ in 0..200 { + let to_write = &v[..thread_rng().gen_range(0, v.len())]; + real.extend(to_write.iter().map(|x| *x)); + w.write_all(to_write).unwrap(); + } + let result = w.finish().unwrap(); + let mut r = read::DeflateDecoder::new(&result[..]); + let mut ret = Vec::new(); + r.read_to_end(&mut ret).unwrap(); + assert!(ret == real); + } + + #[test] + fn drop_writes() { + let mut data = Vec::new(); + write::DeflateEncoder::new(&mut data, Default) + .write_all(b"foo") + .unwrap(); + let mut r = read::DeflateDecoder::new(&data[..]); + let mut ret = Vec::new(); + r.read_to_end(&mut ret).unwrap(); + assert!(ret == b"foo"); + } + + #[test] + fn total_in() { + let mut real = Vec::new(); + let mut w = write::DeflateEncoder::new(Vec::new(), Default); + let v = thread_rng().gen_iter::().take(1024).collect::>(); + for _ in 0..200 { + let to_write = &v[..thread_rng().gen_range(0, v.len())]; + real.extend(to_write.iter().map(|x| *x)); + w.write_all(to_write).unwrap(); + } + let mut result = w.finish().unwrap(); + + let result_len = result.len(); + + for _ in 0..200 { + result.extend(v.iter().map(|x| *x)); + } + + let mut r = read::DeflateDecoder::new(&result[..]); + let mut ret = Vec::new(); + r.read_to_end(&mut ret).unwrap(); + assert!(ret == real); + assert_eq!(r.total_in(), result_len as u64); + } + + #[test] + fn roundtrip2() { + let v = thread_rng() + .gen_iter::() + .take(1024 * 1024) + .collect::>(); + let mut r = read::DeflateDecoder::new(read::DeflateEncoder::new(&v[..], Default)); + let mut ret = Vec::new(); + r.read_to_end(&mut ret).unwrap(); + assert_eq!(ret, v); + } + + #[test] + fn roundtrip3() { + let v = thread_rng() + .gen_iter::() + .take(1024 * 1024) + .collect::>(); + let mut w = write::DeflateEncoder::new(write::DeflateDecoder::new(Vec::new()), Default); + w.write_all(&v).unwrap(); + let w = w.finish().unwrap().finish().unwrap(); + assert!(w == v); + } + + #[test] + fn reset_writer() { + let v = thread_rng() + .gen_iter::() + .take(1024 * 1024) + .collect::>(); + let mut w = write::DeflateEncoder::new(Vec::new(), Default); + w.write_all(&v).unwrap(); + let a = w.reset(Vec::new()).unwrap(); + w.write_all(&v).unwrap(); + let b = w.finish().unwrap(); + + let mut w = write::DeflateEncoder::new(Vec::new(), Default); + w.write_all(&v).unwrap(); + let c = w.finish().unwrap(); + assert!(a == b 
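The `total_in` test above shows that bytes trailing after the end of the deflate stream are not counted by the decoder. A small sketch of that accounting, assuming the same stop-at-stream-end behaviour the test relies on:

```rust
extern crate flate2;

use std::io::prelude::*;
use flate2::Compression;
use flate2::read::{DeflateDecoder, DeflateEncoder};

fn main() {
    // Produce a complete deflate stream, remember its length, then append
    // junk after it.
    let mut enc = DeflateEncoder::new(&b"payload"[..], Compression::Default);
    let mut compressed = Vec::new();
    enc.read_to_end(&mut compressed).unwrap();
    let stream_len = compressed.len() as u64;
    compressed.extend_from_slice(b"trailing junk");

    // The decoder stops at the end of the stream; total_in reports only the
    // bytes that belonged to it.
    let mut dec = DeflateDecoder::new(&compressed[..]);
    let mut out = Vec::new();
    dec.read_to_end(&mut out).unwrap();
    assert_eq!(dec.total_in(), stream_len);
}
```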
&& b == c); + } + + #[test] + fn reset_reader() { + let v = thread_rng() + .gen_iter::() + .take(1024 * 1024) + .collect::>(); + let (mut a, mut b, mut c) = (Vec::new(), Vec::new(), Vec::new()); + let mut r = read::DeflateEncoder::new(&v[..], Default); + r.read_to_end(&mut a).unwrap(); + r.reset(&v[..]); + r.read_to_end(&mut b).unwrap(); + + let mut r = read::DeflateEncoder::new(&v[..], Default); + r.read_to_end(&mut c).unwrap(); + assert!(a == b && b == c); + } + + #[test] + fn reset_decoder() { + let v = thread_rng() + .gen_iter::() + .take(1024 * 1024) + .collect::>(); + let mut w = write::DeflateEncoder::new(Vec::new(), Default); + w.write_all(&v).unwrap(); + let data = w.finish().unwrap(); + + { + let (mut a, mut b, mut c) = (Vec::new(), Vec::new(), Vec::new()); + let mut r = read::DeflateDecoder::new(&data[..]); + r.read_to_end(&mut a).unwrap(); + r.reset(&data); + r.read_to_end(&mut b).unwrap(); + + let mut r = read::DeflateDecoder::new(&data[..]); + r.read_to_end(&mut c).unwrap(); + assert!(a == b && b == c && c == v); + } + + { + let mut w = write::DeflateDecoder::new(Vec::new()); + w.write_all(&data).unwrap(); + let a = w.reset(Vec::new()).unwrap(); + w.write_all(&data).unwrap(); + let b = w.finish().unwrap(); + + let mut w = write::DeflateDecoder::new(Vec::new()); + w.write_all(&data).unwrap(); + let c = w.finish().unwrap(); + assert!(a == b && b == c && c == v); + } + } + + #[test] + fn zero_length_read_with_data() { + let m = vec![3u8; 128 * 1024 + 1]; + let mut c = read::DeflateEncoder::new(&m[..], ::Compression::Default); + + let mut result = Vec::new(); + c.read_to_end(&mut result).unwrap(); + + let mut d = read::DeflateDecoder::new(&result[..]); + let mut data = Vec::new(); + assert!(d.read(&mut data).unwrap() == 0); + } + + #[test] + fn qc_reader() { + ::quickcheck::quickcheck(test as fn(_) -> _); + + fn test(v: Vec) -> bool { + let mut r = read::DeflateDecoder::new(read::DeflateEncoder::new(&v[..], Default)); + let mut v2 = Vec::new(); + r.read_to_end(&mut v2).unwrap(); + v == v2 + } + } + + #[test] + fn qc_writer() { + ::quickcheck::quickcheck(test as fn(_) -> _); + + fn test(v: Vec) -> bool { + let mut w = write::DeflateEncoder::new(write::DeflateDecoder::new(Vec::new()), Default); + w.write_all(&v).unwrap(); + v == w.finish().unwrap().finish().unwrap() + } + } +} diff --git a/src/vendor/flate2/src/deflate/read.rs b/src/vendor/flate2/src/deflate/read.rs new file mode 100644 index 0000000000..e2363c9ccc --- /dev/null +++ b/src/vendor/flate2/src/deflate/read.rs @@ -0,0 +1,268 @@ +use std::io::prelude::*; +use std::io; + +#[cfg(feature = "tokio")] +use futures::Poll; +#[cfg(feature = "tokio")] +use tokio_io::{AsyncRead, AsyncWrite}; + +use bufreader::BufReader; +use super::bufread; + +/// A DEFLATE encoder, or compressor. +/// +/// This structure implements a [`Read`] interface and will read uncompressed +/// data from an underlying stream and emit a stream of compressed data. 
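A reconstructed, self-contained version of the `read::DeflateEncoder` doc example above (`deflateencoder_read_hello_world`), with its stripped type parameters restored:

```rust
extern crate flate2;

use std::io;
use std::io::prelude::*;
use flate2::Compression;
use flate2::read::DeflateEncoder;

fn main() {
    println!("{:?}", deflateencoder_read_hello_world().unwrap());
}

// Return a vector containing the DEFLATE-compressed form of "hello world",
// read through the Read-flavored encoder into a fixed buffer.
fn deflateencoder_read_hello_world() -> io::Result<Vec<u8>> {
    let mut ret_vec = [0u8; 100];
    let c = b"hello world";
    let mut deflater = DeflateEncoder::new(&c[..], Compression::Fast);
    let count = deflater.read(&mut ret_vec)?;
    Ok(ret_vec[0..count].to_vec())
}
```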
+/// +/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html +/// +/// # Examples +/// +/// ``` +/// use std::io::prelude::*; +/// use std::io; +/// use flate2::Compression; +/// use flate2::read::DeflateEncoder; +/// +/// # fn main() { +/// # println!("{:?}", deflateencoder_read_hello_world().unwrap()); +/// # } +/// # +/// // Return a vector containing the Deflate compressed version of hello world +/// fn deflateencoder_read_hello_world() -> io::Result> { +/// let mut ret_vec = [0;100]; +/// let c = b"hello world"; +/// let mut deflater = DeflateEncoder::new(&c[..], Compression::Fast); +/// let count = deflater.read(&mut ret_vec)?; +/// Ok(ret_vec[0..count].to_vec()) +/// } +/// ``` +#[derive(Debug)] +pub struct DeflateEncoder { + inner: bufread::DeflateEncoder>, +} + +impl DeflateEncoder { + /// Creates a new encoder which will read uncompressed data from the given + /// stream and emit the compressed stream. + pub fn new(r: R, level: ::Compression) -> DeflateEncoder { + DeflateEncoder { + inner: bufread::DeflateEncoder::new(BufReader::new(r), level), + } + } +} + +impl DeflateEncoder { + /// Resets the state of this encoder entirely, swapping out the input + /// stream for another. + /// + /// This function will reset the internal state of this encoder and replace + /// the input stream with the one provided, returning the previous input + /// stream. Future data read from this encoder will be the compressed + /// version of `r`'s data. + /// + /// Note that there may be currently buffered data when this function is + /// called, and in that case the buffered data is discarded. + pub fn reset(&mut self, r: R) -> R { + super::bufread::reset_encoder_data(&mut self.inner); + self.inner.get_mut().reset(r) + } + + /// Acquires a reference to the underlying reader + pub fn get_ref(&self) -> &R { + self.inner.get_ref().get_ref() + } + + /// Acquires a mutable reference to the underlying stream + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + self.inner.get_mut().get_mut() + } + + /// Consumes this encoder, returning the underlying reader. + /// + /// Note that there may be buffered bytes which are not re-acquired as part + /// of this transition. It's recommended to only call this function after + /// EOF has been reached. + pub fn into_inner(self) -> R { + self.inner.into_inner().into_inner() + } + + /// Returns the number of bytes that have been read into this compressor. + /// + /// Note that not all bytes read from the underlying object may be accounted + /// for, there may still be some active buffering. + pub fn total_in(&self) -> u64 { + self.inner.total_in() + } + + /// Returns the number of bytes that the compressor has produced. + /// + /// Note that not all bytes may have been read yet, some may still be + /// buffered. + pub fn total_out(&self) -> u64 { + self.inner.total_out() + } +} + +impl Read for DeflateEncoder { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.inner.read(buf) + } +} + +#[cfg(feature = "tokio")] +impl AsyncRead for DeflateEncoder {} + +impl Write for DeflateEncoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.get_mut().write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.get_mut().flush() + } +} + +#[cfg(feature = "tokio")] +impl AsyncWrite for DeflateEncoder { + fn shutdown(&mut self) -> Poll<(), io::Error> { + self.get_mut().shutdown() + } +} + +/// A DEFLATE decoder, or decompressor. 
+/// +/// This structure implements a [`Read`] interface and takes a stream of +/// compressed data as input, providing the decompressed data when read from. +/// +/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html +/// +/// # Examples +/// +/// ``` +/// use std::io::prelude::*; +/// use std::io; +/// # use flate2::Compression; +/// # use flate2::write::DeflateEncoder; +/// use flate2::read::DeflateDecoder; +/// +/// # fn main() { +/// # let mut e = DeflateEncoder::new(Vec::new(), Compression::Default); +/// # e.write(b"Hello World").unwrap(); +/// # let bytes = e.finish().unwrap(); +/// # println!("{}", decode_reader(bytes).unwrap()); +/// # } +/// // Uncompresses a Deflate Encoded vector of bytes and returns a string or error +/// // Here &[u8] implements Read +/// fn decode_reader(bytes: Vec) -> io::Result { +/// let mut deflater = DeflateDecoder::new(&bytes[..]); +/// let mut s = String::new(); +/// deflater.read_to_string(&mut s)?; +/// Ok(s) +/// } +/// ``` +#[derive(Debug)] +pub struct DeflateDecoder { + inner: bufread::DeflateDecoder>, +} + + + +impl DeflateDecoder { + /// Creates a new decoder which will decompress data read from the given + /// stream. + pub fn new(r: R) -> DeflateDecoder { + DeflateDecoder::new_with_buf(r, vec![0; 32 * 1024]) + } + + /// Same as `new`, but the intermediate buffer for data is specified. + /// + /// Note that the capacity of the intermediate buffer is never increased, + /// and it is recommended for it to be large. + pub fn new_with_buf(r: R, buf: Vec) -> DeflateDecoder { + DeflateDecoder { + inner: bufread::DeflateDecoder::new(BufReader::with_buf(buf, r)), + } + } +} + +impl DeflateDecoder { + /// Resets the state of this decoder entirely, swapping out the input + /// stream for another. + /// + /// This will reset the internal state of this decoder and replace the + /// input stream with the one provided, returning the previous input + /// stream. Future data read from this decoder will be the decompressed + /// version of `r`'s data. + /// + /// Note that there may be currently buffered data when this function is + /// called, and in that case the buffered data is discarded. + pub fn reset(&mut self, r: R) -> R { + super::bufread::reset_decoder_data(&mut self.inner); + self.inner.get_mut().reset(r) + } + + /// Acquires a reference to the underlying stream + pub fn get_ref(&self) -> &R { + self.inner.get_ref().get_ref() + } + + /// Acquires a mutable reference to the underlying stream + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + self.inner.get_mut().get_mut() + } + + /// Consumes this decoder, returning the underlying reader. + /// + /// Note that there may be buffered bytes which are not re-acquired as part + /// of this transition. It's recommended to only call this function after + /// EOF has been reached. + pub fn into_inner(self) -> R { + self.inner.into_inner().into_inner() + } + + /// Returns the number of bytes that the decompressor has consumed. + /// + /// Note that this will likely be smaller than what the decompressor + /// actually read from the underlying stream due to buffering. + pub fn total_in(&self) -> u64 { + self.inner.total_in() + } + + /// Returns the number of bytes that the decompressor has produced. 
+ pub fn total_out(&self) -> u64 { + self.inner.total_out() + } +} + +impl Read for DeflateDecoder { + fn read(&mut self, into: &mut [u8]) -> io::Result { + self.inner.read(into) + } +} + +#[cfg(feature = "tokio")] +impl AsyncRead for DeflateDecoder {} + +impl Write for DeflateDecoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.get_mut().write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.get_mut().flush() + } +} + +#[cfg(feature = "tokio")] +impl AsyncWrite for DeflateDecoder { + fn shutdown(&mut self) -> Poll<(), io::Error> { + self.get_mut().shutdown() + } +} diff --git a/src/vendor/flate2/src/deflate/write.rs b/src/vendor/flate2/src/deflate/write.rs new file mode 100644 index 0000000000..596450348a --- /dev/null +++ b/src/vendor/flate2/src/deflate/write.rs @@ -0,0 +1,350 @@ +use std::io::prelude::*; +use std::io; + +#[cfg(feature = "tokio")] +use futures::Poll; +#[cfg(feature = "tokio")] +use tokio_io::{AsyncRead, AsyncWrite}; + +use zio; +use {Compress, Decompress}; + +/// A DEFLATE encoder, or compressor. +/// +/// This structure implements a [`Write`] interface and takes a stream of +/// uncompressed data, writing the compressed data to the wrapped writer. +/// +/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html +/// +/// # Examples +/// +/// ``` +/// use std::io::prelude::*; +/// use flate2::Compression; +/// use flate2::write::DeflateEncoder; +/// +/// // Vec implements Write to print the compressed bytes of sample string +/// # fn main() { +/// +/// let mut e = DeflateEncoder::new(Vec::new(), Compression::Default); +/// e.write(b"Hello World").unwrap(); +/// println!("{:?}", e.finish().unwrap()); +/// # } +/// ``` +#[derive(Debug)] +pub struct DeflateEncoder { + inner: zio::Writer, +} + +impl DeflateEncoder { + /// Creates a new encoder which will write compressed data to the stream + /// given at the given compression level. + /// + /// When this encoder is dropped or unwrapped the final pieces of data will + /// be flushed. + pub fn new(w: W, level: ::Compression) -> DeflateEncoder { + DeflateEncoder { + inner: zio::Writer::new(w, Compress::new(level, false)), + } + } + + /// Acquires a reference to the underlying writer. + pub fn get_ref(&self) -> &W { + self.inner.get_ref() + } + + /// Acquires a mutable reference to the underlying writer. + /// + /// Note that mutating the output/input state of the stream may corrupt this + /// object, so care must be taken when using this method. + pub fn get_mut(&mut self) -> &mut W { + self.inner.get_mut() + } + + /// Resets the state of this encoder entirely, swapping out the output + /// stream for another. + /// + /// This function will finish encoding the current stream into the current + /// output stream before swapping out the two output streams. If the stream + /// cannot be finished an error is returned. + /// + /// After the current stream has been finished, this will reset the internal + /// state of this encoder and replace the output stream with the one + /// provided, returning the previous output stream. Future data written to + /// this encoder will be the compressed into the stream `w` provided. + /// + /// # Errors + /// + /// This function will perform I/O to complete this stream, and any I/O + /// errors which occur will be returned from this function. + pub fn reset(&mut self, w: W) -> io::Result { + try!(self.inner.finish()); + self.inner.data.reset(); + Ok(self.inner.replace(w)) + } + + /// Attempt to finish this output stream, writing out final chunks of data. 
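Since `get_ref` on the write-oriented encoder borrows the wrapped writer, the compressed output accumulated so far can be inspected mid-stream. A small sketch (the count may be small before `finish` because of internal buffering):

```rust
extern crate flate2;

use std::io::prelude::*;
use flate2::Compression;
use flate2::write::DeflateEncoder;

fn main() {
    let mut enc = DeflateEncoder::new(Vec::new(), Compression::Default);
    enc.write_all(b"Hello World").unwrap();

    // Borrow the wrapped Vec to see how much has actually been emitted;
    // buffered data is not yet visible here.
    println!("emitted so far: {} bytes", enc.get_ref().len());

    let finished = enc.finish().unwrap();
    println!("total after finish: {} bytes", finished.len());
}
```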
+ /// + /// Note that this function can only be used once data has finished being + /// written to the output stream. After this function is called then further + /// calls to `write` may result in a panic. + /// + /// # Panics + /// + /// Attempts to write data to this stream may result in a panic after this + /// function is called. + /// + /// # Errors + /// + /// This function will perform I/O to complete this stream, and any I/O + /// errors which occur will be returned from this function. + pub fn try_finish(&mut self) -> io::Result<()> { + self.inner.finish() + } + + /// Consumes this encoder, flushing the output stream. + /// + /// This will flush the underlying data stream, close off the compressed + /// stream and, if successful, return the contained writer. + /// + /// Note that this function may not be suitable to call in a situation where + /// the underlying stream is an asynchronous I/O stream. To finish a stream + /// the `try_finish` (or `shutdown`) method should be used instead. To + /// re-acquire ownership of a stream it is safe to call this method after + /// `try_finish` or `shutdown` has returned `Ok`. + /// + /// # Errors + /// + /// This function will perform I/O to complete this stream, and any I/O + /// errors which occur will be returned from this function. + pub fn finish(mut self) -> io::Result { + try!(self.inner.finish()); + Ok(self.inner.take_inner()) + } + + /// Consumes this encoder, flushing the output stream. + /// + /// This will flush the underlying data stream and then return the contained + /// writer if the flush succeeded. + /// The compressed stream will not closed but only flushed. This + /// means that obtained byte array can by extended by another deflated + /// stream. To close the stream add the two bytes 0x3 and 0x0. + /// + /// # Errors + /// + /// This function will perform I/O to complete this stream, and any I/O + /// errors which occur will be returned from this function. + pub fn flush_finish(mut self) -> io::Result { + try!(self.inner.flush()); + Ok(self.inner.take_inner()) + } + + /// Returns the number of bytes that have been written to this compresor. + /// + /// Note that not all bytes written to this object may be accounted for, + /// there may still be some active buffering. + pub fn total_in(&self) -> u64 { + self.inner.data.total_in() + } + + /// Returns the number of bytes that the compressor has produced. + /// + /// Note that not all bytes may have been written yet, some may still be + /// buffered. + pub fn total_out(&self) -> u64 { + self.inner.data.total_out() + } +} + +impl Write for DeflateEncoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.inner.write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.flush() + } +} + +#[cfg(feature = "tokio")] +impl AsyncWrite for DeflateEncoder { + fn shutdown(&mut self) -> Poll<(), io::Error> { + try_nb!(self.inner.finish()); + self.inner.get_mut().shutdown() + } +} + +impl Read for DeflateEncoder { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.inner.get_mut().read(buf) + } +} + +#[cfg(feature = "tokio")] +impl AsyncRead for DeflateEncoder {} + +/// A DEFLATE decoder, or decompressor. +/// +/// This structure implements a [`Write`] and will emit a stream of decompressed +/// data when fed a stream of compressed data. 
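A sketch of the `try_finish` flow documented above: the final chunks are written without consuming the encoder, so the byte counters can still be queried, and calling `finish` afterwards is documented as safe:

```rust
extern crate flate2;

use std::io::prelude::*;
use flate2::Compression;
use flate2::write::DeflateEncoder;

fn main() {
    let mut enc = DeflateEncoder::new(Vec::new(), Compression::Default);
    enc.write_all(&vec![b'x'; 8192]).unwrap();

    // Finish the stream in place; the encoder is still owned by us.
    enc.try_finish().unwrap();
    println!("{} bytes in, {} bytes out", enc.total_in(), enc.total_out());

    // Re-acquire the wrapped writer after try_finish has returned Ok.
    let compressed = enc.finish().unwrap();
    println!("wrapped writer holds {} bytes", compressed.len());
}
```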
+/// +/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Read.html +/// +/// # Examples +/// +/// ``` +/// use std::io::prelude::*; +/// use std::io; +/// # use flate2::Compression; +/// # use flate2::write::DeflateEncoder; +/// use flate2::write::DeflateDecoder; +/// +/// # fn main() { +/// # let mut e = DeflateEncoder::new(Vec::new(), Compression::Default); +/// # e.write(b"Hello World").unwrap(); +/// # let bytes = e.finish().unwrap(); +/// # println!("{}", decode_writer(bytes).unwrap()); +/// # } +/// // Uncompresses a Deflate Encoded vector of bytes and returns a string or error +/// // Here Vec implements Write +/// fn decode_writer(bytes: Vec) -> io::Result { +/// let mut writer = Vec::new(); +/// let mut deflater = DeflateDecoder::new(writer); +/// deflater.write(&bytes[..])?; +/// writer = deflater.finish()?; +/// let return_string = String::from_utf8(writer).expect("String parsing error"); +/// Ok(return_string) +/// } +/// ``` +#[derive(Debug)] +pub struct DeflateDecoder { + inner: zio::Writer, +} + + +impl DeflateDecoder { + /// Creates a new decoder which will write uncompressed data to the stream. + /// + /// When this encoder is dropped or unwrapped the final pieces of data will + /// be flushed. + pub fn new(w: W) -> DeflateDecoder { + DeflateDecoder { + inner: zio::Writer::new(w, Decompress::new(false)), + } + } + + /// Acquires a reference to the underlying writer. + pub fn get_ref(&self) -> &W { + self.inner.get_ref() + } + + /// Acquires a mutable reference to the underlying writer. + /// + /// Note that mutating the output/input state of the stream may corrupt this + /// object, so care must be taken when using this method. + pub fn get_mut(&mut self) -> &mut W { + self.inner.get_mut() + } + + /// Resets the state of this decoder entirely, swapping out the output + /// stream for another. + /// + /// This function will finish encoding the current stream into the current + /// output stream before swapping out the two output streams. + /// + /// This will then reset the internal state of this decoder and replace the + /// output stream with the one provided, returning the previous output + /// stream. Future data written to this decoder will be decompressed into + /// the output stream `w`. + /// + /// # Errors + /// + /// This function will perform I/O to finish the stream, and if that I/O + /// returns an error then that will be returned from this function. + pub fn reset(&mut self, w: W) -> io::Result { + try!(self.inner.finish()); + self.inner.data = Decompress::new(false); + Ok(self.inner.replace(w)) + } + + /// Attempt to finish this output stream, writing out final chunks of data. + /// + /// Note that this function can only be used once data has finished being + /// written to the output stream. After this function is called then further + /// calls to `write` may result in a panic. + /// + /// # Panics + /// + /// Attempts to write data to this stream may result in a panic after this + /// function is called. + /// + /// # Errors + /// + /// This function will perform I/O to finish the stream, returning any + /// errors which happen. + pub fn try_finish(&mut self) -> io::Result<()> { + self.inner.finish() + } + + /// Consumes this encoder, flushing the output stream. + /// + /// This will flush the underlying data stream and then return the contained + /// writer if the flush succeeded. + /// + /// Note that this function may not be suitable to call in a situation where + /// the underlying stream is an asynchronous I/O stream. 
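A sketch of `reset` on the write-oriented decoder described above, mirroring the second half of the `reset_decoder` test: the output `Vec` is swapped for a fresh one and the previous result is returned:

```rust
extern crate flate2;

use std::io::prelude::*;
use flate2::Compression;
use flate2::write::{DeflateDecoder, DeflateEncoder};

fn main() {
    let mut enc = DeflateEncoder::new(Vec::new(), Compression::Default);
    enc.write_all(b"Hello World").unwrap();
    let compressed = enc.finish().unwrap();

    // Decode the same stream twice, swapping the output Vec in between.
    let mut dec = DeflateDecoder::new(Vec::new());
    dec.write_all(&compressed).unwrap();
    let first = dec.reset(Vec::new()).unwrap();
    dec.write_all(&compressed).unwrap();
    let second = dec.finish().unwrap();
    assert_eq!(first, second);
}
```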
To finish a stream + /// the `try_finish` (or `shutdown`) method should be used instead. To + /// re-acquire ownership of a stream it is safe to call this method after + /// `try_finish` or `shutdown` has returned `Ok`. + /// + /// # Errors + /// + /// This function will perform I/O to complete this stream, and any I/O + /// errors which occur will be returned from this function. + pub fn finish(mut self) -> io::Result { + try!(self.inner.finish()); + Ok(self.inner.take_inner()) + } + + /// Returns the number of bytes that the decompressor has consumed for + /// decompression. + /// + /// Note that this will likely be smaller than the number of bytes + /// successfully written to this stream due to internal buffering. + pub fn total_in(&self) -> u64 { + self.inner.data.total_in() + } + + /// Returns the number of bytes that the decompressor has written to its + /// output stream. + pub fn total_out(&self) -> u64 { + self.inner.data.total_out() + } +} + +impl Write for DeflateDecoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.inner.write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.flush() + } +} + +#[cfg(feature = "tokio")] +impl AsyncWrite for DeflateDecoder { + fn shutdown(&mut self) -> Poll<(), io::Error> { + try_nb!(self.inner.finish()); + self.inner.get_mut().shutdown() + } +} + +impl Read for DeflateDecoder { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.inner.get_mut().read(buf) + } +} + +#[cfg(feature = "tokio")] +impl AsyncRead for DeflateDecoder {} diff --git a/src/vendor/flate2/src/ffi.rs b/src/vendor/flate2/src/ffi.rs index 575effc70f..92a30a6a7e 100644 --- a/src/vendor/flate2/src/ffi.rs +++ b/src/vendor/flate2/src/ffi.rs @@ -69,6 +69,12 @@ mod imp { inner: Box, } + impl ::std::fmt::Debug for StreamWrapper{ + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error>{ + write!(f, "StreamWrapper") + } + } + impl Default for StreamWrapper { fn default() -> StreamWrapper { StreamWrapper { @@ -105,6 +111,12 @@ mod imp { inner: mz_stream, } + impl ::std::fmt::Debug for StreamWrapper{ + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error>{ + write!(f, "StreamWrapper") + } + } + impl Default for StreamWrapper { fn default() -> StreamWrapper { StreamWrapper { diff --git a/src/vendor/flate2/src/gz.rs b/src/vendor/flate2/src/gz.rs deleted file mode 100644 index 42bc9c0e87..0000000000 --- a/src/vendor/flate2/src/gz.rs +++ /dev/null @@ -1,1049 +0,0 @@ -//! gzip compression/decompression -//! -//! [1]: http://www.gzip.org/zlib/rfc-gzip.html - -use std::cmp; -use std::env; -use std::ffi::CString; -use std::io::prelude::*; -use std::io; -use std::mem; - -#[cfg(feature = "tokio")] -use futures::Poll; -#[cfg(feature = "tokio")] -use tokio_io::{AsyncRead, AsyncWrite}; - -use {Compression, Compress}; -use bufreader::BufReader; -use crc::{CrcReader, Crc}; -use deflate; -use zio; - -static FHCRC: u8 = 1 << 1; -static FEXTRA: u8 = 1 << 2; -static FNAME: u8 = 1 << 3; -static FCOMMENT: u8 = 1 << 4; - -/// A gzip streaming encoder -/// -/// This structure exposes a `Write` interface that will emit compressed data -/// to the underlying writer `W`. -pub struct EncoderWriter { - inner: zio::Writer, - crc: Crc, - crc_bytes_written: usize, - header: Vec, -} - -/// A gzip streaming encoder -/// -/// This structure exposes a `Read` interface that will read uncompressed data -/// from the underlying reader and expose the compressed version as a `Read` -/// interface. 
-pub struct EncoderReader { - inner: EncoderReaderBuf>, -} - -/// A gzip streaming encoder -/// -/// This structure exposes a `Read` interface that will read uncompressed data -/// from the underlying reader and expose the compressed version as a `Read` -/// interface. -pub struct EncoderReaderBuf { - inner: deflate::EncoderReaderBuf>, - header: Vec, - pos: usize, - eof: bool, -} - -/// A builder structure to create a new gzip Encoder. -/// -/// This structure controls header configuration options such as the filename. -pub struct Builder { - extra: Option>, - filename: Option, - comment: Option, - mtime: u32, -} - -/// A gzip streaming decoder -/// -/// This structure exposes a `Read` interface that will consume compressed -/// data from the underlying reader and emit uncompressed data. -pub struct DecoderReader { - inner: DecoderReaderBuf>, -} - -/// A gzip streaming decoder that decodes all members of a multistream -/// -/// A gzip member consists of a header, compressed data and a trailer. The [gzip -/// specification](https://tools.ietf.org/html/rfc1952), however, allows multiple -/// gzip members to be joined in a single stream. `MultiDecoderReader` will -/// decode all consecutive members while `DecoderReader` will only decompress the -/// first gzip member. The multistream format is commonly used in bioinformatics, -/// for example when using the BGZF compressed data. -/// -/// This structure exposes a `Read` interface that will consume all gzip members -/// from the underlying reader and emit uncompressed data. -pub struct MultiDecoderReader { - inner: MultiDecoderReaderBuf>, -} - -/// A gzip streaming decoder -/// -/// This structure exposes a `Read` interface that will consume compressed -/// data from the underlying reader and emit uncompressed data. -pub struct DecoderReaderBuf { - inner: CrcReader>, - header: Header, - finished: bool, -} - -/// A gzip streaming decoder that decodes all members of a multistream -/// -/// A gzip member consists of a header, compressed data and a trailer. The [gzip -/// specification](https://tools.ietf.org/html/rfc1952), however, allows multiple -/// gzip members to be joined in a single stream. `MultiDecoderReaderBuf` will -/// decode all consecutive members while `DecoderReaderBuf` will only decompress -/// the first gzip member. The multistream format is commonly used in -/// bioinformatics, for example when using the BGZF compressed data. -/// -/// This structure exposes a `Read` interface that will consume all gzip members -/// from the underlying reader and emit uncompressed data. -pub struct MultiDecoderReaderBuf { - inner: CrcReader>, - header: Header, - finished: bool, -} - -/// A structure representing the header of a gzip stream. -/// -/// The header can contain metadata about the file that was compressed, if -/// present. -pub struct Header { - extra: Option>, - filename: Option>, - comment: Option>, - mtime: u32, -} - -impl Builder { - /// Create a new blank builder with no header by default. - pub fn new() -> Builder { - Builder { - extra: None, - filename: None, - comment: None, - mtime: 0, - } - } - - /// Configure the `mtime` field in the gzip header. - pub fn mtime(mut self, mtime: u32) -> Builder { - self.mtime = mtime; - self - } - - /// Configure the `extra` field in the gzip header. - pub fn extra(mut self, extra: Vec) -> Builder { - self.extra = Some(extra); - self - } - - /// Configure the `filename` field in the gzip header. 
- pub fn filename(mut self, filename: &[u8]) -> Builder { - self.filename = Some(CString::new(filename).unwrap()); - self - } - - /// Configure the `comment` field in the gzip header. - pub fn comment(mut self, comment: &[u8]) -> Builder { - self.comment = Some(CString::new(comment).unwrap()); - self - } - - /// Consume this builder, creating a writer encoder in the process. - /// - /// The data written to the returned encoder will be compressed and then - /// written out to the supplied parameter `w`. - pub fn write(self, w: W, lvl: Compression) -> EncoderWriter { - EncoderWriter { - inner: zio::Writer::new(w, Compress::new(lvl, false)), - crc: Crc::new(), - header: self.into_header(lvl), - crc_bytes_written: 0, - } - } - - /// Consume this builder, creating a reader encoder in the process. - /// - /// Data read from the returned encoder will be the compressed version of - /// the data read from the given reader. - pub fn read(self, r: R, lvl: Compression) -> EncoderReader { - EncoderReader { - inner: self.buf_read(BufReader::new(r), lvl), - } - } - - /// Consume this builder, creating a reader encoder in the process. - /// - /// Data read from the returned encoder will be the compressed version of - /// the data read from the given reader. - pub fn buf_read(self, r: R, lvl: Compression) -> EncoderReaderBuf - where R: BufRead - { - let crc = CrcReader::new(r); - EncoderReaderBuf { - inner: deflate::EncoderReaderBuf::new(crc, lvl), - header: self.into_header(lvl), - pos: 0, - eof: false, - } - } - - fn into_header(self, lvl: Compression) -> Vec { - let Builder { extra, filename, comment, mtime } = self; - let mut flg = 0; - let mut header = vec![0u8; 10]; - match extra { - Some(v) => { - flg |= FEXTRA; - header.push((v.len() >> 0) as u8); - header.push((v.len() >> 8) as u8); - header.extend(v); - } - None => {} - } - match filename { - Some(filename) => { - flg |= FNAME; - header.extend(filename.as_bytes_with_nul().iter().map(|x| *x)); - } - None => {} - } - match comment { - Some(comment) => { - flg |= FCOMMENT; - header.extend(comment.as_bytes_with_nul().iter().map(|x| *x)); - } - None => {} - } - header[0] = 0x1f; - header[1] = 0x8b; - header[2] = 8; - header[3] = flg; - header[4] = (mtime >> 0) as u8; - header[5] = (mtime >> 8) as u8; - header[6] = (mtime >> 16) as u8; - header[7] = (mtime >> 24) as u8; - header[8] = match lvl { - Compression::Best => 2, - Compression::Fast => 4, - _ => 0, - }; - header[9] = match env::consts::OS { - "linux" => 3, - "macos" => 7, - "win32" => 0, - _ => 255, - }; - return header; - } -} - -impl EncoderWriter { - /// Creates a new encoder which will use the given compression level. - /// - /// The encoder is not configured specially for the emitted header. For - /// header configuration, see the `Builder` type. - /// - /// The data written to the returned encoder will be compressed and then - /// written to the stream `w`. - pub fn new(w: W, level: Compression) -> EncoderWriter { - Builder::new().write(w, level) - } - - /// Acquires a reference to the underlying writer. - pub fn get_ref(&self) -> &W { - self.inner.get_ref() - } - - /// Acquires a mutable reference to the underlying writer. - /// - /// Note that mutation of the writer may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut W { - self.inner.get_mut() - } - - /// Attempt to finish this output stream, writing out final chunks of data. 
- /// - /// Note that this function can only be used once data has finished being - /// written to the output stream. After this function is called then further - /// calls to `write` may result in a panic. - /// - /// # Panics - /// - /// Attempts to write data to this stream may result in a panic after this - /// function is called. - pub fn try_finish(&mut self) -> io::Result<()> { - try!(self.write_header()); - try!(self.inner.finish()); - - while self.crc_bytes_written < 8 { - let (sum, amt) = (self.crc.sum() as u32, self.crc.amount()); - let buf = [(sum >> 0) as u8, - (sum >> 8) as u8, - (sum >> 16) as u8, - (sum >> 24) as u8, - (amt >> 0) as u8, - (amt >> 8) as u8, - (amt >> 16) as u8, - (amt >> 24) as u8]; - let mut inner = self.inner.get_mut(); - let n = try!(inner.write(&buf[self.crc_bytes_written..])); - self.crc_bytes_written += n; - } - Ok(()) - } - - /// Finish encoding this stream, returning the underlying writer once the - /// encoding is done. - /// - /// Note that this function may not be suitable to call in a situation where - /// the underlying stream is an asynchronous I/O stream. To finish a stream - /// the `try_finish` (or `shutdown`) method should be used instead. To - /// re-acquire ownership of a stream it is safe to call this method after - /// `try_finish` or `shutdown` has returned `Ok`. - pub fn finish(mut self) -> io::Result { - try!(self.try_finish()); - Ok(self.inner.take_inner()) - } - - fn write_header(&mut self) -> io::Result<()> { - while self.header.len() > 0 { - let n = try!(self.inner.get_mut().write(&self.header)); - self.header.drain(..n); - } - Ok(()) - } -} - -impl Write for EncoderWriter { - fn write(&mut self, buf: &[u8]) -> io::Result { - assert_eq!(self.crc_bytes_written, 0); - try!(self.write_header()); - let n = try!(self.inner.write(buf)); - self.crc.update(&buf[..n]); - Ok(n) - } - - fn flush(&mut self) -> io::Result<()> { - assert_eq!(self.crc_bytes_written, 0); - self.inner.flush() - } -} - -#[cfg(feature = "tokio")] -impl AsyncWrite for EncoderWriter { - fn shutdown(&mut self) -> Poll<(), io::Error> { - try_nb!(self.try_finish()); - self.get_mut().shutdown() - } -} - -impl Read for EncoderWriter { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.get_mut().read(buf) - } -} - -#[cfg(feature = "tokio")] -impl AsyncRead for EncoderWriter { -} - -impl Drop for EncoderWriter { - fn drop(&mut self) { - if self.inner.is_present() { - let _ = self.try_finish(); - } - } -} - -impl EncoderReader { - /// Creates a new encoder which will use the given compression level. - /// - /// The encoder is not configured specially for the emitted header. For - /// header configuration, see the `Builder` type. - /// - /// The data read from the stream `r` will be compressed and available - /// through the returned reader. - pub fn new(r: R, level: Compression) -> EncoderReader { - Builder::new().read(r, level) - } - - /// Acquires a reference to the underlying reader. - pub fn get_ref(&self) -> &R { - self.inner.get_ref().get_ref() - } - - /// Acquires a mutable reference to the underlying reader. - /// - /// Note that mutation of the reader may result in surprising results if - /// this encoder is continued to be used. 
- pub fn get_mut(&mut self) -> &mut R { - self.inner.get_mut().get_mut() - } - - /// Returns the underlying stream, consuming this encoder - pub fn into_inner(self) -> R { - self.inner.into_inner().into_inner() - } -} - -fn copy(into: &mut [u8], from: &[u8], pos: &mut usize) -> usize { - let min = cmp::min(into.len(), from.len() - *pos); - for (slot, val) in into.iter_mut().zip(from[*pos..*pos + min].iter()) { - *slot = *val; - } - *pos += min; - return min; -} - -impl Read for EncoderReader { - fn read(&mut self, mut into: &mut [u8]) -> io::Result { - self.inner.read(into) - } -} - -impl Write for EncoderReader { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.get_mut().write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.get_mut().flush() - } -} - -impl EncoderReaderBuf { - /// Creates a new encoder which will use the given compression level. - /// - /// The encoder is not configured specially for the emitted header. For - /// header configuration, see the `Builder` type. - /// - /// The data read from the stream `r` will be compressed and available - /// through the returned reader. - pub fn new(r: R, level: Compression) -> EncoderReaderBuf { - Builder::new().buf_read(r, level) - } - - /// Acquires a reference to the underlying reader. - pub fn get_ref(&self) -> &R { - self.inner.get_ref().get_ref() - } - - /// Acquires a mutable reference to the underlying reader. - /// - /// Note that mutation of the reader may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - self.inner.get_mut().get_mut() - } - - /// Returns the underlying stream, consuming this encoder - pub fn into_inner(self) -> R { - self.inner.into_inner().into_inner() - } - - fn read_footer(&mut self, into: &mut [u8]) -> io::Result { - if self.pos == 8 { - return Ok(0); - } - let crc = self.inner.get_ref().crc(); - let ref arr = [(crc.sum() >> 0) as u8, - (crc.sum() >> 8) as u8, - (crc.sum() >> 16) as u8, - (crc.sum() >> 24) as u8, - (crc.amount() >> 0) as u8, - (crc.amount() >> 8) as u8, - (crc.amount() >> 16) as u8, - (crc.amount() >> 24) as u8]; - Ok(copy(into, arr, &mut self.pos)) - } -} - -impl Read for EncoderReaderBuf { - fn read(&mut self, mut into: &mut [u8]) -> io::Result { - let mut amt = 0; - if self.eof { - return self.read_footer(into); - } else if self.pos < self.header.len() { - amt += copy(into, &self.header, &mut self.pos); - if amt == into.len() { - return Ok(amt); - } - let tmp = into; - into = &mut tmp[amt..]; - } - match try!(self.inner.read(into)) { - 0 => { - self.eof = true; - self.pos = 0; - self.read_footer(into) - } - n => Ok(amt + n), - } - } -} - -impl Write for EncoderReaderBuf { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.get_mut().write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.get_mut().flush() - } -} - -impl DecoderReader { - /// Creates a new decoder from the given reader, immediately parsing the - /// gzip header. - /// - /// If an error is encountered when parsing the gzip header, an error is - /// returned. - pub fn new(r: R) -> io::Result> { - DecoderReaderBuf::new(BufReader::new(r)).map(|r| { - DecoderReader { inner: r } - }) - } - - /// Returns the header associated with this stream. - pub fn header(&self) -> &Header { - self.inner.header() - } - - /// Acquires a reference to the underlying reader. - pub fn get_ref(&self) -> &R { - self.inner.get_ref().get_ref() - } - - /// Acquires a mutable reference to the underlying stream. 
- /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - self.inner.get_mut().get_mut() - } - - /// Consumes this decoder, returning the underlying reader. - pub fn into_inner(self) -> R { - self.inner.into_inner().into_inner() - } -} - -impl Read for DecoderReader { - fn read(&mut self, into: &mut [u8]) -> io::Result { - self.inner.read(into) - } -} - -impl Write for DecoderReader { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.get_mut().write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.get_mut().flush() - } -} - -impl MultiDecoderReader { - /// Creates a new decoder from the given reader, immediately parsing the - /// (first) gzip header. If the gzip stream contains multiple members all will - /// be decoded. - /// - /// If an error is encountered when parsing the gzip header, an error is - /// returned. - pub fn new(r: R) -> io::Result> { - MultiDecoderReaderBuf::new(BufReader::new(r)).map(|r| { - MultiDecoderReader { inner: r } - }) - } - - /// Returns the current header associated with this stream. - pub fn header(&self) -> &Header { - self.inner.header() - } - - /// Acquires a reference to the underlying reader. - pub fn get_ref(&self) -> &R { - self.inner.get_ref().get_ref() - } - - /// Acquires a mutable reference to the underlying stream. - /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - self.inner.get_mut().get_mut() - } - - /// Consumes this decoder, returning the underlying reader. - pub fn into_inner(self) -> R { - self.inner.into_inner().into_inner() - } -} - -impl Read for MultiDecoderReader { - fn read(&mut self, into: &mut [u8]) -> io::Result { - self.inner.read(into) - } -} - -impl Write for MultiDecoderReader { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.get_mut().write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.get_mut().flush() - } -} - -impl DecoderReaderBuf { - /// Creates a new decoder from the given reader, immediately parsing the - /// gzip header. - /// - /// If an error is encountered when parsing the gzip header, an error is - /// returned. - pub fn new(mut r: R) -> io::Result> { - let header = try!(read_gz_header(&mut r)); - - let flate = deflate::DecoderReaderBuf::new(r); - return Ok(DecoderReaderBuf { - inner: CrcReader::new(flate), - header: header, - finished: false, - }); - } - - - /// Returns the header associated with this stream. - pub fn header(&self) -> &Header { - &self.header - } - - /// Acquires a reference to the underlying reader. - pub fn get_ref(&self) -> &R { - self.inner.get_ref().get_ref() - } - - /// Acquires a mutable reference to the underlying stream. - /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - self.inner.get_mut().get_mut() - } - - /// Consumes this decoder, returning the underlying reader. 
- pub fn into_inner(self) -> R { - self.inner.into_inner().into_inner() - } - - fn finish(&mut self) -> io::Result<()> { - if self.finished { - return Ok(()); - } - let ref mut buf = [0u8; 8]; - { - let mut len = 0; - - while len < buf.len() { - match try!(self.inner.get_mut().get_mut().read(&mut buf[len..])) { - 0 => return Err(corrupt()), - n => len += n, - } - } - } - - let crc = ((buf[0] as u32) << 0) | ((buf[1] as u32) << 8) | - ((buf[2] as u32) << 16) | - ((buf[3] as u32) << 24); - let amt = ((buf[4] as u32) << 0) | ((buf[5] as u32) << 8) | - ((buf[6] as u32) << 16) | - ((buf[7] as u32) << 24); - if crc != self.inner.crc().sum() as u32 { - return Err(corrupt()); - } - if amt != self.inner.crc().amount() { - return Err(corrupt()); - } - self.finished = true; - Ok(()) - } -} - -impl Read for DecoderReaderBuf { - fn read(&mut self, into: &mut [u8]) -> io::Result { - match try!(self.inner.read(into)) { - 0 => { - try!(self.finish()); - Ok(0) - } - n => Ok(n), - } - } -} - -impl Write for DecoderReaderBuf { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.get_mut().write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.get_mut().flush() - } -} - -impl MultiDecoderReaderBuf { - /// Creates a new decoder from the given reader, immediately parsing the - /// (first) gzip header. If the gzip stream contains multiple members all will - /// be decoded. - /// - /// If an error is encountered when parsing the gzip header, an error is - /// returned. - pub fn new(mut r: R) -> io::Result> { - let header = try!(read_gz_header(&mut r)); - - let flate = deflate::DecoderReaderBuf::new(r); - return Ok(MultiDecoderReaderBuf { - inner: CrcReader::new(flate), - header: header, - finished: false, - }); - } - - - /// Returns the current header associated with this stream. - pub fn header(&self) -> &Header { - &self.header - } - - /// Acquires a reference to the underlying reader. - pub fn get_ref(&self) -> &R { - self.inner.get_ref().get_ref() - } - - /// Acquires a mutable reference to the underlying stream. - /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - self.inner.get_mut().get_mut() - } - - /// Consumes this decoder, returning the underlying reader. 
- pub fn into_inner(self) -> R { - self.inner.into_inner().into_inner() - } - - fn finish_member(&mut self) -> io::Result { - if self.finished { - return Ok(0); - } - let ref mut buf = [0u8; 8]; - { - let mut len = 0; - - while len < buf.len() { - match try!(self.inner.get_mut().get_mut().read(&mut buf[len..])) { - 0 => return Err(corrupt()), - n => len += n, - } - } - } - - let crc = ((buf[0] as u32) << 0) | ((buf[1] as u32) << 8) | - ((buf[2] as u32) << 16) | - ((buf[3] as u32) << 24); - let amt = ((buf[4] as u32) << 0) | ((buf[5] as u32) << 8) | - ((buf[6] as u32) << 16) | - ((buf[7] as u32) << 24); - if crc != self.inner.crc().sum() as u32 { - return Err(corrupt()); - } - if amt != self.inner.crc().amount() { - return Err(corrupt()); - } - let remaining = match self.inner.get_mut().get_mut().fill_buf() { - Ok(b) => { - if b.is_empty() { - self.finished = true; - return Ok(0); - } else { - b.len() - } - }, - Err(e) => return Err(e) - }; - - let next_header = try!(read_gz_header(self.inner.get_mut().get_mut())); - mem::replace(&mut self.header, next_header); - self.inner.reset(); - self.inner.get_mut().reset_data(); - - Ok(remaining) - } -} - -impl Read for MultiDecoderReaderBuf { - fn read(&mut self, into: &mut [u8]) -> io::Result { - match try!(self.inner.read(into)) { - 0 => { - match self.finish_member() { - Ok(0) => Ok(0), - Ok(_) => self.read(into), - Err(e) => Err(e) - } - }, - n => Ok(n), - } - } -} - -impl Write for MultiDecoderReaderBuf { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.get_mut().write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.get_mut().flush() - } -} - -impl Header { - /// Returns the `filename` field of this gzip stream's header, if present. - pub fn filename(&self) -> Option<&[u8]> { - self.filename.as_ref().map(|s| &s[..]) - } - - /// Returns the `extra` field of this gzip stream's header, if present. - pub fn extra(&self) -> Option<&[u8]> { - self.extra.as_ref().map(|s| &s[..]) - } - - /// Returns the `comment` field of this gzip stream's header, if present. - pub fn comment(&self) -> Option<&[u8]> { - self.comment.as_ref().map(|s| &s[..]) - } - - /// Returns the `mtime` field of this gzip stream's header, if present. - pub fn mtime(&self) -> u32 { - self.mtime - } -} - -fn corrupt() -> io::Error { - io::Error::new(io::ErrorKind::InvalidInput, - "corrupt gzip stream does not have a matching checksum") -} - -fn bad_header() -> io::Error { - io::Error::new(io::ErrorKind::InvalidInput, "invalid gzip header") -} - -fn read_le_u16(r: &mut R) -> io::Result { - let mut b = [0; 2]; - try!(r.read_exact(&mut b)); - Ok((b[0] as u16) | ((b[1] as u16) << 8)) -} - -fn read_gz_header(r: &mut R) -> io::Result

{ - let mut crc_reader = CrcReader::new(r); - let mut header = [0; 10]; - try!(crc_reader.read_exact(&mut header)); - - let id1 = header[0]; - let id2 = header[1]; - if id1 != 0x1f || id2 != 0x8b { - return Err(bad_header()); - } - let cm = header[2]; - if cm != 8 { - return Err(bad_header()); - } - - let flg = header[3]; - let mtime = ((header[4] as u32) << 0) | ((header[5] as u32) << 8) | - ((header[6] as u32) << 16) | - ((header[7] as u32) << 24); - let _xfl = header[8]; - let _os = header[9]; - - let extra = if flg & FEXTRA != 0 { - let xlen = try!(read_le_u16(&mut crc_reader)); - let mut extra = vec![0; xlen as usize]; - try!(crc_reader.read_exact(&mut extra)); - Some(extra) - } else { - None - }; - let filename = if flg & FNAME != 0 { - // wow this is slow - let mut b = Vec::new(); - for byte in crc_reader.by_ref().bytes() { - let byte = try!(byte); - if byte == 0 { - break; - } - b.push(byte); - } - Some(b) - } else { - None - }; - let comment = if flg & FCOMMENT != 0 { - // wow this is slow - let mut b = Vec::new(); - for byte in crc_reader.by_ref().bytes() { - let byte = try!(byte); - if byte == 0 { - break; - } - b.push(byte); - } - Some(b) - } else { - None - }; - - if flg & FHCRC != 0 { - let calced_crc = crc_reader.crc().sum() as u16; - let stored_crc = try!(read_le_u16(&mut crc_reader)); - if calced_crc != stored_crc { - return Err(corrupt()); - } - } - - Ok(Header { - extra: extra, - filename: filename, - comment: comment, - mtime: mtime, - }) -} - -#[cfg(test)] -mod tests { - use std::io::prelude::*; - - use super::{EncoderWriter, EncoderReader, DecoderReader, Builder}; - use Compression::Default; - use rand::{thread_rng, Rng}; - - #[test] - fn roundtrip() { - let mut e = EncoderWriter::new(Vec::new(), Default); - e.write_all(b"foo bar baz").unwrap(); - let inner = e.finish().unwrap(); - let mut d = DecoderReader::new(&inner[..]).unwrap(); - let mut s = String::new(); - d.read_to_string(&mut s).unwrap(); - assert_eq!(s, "foo bar baz"); - } - - #[test] - fn roundtrip_zero() { - let e = EncoderWriter::new(Vec::new(), Default); - let inner = e.finish().unwrap(); - let mut d = DecoderReader::new(&inner[..]).unwrap(); - let mut s = String::new(); - d.read_to_string(&mut s).unwrap(); - assert_eq!(s, ""); - } - - #[test] - fn roundtrip_big() { - let mut real = Vec::new(); - let mut w = EncoderWriter::new(Vec::new(), Default); - let v = thread_rng().gen_iter::().take(1024).collect::>(); - for _ in 0..200 { - let to_write = &v[..thread_rng().gen_range(0, v.len())]; - real.extend(to_write.iter().map(|x| *x)); - w.write_all(to_write).unwrap(); - } - let result = w.finish().unwrap(); - let mut r = DecoderReader::new(&result[..]).unwrap(); - let mut v = Vec::new(); - r.read_to_end(&mut v).unwrap(); - assert!(v == real); - } - - #[test] - fn roundtrip_big2() { - let v = thread_rng() - .gen_iter::() - .take(1024 * 1024) - .collect::>(); - let mut r = DecoderReader::new(EncoderReader::new(&v[..], Default)) - .unwrap(); - let mut res = Vec::new(); - r.read_to_end(&mut res).unwrap(); - assert!(res == v); - } - - #[test] - fn fields() { - let r = vec![0, 2, 4, 6]; - let e = Builder::new() - .filename(b"foo.rs") - .comment(b"bar") - .extra(vec![0, 1, 2, 3]) - .read(&r[..], Default); - let mut d = DecoderReader::new(e).unwrap(); - assert_eq!(d.header().filename(), Some(&b"foo.rs"[..])); - assert_eq!(d.header().comment(), Some(&b"bar"[..])); - assert_eq!(d.header().extra(), Some(&b"\x00\x01\x02\x03"[..])); - let mut res = Vec::new(); - d.read_to_end(&mut res).unwrap(); - assert_eq!(res, vec![0, 
2, 4, 6]); - - } - - #[test] - fn keep_reading_after_end() { - let mut e = EncoderWriter::new(Vec::new(), Default); - e.write_all(b"foo bar baz").unwrap(); - let inner = e.finish().unwrap(); - let mut d = DecoderReader::new(&inner[..]).unwrap(); - let mut s = String::new(); - d.read_to_string(&mut s).unwrap(); - assert_eq!(s, "foo bar baz"); - d.read_to_string(&mut s).unwrap(); - assert_eq!(s, "foo bar baz"); - } - - #[test] - fn qc_reader() { - ::quickcheck::quickcheck(test as fn(_) -> _); - - fn test(v: Vec) -> bool { - let r = EncoderReader::new(&v[..], Default); - let mut r = DecoderReader::new(r).unwrap(); - let mut v2 = Vec::new(); - r.read_to_end(&mut v2).unwrap(); - v == v2 - } - } - - #[test] - fn flush_after_write() { - let mut f = EncoderWriter::new(Vec::new(), Default); - write!(f, "Hello world").unwrap(); - f.flush().unwrap(); - } -} diff --git a/src/vendor/flate2/src/gz/bufread.rs b/src/vendor/flate2/src/gz/bufread.rs new file mode 100644 index 0000000000..22be33ae35 --- /dev/null +++ b/src/vendor/flate2/src/gz/bufread.rs @@ -0,0 +1,547 @@ +use std::cmp; +use std::io::prelude::*; +use std::io; +use std::mem; + +use super::{Builder, Header}; +use super::{FCOMMENT, FEXTRA, FHCRC, FNAME}; +use Compression; +use crc::CrcReader; +use deflate; + +fn copy(into: &mut [u8], from: &[u8], pos: &mut usize) -> usize { + let min = cmp::min(into.len(), from.len() - *pos); + for (slot, val) in into.iter_mut().zip(from[*pos..*pos + min].iter()) { + *slot = *val; + } + *pos += min; + return min; +} +fn corrupt() -> io::Error { + io::Error::new( + io::ErrorKind::InvalidInput, + "corrupt gzip stream does not have a matching checksum", + ) +} + +fn bad_header() -> io::Error { + io::Error::new(io::ErrorKind::InvalidInput, "invalid gzip header") +} + +fn read_le_u16(r: &mut R) -> io::Result { + let mut b = [0; 2]; + try!(r.read_exact(&mut b)); + Ok((b[0] as u16) | ((b[1] as u16) << 8)) +} + +fn read_gz_header(r: &mut R) -> io::Result
{ + let mut crc_reader = CrcReader::new(r); + let mut header = [0; 10]; + try!(crc_reader.read_exact(&mut header)); + + let id1 = header[0]; + let id2 = header[1]; + if id1 != 0x1f || id2 != 0x8b { + return Err(bad_header()); + } + let cm = header[2]; + if cm != 8 { + return Err(bad_header()); + } + + let flg = header[3]; + let mtime = ((header[4] as u32) << 0) | ((header[5] as u32) << 8) | ((header[6] as u32) << 16) | + ((header[7] as u32) << 24); + let _xfl = header[8]; + let _os = header[9]; + + let extra = if flg & FEXTRA != 0 { + let xlen = try!(read_le_u16(&mut crc_reader)); + let mut extra = vec![0; xlen as usize]; + try!(crc_reader.read_exact(&mut extra)); + Some(extra) + } else { + None + }; + let filename = if flg & FNAME != 0 { + // wow this is slow + let mut b = Vec::new(); + for byte in crc_reader.by_ref().bytes() { + let byte = try!(byte); + if byte == 0 { + break; + } + b.push(byte); + } + Some(b) + } else { + None + }; + let comment = if flg & FCOMMENT != 0 { + // wow this is slow + let mut b = Vec::new(); + for byte in crc_reader.by_ref().bytes() { + let byte = try!(byte); + if byte == 0 { + break; + } + b.push(byte); + } + Some(b) + } else { + None + }; + + if flg & FHCRC != 0 { + let calced_crc = crc_reader.crc().sum() as u16; + let stored_crc = try!(read_le_u16(&mut crc_reader)); + if calced_crc != stored_crc { + return Err(corrupt()); + } + } + + Ok(Header { + extra: extra, + filename: filename, + comment: comment, + mtime: mtime, + }) +} + + +/// A gzip streaming encoder +/// +/// This structure exposes a [`BufRead`] interface that will read uncompressed data +/// from the underlying reader and expose the compressed version as a [`BufRead`] +/// interface. +/// +/// [`BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html +/// +/// # Examples +/// +/// ``` +/// use std::io::prelude::*; +/// use std::io; +/// use flate2::Compression; +/// use flate2::bufread::GzEncoder; +/// use std::fs::File; +/// use std::io::BufReader; +/// +/// // Opens sample file, compresses the contents and returns a Vector or error +/// // File wrapped in a BufReader implements BufRead +/// +/// fn open_hello_world() -> io::Result> { +/// let f = File::open("examples/hello_world.txt")?; +/// let b = BufReader::new(f); +/// let mut gz = GzEncoder::new(b, Compression::Fast); +/// let mut buffer = Vec::new(); +/// gz.read_to_end(&mut buffer)?; +/// Ok(buffer) +/// } +/// ``` +#[derive(Debug)] +pub struct GzEncoder { + inner: deflate::bufread::DeflateEncoder>, + header: Vec, + pos: usize, + eof: bool, +} + +pub fn gz_encoder(header: Vec, r: R, lvl: Compression) + -> GzEncoder +{ + let crc = CrcReader::new(r); + GzEncoder { + inner: deflate::bufread::DeflateEncoder::new(crc, lvl), + header: header, + pos: 0, + eof: false, + } +} + +impl GzEncoder { + /// Creates a new encoder which will use the given compression level. + /// + /// The encoder is not configured specially for the emitted header. For + /// header configuration, see the `Builder` type. + /// + /// The data read from the stream `r` will be compressed and available + /// through the returned reader. 
+ pub fn new(r: R, level: Compression) -> GzEncoder { + Builder::new().buf_read(r, level) + } + + fn read_footer(&mut self, into: &mut [u8]) -> io::Result { + if self.pos == 8 { + return Ok(0); + } + let crc = self.inner.get_ref().crc(); + let ref arr = [ + (crc.sum() >> 0) as u8, + (crc.sum() >> 8) as u8, + (crc.sum() >> 16) as u8, + (crc.sum() >> 24) as u8, + (crc.amount() >> 0) as u8, + (crc.amount() >> 8) as u8, + (crc.amount() >> 16) as u8, + (crc.amount() >> 24) as u8, + ]; + Ok(copy(into, arr, &mut self.pos)) + } +} + +impl GzEncoder { + /// Acquires a reference to the underlying reader. + pub fn get_ref(&self) -> &R { + self.inner.get_ref().get_ref() + } + + /// Acquires a mutable reference to the underlying reader. + /// + /// Note that mutation of the reader may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + self.inner.get_mut().get_mut() + } + + /// Returns the underlying stream, consuming this encoder + pub fn into_inner(self) -> R { + self.inner.into_inner().into_inner() + } +} + +impl Read for GzEncoder { + fn read(&mut self, mut into: &mut [u8]) -> io::Result { + let mut amt = 0; + if self.eof { + return self.read_footer(into); + } else if self.pos < self.header.len() { + amt += copy(into, &self.header, &mut self.pos); + if amt == into.len() { + return Ok(amt); + } + let tmp = into; + into = &mut tmp[amt..]; + } + match try!(self.inner.read(into)) { + 0 => { + self.eof = true; + self.pos = 0; + self.read_footer(into) + } + n => Ok(amt + n), + } + } +} + +impl Write for GzEncoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.get_mut().write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.get_mut().flush() + } +} + + +/// A gzip streaming decoder +/// +/// This structure exposes a [`ReadBuf`] interface that will consume compressed +/// data from the underlying reader and emit uncompressed data. +/// +/// [`ReadBuf`]: https://doc.rust-lang.org/std/io/trait.BufRead.html +/// +/// # Examples +/// +/// ``` +/// use std::io::prelude::*; +/// use std::io; +/// # use flate2::Compression; +/// # use flate2::write::GzEncoder; +/// use flate2::bufread::GzDecoder; +/// +/// # fn main() { +/// # let mut e = GzEncoder::new(Vec::new(), Compression::Default); +/// # e.write(b"Hello World").unwrap(); +/// # let bytes = e.finish().unwrap(); +/// # println!("{}", decode_reader(bytes).unwrap()); +/// # } +/// # +/// // Uncompresses a Gz Encoded vector of bytes and returns a string or error +/// // Here &[u8] implements BufRead +/// +/// fn decode_reader(bytes: Vec) -> io::Result { +/// let mut gz = GzDecoder::new(&bytes[..])?; +/// let mut s = String::new(); +/// gz.read_to_string(&mut s)?; +/// Ok(s) +/// } +/// ``` +#[derive(Debug)] +pub struct GzDecoder { + inner: CrcReader>, + header: Header, + finished: bool, +} + + +impl GzDecoder { + /// Creates a new decoder from the given reader, immediately parsing the + /// gzip header. + /// + /// # Errors + /// + /// If an error is encountered when parsing the gzip header, an error is + /// returned. 
+ pub fn new(mut r: R) -> io::Result> { + let header = try!(read_gz_header(&mut r)); + + let flate = deflate::bufread::DeflateDecoder::new(r); + return Ok(GzDecoder { + inner: CrcReader::new(flate), + header: header, + finished: false, + }); + } + + fn finish(&mut self) -> io::Result<()> { + if self.finished { + return Ok(()); + } + let ref mut buf = [0u8; 8]; + { + let mut len = 0; + + while len < buf.len() { + match try!(self.inner.get_mut().get_mut().read(&mut buf[len..])) { + 0 => return Err(corrupt()), + n => len += n, + } + } + } + + let crc = ((buf[0] as u32) << 0) | ((buf[1] as u32) << 8) | ((buf[2] as u32) << 16) | + ((buf[3] as u32) << 24); + let amt = ((buf[4] as u32) << 0) | ((buf[5] as u32) << 8) | ((buf[6] as u32) << 16) | + ((buf[7] as u32) << 24); + if crc != self.inner.crc().sum() as u32 { + return Err(corrupt()); + } + if amt != self.inner.crc().amount() { + return Err(corrupt()); + } + self.finished = true; + Ok(()) + } +} + +impl GzDecoder { + /// Returns the header associated with this stream. + pub fn header(&self) -> &Header { + &self.header + } + + /// Acquires a reference to the underlying reader. + pub fn get_ref(&self) -> &R { + self.inner.get_ref().get_ref() + } + + /// Acquires a mutable reference to the underlying stream. + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + self.inner.get_mut().get_mut() + } + + /// Consumes this decoder, returning the underlying reader. + pub fn into_inner(self) -> R { + self.inner.into_inner().into_inner() + } +} + +impl Read for GzDecoder { + fn read(&mut self, into: &mut [u8]) -> io::Result { + match try!(self.inner.read(into)) { + 0 => { + try!(self.finish()); + Ok(0) + } + n => Ok(n), + } + } +} + +impl Write for GzDecoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.get_mut().write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.get_mut().flush() + } +} + + + +/// A gzip streaming decoder that decodes all members of a multistream +/// +/// A gzip member consists of a header, compressed data and a trailer. The [gzip +/// specification](https://tools.ietf.org/html/rfc1952), however, allows multiple +/// gzip members to be joined in a single stream. `MultiGzDecoder` will +/// decode all consecutive members while `GzDecoder` will only decompress +/// the first gzip member. The multistream format is commonly used in +/// bioinformatics, for example when using the BGZF compressed data. +/// +/// This structure exposes a [`BufRead`] interface that will consume all gzip members +/// from the underlying reader and emit uncompressed data. 
+/// +/// [`BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html +/// +/// # Examples +/// +/// ``` +/// use std::io::prelude::*; +/// use std::io; +/// # use flate2::Compression; +/// # use flate2::write::GzEncoder; +/// use flate2::bufread::MultiGzDecoder; +/// +/// # fn main() { +/// # let mut e = GzEncoder::new(Vec::new(), Compression::Default); +/// # e.write(b"Hello World").unwrap(); +/// # let bytes = e.finish().unwrap(); +/// # println!("{}", decode_reader(bytes).unwrap()); +/// # } +/// # +/// // Uncompresses a Gz Encoded vector of bytes and returns a string or error +/// // Here &[u8] implements BufRead +/// +/// fn decode_reader(bytes: Vec) -> io::Result { +/// let mut gz = MultiGzDecoder::new(&bytes[..])?; +/// let mut s = String::new(); +/// gz.read_to_string(&mut s)?; +/// Ok(s) +/// } +/// ``` +#[derive(Debug)] +pub struct MultiGzDecoder { + inner: CrcReader>, + header: Header, + finished: bool, +} + + +impl MultiGzDecoder { + /// Creates a new decoder from the given reader, immediately parsing the + /// (first) gzip header. If the gzip stream contains multiple members all will + /// be decoded. + /// + /// # Errors + /// + /// If an error is encountered when parsing the gzip header, an error is + /// returned. + pub fn new(mut r: R) -> io::Result> { + let header = try!(read_gz_header(&mut r)); + + let flate = deflate::bufread::DeflateDecoder::new(r); + return Ok(MultiGzDecoder { + inner: CrcReader::new(flate), + header: header, + finished: false, + }); + } + + fn finish_member(&mut self) -> io::Result { + if self.finished { + return Ok(0); + } + let ref mut buf = [0u8; 8]; + { + let mut len = 0; + + while len < buf.len() { + match try!(self.inner.get_mut().get_mut().read(&mut buf[len..])) { + 0 => return Err(corrupt()), + n => len += n, + } + } + } + + let crc = ((buf[0] as u32) << 0) | ((buf[1] as u32) << 8) | ((buf[2] as u32) << 16) | + ((buf[3] as u32) << 24); + let amt = ((buf[4] as u32) << 0) | ((buf[5] as u32) << 8) | ((buf[6] as u32) << 16) | + ((buf[7] as u32) << 24); + if crc != self.inner.crc().sum() as u32 { + return Err(corrupt()); + } + if amt != self.inner.crc().amount() { + return Err(corrupt()); + } + let remaining = match self.inner.get_mut().get_mut().fill_buf() { + Ok(b) => if b.is_empty() { + self.finished = true; + return Ok(0); + } else { + b.len() + }, + Err(e) => return Err(e), + }; + + let next_header = try!(read_gz_header(self.inner.get_mut().get_mut())); + mem::replace(&mut self.header, next_header); + self.inner.reset(); + self.inner.get_mut().reset_data(); + + Ok(remaining) + } +} + +impl MultiGzDecoder { + /// Returns the current header associated with this stream. + pub fn header(&self) -> &Header { + &self.header + } + + /// Acquires a reference to the underlying reader. + pub fn get_ref(&self) -> &R { + self.inner.get_ref().get_ref() + } + + /// Acquires a mutable reference to the underlying stream. + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + self.inner.get_mut().get_mut() + } + + /// Consumes this decoder, returning the underlying reader. 
+ pub fn into_inner(self) -> R { + self.inner.into_inner().into_inner() + } +} + +impl Read for MultiGzDecoder { + fn read(&mut self, into: &mut [u8]) -> io::Result { + match try!(self.inner.read(into)) { + 0 => match self.finish_member() { + Ok(0) => Ok(0), + Ok(_) => self.read(into), + Err(e) => Err(e), + }, + n => Ok(n), + } + } +} + +impl Write for MultiGzDecoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.get_mut().write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.get_mut().flush() + } +} diff --git a/src/vendor/flate2/src/gz/mod.rs b/src/vendor/flate2/src/gz/mod.rs new file mode 100644 index 0000000000..04ff5fcf07 --- /dev/null +++ b/src/vendor/flate2/src/gz/mod.rs @@ -0,0 +1,344 @@ +use std::env; +use std::ffi::CString; +use std::io::prelude::*; +use std::time; + +use Compression; +use bufreader::BufReader; + +pub static FHCRC: u8 = 1 << 1; +pub static FEXTRA: u8 = 1 << 2; +pub static FNAME: u8 = 1 << 3; +pub static FCOMMENT: u8 = 1 << 4; + +pub mod bufread; +pub mod read; +pub mod write; + + +/// A structure representing the header of a gzip stream. +/// +/// The header can contain metadata about the file that was compressed, if +/// present. +#[derive(PartialEq, Debug)] +pub struct Header { + extra: Option>, + filename: Option>, + comment: Option>, + mtime: u32, +} + +impl Header { + /// Returns the `filename` field of this gzip stream's header, if present. + pub fn filename(&self) -> Option<&[u8]> { + self.filename.as_ref().map(|s| &s[..]) + } + + /// Returns the `extra` field of this gzip stream's header, if present. + pub fn extra(&self) -> Option<&[u8]> { + self.extra.as_ref().map(|s| &s[..]) + } + + /// Returns the `comment` field of this gzip stream's header, if present. + pub fn comment(&self) -> Option<&[u8]> { + self.comment.as_ref().map(|s| &s[..]) + } + + /// This gives the most recent modification time of the original file being compressed. + /// + /// The time is in Unix format, i.e., seconds since 00:00:00 GMT, Jan. 1, 1970. + /// (Note that this may cause problems for MS-DOS and other systems that use local + /// rather than Universal time.) If the compressed data did not come from a file, + /// `mtime` is set to the time at which compression started. + /// `mtime` = 0 means no time stamp is available. + /// + /// The usage of `mtime` is discouraged because of Year 2038 problem. + pub fn mtime(&self) -> u32 { + self.mtime + } + + /// Returns the most recent modification time represented by a date-time type. + /// Returns `None` if the value of the underlying counter is 0, + /// indicating no time stamp is available. + /// + /// + /// The time is measured as seconds since 00:00:00 GMT, Jan. 1 1970. + /// See [`mtime`](#method.mtime) for more detail. + pub fn mtime_as_datetime(&self) -> Option { + if self.mtime == 0 { + None + } else { + let duration = time::Duration::new(u64::from(self.mtime), 0); + let datetime = time::UNIX_EPOCH + duration; + Some(datetime) + } + } +} + +/// A builder structure to create a new gzip Encoder. +/// +/// This structure controls header configuration options such as the filename. 
+/// +/// # Examples +/// +/// ``` +/// use std::io::prelude::*; +/// # use std::io; +/// use std::fs::File; +/// use flate2::GzBuilder; +/// use flate2::Compression; +/// +/// // GzBuilder opens a file and writes a sample string using Builder pattern +/// +/// # fn sample_builder() -> Result<(), io::Error> { +/// let f = File::create("examples/hello_world.gz")?; +/// let mut gz = GzBuilder::new() +/// .filename("hello_world.txt") +/// .comment("test file, please delete") +/// .write(f, Compression::Default); +/// gz.write(b"hello world")?; +/// gz.finish()?; +/// # Ok(()) +/// # } +/// ``` +#[derive(Debug)] +pub struct Builder { + extra: Option>, + filename: Option, + comment: Option, + mtime: u32, +} + +impl Builder { + /// Create a new blank builder with no header by default. + pub fn new() -> Builder { + Builder { + extra: None, + filename: None, + comment: None, + mtime: 0, + } + } + + /// Configure the `mtime` field in the gzip header. + pub fn mtime(mut self, mtime: u32) -> Builder { + self.mtime = mtime; + self + } + + /// Configure the `extra` field in the gzip header. + pub fn extra>>(mut self, extra: T) -> Builder { + self.extra = Some(extra.into()); + self + } + + /// Configure the `filename` field in the gzip header. + /// + /// # Panics + /// + /// Panics if the `filename` slice contains a zero. + pub fn filename>>(mut self, filename: T) -> Builder { + self.filename = Some(CString::new(filename.into()).unwrap()); + self + } + + /// Configure the `comment` field in the gzip header. + /// + /// # Panics + /// + /// Panics if the `comment` slice contains a zero. + pub fn comment>>(mut self, comment: T) -> Builder { + self.comment = Some(CString::new(comment.into()).unwrap()); + self + } + + /// Consume this builder, creating a writer encoder in the process. + /// + /// The data written to the returned encoder will be compressed and then + /// written out to the supplied parameter `w`. + pub fn write(self, w: W, lvl: Compression) -> write::GzEncoder { + write::gz_encoder(self.into_header(lvl), w, lvl) + } + + /// Consume this builder, creating a reader encoder in the process. + /// + /// Data read from the returned encoder will be the compressed version of + /// the data read from the given reader. + pub fn read(self, r: R, lvl: Compression) -> read::GzEncoder { + read::gz_encoder(self.buf_read(BufReader::new(r), lvl)) + } + + /// Consume this builder, creating a reader encoder in the process. + /// + /// Data read from the returned encoder will be the compressed version of + /// the data read from the given reader. 
+ pub fn buf_read(self, r: R, lvl: Compression) -> bufread::GzEncoder + where + R: BufRead, + { + bufread::gz_encoder(self.into_header(lvl), r, lvl) + } + + fn into_header(self, lvl: Compression) -> Vec { + let Builder { + extra, + filename, + comment, + mtime, + } = self; + let mut flg = 0; + let mut header = vec![0u8; 10]; + match extra { + Some(v) => { + flg |= FEXTRA; + header.push((v.len() >> 0) as u8); + header.push((v.len() >> 8) as u8); + header.extend(v); + } + None => {} + } + match filename { + Some(filename) => { + flg |= FNAME; + header.extend(filename.as_bytes_with_nul().iter().map(|x| *x)); + } + None => {} + } + match comment { + Some(comment) => { + flg |= FCOMMENT; + header.extend(comment.as_bytes_with_nul().iter().map(|x| *x)); + } + None => {} + } + header[0] = 0x1f; + header[1] = 0x8b; + header[2] = 8; + header[3] = flg; + header[4] = (mtime >> 0) as u8; + header[5] = (mtime >> 8) as u8; + header[6] = (mtime >> 16) as u8; + header[7] = (mtime >> 24) as u8; + header[8] = match lvl { + Compression::Best => 2, + Compression::Fast => 4, + _ => 0, + }; + + // Typically this byte indicates what OS the gz stream was created on, + // but in an effort to have cross-platform reproducible streams just + // always set this to 255. I'm not sure that if we "correctly" set this + // it'd do anything anyway... + header[9] = 255; + return header; + } +} + +#[cfg(test)] +mod tests { + use std::io::prelude::*; + + use super::{read, write, Builder}; + use Compression::Default; + use rand::{thread_rng, Rng}; + + #[test] + fn roundtrip() { + let mut e = write::GzEncoder::new(Vec::new(), Default); + e.write_all(b"foo bar baz").unwrap(); + let inner = e.finish().unwrap(); + let mut d = read::GzDecoder::new(&inner[..]).unwrap(); + let mut s = String::new(); + d.read_to_string(&mut s).unwrap(); + assert_eq!(s, "foo bar baz"); + } + + #[test] + fn roundtrip_zero() { + let e = write::GzEncoder::new(Vec::new(), Default); + let inner = e.finish().unwrap(); + let mut d = read::GzDecoder::new(&inner[..]).unwrap(); + let mut s = String::new(); + d.read_to_string(&mut s).unwrap(); + assert_eq!(s, ""); + } + + #[test] + fn roundtrip_big() { + let mut real = Vec::new(); + let mut w = write::GzEncoder::new(Vec::new(), Default); + let v = thread_rng().gen_iter::().take(1024).collect::>(); + for _ in 0..200 { + let to_write = &v[..thread_rng().gen_range(0, v.len())]; + real.extend(to_write.iter().map(|x| *x)); + w.write_all(to_write).unwrap(); + } + let result = w.finish().unwrap(); + let mut r = read::GzDecoder::new(&result[..]).unwrap(); + let mut v = Vec::new(); + r.read_to_end(&mut v).unwrap(); + assert!(v == real); + } + + #[test] + fn roundtrip_big2() { + let v = thread_rng() + .gen_iter::() + .take(1024 * 1024) + .collect::>(); + let mut r = read::GzDecoder::new(read::GzEncoder::new(&v[..], Default)).unwrap(); + let mut res = Vec::new(); + r.read_to_end(&mut res).unwrap(); + assert!(res == v); + } + + #[test] + fn fields() { + let r = vec![0, 2, 4, 6]; + let e = Builder::new() + .filename("foo.rs") + .comment("bar") + .extra(vec![0, 1, 2, 3]) + .read(&r[..], Default); + let mut d = read::GzDecoder::new(e).unwrap(); + assert_eq!(d.header().filename(), Some(&b"foo.rs"[..])); + assert_eq!(d.header().comment(), Some(&b"bar"[..])); + assert_eq!(d.header().extra(), Some(&b"\x00\x01\x02\x03"[..])); + let mut res = Vec::new(); + d.read_to_end(&mut res).unwrap(); + assert_eq!(res, vec![0, 2, 4, 6]); + } + + #[test] + fn keep_reading_after_end() { + let mut e = write::GzEncoder::new(Vec::new(), Default); + 
e.write_all(b"foo bar baz").unwrap(); + let inner = e.finish().unwrap(); + let mut d = read::GzDecoder::new(&inner[..]).unwrap(); + let mut s = String::new(); + d.read_to_string(&mut s).unwrap(); + assert_eq!(s, "foo bar baz"); + d.read_to_string(&mut s).unwrap(); + assert_eq!(s, "foo bar baz"); + } + + #[test] + fn qc_reader() { + ::quickcheck::quickcheck(test as fn(_) -> _); + + fn test(v: Vec) -> bool { + let r = read::GzEncoder::new(&v[..], Default); + let mut r = read::GzDecoder::new(r).unwrap(); + let mut v2 = Vec::new(); + r.read_to_end(&mut v2).unwrap(); + v == v2 + } + } + + #[test] + fn flush_after_write() { + let mut f = write::GzEncoder::new(Vec::new(), Default); + write!(f, "Hello world").unwrap(); + f.flush().unwrap(); + } +} diff --git a/src/vendor/flate2/src/gz/read.rs b/src/vendor/flate2/src/gz/read.rs new file mode 100644 index 0000000000..c95e388a5f --- /dev/null +++ b/src/vendor/flate2/src/gz/read.rs @@ -0,0 +1,286 @@ +use std::io::prelude::*; +use std::io; + +use super::{Builder, Header}; +use Compression; +use bufreader::BufReader; +use super::bufread; + +/// A gzip streaming encoder +/// +/// This structure exposes a [`Read`] interface that will read uncompressed data +/// from the underlying reader and expose the compressed version as a [`Read`] +/// interface. +/// +/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html +/// +/// # Examples +/// +/// ``` +/// use std::io::prelude::*; +/// use std::io; +/// use flate2::Compression; +/// use flate2::read::GzEncoder; +/// +/// // Return a vector containing the GZ compressed version of hello world +/// +/// fn gzencode_hello_world() -> io::Result> { +/// let mut ret_vec = [0;100]; +/// let bytestring = b"hello world"; +/// let mut gz = GzEncoder::new(&bytestring[..], Compression::Fast); +/// let count = gz.read(&mut ret_vec)?; +/// Ok(ret_vec[0..count].to_vec()) +/// } +/// ``` +#[derive(Debug)] +pub struct GzEncoder { + inner: bufread::GzEncoder>, +} + +pub fn gz_encoder(inner: bufread::GzEncoder>) + -> GzEncoder +{ + GzEncoder { inner: inner } +} + +impl GzEncoder { + /// Creates a new encoder which will use the given compression level. + /// + /// The encoder is not configured specially for the emitted header. For + /// header configuration, see the `Builder` type. + /// + /// The data read from the stream `r` will be compressed and available + /// through the returned reader. + pub fn new(r: R, level: Compression) -> GzEncoder { + Builder::new().read(r, level) + } +} + +impl GzEncoder { + /// Acquires a reference to the underlying reader. + pub fn get_ref(&self) -> &R { + self.inner.get_ref().get_ref() + } + + /// Acquires a mutable reference to the underlying reader. + /// + /// Note that mutation of the reader may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + self.inner.get_mut().get_mut() + } + + /// Returns the underlying stream, consuming this encoder + pub fn into_inner(self) -> R { + self.inner.into_inner().into_inner() + } +} + +impl Read for GzEncoder { + fn read(&mut self, into: &mut [u8]) -> io::Result { + self.inner.read(into) + } +} + +impl Write for GzEncoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.get_mut().write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.get_mut().flush() + } +} + +/// A gzip streaming decoder +/// +/// This structure exposes a [`Read`] interface that will consume compressed +/// data from the underlying reader and emit uncompressed data. 
+/// +/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html +/// +/// # Examples +/// +/// ``` +/// +/// use std::io::prelude::*; +/// use std::io; +/// # use flate2::Compression; +/// # use flate2::write::GzEncoder; +/// use flate2::read::GzDecoder; +/// +/// # fn main() { +/// # let mut e = GzEncoder::new(Vec::new(), Compression::Default); +/// # e.write(b"Hello World").unwrap(); +/// # let bytes = e.finish().unwrap(); +/// # println!("{}", decode_reader(bytes).unwrap()); +/// # } +/// # +/// // Uncompresses a Gz Encoded vector of bytes and returns a string or error +/// // Here &[u8] implements Read +/// +/// fn decode_reader(bytes: Vec) -> io::Result { +/// let mut gz = GzDecoder::new(&bytes[..])?; +/// let mut s = String::new(); +/// gz.read_to_string(&mut s)?; +/// Ok(s) +/// } +/// ``` +#[derive(Debug)] +pub struct GzDecoder { + inner: bufread::GzDecoder>, +} + +impl GzDecoder { + /// Creates a new decoder from the given reader, immediately parsing the + /// gzip header. + /// + /// # Errors + /// + /// If an error is encountered when parsing the gzip header, an error is + /// returned. + pub fn new(r: R) -> io::Result> { + bufread::GzDecoder::new(BufReader::new(r)).map(|r| GzDecoder { inner: r }) + } +} + +impl GzDecoder { + /// Returns the header associated with this stream. + pub fn header(&self) -> &Header { + self.inner.header() + } + + /// Acquires a reference to the underlying reader. + pub fn get_ref(&self) -> &R { + self.inner.get_ref().get_ref() + } + + /// Acquires a mutable reference to the underlying stream. + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + self.inner.get_mut().get_mut() + } + + /// Consumes this decoder, returning the underlying reader. + pub fn into_inner(self) -> R { + self.inner.into_inner().into_inner() + } +} + +impl Read for GzDecoder { + fn read(&mut self, into: &mut [u8]) -> io::Result { + self.inner.read(into) + } +} + +impl Write for GzDecoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.get_mut().write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.get_mut().flush() + } +} + +/// A gzip streaming decoder that decodes all members of a multistream +/// +/// A gzip member consists of a header, compressed data and a trailer. The [gzip +/// specification](https://tools.ietf.org/html/rfc1952), however, allows multiple +/// gzip members to be joined in a single stream. `MultiGzDecoder` will +/// decode all consecutive members while `GzDecoder` will only decompress the +/// first gzip member. The multistream format is commonly used in bioinformatics, +/// for example when using the BGZF compressed data. +/// +/// This structure exposes a [`Read`] interface that will consume all gzip members +/// from the underlying reader and emit uncompressed data. 
+/// +/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html +/// +/// # Examples +/// +/// ``` +/// use std::io::prelude::*; +/// use std::io; +/// # use flate2::Compression; +/// # use flate2::write::GzEncoder; +/// use flate2::read::MultiGzDecoder; +/// +/// # fn main() { +/// # let mut e = GzEncoder::new(Vec::new(), Compression::Default); +/// # e.write(b"Hello World").unwrap(); +/// # let bytes = e.finish().unwrap(); +/// # println!("{}", decode_reader(bytes).unwrap()); +/// # } +/// # +/// // Uncompresses a Gz Encoded vector of bytes and returns a string or error +/// // Here &[u8] implements Read +/// +/// fn decode_reader(bytes: Vec) -> io::Result { +/// let mut gz = MultiGzDecoder::new(&bytes[..])?; +/// let mut s = String::new(); +/// gz.read_to_string(&mut s)?; +/// Ok(s) +/// } +/// ``` +#[derive(Debug)] +pub struct MultiGzDecoder { + inner: bufread::MultiGzDecoder>, +} + +impl MultiGzDecoder { + /// Creates a new decoder from the given reader, immediately parsing the + /// (first) gzip header. If the gzip stream contains multiple members all will + /// be decoded. + /// + /// # Errors + /// + /// If an error is encountered when parsing the gzip header, an error is + /// returned. + pub fn new(r: R) -> io::Result> { + bufread::MultiGzDecoder::new(BufReader::new(r)).map(|r| MultiGzDecoder { inner: r }) + } +} + +impl MultiGzDecoder { + /// Returns the current header associated with this stream. + pub fn header(&self) -> &Header { + self.inner.header() + } + + /// Acquires a reference to the underlying reader. + pub fn get_ref(&self) -> &R { + self.inner.get_ref().get_ref() + } + + /// Acquires a mutable reference to the underlying stream. + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + self.inner.get_mut().get_mut() + } + + /// Consumes this decoder, returning the underlying reader. + pub fn into_inner(self) -> R { + self.inner.into_inner().into_inner() + } +} + +impl Read for MultiGzDecoder { + fn read(&mut self, into: &mut [u8]) -> io::Result { + self.inner.read(into) + } +} + +impl Write for MultiGzDecoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.get_mut().write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.get_mut().flush() + } +} diff --git a/src/vendor/flate2/src/gz/write.rs b/src/vendor/flate2/src/gz/write.rs new file mode 100644 index 0000000000..a9a10569a1 --- /dev/null +++ b/src/vendor/flate2/src/gz/write.rs @@ -0,0 +1,182 @@ +use std::io::prelude::*; +use std::io; + +#[cfg(feature = "tokio")] +use futures::Poll; +#[cfg(feature = "tokio")] +use tokio_io::{AsyncRead, AsyncWrite}; + +use super::Builder; +use {Compress, Compression}; +use crc::Crc; +use zio; + +/// A gzip streaming encoder +/// +/// This structure exposes a [`Write`] interface that will emit compressed data +/// to the underlying writer `W`. 
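A sketch of the multi-member behaviour described above, assuming two independently written gzip members concatenated by hand:

```
use std::io::prelude::*;
use flate2::read::MultiGzDecoder;
use flate2::write::GzEncoder;
use flate2::Compression;

fn main() {
    // Build a stream containing two complete gzip members back to back.
    let mut first = GzEncoder::new(Vec::new(), Compression::Default);
    first.write_all(b"hello ").unwrap();
    let mut stream = first.finish().unwrap();

    let mut second = GzEncoder::new(Vec::new(), Compression::Default);
    second.write_all(b"world").unwrap();
    stream.extend(second.finish().unwrap());

    // MultiGzDecoder keeps decoding past the first member's trailer.
    let mut dec = MultiGzDecoder::new(&stream[..]).unwrap();
    let mut text = String::new();
    dec.read_to_string(&mut text).unwrap();
    assert_eq!(text, "hello world");
}
```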
+/// +/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html +/// +/// # Examples +/// +/// ``` +/// use std::io::prelude::*; +/// use flate2::Compression; +/// use flate2::write::GzEncoder; +/// +/// // Vec implements Write to print the compressed bytes of sample string +/// # fn main() { +/// +/// let mut e = GzEncoder::new(Vec::new(), Compression::Default); +/// e.write(b"Hello World").unwrap(); +/// println!("{:?}", e.finish().unwrap()); +/// # } +/// ``` +#[derive(Debug)] +pub struct GzEncoder { + inner: zio::Writer, + crc: Crc, + crc_bytes_written: usize, + header: Vec, +} + +pub fn gz_encoder(header: Vec, w: W, lvl: Compression) -> GzEncoder { + GzEncoder { + inner: zio::Writer::new(w, Compress::new(lvl, false)), + crc: Crc::new(), + header: header, + crc_bytes_written: 0, + } +} + +impl GzEncoder { + /// Creates a new encoder which will use the given compression level. + /// + /// The encoder is not configured specially for the emitted header. For + /// header configuration, see the `Builder` type. + /// + /// The data written to the returned encoder will be compressed and then + /// written to the stream `w`. + pub fn new(w: W, level: Compression) -> GzEncoder { + Builder::new().write(w, level) + } + + /// Acquires a reference to the underlying writer. + pub fn get_ref(&self) -> &W { + self.inner.get_ref() + } + + /// Acquires a mutable reference to the underlying writer. + /// + /// Note that mutation of the writer may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut W { + self.inner.get_mut() + } + + /// Attempt to finish this output stream, writing out final chunks of data. + /// + /// Note that this function can only be used once data has finished being + /// written to the output stream. After this function is called then further + /// calls to `write` may result in a panic. + /// + /// # Panics + /// + /// Attempts to write data to this stream may result in a panic after this + /// function is called. + /// + /// # Errors + /// + /// This function will perform I/O to complete this stream, and any I/O + /// errors which occur will be returned from this function. + pub fn try_finish(&mut self) -> io::Result<()> { + try!(self.write_header()); + try!(self.inner.finish()); + + while self.crc_bytes_written < 8 { + let (sum, amt) = (self.crc.sum() as u32, self.crc.amount()); + let buf = [ + (sum >> 0) as u8, + (sum >> 8) as u8, + (sum >> 16) as u8, + (sum >> 24) as u8, + (amt >> 0) as u8, + (amt >> 8) as u8, + (amt >> 16) as u8, + (amt >> 24) as u8, + ]; + let inner = self.inner.get_mut(); + let n = try!(inner.write(&buf[self.crc_bytes_written..])); + self.crc_bytes_written += n; + } + Ok(()) + } + + /// Finish encoding this stream, returning the underlying writer once the + /// encoding is done. + /// + /// Note that this function may not be suitable to call in a situation where + /// the underlying stream is an asynchronous I/O stream. To finish a stream + /// the `try_finish` (or `shutdown`) method should be used instead. To + /// re-acquire ownership of a stream it is safe to call this method after + /// `try_finish` or `shutdown` has returned `Ok`. + /// + /// # Errors + /// + /// This function will perform I/O to complete this stream, and any I/O + /// errors which occur will be returned from this function. 
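A minimal sketch of the write-side encoder with an explicit `finish()`, as documented above:

```
use std::io::prelude::*;
use flate2::write::GzEncoder;
use flate2::Compression;

fn main() {
    let mut encoder = GzEncoder::new(Vec::new(), Compression::Default);
    encoder.write_all(b"stream of bytes").unwrap();
    // finish() flushes the remaining deflate output and appends the
    // CRC32/length trailer before handing back the inner Vec<u8>.
    let gz_bytes = encoder.finish().unwrap();
    println!("wrote {} gzip bytes", gz_bytes.len());
}
```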
+ pub fn finish(mut self) -> io::Result { + try!(self.try_finish()); + Ok(self.inner.take_inner()) + } + + fn write_header(&mut self) -> io::Result<()> { + while self.header.len() > 0 { + let n = try!(self.inner.get_mut().write(&self.header)); + self.header.drain(..n); + } + Ok(()) + } +} + +impl Write for GzEncoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + assert_eq!(self.crc_bytes_written, 0); + try!(self.write_header()); + let n = try!(self.inner.write(buf)); + self.crc.update(&buf[..n]); + Ok(n) + } + + fn flush(&mut self) -> io::Result<()> { + assert_eq!(self.crc_bytes_written, 0); + try!(self.write_header()); + self.inner.flush() + } +} + +#[cfg(feature = "tokio")] +impl AsyncWrite for GzEncoder { + fn shutdown(&mut self) -> Poll<(), io::Error> { + try_nb!(self.try_finish()); + self.get_mut().shutdown() + } +} + +impl Read for GzEncoder { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.get_mut().read(buf) + } +} + +#[cfg(feature = "tokio")] +impl AsyncRead for GzEncoder {} + +impl Drop for GzEncoder { + fn drop(&mut self) { + if self.inner.is_present() { + let _ = self.try_finish(); + } + } +} diff --git a/src/vendor/flate2/src/lib.rs b/src/vendor/flate2/src/lib.rs index 25decfedd9..3846913b9f 100644 --- a/src/vendor/flate2/src/lib.rs +++ b/src/vendor/flate2/src/lib.rs @@ -4,68 +4,121 @@ //! libflate library by providing a streaming encoder/decoder rather than purely //! an in-memory encoder/decoder. //! -//! Like with libflate, flate2 is based on [`miniz.c`][1] +//! Like with [`libflate`], flate2 is based on [`miniz.c`][1] //! //! [1]: https://code.google.com/p/miniz/ +//! [`libflate`]: https://docs.rs/crate/libflate/ //! //! # Organization //! -//! This crate consists mainly of two modules, `read` and `write`. Each +//! This crate consists mainly of two modules, [`read`] and [`write`]. Each //! module contains a number of types used to encode and decode various streams -//! of data. All types in the `write` module work on instances of `Write`, -//! whereas all types in the `read` module work on instances of `Read`. +//! of data. All types in the [`write`] module work on instances of [`Write`], +//! whereas all types in the [`read`] module work on instances of [`Read`]. +//! +//! ``` +//! use flate2::write::GzEncoder; +//! use flate2::Compression; +//! use std::io; +//! use std::io::prelude::*; +//! +//! # fn main() { let _ = run(); } +//! # fn run() -> io::Result<()> { +//! let mut encoder = GzEncoder::new(Vec::new(), Compression::Default); +//! encoder.write(b"Example")?; +//! # Ok(()) +//! # } +//! ``` +//! //! //! Other various types are provided at the top-level of the crate for //! management and dealing with encoders/decoders. //! +//! [`read`]: read/index.html +//! [`write`]: write/index.html +//! [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html +//! [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html +//! //! # Helper traits //! -//! There are two helper traits provided: `FlateReader` and `FlateWriter`. +//! There are two helper traits provided: [`FlateReadExt`] and [`FlateWriteExt`]. //! These provide convenience methods for creating a decoder/encoder out of an //! already existing stream to chain construction. //! +//! [`FlateReadExt`]: trait.FlateReadExt.html +//! [`FlateWriteExt`]: trait.FlateWriteExt.html +//! +//! ``` +//! use flate2::{FlateReadExt, Compression}; +//! use std::io::prelude::*; +//! use std::io; +//! use std::fs::File; +//! +//! # fn main() { +//! # println!("{}", run().unwrap()); +//! # } +//! # +//! 
// Read contents of file with a compression stream, then decompress with GZ +//! +//! # fn run() -> io::Result { +//! let f = File::open("examples/hello_world.txt")?; +//! +//! //gz_encode method comes from FlateReadExt and applies to a std::fs::File +//! let data = f.gz_encode(Compression::Default); +//! let mut buffer = String::new(); +//! +//! //gz_decode method comes from FlateReadExt and applies to a &[u8] +//! &data.gz_decode()?.read_to_string(&mut buffer)?; +//! # Ok(buffer) +//! # } +//! ``` +//! //! # Async I/O //! -//! This crate optionally can support async I/O streams with the Tokio stack via +//! This crate optionally can support async I/O streams with the [Tokio stack] via //! the `tokio` feature of this crate: //! +//! [Tokio stack]: https://tokio.rs/ +//! //! ```toml //! flate2 = { version = "0.2", features = ["tokio"] } //! ``` //! //! All methods are internally capable of working with streams that may return -//! `ErrorKind::WouldBlock` when they're not ready to perform the particular +//! [`ErrorKind::WouldBlock`] when they're not ready to perform the particular //! operation. //! +//! [`ErrorKind::WouldBlock`]: https://doc.rust-lang.org/std/io/enum.ErrorKind.html +//! //! Note that care needs to be taken when using these objects, however. The //! Tokio runtime, in particular, requires that data is fully flushed before //! dropping streams. For compatibility with blocking streams all streams are //! flushed/written when they are dropped, and this is not always a suitable //! time to perform I/O. If I/O streams are flushed before drop, however, then //! these operations will be a noop. - #![doc(html_root_url = "https://docs.rs/flate2/0.2")] #![deny(missing_docs)] +#![deny(missing_debug_implementations)] #![allow(trivial_numeric_casts)] #![cfg_attr(test, deny(warnings))] +#[cfg(feature = "tokio")] +extern crate futures; extern crate libc; #[cfg(test)] -extern crate rand; -#[cfg(test)] extern crate quickcheck; +#[cfg(test)] +extern crate rand; #[cfg(feature = "tokio")] #[macro_use] extern crate tokio_io; -#[cfg(feature = "tokio")] -extern crate futures; use std::io::prelude::*; use std::io; pub use gz::Builder as GzBuilder; pub use gz::Header as GzHeader; -pub use mem::{Compress, Decompress, DataError, Status, Flush}; +pub use mem::{Compress, DataError, Decompress, Flush, Status}; pub use crc::{Crc, CrcReader}; mod bufreader; @@ -77,38 +130,44 @@ mod zio; mod mem; mod zlib; -/// Types which operate over `Read` streams, both encoders and decoders for +/// Types which operate over [`Read`] streams, both encoders and decoders for /// various formats. +/// +/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html pub mod read { - pub use deflate::EncoderReader as DeflateEncoder; - pub use deflate::DecoderReader as DeflateDecoder; - pub use zlib::EncoderReader as ZlibEncoder; - pub use zlib::DecoderReader as ZlibDecoder; - pub use gz::EncoderReader as GzEncoder; - pub use gz::DecoderReader as GzDecoder; - pub use gz::MultiDecoderReader as MultiGzDecoder; + pub use deflate::read::DeflateEncoder; + pub use deflate::read::DeflateDecoder; + pub use zlib::read::ZlibEncoder; + pub use zlib::read::ZlibDecoder; + pub use gz::read::GzEncoder; + pub use gz::read::GzDecoder; + pub use gz::read::MultiGzDecoder; } -/// Types which operate over `Write` streams, both encoders and decoders for +/// Types which operate over [`Write`] streams, both encoders and decoders for /// various formats. 
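A sketch of the helper-trait chaining shown above, using only in-memory buffers instead of a file:

```
use std::io::prelude::*;
use flate2::{Compression, FlateReadExt};

fn main() {
    // gz_encode wraps any Read in a compressing reader; since the result is
    // itself a Read, gz_decode can be chained directly on top of it.
    let data = b"round trip through the helper trait";
    let mut out = Vec::new();
    (&data[..])
        .gz_encode(Compression::Default)
        .gz_decode()
        .unwrap()
        .read_to_end(&mut out)
        .unwrap();
    assert_eq!(&out[..], &data[..]);
}
```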
+/// +/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html pub mod write { - pub use deflate::EncoderWriter as DeflateEncoder; - pub use deflate::DecoderWriter as DeflateDecoder; - pub use zlib::EncoderWriter as ZlibEncoder; - pub use zlib::DecoderWriter as ZlibDecoder; - pub use gz::EncoderWriter as GzEncoder; + pub use deflate::write::DeflateEncoder; + pub use deflate::write::DeflateDecoder; + pub use zlib::write::ZlibEncoder; + pub use zlib::write::ZlibDecoder; + pub use gz::write::GzEncoder; } -/// Types which operate over `BufRead` streams, both encoders and decoders for +/// Types which operate over [`BufRead`] streams, both encoders and decoders for /// various formats. +/// +/// [`BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html pub mod bufread { - pub use deflate::EncoderReaderBuf as DeflateEncoder; - pub use deflate::DecoderReaderBuf as DeflateDecoder; - pub use zlib::EncoderReaderBuf as ZlibEncoder; - pub use zlib::DecoderReaderBuf as ZlibDecoder; - pub use gz::EncoderReaderBuf as GzEncoder; - pub use gz::DecoderReaderBuf as GzDecoder; - pub use gz::MultiDecoderReaderBuf as MultiGzDecoder; + pub use deflate::bufread::DeflateEncoder; + pub use deflate::bufread::DeflateDecoder; + pub use zlib::bufread::ZlibEncoder; + pub use zlib::bufread::ZlibDecoder; + pub use gz::bufread::GzEncoder; + pub use gz::bufread::GzDecoder; + pub use gz::bufread::MultiGzDecoder; } fn _assert_send_sync() { @@ -130,7 +189,7 @@ fn _assert_send_sync() { /// When compressing data, the compression level can be specified by a value in /// this enum. -#[derive(Copy, Clone)] +#[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum Compression { /// No compression is to be performed, this may actually inflate data /// slightly when encoding. @@ -143,6 +202,13 @@ pub enum Compression { Default = 6, } +/// Default to Compression::Default. +impl Default for Compression { + fn default() -> Compression { + Compression::Default + } +} + /// A helper trait to create encoder/decoders with method syntax. pub trait FlateReadExt: Read + Sized { /// Consume this reader to create a compression stream at the specified @@ -222,21 +288,21 @@ impl FlateWriteExt for T {} #[cfg(test)] mod test { use std::io::prelude::*; - use {FlateReadExt, Compression}; + use {Compression, FlateReadExt}; #[test] fn crazy() { let rdr = &mut b"foobar"; let mut res = Vec::new(); rdr.gz_encode(Compression::Default) - .deflate_encode(Compression::Default) - .zlib_encode(Compression::Default) - .zlib_decode() - .deflate_decode() - .gz_decode() - .unwrap() - .read_to_end(&mut res) - .unwrap(); + .deflate_encode(Compression::Default) + .zlib_encode(Compression::Default) + .zlib_decode() + .deflate_decode() + .gz_decode() + .unwrap() + .read_to_end(&mut res) + .unwrap(); assert_eq!(res, b"foobar"); } } diff --git a/src/vendor/flate2/src/mem.rs b/src/vendor/flate2/src/mem.rs index 4f8b5e39ba..3eaf36abe7 100644 --- a/src/vendor/flate2/src/mem.rs +++ b/src/vendor/flate2/src/mem.rs @@ -12,12 +12,16 @@ use ffi; /// Raw in-memory compression stream for blocks of data. /// /// This type is the building block for the I/O streams in the rest of this -/// crate. It requires more management than the `Read`/`Write` API but is +/// crate. It requires more management than the [`Read`]/[`Write`] API but is /// maximally flexible in terms of accepting input from any source and being /// able to produce output to any memory location. /// /// It is recommended to use the I/O stream adaptors over this type as they're /// easier to use. 
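A sketch of the write-side helper trait together with the new `Default` impl for `Compression`; the `gz_encode` method on `FlateWriteExt` is assumed to mirror the read-side helper of the same name:

```
use std::io::prelude::*;
use flate2::{Compression, FlateWriteExt};

fn main() {
    // Compression::default() now resolves to Compression::Default (level 6).
    let mut encoder = Vec::new().gz_encode(Compression::default());
    encoder.write_all(b"helper trait on the write side").unwrap();
    let compressed = encoder.finish().unwrap();
    println!("{} compressed bytes", compressed.len());
}
```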
+/// +/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html +/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html +#[derive(Debug)] pub struct Compress { inner: Stream, } @@ -25,16 +29,21 @@ pub struct Compress { /// Raw in-memory decompression stream for blocks of data. /// /// This type is the building block for the I/O streams in the rest of this -/// crate. It requires more management than the `Read`/`Write` API but is +/// crate. It requires more management than the [`Read`]/[`Write`] API but is /// maximally flexible in terms of accepting input from any source and being /// able to produce output to any memory location. /// /// It is recommended to use the I/O stream adaptors over this type as they're /// easier to use. +/// +/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html +/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html +#[derive(Debug)] pub struct Decompress { inner: Stream, } +#[derive(Debug)] struct Stream { stream_wrapper: ffi::StreamWrapper, total_in: u64, @@ -49,11 +58,14 @@ trait Direction { unsafe fn destroy(stream: *mut ffi::mz_stream) -> c_int; } +#[derive(Debug)] enum DirCompress {} +#[derive(Debug)] enum DirDecompress {} /// Values which indicate the form of flushing to be used when compressing or /// decompressing in-memory data. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum Flush { /// A typical parameter for passing to compression/decompression functions, /// this indicates that the underlying stream to decide how much data to @@ -112,6 +124,7 @@ pub struct DataError(()); /// Possible status results of compressing some data or successfully /// decompressing a block of data. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum Status { /// Indicates success. /// @@ -308,6 +321,12 @@ impl Decompress { /// /// To learn how much data was consumed or how much output was produced, use /// the `total_in` and `total_out` functions before/after this is called. + /// + /// # Errors + /// + /// If the input data to this instance of `Decompress` is not a valid + /// zlib/deflate stream then this function may return an instance of + /// `DataError` to indicate that the stream of input bytes is corrupted. pub fn decompress(&mut self, input: &[u8], output: &mut [u8], @@ -346,6 +365,12 @@ impl Decompress { /// the vector provided or attempt to grow it, so space for the output must /// be reserved in the output vector by the caller before calling this /// function. + /// + /// # Errors + /// + /// If the input data to this instance of `Decompress` is not a valid + /// zlib/deflate stream then this function may return an instance of + /// `DataError` to indicate that the stream of input bytes is corrupted. pub fn decompress_vec(&mut self, input: &[u8], output: &mut Vec, diff --git a/src/vendor/flate2/src/zio.rs b/src/vendor/flate2/src/zio.rs index efb49f9c8e..358ffb3262 100644 --- a/src/vendor/flate2/src/zio.rs +++ b/src/vendor/flate2/src/zio.rs @@ -4,6 +4,7 @@ use std::mem; use {Decompress, Compress, Status, Flush, DataError}; +#[derive(Debug)] pub struct Writer { obj: Option, pub data: D, @@ -132,6 +133,9 @@ impl Writer { // a deque-like strategy. 
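A sketch of the raw in-memory `Decompress` type described above; the compressed input comes from the stream adaptor, and `Flush::Finish` is assumed to be the finishing variant of the `Flush` enum:

```
use std::io::prelude::*;
use flate2::write::ZlibEncoder;
use flate2::{Compression, Decompress, Flush};

fn main() {
    // Produce zlib data with the stream adaptor.
    let mut enc = ZlibEncoder::new(Vec::new(), Compression::Default);
    enc.write_all(b"raw in-memory decompression").unwrap();
    let compressed = enc.finish().unwrap();

    // decompress_vec writes only into the vector's spare capacity, so reserve
    // enough room up front, as the docs above require.
    let mut inflater = Decompress::new(true); // `true` = expect a zlib header
    let mut output = Vec::with_capacity(1024);
    inflater
        .decompress_vec(&compressed, &mut output, Flush::Finish)
        .unwrap();
    assert_eq!(&output[..], &b"raw in-memory decompression"[..]);
}
```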
while self.buf.len() > 0 { let n = try!(self.obj.as_mut().unwrap().write(&self.buf)); + if n == 0 { + return Err(io::ErrorKind::WriteZero.into()) + } self.buf.drain(..n); } Ok(()) diff --git a/src/vendor/flate2/src/zlib.rs b/src/vendor/flate2/src/zlib.rs deleted file mode 100644 index 8babef74d0..0000000000 --- a/src/vendor/flate2/src/zlib.rs +++ /dev/null @@ -1,810 +0,0 @@ -//! ZLIB compression and decompression of streams - -use std::io::prelude::*; -use std::io; -use std::mem; - -#[cfg(feature = "tokio")] -use futures::Poll; -#[cfg(feature = "tokio")] -use tokio_io::{AsyncRead, AsyncWrite}; - -use bufreader::BufReader; -use zio; -use {Compress, Decompress}; - -/// A ZLIB encoder, or compressor. -/// -/// This structure implements a `Write` interface and takes a stream of -/// uncompressed data, writing the compressed data to the wrapped writer. -pub struct EncoderWriter { - inner: zio::Writer, -} - -/// A ZLIB encoder, or compressor. -/// -/// This structure implements a `Read` interface and will read uncompressed -/// data from an underlying stream and emit a stream of compressed data. -pub struct EncoderReader { - inner: EncoderReaderBuf>, -} - -/// A ZLIB encoder, or compressor. -/// -/// This structure implements a `BufRead` interface and will read uncompressed -/// data from an underlying stream and emit a stream of compressed data. -pub struct EncoderReaderBuf { - obj: R, - data: Compress, -} - -/// A ZLIB decoder, or decompressor. -/// -/// This structure implements a `Read` interface and takes a stream of -/// compressed data as input, providing the decompressed data when read from. -pub struct DecoderReader { - inner: DecoderReaderBuf>, -} - -/// A ZLIB decoder, or decompressor. -/// -/// This structure implements a `BufRead` interface and takes a stream of -/// compressed data as input, providing the decompressed data when read from. -pub struct DecoderReaderBuf { - obj: R, - data: Decompress, -} - -/// A ZLIB decoder, or decompressor. -/// -/// This structure implements a `Write` and will emit a stream of decompressed -/// data when fed a stream of compressed data. -pub struct DecoderWriter { - inner: zio::Writer, -} - -impl EncoderWriter { - /// Creates a new encoder which will write compressed data to the stream - /// given at the given compression level. - /// - /// When this encoder is dropped or unwrapped the final pieces of data will - /// be flushed. - pub fn new(w: W, level: ::Compression) -> EncoderWriter { - EncoderWriter { - inner: zio::Writer::new(w, Compress::new(level, true)), - } - } - - /// Acquires a reference to the underlying writer. - pub fn get_ref(&self) -> &W { - self.inner.get_ref() - } - - /// Acquires a mutable reference to the underlying writer. - /// - /// Note that mutating the output/input state of the stream may corrupt this - /// object, so care must be taken when using this method. - pub fn get_mut(&mut self) -> &mut W { - self.inner.get_mut() - } - - /// Resets the state of this encoder entirely, swapping out the output - /// stream for another. - /// - /// This function will finish encoding the current stream into the current - /// output stream before swapping out the two output streams. If the stream - /// cannot be finished an error is returned. - /// - /// After the current stream has been finished, this will reset the internal - /// state of this encoder and replace the output stream with the one - /// provided, returning the previous output stream. 
Future data written to - /// this encoder will be the compressed into the stream `w` provided. - pub fn reset(&mut self, w: W) -> io::Result { - try!(self.inner.finish()); - self.inner.data.reset(); - Ok(self.inner.replace(w)) - } - - /// Attempt to finish this output stream, writing out final chunks of data. - /// - /// Note that this function can only be used once data has finished being - /// written to the output stream. After this function is called then further - /// calls to `write` may result in a panic. - /// - /// # Panics - /// - /// Attempts to write data to this stream may result in a panic after this - /// function is called. - pub fn try_finish(&mut self) -> io::Result<()> { - self.inner.finish() - } - - /// Consumes this encoder, flushing the output stream. - /// - /// This will flush the underlying data stream, close off the compressed - /// stream and, if successful, return the contained writer. - /// - /// Note that this function may not be suitable to call in a situation where - /// the underlying stream is an asynchronous I/O stream. To finish a stream - /// the `try_finish` (or `shutdown`) method should be used instead. To - /// re-acquire ownership of a stream it is safe to call this method after - /// `try_finish` or `shutdown` has returned `Ok`. - pub fn finish(mut self) -> io::Result { - try!(self.inner.finish()); - Ok(self.inner.take_inner()) - } - - /// Consumes this encoder, flushing the output stream. - /// - /// This will flush the underlying data stream and then return the contained - /// writer if the flush succeeded. - /// The compressed stream will not closed but only flushed. This - /// means that obtained byte array can by extended by another deflated - /// stream. To close the stream add the two bytes 0x3 and 0x0. - pub fn flush_finish(mut self) -> io::Result { - try!(self.inner.flush()); - Ok(self.inner.take_inner()) - } - - /// Returns the number of bytes that have been written to this compresor. - /// - /// Note that not all bytes written to this object may be accounted for, - /// there may still be some active buffering. - pub fn total_in(&self) -> u64 { - self.inner.data.total_in() - } - - /// Returns the number of bytes that the compressor has produced. - /// - /// Note that not all bytes may have been written yet, some may still be - /// buffered. - pub fn total_out(&self) -> u64 { - self.inner.data.total_out() - } -} - -impl Write for EncoderWriter { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.inner.write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.inner.flush() - } -} - -#[cfg(feature = "tokio")] -impl AsyncWrite for EncoderWriter { - fn shutdown(&mut self) -> Poll<(), io::Error> { - try_nb!(self.try_finish()); - self.get_mut().shutdown() - } -} - -impl Read for EncoderWriter { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.get_mut().read(buf) - } -} - -#[cfg(feature = "tokio")] -impl AsyncRead for EncoderWriter { -} - -impl EncoderReader { - /// Creates a new encoder which will read uncompressed data from the given - /// stream and emit the compressed stream. - pub fn new(r: R, level: ::Compression) -> EncoderReader { - EncoderReader { - inner: EncoderReaderBuf::new(BufReader::new(r), level), - } - } - - /// Resets the state of this encoder entirely, swapping out the input - /// stream for another. - /// - /// This function will reset the internal state of this encoder and replace - /// the input stream with the one provided, returning the previous input - /// stream. 
Future data read from this encoder will be the compressed - /// version of `r`'s data. - /// - /// Note that there may be currently buffered data when this function is - /// called, and in that case the buffered data is discarded. - pub fn reset(&mut self, r: R) -> R { - self.inner.data.reset(); - self.inner.obj.reset(r) - } - - /// Acquires a reference to the underlying stream - pub fn get_ref(&self) -> &R { - self.inner.get_ref().get_ref() - } - - /// Acquires a mutable reference to the underlying stream - /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - self.inner.get_mut().get_mut() - } - - /// Consumes this encoder, returning the underlying reader. - /// - /// Note that there may be buffered bytes which are not re-acquired as part - /// of this transition. It's recommended to only call this function after - /// EOF has been reached. - pub fn into_inner(self) -> R { - self.inner.into_inner().into_inner() - } - - /// Returns the number of bytes that have been read into this compressor. - /// - /// Note that not all bytes read from the underlying object may be accounted - /// for, there may still be some active buffering. - pub fn total_in(&self) -> u64 { - self.inner.data.total_in() - } - - /// Returns the number of bytes that the compressor has produced. - /// - /// Note that not all bytes may have been read yet, some may still be - /// buffered. - pub fn total_out(&self) -> u64 { - self.inner.data.total_out() - } -} - -impl Read for EncoderReader { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.inner.read(buf) - } -} - -#[cfg(feature = "tokio")] -impl AsyncRead for EncoderReader { -} - -impl Write for EncoderReader { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.get_mut().write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.get_mut().flush() - } -} - -#[cfg(feature = "tokio")] -impl AsyncWrite for EncoderReader { - fn shutdown(&mut self) -> Poll<(), io::Error> { - self.get_mut().shutdown() - } -} - -impl EncoderReaderBuf { - /// Creates a new encoder which will read uncompressed data from the given - /// stream and emit the compressed stream. - pub fn new(r: R, level: ::Compression) -> EncoderReaderBuf { - EncoderReaderBuf { - obj: r, - data: Compress::new(level, true), - } - } - - /// Resets the state of this encoder entirely, swapping out the input - /// stream for another. - /// - /// This function will reset the internal state of this encoder and replace - /// the input stream with the one provided, returning the previous input - /// stream. Future data read from this encoder will be the compressed - /// version of `r`'s data. - pub fn reset(&mut self, r: R) -> R { - self.data.reset(); - mem::replace(&mut self.obj, r) - } - - /// Acquires a reference to the underlying reader - pub fn get_ref(&self) -> &R { - &self.obj - } - - /// Acquires a mutable reference to the underlying stream - /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - &mut self.obj - } - - /// Consumes this encoder, returning the underlying reader. - pub fn into_inner(self) -> R { - self.obj - } - - /// Returns the number of bytes that have been read into this compressor. - /// - /// Note that not all bytes read from the underlying object may be accounted - /// for, there may still be some active buffering. 
- pub fn total_in(&self) -> u64 { - self.data.total_in() - } - - /// Returns the number of bytes that the compressor has produced. - /// - /// Note that not all bytes may have been read yet, some may still be - /// buffered. - pub fn total_out(&self) -> u64 { - self.data.total_out() - } -} - -impl Read for EncoderReaderBuf { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - zio::read(&mut self.obj, &mut self.data, buf) - } -} - -#[cfg(feature = "tokio")] -impl AsyncRead for EncoderReaderBuf { -} - -impl Write for EncoderReaderBuf { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.get_mut().write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.get_mut().flush() - } -} - -#[cfg(feature = "tokio")] -impl AsyncWrite for EncoderReaderBuf { - fn shutdown(&mut self) -> Poll<(), io::Error> { - self.get_mut().shutdown() - } -} - -impl DecoderReader { - /// Creates a new decoder which will decompress data read from the given - /// stream. - pub fn new(r: R) -> DecoderReader { - DecoderReader::new_with_buf(r, vec![0; 32 * 1024]) - } - - /// Same as `new`, but the intermediate buffer for data is specified. - /// - /// Note that the specified buffer will only be used up to its current - /// length. The buffer's capacity will also not grow over time. - pub fn new_with_buf(r: R, buf: Vec) -> DecoderReader { - DecoderReader { - inner: DecoderReaderBuf::new(BufReader::with_buf(buf, r)), - } - } - - /// Resets the state of this decoder entirely, swapping out the input - /// stream for another. - /// - /// This will reset the internal state of this decoder and replace the - /// input stream with the one provided, returning the previous input - /// stream. Future data read from this decoder will be the decompressed - /// version of `r`'s data. - /// - /// Note that there may be currently buffered data when this function is - /// called, and in that case the buffered data is discarded. - pub fn reset(&mut self, r: R) -> R { - self.inner.data = Decompress::new(true); - self.inner.obj.reset(r) - } - - /// Acquires a reference to the underlying stream - pub fn get_ref(&self) -> &R { - self.inner.get_ref().get_ref() - } - - /// Acquires a mutable reference to the underlying stream - /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - self.inner.get_mut().get_mut() - } - - /// Consumes this decoder, returning the underlying reader. - /// - /// Note that there may be buffered bytes which are not re-acquired as part - /// of this transition. It's recommended to only call this function after - /// EOF has been reached. - pub fn into_inner(self) -> R { - self.inner.into_inner().into_inner() - } - - /// Returns the number of bytes that the decompressor has consumed. - /// - /// Note that this will likely be smaller than what the decompressor - /// actually read from the underlying stream due to buffering. - pub fn total_in(&self) -> u64 { - self.inner.total_in() - } - - /// Returns the number of bytes that the decompressor has produced. 
- pub fn total_out(&self) -> u64 { - self.inner.total_out() - } -} - -impl Read for DecoderReader { - fn read(&mut self, into: &mut [u8]) -> io::Result { - self.inner.read(into) - } -} - -#[cfg(feature = "tokio")] -impl AsyncRead for DecoderReader { -} - -impl Write for DecoderReader { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.get_mut().write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.get_mut().flush() - } -} - -#[cfg(feature = "tokio")] -impl AsyncWrite for DecoderReader { - fn shutdown(&mut self) -> Poll<(), io::Error> { - self.get_mut().shutdown() - } -} - -impl DecoderReaderBuf { - /// Creates a new decoder which will decompress data read from the given - /// stream. - pub fn new(r: R) -> DecoderReaderBuf { - DecoderReaderBuf { - obj: r, - data: Decompress::new(true), - } - } - - /// Resets the state of this decoder entirely, swapping out the input - /// stream for another. - /// - /// This will reset the internal state of this decoder and replace the - /// input stream with the one provided, returning the previous input - /// stream. Future data read from this decoder will be the decompressed - /// version of `r`'s data. - pub fn reset(&mut self, r: R) -> R { - self.data = Decompress::new(true); - mem::replace(&mut self.obj, r) - } - - /// Acquires a reference to the underlying stream - pub fn get_ref(&self) -> &R { - &self.obj - } - - /// Acquires a mutable reference to the underlying stream - /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - &mut self.obj - } - - /// Consumes this decoder, returning the underlying reader. - pub fn into_inner(self) -> R { - self.obj - } - - /// Returns the number of bytes that the decompressor has consumed. - /// - /// Note that this will likely be smaller than what the decompressor - /// actually read from the underlying stream due to buffering. - pub fn total_in(&self) -> u64 { - self.data.total_in() - } - - /// Returns the number of bytes that the decompressor has produced. - pub fn total_out(&self) -> u64 { - self.data.total_out() - } -} - -impl Read for DecoderReaderBuf { - fn read(&mut self, into: &mut [u8]) -> io::Result { - zio::read(&mut self.obj, &mut self.data, into) - } -} - -#[cfg(feature = "tokio")] -impl AsyncRead for DecoderReaderBuf { -} - -impl Write for DecoderReaderBuf { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.get_mut().write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.get_mut().flush() - } -} - -#[cfg(feature = "tokio")] -impl AsyncWrite for DecoderReaderBuf { - fn shutdown(&mut self) -> Poll<(), io::Error> { - self.get_mut().shutdown() - } -} - -impl DecoderWriter { - /// Creates a new decoder which will write uncompressed data to the stream. - /// - /// When this decoder is dropped or unwrapped the final pieces of data will - /// be flushed. - pub fn new(w: W) -> DecoderWriter { - DecoderWriter { - inner: zio::Writer::new(w, Decompress::new(true)), - } - } - - /// Acquires a reference to the underlying writer. - pub fn get_ref(&self) -> &W { - self.inner.get_ref() - } - - /// Acquires a mutable reference to the underlying writer. - /// - /// Note that mutating the output/input state of the stream may corrupt this - /// object, so care must be taken when using this method. - pub fn get_mut(&mut self) -> &mut W { - self.inner.get_mut() - } - - /// Resets the state of this decoder entirely, swapping out the output - /// stream for another. 
- /// - /// This will reset the internal state of this decoder and replace the - /// output stream with the one provided, returning the previous output - /// stream. Future data written to this decoder will be decompressed into - /// the output stream `w`. - pub fn reset(&mut self, w: W) -> io::Result { - try!(self.inner.finish()); - self.inner.data = Decompress::new(true); - Ok(self.inner.replace(w)) - } - - /// Attempt to finish this output stream, writing out final chunks of data. - /// - /// Note that this function can only be used once data has finished being - /// written to the output stream. After this function is called then further - /// calls to `write` may result in a panic. - /// - /// # Panics - /// - /// Attempts to write data to this stream may result in a panic after this - /// function is called. - pub fn try_finish(&mut self) -> io::Result<()> { - self.inner.finish() - } - - /// Consumes this encoder, flushing the output stream. - /// - /// This will flush the underlying data stream and then return the contained - /// writer if the flush succeeded. - /// - /// Note that this function may not be suitable to call in a situation where - /// the underlying stream is an asynchronous I/O stream. To finish a stream - /// the `try_finish` (or `shutdown`) method should be used instead. To - /// re-acquire ownership of a stream it is safe to call this method after - /// `try_finish` or `shutdown` has returned `Ok`. - pub fn finish(mut self) -> io::Result { - try!(self.inner.finish()); - Ok(self.inner.take_inner()) - } - - /// Returns the number of bytes that the decompressor has consumed for - /// decompression. - /// - /// Note that this will likely be smaller than the number of bytes - /// successfully written to this stream due to internal buffering. - pub fn total_in(&self) -> u64 { - self.inner.data.total_in() - } - - /// Returns the number of bytes that the decompressor has written to its - /// output stream. 
- pub fn total_out(&self) -> u64 { - self.inner.data.total_out() - } -} - -impl Write for DecoderWriter { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.inner.write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.inner.flush() - } -} - -#[cfg(feature = "tokio")] -impl AsyncWrite for DecoderWriter { - fn shutdown(&mut self) -> Poll<(), io::Error> { - try_nb!(self.inner.finish()); - self.inner.get_mut().shutdown() - } -} - -impl Read for DecoderWriter { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.inner.get_mut().read(buf) - } -} - -#[cfg(feature = "tokio")] -impl AsyncRead for DecoderWriter { -} - -#[cfg(test)] -mod tests { - use std::io::prelude::*; - use std::io; - - use rand::{thread_rng, Rng}; - - use zlib::{EncoderWriter, EncoderReader, DecoderReader, DecoderWriter}; - use Compression::Default; - - #[test] - fn roundtrip() { - let mut real = Vec::new(); - let mut w = EncoderWriter::new(Vec::new(), Default); - let v = thread_rng().gen_iter::().take(1024).collect::>(); - for _ in 0..200 { - let to_write = &v[..thread_rng().gen_range(0, v.len())]; - real.extend(to_write.iter().map(|x| *x)); - w.write_all(to_write).unwrap(); - } - let result = w.finish().unwrap(); - let mut r = DecoderReader::new(&result[..]); - let mut ret = Vec::new(); - r.read_to_end(&mut ret).unwrap(); - assert!(ret == real); - } - - #[test] - fn drop_writes() { - let mut data = Vec::new(); - EncoderWriter::new(&mut data, Default).write_all(b"foo").unwrap(); - let mut r = DecoderReader::new(&data[..]); - let mut ret = Vec::new(); - r.read_to_end(&mut ret).unwrap(); - assert!(ret == b"foo"); - } - - #[test] - fn total_in() { - let mut real = Vec::new(); - let mut w = EncoderWriter::new(Vec::new(), Default); - let v = thread_rng().gen_iter::().take(1024).collect::>(); - for _ in 0..200 { - let to_write = &v[..thread_rng().gen_range(0, v.len())]; - real.extend(to_write.iter().map(|x| *x)); - w.write_all(to_write).unwrap(); - } - let mut result = w.finish().unwrap(); - - let result_len = result.len(); - - for _ in 0..200 { - result.extend(v.iter().map(|x| *x)); - } - - let mut r = DecoderReader::new(&result[..]); - let mut ret = Vec::new(); - r.read_to_end(&mut ret).unwrap(); - assert!(ret == real); - assert_eq!(r.total_in(), result_len as u64); - } - - #[test] - fn roundtrip2() { - let v = thread_rng() - .gen_iter::() - .take(1024 * 1024) - .collect::>(); - let mut r = DecoderReader::new(EncoderReader::new(&v[..], Default)); - let mut ret = Vec::new(); - r.read_to_end(&mut ret).unwrap(); - assert_eq!(ret, v); - } - - #[test] - fn roundtrip3() { - let v = thread_rng() - .gen_iter::() - .take(1024 * 1024) - .collect::>(); - let mut w = EncoderWriter::new(DecoderWriter::new(Vec::new()), Default); - w.write_all(&v).unwrap(); - let w = w.finish().unwrap().finish().unwrap(); - assert!(w == v); - } - - #[test] - fn reset_decoder() { - let v = thread_rng() - .gen_iter::() - .take(1024 * 1024) - .collect::>(); - let mut w = EncoderWriter::new(Vec::new(), Default); - w.write_all(&v).unwrap(); - let data = w.finish().unwrap(); - - { - let (mut a, mut b, mut c) = (Vec::new(), Vec::new(), Vec::new()); - let mut r = DecoderReader::new(&data[..]); - r.read_to_end(&mut a).unwrap(); - r.reset(&data); - r.read_to_end(&mut b).unwrap(); - - let mut r = DecoderReader::new(&data[..]); - r.read_to_end(&mut c).unwrap(); - assert!(a == b && b == c && c == v); - } - - { - let mut w = DecoderWriter::new(Vec::new()); - w.write_all(&data).unwrap(); - let a = w.reset(Vec::new()).unwrap(); - 
w.write_all(&data).unwrap(); - let b = w.finish().unwrap(); - - let mut w = DecoderWriter::new(Vec::new()); - w.write_all(&data).unwrap(); - let c = w.finish().unwrap(); - assert!(a == b && b == c && c == v); - } - } - - #[test] - fn bad_input() { - // regress tests: previously caused a panic on drop - let mut out: Vec = Vec::new(); - let data: Vec = (0..255).cycle().take(1024).collect(); - let mut w = DecoderWriter::new(&mut out); - match w.write_all(&data[..]) { - Ok(_) => panic!("Expected an error to be returned!"), - Err(e) => assert_eq!(e.kind(), io::ErrorKind::InvalidInput), - } - } - - #[test] - fn qc_reader() { - ::quickcheck::quickcheck(test as fn(_) -> _); - - fn test(v: Vec) -> bool { - let mut r = DecoderReader::new(EncoderReader::new(&v[..], Default)); - let mut v2 = Vec::new(); - r.read_to_end(&mut v2).unwrap(); - v == v2 - } - } - - #[test] - fn qc_writer() { - ::quickcheck::quickcheck(test as fn(_) -> _); - - fn test(v: Vec) -> bool { - let mut w = EncoderWriter::new(DecoderWriter::new(Vec::new()), Default); - w.write_all(&v).unwrap(); - v == w.finish().unwrap().finish().unwrap() - } - } -} diff --git a/src/vendor/flate2/src/zlib/bufread.rs b/src/vendor/flate2/src/zlib/bufread.rs new file mode 100644 index 0000000000..a21a905f61 --- /dev/null +++ b/src/vendor/flate2/src/zlib/bufread.rs @@ -0,0 +1,259 @@ +use std::io::prelude::*; +use std::io; +use std::mem; + +#[cfg(feature = "tokio")] +use futures::Poll; +#[cfg(feature = "tokio")] +use tokio_io::{AsyncRead, AsyncWrite}; + +use zio; +use {Compress, Decompress}; + +/// A ZLIB encoder, or compressor. +/// +/// This structure implements a [`BufRead`] interface and will read uncompressed +/// data from an underlying stream and emit a stream of compressed data. +/// +/// [`BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html +/// +/// # Examples +/// +/// ``` +/// use std::io::prelude::*; +/// use flate2::Compression; +/// use flate2::bufread::ZlibEncoder; +/// use std::fs::File; +/// use std::io::BufReader; +/// +/// // Use a buffered file to compress contents into a Vec +/// +/// # fn open_hello_world() -> std::io::Result> { +/// let f = File::open("examples/hello_world.txt")?; +/// let b = BufReader::new(f); +/// let mut z = ZlibEncoder::new(b, Compression::Fast); +/// let mut buffer = Vec::new(); +/// z.read_to_end(&mut buffer)?; +/// # Ok(buffer) +/// # } +/// ``` +#[derive(Debug)] +pub struct ZlibEncoder { + obj: R, + data: Compress, +} + + +impl ZlibEncoder { + /// Creates a new encoder which will read uncompressed data from the given + /// stream and emit the compressed stream. + pub fn new(r: R, level: ::Compression) -> ZlibEncoder { + ZlibEncoder { + obj: r, + data: Compress::new(level, true), + } + } +} + +pub fn reset_encoder_data(zlib: &mut ZlibEncoder) { + zlib.data.reset() +} + +impl ZlibEncoder { + /// Resets the state of this encoder entirely, swapping out the input + /// stream for another. + /// + /// This function will reset the internal state of this encoder and replace + /// the input stream with the one provided, returning the previous input + /// stream. Future data read from this encoder will be the compressed + /// version of `r`'s data. 
+ pub fn reset(&mut self, r: R) -> R { + reset_encoder_data(self); + mem::replace(&mut self.obj, r) + } + + /// Acquires a reference to the underlying reader + pub fn get_ref(&self) -> &R { + &self.obj + } + + /// Acquires a mutable reference to the underlying stream + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + &mut self.obj + } + + /// Consumes this encoder, returning the underlying reader. + pub fn into_inner(self) -> R { + self.obj + } + + /// Returns the number of bytes that have been read into this compressor. + /// + /// Note that not all bytes read from the underlying object may be accounted + /// for, there may still be some active buffering. + pub fn total_in(&self) -> u64 { + self.data.total_in() + } + + /// Returns the number of bytes that the compressor has produced. + /// + /// Note that not all bytes may have been read yet, some may still be + /// buffered. + pub fn total_out(&self) -> u64 { + self.data.total_out() + } +} + +impl Read for ZlibEncoder { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + zio::read(&mut self.obj, &mut self.data, buf) + } +} + +#[cfg(feature = "tokio")] +impl AsyncRead for ZlibEncoder {} + +impl Write for ZlibEncoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.get_mut().write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.get_mut().flush() + } +} + +#[cfg(feature = "tokio")] +impl AsyncWrite for ZlibEncoder { + fn shutdown(&mut self) -> Poll<(), io::Error> { + self.get_mut().shutdown() + } +} + +/// A ZLIB decoder, or decompressor. +/// +/// This structure implements a [`BufRead`] interface and takes a stream of +/// compressed data as input, providing the decompressed data when read from. +/// +/// [`BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html +/// +/// # Examples +/// +/// ``` +/// use std::io::prelude::*; +/// use std::io; +/// # use flate2::Compression; +/// # use flate2::write::ZlibEncoder; +/// use flate2::bufread::ZlibDecoder; +/// +/// # fn main() { +/// # let mut e = ZlibEncoder::new(Vec::new(), Compression::Default); +/// # e.write(b"Hello World").unwrap(); +/// # let bytes = e.finish().unwrap(); +/// # println!("{}", decode_bufreader(bytes).unwrap()); +/// # } +/// # +/// // Uncompresses a Zlib Encoded vector of bytes and returns a string or error +/// // Here &[u8] implements BufRead +/// +/// fn decode_bufreader(bytes: Vec) -> io::Result { +/// let mut z = ZlibDecoder::new(&bytes[..]); +/// let mut s = String::new(); +/// z.read_to_string(&mut s)?; +/// Ok(s) +/// } +/// ``` +#[derive(Debug)] +pub struct ZlibDecoder { + obj: R, + data: Decompress, +} + +impl ZlibDecoder { + /// Creates a new decoder which will decompress data read from the given + /// stream. + pub fn new(r: R) -> ZlibDecoder { + ZlibDecoder { + obj: r, + data: Decompress::new(true), + } + } +} + +pub fn reset_decoder_data(zlib: &mut ZlibDecoder) { + zlib.data = Decompress::new(true); +} + +impl ZlibDecoder { + /// Resets the state of this decoder entirely, swapping out the input + /// stream for another. + /// + /// This will reset the internal state of this decoder and replace the + /// input stream with the one provided, returning the previous input + /// stream. Future data read from this decoder will be the decompressed + /// version of `r`'s data. 
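A sketch of `reset` on the buffered encoder above, relying on `&[u8]` implementing `BufRead`:

```
use std::io::prelude::*;
use flate2::bufread::ZlibEncoder;
use flate2::Compression;

fn main() {
    // &[u8] implements BufRead, so a slice can feed the bufread encoder directly.
    let mut z = ZlibEncoder::new(&b"first input"[..], Compression::Default);
    let mut first = Vec::new();
    z.read_to_end(&mut first).unwrap();

    // reset() reuses the compressor for a new input and hands back the old reader.
    let _old = z.reset(&b"second input"[..]);
    let mut second = Vec::new();
    z.read_to_end(&mut second).unwrap();
    println!("{} and {} compressed bytes", first.len(), second.len());
}
```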
+ pub fn reset(&mut self, r: R) -> R { + reset_decoder_data(self); + mem::replace(&mut self.obj, r) + } + + /// Acquires a reference to the underlying stream + pub fn get_ref(&self) -> &R { + &self.obj + } + + /// Acquires a mutable reference to the underlying stream + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + &mut self.obj + } + + /// Consumes this decoder, returning the underlying reader. + pub fn into_inner(self) -> R { + self.obj + } + + /// Returns the number of bytes that the decompressor has consumed. + /// + /// Note that this will likely be smaller than what the decompressor + /// actually read from the underlying stream due to buffering. + pub fn total_in(&self) -> u64 { + self.data.total_in() + } + + /// Returns the number of bytes that the decompressor has produced. + pub fn total_out(&self) -> u64 { + self.data.total_out() + } +} + +impl Read for ZlibDecoder { + fn read(&mut self, into: &mut [u8]) -> io::Result { + zio::read(&mut self.obj, &mut self.data, into) + } +} + +#[cfg(feature = "tokio")] +impl AsyncRead for ZlibDecoder {} + +impl Write for ZlibDecoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.get_mut().write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.get_mut().flush() + } +} + +#[cfg(feature = "tokio")] +impl AsyncWrite for ZlibDecoder { + fn shutdown(&mut self) -> Poll<(), io::Error> { + self.get_mut().shutdown() + } +} diff --git a/src/vendor/flate2/src/zlib/mod.rs b/src/vendor/flate2/src/zlib/mod.rs new file mode 100644 index 0000000000..b04e40b826 --- /dev/null +++ b/src/vendor/flate2/src/zlib/mod.rs @@ -0,0 +1,164 @@ +pub mod bufread; +pub mod read; +pub mod write; + + +#[cfg(test)] +mod tests { + use std::io::prelude::*; + use std::io; + + use rand::{thread_rng, Rng}; + + use zlib::{read, write}; + use Compression::Default; + + #[test] + fn roundtrip() { + let mut real = Vec::new(); + let mut w = write::ZlibEncoder::new(Vec::new(), Default); + let v = thread_rng().gen_iter::().take(1024).collect::>(); + for _ in 0..200 { + let to_write = &v[..thread_rng().gen_range(0, v.len())]; + real.extend(to_write.iter().map(|x| *x)); + w.write_all(to_write).unwrap(); + } + let result = w.finish().unwrap(); + let mut r = read::ZlibDecoder::new(&result[..]); + let mut ret = Vec::new(); + r.read_to_end(&mut ret).unwrap(); + assert!(ret == real); + } + + #[test] + fn drop_writes() { + let mut data = Vec::new(); + write::ZlibEncoder::new(&mut data, Default) + .write_all(b"foo") + .unwrap(); + let mut r = read::ZlibDecoder::new(&data[..]); + let mut ret = Vec::new(); + r.read_to_end(&mut ret).unwrap(); + assert!(ret == b"foo"); + } + + #[test] + fn total_in() { + let mut real = Vec::new(); + let mut w = write::ZlibEncoder::new(Vec::new(), Default); + let v = thread_rng().gen_iter::().take(1024).collect::>(); + for _ in 0..200 { + let to_write = &v[..thread_rng().gen_range(0, v.len())]; + real.extend(to_write.iter().map(|x| *x)); + w.write_all(to_write).unwrap(); + } + let mut result = w.finish().unwrap(); + + let result_len = result.len(); + + for _ in 0..200 { + result.extend(v.iter().map(|x| *x)); + } + + let mut r = read::ZlibDecoder::new(&result[..]); + let mut ret = Vec::new(); + r.read_to_end(&mut ret).unwrap(); + assert!(ret == real); + assert_eq!(r.total_in(), result_len as u64); + } + + #[test] + fn roundtrip2() { + let v = thread_rng() + .gen_iter::() + .take(1024 * 1024) + .collect::>(); + let mut r = 
read::ZlibDecoder::new(read::ZlibEncoder::new(&v[..], Default)); + let mut ret = Vec::new(); + r.read_to_end(&mut ret).unwrap(); + assert_eq!(ret, v); + } + + #[test] + fn roundtrip3() { + let v = thread_rng() + .gen_iter::() + .take(1024 * 1024) + .collect::>(); + let mut w = write::ZlibEncoder::new(write::ZlibDecoder::new(Vec::new()), Default); + w.write_all(&v).unwrap(); + let w = w.finish().unwrap().finish().unwrap(); + assert!(w == v); + } + + #[test] + fn reset_decoder() { + let v = thread_rng() + .gen_iter::() + .take(1024 * 1024) + .collect::>(); + let mut w = write::ZlibEncoder::new(Vec::new(), Default); + w.write_all(&v).unwrap(); + let data = w.finish().unwrap(); + + { + let (mut a, mut b, mut c) = (Vec::new(), Vec::new(), Vec::new()); + let mut r = read::ZlibDecoder::new(&data[..]); + r.read_to_end(&mut a).unwrap(); + r.reset(&data); + r.read_to_end(&mut b).unwrap(); + + let mut r = read::ZlibDecoder::new(&data[..]); + r.read_to_end(&mut c).unwrap(); + assert!(a == b && b == c && c == v); + } + + { + let mut w = write::ZlibDecoder::new(Vec::new()); + w.write_all(&data).unwrap(); + let a = w.reset(Vec::new()).unwrap(); + w.write_all(&data).unwrap(); + let b = w.finish().unwrap(); + + let mut w = write::ZlibDecoder::new(Vec::new()); + w.write_all(&data).unwrap(); + let c = w.finish().unwrap(); + assert!(a == b && b == c && c == v); + } + } + + #[test] + fn bad_input() { + // regress tests: previously caused a panic on drop + let mut out: Vec = Vec::new(); + let data: Vec = (0..255).cycle().take(1024).collect(); + let mut w = write::ZlibDecoder::new(&mut out); + match w.write_all(&data[..]) { + Ok(_) => panic!("Expected an error to be returned!"), + Err(e) => assert_eq!(e.kind(), io::ErrorKind::InvalidInput), + } + } + + #[test] + fn qc_reader() { + ::quickcheck::quickcheck(test as fn(_) -> _); + + fn test(v: Vec) -> bool { + let mut r = read::ZlibDecoder::new(read::ZlibEncoder::new(&v[..], Default)); + let mut v2 = Vec::new(); + r.read_to_end(&mut v2).unwrap(); + v == v2 + } + } + + #[test] + fn qc_writer() { + ::quickcheck::quickcheck(test as fn(_) -> _); + + fn test(v: Vec) -> bool { + let mut w = write::ZlibEncoder::new(write::ZlibDecoder::new(Vec::new()), Default); + w.write_all(&v).unwrap(); + v == w.finish().unwrap().finish().unwrap() + } + } +} diff --git a/src/vendor/flate2/src/zlib/read.rs b/src/vendor/flate2/src/zlib/read.rs new file mode 100644 index 0000000000..a2eece78dd --- /dev/null +++ b/src/vendor/flate2/src/zlib/read.rs @@ -0,0 +1,266 @@ +use std::io::prelude::*; +use std::io; + +#[cfg(feature = "tokio")] +use futures::Poll; +#[cfg(feature = "tokio")] +use tokio_io::{AsyncRead, AsyncWrite}; + +use bufreader::BufReader; +use super::bufread; + +/// A ZLIB encoder, or compressor. +/// +/// This structure implements a [`Read`] interface and will read uncompressed +/// data from an underlying stream and emit a stream of compressed data. 
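A sketch of the write-side zlib decoder exercised by the tests above, which is fed compressed bytes and accumulates the decompressed output in its inner writer:

```
use std::io::prelude::*;
use flate2::write::{ZlibDecoder, ZlibEncoder};
use flate2::Compression;

fn main() {
    let mut enc = ZlibEncoder::new(Vec::new(), Compression::Default);
    enc.write_all(b"push-based decoding").unwrap();
    let compressed = enc.finish().unwrap();

    // Push the compressed bytes through the decoder; finish() returns the
    // inner Vec<u8> holding the decompressed data.
    let mut dec = ZlibDecoder::new(Vec::new());
    dec.write_all(&compressed).unwrap();
    assert_eq!(dec.finish().unwrap(), b"push-based decoding".to_vec());
}
```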
+/// +/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html +/// +/// # Examples +/// +/// ``` +/// use std::io::prelude::*; +/// use flate2::Compression; +/// use flate2::read::ZlibEncoder; +/// use std::fs::File; +/// +/// // Open example file and compress the contents using Read interface +/// +/// # fn open_hello_world() -> std::io::Result> { +/// let f = File::open("examples/hello_world.txt")?; +/// let mut z = ZlibEncoder::new(f, Compression::Fast); +/// let mut buffer = [0;50]; +/// let byte_count = z.read(&mut buffer)?; +/// # Ok(buffer[0..byte_count].to_vec()) +/// # } +/// ``` +#[derive(Debug)] +pub struct ZlibEncoder { + inner: bufread::ZlibEncoder>, +} + +impl ZlibEncoder { + /// Creates a new encoder which will read uncompressed data from the given + /// stream and emit the compressed stream. + pub fn new(r: R, level: ::Compression) -> ZlibEncoder { + ZlibEncoder { + inner: bufread::ZlibEncoder::new(BufReader::new(r), level), + } + } +} + +impl ZlibEncoder { + /// Resets the state of this encoder entirely, swapping out the input + /// stream for another. + /// + /// This function will reset the internal state of this encoder and replace + /// the input stream with the one provided, returning the previous input + /// stream. Future data read from this encoder will be the compressed + /// version of `r`'s data. + /// + /// Note that there may be currently buffered data when this function is + /// called, and in that case the buffered data is discarded. + pub fn reset(&mut self, r: R) -> R { + super::bufread::reset_encoder_data(&mut self.inner); + self.inner.get_mut().reset(r) + } + + /// Acquires a reference to the underlying stream + pub fn get_ref(&self) -> &R { + self.inner.get_ref().get_ref() + } + + /// Acquires a mutable reference to the underlying stream + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + self.inner.get_mut().get_mut() + } + + /// Consumes this encoder, returning the underlying reader. + /// + /// Note that there may be buffered bytes which are not re-acquired as part + /// of this transition. It's recommended to only call this function after + /// EOF has been reached. + pub fn into_inner(self) -> R { + self.inner.into_inner().into_inner() + } + + /// Returns the number of bytes that have been read into this compressor. + /// + /// Note that not all bytes read from the underlying object may be accounted + /// for, there may still be some active buffering. + pub fn total_in(&self) -> u64 { + self.inner.total_in() + } + + /// Returns the number of bytes that the compressor has produced. + /// + /// Note that not all bytes may have been read yet, some may still be + /// buffered. + pub fn total_out(&self) -> u64 { + self.inner.total_out() + } +} + +impl Read for ZlibEncoder { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.inner.read(buf) + } +} + +#[cfg(feature = "tokio")] +impl AsyncRead for ZlibEncoder {} + +impl Write for ZlibEncoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.get_mut().write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.get_mut().flush() + } +} + +#[cfg(feature = "tokio")] +impl AsyncWrite for ZlibEncoder { + fn shutdown(&mut self) -> Poll<(), io::Error> { + self.get_mut().shutdown() + } +} + +/// A ZLIB decoder, or decompressor. 
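A sketch of the `total_in`/`total_out` counters documented above; after reading to EOF they cover the whole stream:

```
use std::io::prelude::*;
use flate2::read::ZlibEncoder;
use flate2::Compression;

fn main() {
    let input = vec![b'z'; 4096];
    let mut enc = ZlibEncoder::new(&input[..], Compression::Default);
    let mut compressed = Vec::new();
    enc.read_to_end(&mut compressed).unwrap();
    // total_in counts uncompressed bytes consumed, total_out compressed bytes produced.
    println!("consumed {} bytes, produced {} bytes", enc.total_in(), enc.total_out());
}
```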
+/// +/// This structure implements a [`Read`] interface and takes a stream of +/// compressed data as input, providing the decompressed data when read from. +/// +/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html +/// +/// # Examples +/// +/// ``` +/// use std::io::prelude::*; +/// use std::io; +/// # use flate2::Compression; +/// # use flate2::write::ZlibEncoder; +/// use flate2::read::ZlibDecoder; +/// +/// # fn main() { +/// # let mut e = ZlibEncoder::new(Vec::new(), Compression::Default); +/// # e.write(b"Hello World").unwrap(); +/// # let bytes = e.finish().unwrap(); +/// # println!("{}", decode_reader(bytes).unwrap()); +/// # } +/// # +/// // Uncompresses a Zlib Encoded vector of bytes and returns a string or error +/// // Here &[u8] implements Read +/// +/// fn decode_reader(bytes: Vec) -> io::Result { +/// let mut z = ZlibDecoder::new(&bytes[..]); +/// let mut s = String::new(); +/// z.read_to_string(&mut s)?; +/// Ok(s) +/// } +/// ``` +#[derive(Debug)] +pub struct ZlibDecoder { + inner: bufread::ZlibDecoder>, +} + + +impl ZlibDecoder { + /// Creates a new decoder which will decompress data read from the given + /// stream. + pub fn new(r: R) -> ZlibDecoder { + ZlibDecoder::new_with_buf(r, vec![0; 32 * 1024]) + } + + /// Same as `new`, but the intermediate buffer for data is specified. + /// + /// Note that the specified buffer will only be used up to its current + /// length. The buffer's capacity will also not grow over time. + pub fn new_with_buf(r: R, buf: Vec) -> ZlibDecoder { + ZlibDecoder { + inner: bufread::ZlibDecoder::new(BufReader::with_buf(buf, r)), + } + } +} + +impl ZlibDecoder { + /// Resets the state of this decoder entirely, swapping out the input + /// stream for another. + /// + /// This will reset the internal state of this decoder and replace the + /// input stream with the one provided, returning the previous input + /// stream. Future data read from this decoder will be the decompressed + /// version of `r`'s data. + /// + /// Note that there may be currently buffered data when this function is + /// called, and in that case the buffered data is discarded. + pub fn reset(&mut self, r: R) -> R { + super::bufread::reset_decoder_data(&mut self.inner); + self.inner.get_mut().reset(r) + } + + /// Acquires a reference to the underlying stream + pub fn get_ref(&self) -> &R { + self.inner.get_ref().get_ref() + } + + /// Acquires a mutable reference to the underlying stream + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + self.inner.get_mut().get_mut() + } + + /// Consumes this decoder, returning the underlying reader. + /// + /// Note that there may be buffered bytes which are not re-acquired as part + /// of this transition. It's recommended to only call this function after + /// EOF has been reached. + pub fn into_inner(self) -> R { + self.inner.into_inner().into_inner() + } + + /// Returns the number of bytes that the decompressor has consumed. + /// + /// Note that this will likely be smaller than what the decompressor + /// actually read from the underlying stream due to buffering. + pub fn total_in(&self) -> u64 { + self.inner.total_in() + } + + /// Returns the number of bytes that the decompressor has produced. 
+ pub fn total_out(&self) -> u64 { + self.inner.total_out() + } +} + +impl Read for ZlibDecoder { + fn read(&mut self, into: &mut [u8]) -> io::Result { + self.inner.read(into) + } +} + +#[cfg(feature = "tokio")] +impl AsyncRead for ZlibDecoder {} + +impl Write for ZlibDecoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.get_mut().write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.get_mut().flush() + } +} + +#[cfg(feature = "tokio")] +impl AsyncWrite for ZlibDecoder { + fn shutdown(&mut self) -> Poll<(), io::Error> { + self.get_mut().shutdown() + } +} diff --git a/src/vendor/flate2/src/zlib/write.rs b/src/vendor/flate2/src/zlib/write.rs new file mode 100644 index 0000000000..52e4fc28e5 --- /dev/null +++ b/src/vendor/flate2/src/zlib/write.rs @@ -0,0 +1,351 @@ +use std::io::prelude::*; +use std::io; + +#[cfg(feature = "tokio")] +use futures::Poll; +#[cfg(feature = "tokio")] +use tokio_io::{AsyncRead, AsyncWrite}; + +use zio; +use {Compress, Decompress}; + +/// A ZLIB encoder, or compressor. +/// +/// This structure implements a [`Write`] interface and takes a stream of +/// uncompressed data, writing the compressed data to the wrapped writer. +/// +/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html +/// +/// # Examples +/// +/// ``` +/// use std::io::prelude::*; +/// use flate2::Compression; +/// use flate2::write::ZlibEncoder; +/// +/// // Vec implements Write, assigning the compressed bytes of sample string +/// +/// # fn zlib_encoding() -> std::io::Result<()> { +/// let mut e = ZlibEncoder::new(Vec::new(), Compression::Default); +/// e.write(b"Hello World")?; +/// let compressed = e.finish()?; +/// # Ok(()) +/// # } +/// ``` +#[derive(Debug)] +pub struct ZlibEncoder { + inner: zio::Writer, +} + + +impl ZlibEncoder { + /// Creates a new encoder which will write compressed data to the stream + /// given at the given compression level. + /// + /// When this encoder is dropped or unwrapped the final pieces of data will + /// be flushed. + pub fn new(w: W, level: ::Compression) -> ZlibEncoder { + ZlibEncoder { + inner: zio::Writer::new(w, Compress::new(level, true)), + } + } + + /// Acquires a reference to the underlying writer. + pub fn get_ref(&self) -> &W { + self.inner.get_ref() + } + + /// Acquires a mutable reference to the underlying writer. + /// + /// Note that mutating the output/input state of the stream may corrupt this + /// object, so care must be taken when using this method. + pub fn get_mut(&mut self) -> &mut W { + self.inner.get_mut() + } + + /// Resets the state of this encoder entirely, swapping out the output + /// stream for another. + /// + /// This function will finish encoding the current stream into the current + /// output stream before swapping out the two output streams. + /// + /// After the current stream has been finished, this will reset the internal + /// state of this encoder and replace the output stream with the one + /// provided, returning the previous output stream. Future data written to + /// this encoder will be the compressed into the stream `w` provided. + /// + /// # Errors + /// + /// This function will perform I/O to complete this stream, and any I/O + /// errors which occur will be returned from this function. + pub fn reset(&mut self, w: W) -> io::Result { + try!(self.inner.finish()); + self.inner.data.reset(); + Ok(self.inner.replace(w)) + } + + /// Attempt to finish this output stream, writing out final chunks of data. 
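The `reset` method documented above has no example in the vendored docs. The following sketch is an illustration, not upstream code; it shows how a single `write::ZlibEncoder` can produce two independent compressed buffers by swapping the output `Vec` out with `reset`:

```rust
extern crate flate2;

use std::io::Write;
use flate2::Compression;
use flate2::write::ZlibEncoder;

fn main() {
    let mut enc = ZlibEncoder::new(Vec::new(), Compression::Default);

    enc.write_all(b"first payload").unwrap();
    // reset() finishes the current zlib stream, swaps in a fresh output
    // buffer, and returns the buffer holding the first compressed stream.
    let first = enc.reset(Vec::new()).unwrap();

    enc.write_all(b"second payload").unwrap();
    let second = enc.finish().unwrap();

    assert!(!first.is_empty() && !second.is_empty());
}
```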
+ /// + /// Note that this function can only be used once data has finished being + /// written to the output stream. After this function is called then further + /// calls to `write` may result in a panic. + /// + /// # Panics + /// + /// Attempts to write data to this stream may result in a panic after this + /// function is called. + /// + /// # Errors + /// + /// This function will perform I/O to complete this stream, and any I/O + /// errors which occur will be returned from this function. + pub fn try_finish(&mut self) -> io::Result<()> { + self.inner.finish() + } + + /// Consumes this encoder, flushing the output stream. + /// + /// This will flush the underlying data stream, close off the compressed + /// stream and, if successful, return the contained writer. + /// + /// Note that this function may not be suitable to call in a situation where + /// the underlying stream is an asynchronous I/O stream. To finish a stream + /// the `try_finish` (or `shutdown`) method should be used instead. To + /// re-acquire ownership of a stream it is safe to call this method after + /// `try_finish` or `shutdown` has returned `Ok`. + /// + /// # Errors + /// + /// This function will perform I/O to complete this stream, and any I/O + /// errors which occur will be returned from this function. + pub fn finish(mut self) -> io::Result { + try!(self.inner.finish()); + Ok(self.inner.take_inner()) + } + + /// Consumes this encoder, flushing the output stream. + /// + /// This will flush the underlying data stream and then return the contained + /// writer if the flush succeeded. + /// The compressed stream will not be closed, only flushed. This + /// means that the obtained byte array can be extended by another deflated + /// stream. To close the stream add the two bytes 0x3 and 0x0. + /// + /// # Errors + /// + /// This function will perform I/O to complete this stream, and any I/O + /// errors which occur will be returned from this function. + pub fn flush_finish(mut self) -> io::Result { + try!(self.inner.flush()); + Ok(self.inner.take_inner()) + } + + /// Returns the number of bytes that have been written to this compressor. + /// + /// Note that not all bytes written to this object may be accounted for, + /// there may still be some active buffering. + pub fn total_in(&self) -> u64 { + self.inner.data.total_in() + } + + /// Returns the number of bytes that the compressor has produced. + /// + /// Note that not all bytes may have been written yet, some may still be + /// buffered. + pub fn total_out(&self) -> u64 { + self.inner.data.total_out() + } +} + +impl Write for ZlibEncoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.inner.write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.flush() + } +} + +#[cfg(feature = "tokio")] +impl AsyncWrite for ZlibEncoder { + fn shutdown(&mut self) -> Poll<(), io::Error> { + try_nb!(self.try_finish()); + self.get_mut().shutdown() + } +} + +impl Read for ZlibEncoder { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.get_mut().read(buf) + } +} + +#[cfg(feature = "tokio")] +impl AsyncRead for ZlibEncoder {} + + +/// A ZLIB decoder, or decompressor. +/// +/// This structure implements a [`Write`] interface and will emit a stream of decompressed +/// data when fed a stream of compressed data.
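Before the decoder documentation continues below, here is a minimal sketch (not from the vendored sources) of the two-step finish pattern described above for the write-side encoder: `try_finish` completes the compressed stream in place, after which `finish` can safely be called to take back the underlying writer:

```rust
extern crate flate2;

use std::io::Write;
use flate2::Compression;
use flate2::write::ZlibEncoder;

fn main() {
    let mut enc = ZlibEncoder::new(Vec::new(), Compression::Default);
    enc.write_all(b"Hello World").unwrap();

    // Finish the compressed stream without giving up ownership yet...
    enc.try_finish().unwrap();

    // ...then consume the encoder to recover the Vec<u8>; the docs above note
    // this is safe once try_finish has returned Ok.
    let compressed = enc.finish().unwrap();
    assert!(!compressed.is_empty());
}
```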
+/// +/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html +/// +/// # Examples +/// +/// ``` +/// use std::io::prelude::*; +/// use std::io; +/// # use flate2::Compression; +/// # use flate2::write::ZlibEncoder; +/// use flate2::write::ZlibDecoder; +/// +/// # fn main() { +/// # let mut e = ZlibEncoder::new(Vec::new(), Compression::Default); +/// # e.write(b"Hello World").unwrap(); +/// # let bytes = e.finish().unwrap(); +/// # println!("{}", decode_reader(bytes).unwrap()); +/// # } +/// # +/// // Uncompresses a Zlib Encoded vector of bytes and returns a string or error +/// // Here Vec implements Write +/// +/// fn decode_reader(bytes: Vec) -> io::Result { +/// let mut writer = Vec::new(); +/// let mut z = ZlibDecoder::new(writer); +/// z.write(&bytes[..])?; +/// writer = z.finish()?; +/// let return_string = String::from_utf8(writer).expect("String parsing error"); +/// Ok(return_string) +/// } +/// ``` +#[derive(Debug)] +pub struct ZlibDecoder { + inner: zio::Writer, +} + + +impl ZlibDecoder { + /// Creates a new decoder which will write uncompressed data to the stream. + /// + /// When this decoder is dropped or unwrapped the final pieces of data will + /// be flushed. + pub fn new(w: W) -> ZlibDecoder { + ZlibDecoder { + inner: zio::Writer::new(w, Decompress::new(true)), + } + } + + /// Acquires a reference to the underlying writer. + pub fn get_ref(&self) -> &W { + self.inner.get_ref() + } + + /// Acquires a mutable reference to the underlying writer. + /// + /// Note that mutating the output/input state of the stream may corrupt this + /// object, so care must be taken when using this method. + pub fn get_mut(&mut self) -> &mut W { + self.inner.get_mut() + } + + /// Resets the state of this decoder entirely, swapping out the output + /// stream for another. + /// + /// This will reset the internal state of this decoder and replace the + /// output stream with the one provided, returning the previous output + /// stream. Future data written to this decoder will be decompressed into + /// the output stream `w`. + /// + /// # Errors + /// + /// This function will perform I/O to complete this stream, and any I/O + /// errors which occur will be returned from this function. + pub fn reset(&mut self, w: W) -> io::Result { + try!(self.inner.finish()); + self.inner.data = Decompress::new(true); + Ok(self.inner.replace(w)) + } + + /// Attempt to finish this output stream, writing out final chunks of data. + /// + /// Note that this function can only be used once data has finished being + /// written to the output stream. After this function is called then further + /// calls to `write` may result in a panic. + /// + /// # Panics + /// + /// Attempts to write data to this stream may result in a panic after this + /// function is called. + /// + /// # Errors + /// + /// This function will perform I/O to complete this stream, and any I/O + /// errors which occur will be returned from this function. + pub fn try_finish(&mut self) -> io::Result<()> { + self.inner.finish() + } + + /// Consumes this encoder, flushing the output stream. + /// + /// This will flush the underlying data stream and then return the contained + /// writer if the flush succeeded. + /// + /// Note that this function may not be suitable to call in a situation where + /// the underlying stream is an asynchronous I/O stream. To finish a stream + /// the `try_finish` (or `shutdown`) method should be used instead. 
To + /// re-acquire ownership of a stream it is safe to call this method after + /// `try_finish` or `shutdown` has returned `Ok`. + /// + /// # Errors + /// + /// This function will perform I/O to complete this stream, and any I/O + /// errors which occur will be returned from this function. + pub fn finish(mut self) -> io::Result { + try!(self.inner.finish()); + Ok(self.inner.take_inner()) + } + + /// Returns the number of bytes that the decompressor has consumed for + /// decompression. + /// + /// Note that this will likely be smaller than the number of bytes + /// successfully written to this stream due to internal buffering. + pub fn total_in(&self) -> u64 { + self.inner.data.total_in() + } + + /// Returns the number of bytes that the decompressor has written to its + /// output stream. + pub fn total_out(&self) -> u64 { + self.inner.data.total_out() + } +} + +impl Write for ZlibDecoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.inner.write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.flush() + } +} + +#[cfg(feature = "tokio")] +impl AsyncWrite for ZlibDecoder { + fn shutdown(&mut self) -> Poll<(), io::Error> { + try_nb!(self.inner.finish()); + self.inner.get_mut().shutdown() + } +} + +impl Read for ZlibDecoder { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.inner.get_mut().read(buf) + } +} + +#[cfg(feature = "tokio")] +impl AsyncRead for ZlibDecoder {} diff --git a/src/vendor/flate2/tests/early-flush.rs b/src/vendor/flate2/tests/early-flush.rs new file mode 100644 index 0000000000..70c10673fd --- /dev/null +++ b/src/vendor/flate2/tests/early-flush.rs @@ -0,0 +1,20 @@ +extern crate flate2; + +use std::io::{Read, Write}; + +use flate2::write::GzEncoder; +use flate2::read::GzDecoder; + +#[test] +fn smoke() { + let mut w = GzEncoder::new(Vec::new(), flate2::Compression::Default); + w.flush().unwrap(); + w.write(b"hello").unwrap(); + + let bytes = w.finish().unwrap(); + + let mut r = GzDecoder::new(&bytes[..]).unwrap(); + let mut s = String::new(); + r.read_to_string(&mut s).unwrap(); + assert_eq!(s, "hello"); +} diff --git a/src/vendor/flate2/tests/zero-write.rs b/src/vendor/flate2/tests/zero-write.rs new file mode 100644 index 0000000000..8174e0c396 --- /dev/null +++ b/src/vendor/flate2/tests/zero-write.rs @@ -0,0 +1,8 @@ +extern crate flate2; + +#[test] +fn zero_write_is_error() { + let mut buf = [0u8]; + let writer = flate2::write::DeflateEncoder::new(&mut buf[..], flate2::Compression::Default); + assert!(writer.finish().is_err()); +} diff --git a/src/vendor/fnv/.cargo-checksum.json b/src/vendor/fnv/.cargo-checksum.json new file mode 100644 index 0000000000..0d7d299453 --- /dev/null +++ b/src/vendor/fnv/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"29b74b95210896ce634c11a9037638668473b5a1b3b1716c505cb04dbb6341fa","Cargo.toml":"19864ecb948c0e7be14ead11068a2c689a4d31a684c85b6ad1bdf4a26d893516","README.md":"772c547b8e78764f07cc22f2294cb7c691cb20f30d459ed45a65c2434b1ca8a9","lib.rs":"2e2be31e9c90c9b2b0fe223f64f4b4bb24487e370e1cd2fbcce70d30f50fc452"},"package":"6cc484842f1e2884faf56f529f960cc12ad8c71ce96cc7abba0a067c98fee344"} \ No newline at end of file diff --git a/src/vendor/fnv/.cargo-ok b/src/vendor/fnv/.cargo-ok new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/vendor/fnv/.gitignore b/src/vendor/fnv/.gitignore new file mode 100644 index 
0000000000..a9d37c560c --- /dev/null +++ b/src/vendor/fnv/.gitignore @@ -0,0 +1,2 @@ +target +Cargo.lock diff --git a/src/vendor/fnv/.travis.yml b/src/vendor/fnv/.travis.yml new file mode 100644 index 0000000000..9c58f03c66 --- /dev/null +++ b/src/vendor/fnv/.travis.yml @@ -0,0 +1,8 @@ +language: rust +rust: + - nightly + - beta + - stable + +notifications: + webhooks: http://build.servo.org:54856/travis diff --git a/src/vendor/fnv/Cargo.toml b/src/vendor/fnv/Cargo.toml new file mode 100644 index 0000000000..c401150489 --- /dev/null +++ b/src/vendor/fnv/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "fnv" +version = "1.0.5" +authors = ["Alex Crichton "] +description = "Fowler–Noll–Vo hash function" +license = "Apache-2.0 / MIT" +readme = "README.md" +repository = "https://github.com/servo/rust-fnv" +documentation = "https://doc.servo.org/fnv/" + +[lib] +name = "fnv" +path = "lib.rs" diff --git a/src/vendor/fnv/README.md b/src/vendor/fnv/README.md new file mode 100644 index 0000000000..96001e22de --- /dev/null +++ b/src/vendor/fnv/README.md @@ -0,0 +1,81 @@ +# rust-fnv + +An implementation of the [Fowler–Noll–Vo hash function][chongo]. + +### [Read the documentation](https://doc.servo.org/fnv/) + + +## About + +The FNV hash function is a custom `Hasher` implementation that is more +efficient for smaller hash keys. + +[The Rust FAQ states that][faq] while the default `Hasher` implementation, +SipHash, is good in many cases, it is notably slower than other algorithms +with short keys, such as when you have a map of integers to other values. +In cases like these, [FNV is demonstrably faster][graphs]. + +Its disadvantages are that it performs badly on larger inputs, and +provides no protection against collision attacks, where a malicious user +can craft specific keys designed to slow a hasher down. Thus, it is +important to profile your program to ensure that you are using small hash +keys, and be certain that your program could not be exposed to malicious +inputs (including being a networked server). + +The Rust compiler itself uses FNV, as it is not worried about +denial-of-service attacks, and can assume that its inputs are going to be +small—a perfect use case for FNV. + + +## Usage + +To include this crate in your program, add the following to your `Cargo.toml`: + +```toml +[dependencies] +fnv = "1.0.3" +``` + + +## Using FNV in a HashMap + +The `FnvHashMap` type alias is the easiest way to use the standard library’s +`HashMap` with FNV. + +```rust +use fnv::FnvHashMap; + +let mut map = FnvHashMap::default(); +map.insert(1, "one"); +map.insert(2, "two"); + +map = FnvHashMap::with_capacity_and_hasher(10, Default::default()); +map.insert(1, "one"); +map.insert(2, "two"); +``` + +Note, the standard library’s `HashMap::new` and `HashMap::with_capacity` +are only implemented for the `RandomState` hasher, so using `Default` to +get the hasher is the next best option. + + +## Using FNV in a HashSet + +Similarly, `FnvHashSet` is a type alias for the standard library’s `HashSet` +with FNV. 
+ +```rust +use fnv::FnvHashSet; + +let mut set = FnvHashSet::default(); +set.insert(1); +set.insert(2); + +set = FnvHashSet::with_capacity_and_hasher(10, Default::default()); +set.insert(1); +set.insert(2); +``` + +[chongo]: http://www.isthe.com/chongo/tech/comp/fnv/index.html +[faq]: https://www.rust-lang.org/faq.html#why-are-rusts-hashmaps-slow +[graphs]: http://cglab.ca/~abeinges/blah/hash-rs/ diff --git a/src/vendor/fnv/lib.rs b/src/vendor/fnv/lib.rs new file mode 100644 index 0000000000..1fc5d28c20 --- /dev/null +++ b/src/vendor/fnv/lib.rs @@ -0,0 +1,349 @@ +//! An implementation of the [Fowler–Noll–Vo hash function][chongo]. +//! +//! ## About +//! +//! The FNV hash function is a custom `Hasher` implementation that is more +//! efficient for smaller hash keys. +//! +//! [The Rust FAQ states that][faq] while the default `Hasher` implementation, +//! SipHash, is good in many cases, it is notably slower than other algorithms +//! with short keys, such as when you have a map of integers to other values. +//! In cases like these, [FNV is demonstrably faster][graphs]. +//! +//! Its disadvantages are that it performs badly on larger inputs, and +//! provides no protection against collision attacks, where a malicious user +//! can craft specific keys designed to slow a hasher down. Thus, it is +//! important to profile your program to ensure that you are using small hash +//! keys, and be certain that your program could not be exposed to malicious +//! inputs (including being a networked server). +//! +//! The Rust compiler itself uses FNV, as it is not worried about +//! denial-of-service attacks, and can assume that its inputs are going to be +//! small—a perfect use case for FNV. +//! +//! +//! ## Using FNV in a `HashMap` +//! +//! The `FnvHashMap` type alias is the easiest way to use the standard library’s +//! `HashMap` with FNV. +//! +//! ```rust +//! use fnv::FnvHashMap; +//! +//! let mut map = FnvHashMap::default(); +//! map.insert(1, "one"); +//! map.insert(2, "two"); +//! +//! map = FnvHashMap::with_capacity_and_hasher(10, Default::default()); +//! map.insert(1, "one"); +//! map.insert(2, "two"); +//! ``` +//! +//! Note, the standard library’s `HashMap::new` and `HashMap::with_capacity` +//! are only implemented for the `RandomState` hasher, so using `Default` to +//! get the hasher is the next best option. +//! +//! ## Using FNV in a `HashSet` +//! +//! Similarly, `FnvHashSet` is a type alias for the standard library’s `HashSet` +//! with FNV. +//! +//! ```rust +//! use fnv::FnvHashSet; +//! +//! let mut set = FnvHashSet::default(); +//! set.insert(1); +//! set.insert(2); +//! +//! set = FnvHashSet::with_capacity_and_hasher(10, Default::default()); +//! set.insert(1); +//! set.insert(2); +//! ``` +//! +//! [chongo]: http://www.isthe.com/chongo/tech/comp/fnv/index.html +//! [faq]: https://www.rust-lang.org/faq.html#why-are-rusts-hashmaps-slow +//! [graphs]: http://cglab.ca/~abeinges/blah/hash-rs/ + + +use std::default::Default; +use std::hash::{Hasher, BuildHasherDefault}; +use std::collections::{HashMap, HashSet}; + +/// An implementation of the Fowler–Noll–Vo hash function. +/// +/// See the [crate documentation](index.html) for more details. +#[allow(missing_copy_implementations)] +pub struct FnvHasher(u64); + +impl Default for FnvHasher { + + #[inline] + fn default() -> FnvHasher { + FnvHasher(0xcbf29ce484222325) + } +} + +impl FnvHasher { + /// Create an FNV hasher starting with a state corresponding + /// to the hash `key`. 
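The type aliases in the README are the usual entry point, but the `FnvHasher` defined above can also be driven directly through the `Hasher` trait. A minimal sketch, not part of the vendored sources; the expected value is the FNV-1a test vector for `"foobar"` asserted in the crate's own tests below:

```rust
extern crate fnv;

use std::hash::Hasher;
use fnv::FnvHasher;

fn main() {
    // default() starts from the 64-bit FNV offset basis.
    let mut hasher = FnvHasher::default();
    hasher.write(b"foobar");
    // Matches the FNV-1a test vector in the crate's tests.
    assert_eq!(hasher.finish(), 0x85944171f73967e8);
}
```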
+ #[inline] + pub fn with_key(key: u64) -> FnvHasher { + FnvHasher(key) + } +} + +impl Hasher for FnvHasher { + #[inline] + fn finish(&self) -> u64 { + self.0 + } + + #[inline] + fn write(&mut self, bytes: &[u8]) { + let FnvHasher(mut hash) = *self; + + for byte in bytes.iter() { + hash = hash ^ (*byte as u64); + hash = hash.wrapping_mul(0x100000001b3); + } + + *self = FnvHasher(hash); + } +} + +/// A builder for default FNV hashers. +pub type FnvBuildHasher = BuildHasherDefault; + +/// A `HashMap` using a default FNV hasher. +pub type FnvHashMap = HashMap; + +/// A `HashSet` using a default FNV hasher. +pub type FnvHashSet = HashSet; + + +#[cfg(test)] +mod test { + use super::*; + use std::hash::Hasher; + + fn fnv1a(bytes: &[u8]) -> u64 { + let mut hasher = FnvHasher::default(); + hasher.write(bytes); + hasher.finish() + } + + fn repeat_10(bytes: &[u8]) -> Vec { + (0..10).flat_map(|_| bytes.iter().cloned()).collect() + } + + fn repeat_500(bytes: &[u8]) -> Vec { + (0..500).flat_map(|_| bytes.iter().cloned()).collect() + } + + #[test] + fn basic_tests() { + assert_eq!(fnv1a(b""), 0xcbf29ce484222325); + assert_eq!(fnv1a(b"a"), 0xaf63dc4c8601ec8c); + assert_eq!(fnv1a(b"b"), 0xaf63df4c8601f1a5); + assert_eq!(fnv1a(b"c"), 0xaf63de4c8601eff2); + assert_eq!(fnv1a(b"d"), 0xaf63d94c8601e773); + assert_eq!(fnv1a(b"e"), 0xaf63d84c8601e5c0); + assert_eq!(fnv1a(b"f"), 0xaf63db4c8601ead9); + assert_eq!(fnv1a(b"fo"), 0x08985907b541d342); + assert_eq!(fnv1a(b"foo"), 0xdcb27518fed9d577); + assert_eq!(fnv1a(b"foob"), 0xdd120e790c2512af); + assert_eq!(fnv1a(b"fooba"), 0xcac165afa2fef40a); + assert_eq!(fnv1a(b"foobar"), 0x85944171f73967e8); + assert_eq!(fnv1a(b"\0"), 0xaf63bd4c8601b7df); + assert_eq!(fnv1a(b"a\0"), 0x089be207b544f1e4); + assert_eq!(fnv1a(b"b\0"), 0x08a61407b54d9b5f); + assert_eq!(fnv1a(b"c\0"), 0x08a2ae07b54ab836); + assert_eq!(fnv1a(b"d\0"), 0x0891b007b53c4869); + assert_eq!(fnv1a(b"e\0"), 0x088e4a07b5396540); + assert_eq!(fnv1a(b"f\0"), 0x08987c07b5420ebb); + assert_eq!(fnv1a(b"fo\0"), 0xdcb28a18fed9f926); + assert_eq!(fnv1a(b"foo\0"), 0xdd1270790c25b935); + assert_eq!(fnv1a(b"foob\0"), 0xcac146afa2febf5d); + assert_eq!(fnv1a(b"fooba\0"), 0x8593d371f738acfe); + assert_eq!(fnv1a(b"foobar\0"), 0x34531ca7168b8f38); + assert_eq!(fnv1a(b"ch"), 0x08a25607b54a22ae); + assert_eq!(fnv1a(b"cho"), 0xf5faf0190cf90df3); + assert_eq!(fnv1a(b"chon"), 0xf27397910b3221c7); + assert_eq!(fnv1a(b"chong"), 0x2c8c2b76062f22e0); + assert_eq!(fnv1a(b"chongo"), 0xe150688c8217b8fd); + assert_eq!(fnv1a(b"chongo "), 0xf35a83c10e4f1f87); + assert_eq!(fnv1a(b"chongo w"), 0xd1edd10b507344d0); + assert_eq!(fnv1a(b"chongo wa"), 0x2a5ee739b3ddb8c3); + assert_eq!(fnv1a(b"chongo was"), 0xdcfb970ca1c0d310); + assert_eq!(fnv1a(b"chongo was "), 0x4054da76daa6da90); + assert_eq!(fnv1a(b"chongo was h"), 0xf70a2ff589861368); + assert_eq!(fnv1a(b"chongo was he"), 0x4c628b38aed25f17); + assert_eq!(fnv1a(b"chongo was her"), 0x9dd1f6510f78189f); + assert_eq!(fnv1a(b"chongo was here"), 0xa3de85bd491270ce); + assert_eq!(fnv1a(b"chongo was here!"), 0x858e2fa32a55e61d); + assert_eq!(fnv1a(b"chongo was here!\n"), 0x46810940eff5f915); + assert_eq!(fnv1a(b"ch\0"), 0xf5fadd190cf8edaa); + assert_eq!(fnv1a(b"cho\0"), 0xf273ed910b32b3e9); + assert_eq!(fnv1a(b"chon\0"), 0x2c8c5276062f6525); + assert_eq!(fnv1a(b"chong\0"), 0xe150b98c821842a0); + assert_eq!(fnv1a(b"chongo\0"), 0xf35aa3c10e4f55e7); + assert_eq!(fnv1a(b"chongo \0"), 0xd1ed680b50729265); + assert_eq!(fnv1a(b"chongo w\0"), 0x2a5f0639b3dded70); + assert_eq!(fnv1a(b"chongo wa\0"), 
0xdcfbaa0ca1c0f359); + assert_eq!(fnv1a(b"chongo was\0"), 0x4054ba76daa6a430); + assert_eq!(fnv1a(b"chongo was \0"), 0xf709c7f5898562b0); + assert_eq!(fnv1a(b"chongo was h\0"), 0x4c62e638aed2f9b8); + assert_eq!(fnv1a(b"chongo was he\0"), 0x9dd1a8510f779415); + assert_eq!(fnv1a(b"chongo was her\0"), 0xa3de2abd4911d62d); + assert_eq!(fnv1a(b"chongo was here\0"), 0x858e0ea32a55ae0a); + assert_eq!(fnv1a(b"chongo was here!\0"), 0x46810f40eff60347); + assert_eq!(fnv1a(b"chongo was here!\n\0"), 0xc33bce57bef63eaf); + assert_eq!(fnv1a(b"cu"), 0x08a24307b54a0265); + assert_eq!(fnv1a(b"cur"), 0xf5b9fd190cc18d15); + assert_eq!(fnv1a(b"curd"), 0x4c968290ace35703); + assert_eq!(fnv1a(b"curds"), 0x07174bd5c64d9350); + assert_eq!(fnv1a(b"curds "), 0x5a294c3ff5d18750); + assert_eq!(fnv1a(b"curds a"), 0x05b3c1aeb308b843); + assert_eq!(fnv1a(b"curds an"), 0xb92a48da37d0f477); + assert_eq!(fnv1a(b"curds and"), 0x73cdddccd80ebc49); + assert_eq!(fnv1a(b"curds and "), 0xd58c4c13210a266b); + assert_eq!(fnv1a(b"curds and w"), 0xe78b6081243ec194); + assert_eq!(fnv1a(b"curds and wh"), 0xb096f77096a39f34); + assert_eq!(fnv1a(b"curds and whe"), 0xb425c54ff807b6a3); + assert_eq!(fnv1a(b"curds and whey"), 0x23e520e2751bb46e); + assert_eq!(fnv1a(b"curds and whey\n"), 0x1a0b44ccfe1385ec); + assert_eq!(fnv1a(b"cu\0"), 0xf5ba4b190cc2119f); + assert_eq!(fnv1a(b"cur\0"), 0x4c962690ace2baaf); + assert_eq!(fnv1a(b"curd\0"), 0x0716ded5c64cda19); + assert_eq!(fnv1a(b"curds\0"), 0x5a292c3ff5d150f0); + assert_eq!(fnv1a(b"curds \0"), 0x05b3e0aeb308ecf0); + assert_eq!(fnv1a(b"curds a\0"), 0xb92a5eda37d119d9); + assert_eq!(fnv1a(b"curds an\0"), 0x73ce41ccd80f6635); + assert_eq!(fnv1a(b"curds and\0"), 0xd58c2c132109f00b); + assert_eq!(fnv1a(b"curds and \0"), 0xe78baf81243f47d1); + assert_eq!(fnv1a(b"curds and w\0"), 0xb0968f7096a2ee7c); + assert_eq!(fnv1a(b"curds and wh\0"), 0xb425a84ff807855c); + assert_eq!(fnv1a(b"curds and whe\0"), 0x23e4e9e2751b56f9); + assert_eq!(fnv1a(b"curds and whey\0"), 0x1a0b4eccfe1396ea); + assert_eq!(fnv1a(b"curds and whey\n\0"), 0x54abd453bb2c9004); + assert_eq!(fnv1a(b"hi"), 0x08ba5f07b55ec3da); + assert_eq!(fnv1a(b"hi\0"), 0x337354193006cb6e); + assert_eq!(fnv1a(b"hello"), 0xa430d84680aabd0b); + assert_eq!(fnv1a(b"hello\0"), 0xa9bc8acca21f39b1); + assert_eq!(fnv1a(b"\xff\x00\x00\x01"), 0x6961196491cc682d); + assert_eq!(fnv1a(b"\x01\x00\x00\xff"), 0xad2bb1774799dfe9); + assert_eq!(fnv1a(b"\xff\x00\x00\x02"), 0x6961166491cc6314); + assert_eq!(fnv1a(b"\x02\x00\x00\xff"), 0x8d1bb3904a3b1236); + assert_eq!(fnv1a(b"\xff\x00\x00\x03"), 0x6961176491cc64c7); + assert_eq!(fnv1a(b"\x03\x00\x00\xff"), 0xed205d87f40434c7); + assert_eq!(fnv1a(b"\xff\x00\x00\x04"), 0x6961146491cc5fae); + assert_eq!(fnv1a(b"\x04\x00\x00\xff"), 0xcd3baf5e44f8ad9c); + assert_eq!(fnv1a(b"\x40\x51\x4e\x44"), 0xe3b36596127cd6d8); + assert_eq!(fnv1a(b"\x44\x4e\x51\x40"), 0xf77f1072c8e8a646); + assert_eq!(fnv1a(b"\x40\x51\x4e\x4a"), 0xe3b36396127cd372); + assert_eq!(fnv1a(b"\x4a\x4e\x51\x40"), 0x6067dce9932ad458); + assert_eq!(fnv1a(b"\x40\x51\x4e\x54"), 0xe3b37596127cf208); + assert_eq!(fnv1a(b"\x54\x4e\x51\x40"), 0x4b7b10fa9fe83936); + assert_eq!(fnv1a(b"127.0.0.1"), 0xaabafe7104d914be); + assert_eq!(fnv1a(b"127.0.0.1\0"), 0xf4d3180b3cde3eda); + assert_eq!(fnv1a(b"127.0.0.2"), 0xaabafd7104d9130b); + assert_eq!(fnv1a(b"127.0.0.2\0"), 0xf4cfb20b3cdb5bb1); + assert_eq!(fnv1a(b"127.0.0.3"), 0xaabafc7104d91158); + assert_eq!(fnv1a(b"127.0.0.3\0"), 0xf4cc4c0b3cd87888); + assert_eq!(fnv1a(b"64.81.78.68"), 0xe729bac5d2a8d3a7); + 
assert_eq!(fnv1a(b"64.81.78.68\0"), 0x74bc0524f4dfa4c5); + assert_eq!(fnv1a(b"64.81.78.74"), 0xe72630c5d2a5b352); + assert_eq!(fnv1a(b"64.81.78.74\0"), 0x6b983224ef8fb456); + assert_eq!(fnv1a(b"64.81.78.84"), 0xe73042c5d2ae266d); + assert_eq!(fnv1a(b"64.81.78.84\0"), 0x8527e324fdeb4b37); + assert_eq!(fnv1a(b"feedface"), 0x0a83c86fee952abc); + assert_eq!(fnv1a(b"feedface\0"), 0x7318523267779d74); + assert_eq!(fnv1a(b"feedfacedaffdeed"), 0x3e66d3d56b8caca1); + assert_eq!(fnv1a(b"feedfacedaffdeed\0"), 0x956694a5c0095593); + assert_eq!(fnv1a(b"feedfacedeadbeef"), 0xcac54572bb1a6fc8); + assert_eq!(fnv1a(b"feedfacedeadbeef\0"), 0xa7a4c9f3edebf0d8); + assert_eq!(fnv1a(b"line 1\nline 2\nline 3"), 0x7829851fac17b143); + assert_eq!(fnv1a(b"chongo /\\../\\"), 0x2c8f4c9af81bcf06); + assert_eq!(fnv1a(b"chongo /\\../\\\0"), 0xd34e31539740c732); + assert_eq!(fnv1a(b"chongo (Landon Curt Noll) /\\../\\"), 0x3605a2ac253d2db1); + assert_eq!(fnv1a(b"chongo (Landon Curt Noll) /\\../\\\0"), 0x08c11b8346f4a3c3); + assert_eq!(fnv1a(b"http://antwrp.gsfc.nasa.gov/apod/astropix.html"), 0x6be396289ce8a6da); + assert_eq!(fnv1a(b"http://en.wikipedia.org/wiki/Fowler_Noll_Vo_hash"), 0xd9b957fb7fe794c5); + assert_eq!(fnv1a(b"http://epod.usra.edu/"), 0x05be33da04560a93); + assert_eq!(fnv1a(b"http://exoplanet.eu/"), 0x0957f1577ba9747c); + assert_eq!(fnv1a(b"http://hvo.wr.usgs.gov/cam3/"), 0xda2cc3acc24fba57); + assert_eq!(fnv1a(b"http://hvo.wr.usgs.gov/cams/HMcam/"), 0x74136f185b29e7f0); + assert_eq!(fnv1a(b"http://hvo.wr.usgs.gov/kilauea/update/deformation.html"), 0xb2f2b4590edb93b2); + assert_eq!(fnv1a(b"http://hvo.wr.usgs.gov/kilauea/update/images.html"), 0xb3608fce8b86ae04); + assert_eq!(fnv1a(b"http://hvo.wr.usgs.gov/kilauea/update/maps.html"), 0x4a3a865079359063); + assert_eq!(fnv1a(b"http://hvo.wr.usgs.gov/volcanowatch/current_issue.html"), 0x5b3a7ef496880a50); + assert_eq!(fnv1a(b"http://neo.jpl.nasa.gov/risk/"), 0x48fae3163854c23b); + assert_eq!(fnv1a(b"http://norvig.com/21-days.html"), 0x07aaa640476e0b9a); + assert_eq!(fnv1a(b"http://primes.utm.edu/curios/home.php"), 0x2f653656383a687d); + assert_eq!(fnv1a(b"http://slashdot.org/"), 0xa1031f8e7599d79c); + assert_eq!(fnv1a(b"http://tux.wr.usgs.gov/Maps/155.25-19.5.html"), 0xa31908178ff92477); + assert_eq!(fnv1a(b"http://volcano.wr.usgs.gov/kilaueastatus.php"), 0x097edf3c14c3fb83); + assert_eq!(fnv1a(b"http://www.avo.alaska.edu/activity/Redoubt.php"), 0xb51ca83feaa0971b); + assert_eq!(fnv1a(b"http://www.dilbert.com/fast/"), 0xdd3c0d96d784f2e9); + assert_eq!(fnv1a(b"http://www.fourmilab.ch/gravitation/orbits/"), 0x86cd26a9ea767d78); + assert_eq!(fnv1a(b"http://www.fpoa.net/"), 0xe6b215ff54a30c18); + assert_eq!(fnv1a(b"http://www.ioccc.org/index.html"), 0xec5b06a1c5531093); + assert_eq!(fnv1a(b"http://www.isthe.com/cgi-bin/number.cgi"), 0x45665a929f9ec5e5); + assert_eq!(fnv1a(b"http://www.isthe.com/chongo/bio.html"), 0x8c7609b4a9f10907); + assert_eq!(fnv1a(b"http://www.isthe.com/chongo/index.html"), 0x89aac3a491f0d729); + assert_eq!(fnv1a(b"http://www.isthe.com/chongo/src/calc/lucas-calc"), 0x32ce6b26e0f4a403); + assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/astro/venus2004.html"), 0x614ab44e02b53e01); + assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/astro/vita.html"), 0xfa6472eb6eef3290); + assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/comp/c/expert.html"), 0x9e5d75eb1948eb6a); + assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/comp/calc/index.html"), 0xb6d12ad4a8671852); + assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/comp/fnv/index.html"), 
0x88826f56eba07af1); + assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/math/number/howhigh.html"), 0x44535bf2645bc0fd); + assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/math/number/number.html"), 0x169388ffc21e3728); + assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/math/prime/mersenne.html"), 0xf68aac9e396d8224); + assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/math/prime/mersenne.html#largest"), 0x8e87d7e7472b3883); + assert_eq!(fnv1a(b"http://www.lavarnd.org/cgi-bin/corpspeak.cgi"), 0x295c26caa8b423de); + assert_eq!(fnv1a(b"http://www.lavarnd.org/cgi-bin/haiku.cgi"), 0x322c814292e72176); + assert_eq!(fnv1a(b"http://www.lavarnd.org/cgi-bin/rand-none.cgi"), 0x8a06550eb8af7268); + assert_eq!(fnv1a(b"http://www.lavarnd.org/cgi-bin/randdist.cgi"), 0xef86d60e661bcf71); + assert_eq!(fnv1a(b"http://www.lavarnd.org/index.html"), 0x9e5426c87f30ee54); + assert_eq!(fnv1a(b"http://www.lavarnd.org/what/nist-test.html"), 0xf1ea8aa826fd047e); + assert_eq!(fnv1a(b"http://www.macosxhints.com/"), 0x0babaf9a642cb769); + assert_eq!(fnv1a(b"http://www.mellis.com/"), 0x4b3341d4068d012e); + assert_eq!(fnv1a(b"http://www.nature.nps.gov/air/webcams/parks/havoso2alert/havoalert.cfm"), 0xd15605cbc30a335c); + assert_eq!(fnv1a(b"http://www.nature.nps.gov/air/webcams/parks/havoso2alert/timelines_24.cfm"), 0x5b21060aed8412e5); + assert_eq!(fnv1a(b"http://www.paulnoll.com/"), 0x45e2cda1ce6f4227); + assert_eq!(fnv1a(b"http://www.pepysdiary.com/"), 0x50ae3745033ad7d4); + assert_eq!(fnv1a(b"http://www.sciencenews.org/index/home/activity/view"), 0xaa4588ced46bf414); + assert_eq!(fnv1a(b"http://www.skyandtelescope.com/"), 0xc1b0056c4a95467e); + assert_eq!(fnv1a(b"http://www.sput.nl/~rob/sirius.html"), 0x56576a71de8b4089); + assert_eq!(fnv1a(b"http://www.systemexperts.com/"), 0xbf20965fa6dc927e); + assert_eq!(fnv1a(b"http://www.tq-international.com/phpBB3/index.php"), 0x569f8383c2040882); + assert_eq!(fnv1a(b"http://www.travelquesttours.com/index.htm"), 0xe1e772fba08feca0); + assert_eq!(fnv1a(b"http://www.wunderground.com/global/stations/89606.html"), 0x4ced94af97138ac4); + assert_eq!(fnv1a(&repeat_10(b"21701")), 0xc4112ffb337a82fb); + assert_eq!(fnv1a(&repeat_10(b"M21701")), 0xd64a4fd41de38b7d); + assert_eq!(fnv1a(&repeat_10(b"2^21701-1")), 0x4cfc32329edebcbb); + assert_eq!(fnv1a(&repeat_10(b"\x54\xc5")), 0x0803564445050395); + assert_eq!(fnv1a(&repeat_10(b"\xc5\x54")), 0xaa1574ecf4642ffd); + assert_eq!(fnv1a(&repeat_10(b"23209")), 0x694bc4e54cc315f9); + assert_eq!(fnv1a(&repeat_10(b"M23209")), 0xa3d7cb273b011721); + assert_eq!(fnv1a(&repeat_10(b"2^23209-1")), 0x577c2f8b6115bfa5); + assert_eq!(fnv1a(&repeat_10(b"\x5a\xa9")), 0xb7ec8c1a769fb4c1); + assert_eq!(fnv1a(&repeat_10(b"\xa9\x5a")), 0x5d5cfce63359ab19); + assert_eq!(fnv1a(&repeat_10(b"391581216093")), 0x33b96c3cd65b5f71); + assert_eq!(fnv1a(&repeat_10(b"391581*2^216093-1")), 0xd845097780602bb9); + assert_eq!(fnv1a(&repeat_10(b"\x05\xf9\x9d\x03\x4c\x81")), 0x84d47645d02da3d5); + assert_eq!(fnv1a(&repeat_10(b"FEDCBA9876543210")), 0x83544f33b58773a5); + assert_eq!(fnv1a(&repeat_10(b"\xfe\xdc\xba\x98\x76\x54\x32\x10")), 0x9175cbb2160836c5); + assert_eq!(fnv1a(&repeat_10(b"EFCDAB8967452301")), 0xc71b3bc175e72bc5); + assert_eq!(fnv1a(&repeat_10(b"\xef\xcd\xab\x89\x67\x45\x23\x01")), 0x636806ac222ec985); + assert_eq!(fnv1a(&repeat_10(b"0123456789ABCDEF")), 0xb6ef0e6950f52ed5); + assert_eq!(fnv1a(&repeat_10(b"\x01\x23\x45\x67\x89\xab\xcd\xef")), 0xead3d8a0f3dfdaa5); + assert_eq!(fnv1a(&repeat_10(b"1032547698BADCFE")), 0x922908fe9a861ba5); + 
assert_eq!(fnv1a(&repeat_10(b"\x10\x32\x54\x76\x98\xba\xdc\xfe")), 0x6d4821de275fd5c5); + assert_eq!(fnv1a(&repeat_500(b"\x00")), 0x1fe3fce62bd816b5); + assert_eq!(fnv1a(&repeat_500(b"\x07")), 0xc23e9fccd6f70591); + assert_eq!(fnv1a(&repeat_500(b"~")), 0xc1af12bdfe16b5b5); + assert_eq!(fnv1a(&repeat_500(b"\x7f")), 0x39e9f18f2f85e221); + } +} diff --git a/src/vendor/futf/.cargo-checksum.json b/src/vendor/futf/.cargo-checksum.json new file mode 100644 index 0000000000..b8d3ecb761 --- /dev/null +++ b/src/vendor/futf/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"39ae258d4dbe2ca1b13ce787a9d4f6b29e7b0a007797cd6de0cabf39a78c0c51","Cargo.toml":"fee353c47f22c3c41951da1f37726cd10f0e6eb870c29742c17390a94b8689ed","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"60a7062291b01ba068f300612cdbdc20382ac1d4934f07bcdd7167c15299f309","README.md":"933cfbcce46af48e2dbaa75f042df2143d726b983a546f57ca0eb5fb93e220b5","src/lib.rs":"5965d06ed494bb387f214f976b8a0e692e74837d51ecafbfef649e4c53a5af3f","src/test.rs":"0ca773b918809aeb73f75c75c0640d78cdc824efd995f136603a567239578c47"},"package":"51f93f3de6ba1794dcd5810b3546d004600a59a98266487c8407bc4b24e398f3"} \ No newline at end of file diff --git a/src/vendor/futf/.cargo-ok b/src/vendor/futf/.cargo-ok new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/vendor/futf/.gitignore b/src/vendor/futf/.gitignore new file mode 100644 index 0000000000..a9d37c560c --- /dev/null +++ b/src/vendor/futf/.gitignore @@ -0,0 +1,2 @@ +target +Cargo.lock diff --git a/src/vendor/futf/.travis.yml b/src/vendor/futf/.travis.yml new file mode 100644 index 0000000000..b79b332a1e --- /dev/null +++ b/src/vendor/futf/.travis.yml @@ -0,0 +1,11 @@ +language: rust +rust: + - nightly + - beta + - stable +script: + - cargo build + - "if [ $TRAVIS_RUST_VERSION = nightly ]; then cargo test; fi" + - cargo doc +notifications: + webhooks: http://build.servo.org:54856/travis diff --git a/src/vendor/futf/Cargo.toml b/src/vendor/futf/Cargo.toml new file mode 100644 index 0000000000..0182c0d2f4 --- /dev/null +++ b/src/vendor/futf/Cargo.toml @@ -0,0 +1,12 @@ +[package] + +name = "futf" +version = "0.1.3" +authors = ["Keegan McAllister "] +license = "MIT / Apache-2.0" +repository = "https://github.com/servo/futf" +description = "Handling fragments of UTF-8" + +[dependencies] +mac = "0.1.0" +debug_unreachable = "0.1.1" diff --git a/src/vendor/futf/LICENSE-APACHE b/src/vendor/futf/LICENSE-APACHE new file mode 100644 index 0000000000..16fe87b06e --- /dev/null +++ b/src/vendor/futf/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/src/vendor/futf/LICENSE-MIT b/src/vendor/futf/LICENSE-MIT new file mode 100644 index 0000000000..2e0fee1057 --- /dev/null +++ b/src/vendor/futf/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2015 Keegan McAllister + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/src/vendor/futf/README.md b/src/vendor/futf/README.md new file mode 100644 index 0000000000..325b2e74d5 --- /dev/null +++ b/src/vendor/futf/README.md @@ -0,0 +1,18 @@ +# futf + +[![Build Status](https://travis-ci.org/servo/futf.svg?branch=master)](https://travis-ci.org/kmcallister/futf) + +futf is a library for *flexible* UTF-8, or UTF-8 *fragments*. I don't know. +Check out the [API documentation](http://doc.servo.org/futf/index.html). + +Anyway, it takes an index into a byte buffer and tells you things about the +UTF-8 codepoint containing that byte. It can deal with incomplete codepoint +prefixes / suffixes at the ends of a buffer, which is useful for incremental +I/O. It can also handle UTF-16 surrogate code units encoded in the manner of +[CESU-8][] or [WTF-8][]. + +This is a low-level helper for [tendril][] that might be useful more generally. + +[CESU-8]: http://www.unicode.org/reports/tr26/ +[WTF-8]: http://simonsapin.github.io/wtf-8/ +[tendril]: https://github.com/kmcallister/tendril diff --git a/src/vendor/futf/src/lib.rs b/src/vendor/futf/src/lib.rs new file mode 100644 index 0000000000..a47cda0031 --- /dev/null +++ b/src/vendor/futf/src/lib.rs @@ -0,0 +1,248 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![cfg_attr(test, feature(test, str_char))] + +#[macro_use] +extern crate debug_unreachable; + +#[macro_use] +extern crate mac; + +#[cfg(test)] +extern crate test as std_test; + +use std::{slice, char}; + +/// Meaning of a complete or partial UTF-8 codepoint. +/// +/// Not all checking is performed eagerly. That is, a codepoint `Prefix` or +/// `Suffix` may in reality have no valid completion. +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub enum Meaning { + /// We found a whole codepoint. + Whole(char), + + /// We found something that isn't a valid Unicode codepoint, but + /// it *would* correspond to a UTF-16 leading surrogate code unit, + /// i.e. a value in the range `U+D800` - `U+DBFF`. + /// + /// The argument is the code unit's 10-bit index within that range. + /// + /// These are found in UTF-8 variants such as CESU-8 and WTF-8. 
+ LeadSurrogate(u16), + + /// We found something that isn't a valid Unicode codepoint, but + /// it *would* correspond to a UTF-16 trailing surrogate code unit, + /// i.e. a value in the range `U+DC00` - `U+DFFF`. + /// + /// The argument is the code unit's 10-bit index within that range. + /// + /// These are found in UTF-8 variants such as CESU-8 and WTF-8. + TrailSurrogate(u16), + + /// We found only a prefix of a codepoint before the buffer ended. + /// + /// Includes the number of additional bytes needed. + Prefix(usize), + + /// We found only a suffix of a codepoint before running off the + /// start of the buffer. + /// + /// Up to 3 more bytes may be needed. + Suffix, +} + +/// Represents a complete or partial UTF-8 codepoint. +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub struct Codepoint<'a> { + /// The bytes that make up the partial or full codepoint. + /// + /// For a `Suffix` this depends on `idx`. We don't scan forward + /// for additional continuation bytes after the reverse scan + /// failed to locate a multibyte sequence start. + pub bytes: &'a [u8], + + /// Start of the codepoint in the buffer, expressed as an offset + /// back from `idx`. + pub rewind: usize, + + /// Meaning of the partial or full codepoint. + pub meaning: Meaning, +} + +#[derive(Debug, PartialEq, Eq)] +enum Byte { + Ascii, + Start(usize), + Cont, +} + +impl Byte { + #[inline(always)] + fn classify(x: u8) -> Option { + match x & 0xC0 { + 0xC0 => match x { + x if x & 0b11111_000 == 0b11110_000 => Some(Byte::Start(4)), + x if x & 0b1111_0000 == 0b1110_0000 => Some(Byte::Start(3)), + x if x & 0b111_00000 == 0b110_00000 => Some(Byte::Start(2)), + _ => None, + }, + 0x80 => Some(Byte::Cont), + _ => Some(Byte::Ascii), + } + } +} + +#[inline(always)] +fn all_cont(buf: &[u8]) -> bool { + buf.iter().all(|&b| matches!(Byte::classify(b), Some(Byte::Cont))) +} + +// NOTE: Assumes the buffer is a syntactically valid multi-byte UTF-8 sequence: +// a starting byte followed by the correct number of continuation bytes. +#[inline(always)] +unsafe fn decode(buf: &[u8]) -> Option { + debug_assert!(buf.len() >= 2); + debug_assert!(buf.len() <= 4); + let n; + match buf.len() { + 2 => { + n = ((*buf.get_unchecked(0) & 0b11111) as u32) << 6 + | ((*buf.get_unchecked(1) & 0x3F) as u32); + if n < 0x80 { return None } // Overlong + } + 3 => { + n = ((*buf.get_unchecked(0) & 0b1111) as u32) << 12 + | ((*buf.get_unchecked(1) & 0x3F) as u32) << 6 + | ((*buf.get_unchecked(2) & 0x3F) as u32); + match n { + 0x0000 ... 0x07FF => return None, // Overlong + 0xD800 ... 0xDBFF => return Some(Meaning::LeadSurrogate(n as u16 - 0xD800)), + 0xDC00 ... 0xDFFF => return Some(Meaning::TrailSurrogate(n as u16 - 0xDC00)), + _ => {} + } + } + 4 => { + n = ((*buf.get_unchecked(0) & 0b111) as u32) << 18 + | ((*buf.get_unchecked(1) & 0x3F) as u32) << 12 + | ((*buf.get_unchecked(2) & 0x3F) as u32) << 6 + | ((*buf.get_unchecked(3) & 0x3F) as u32); + if n < 0x1_0000 { return None } // Overlong + } + _ => debug_unreachable!(), + } + + char::from_u32(n).map(Meaning::Whole) +} + +#[inline(always)] +unsafe fn unsafe_slice<'a>(buf: &'a [u8], start: usize, new_len: usize) -> &'a [u8] { + debug_assert!(start <= buf.len()); + debug_assert!(new_len <= (buf.len() - start)); + slice::from_raw_parts(buf.as_ptr().offset(start as isize), new_len) +} + +macro_rules! otry { + ($x:expr) => { unwrap_or_return!($x, None) } +} + +/// Describes the UTF-8 codepoint containing the byte at index `idx` within +/// `buf`. 
+/// +/// Returns `None` if `idx` is out of range, or if `buf` contains invalid UTF-8 +/// in the vicinity of `idx`. +#[inline] +pub fn classify<'a>(buf: &'a [u8], idx: usize) -> Option<Codepoint<'a>> { + if idx >= buf.len() { + return None; + } + + unsafe { + let x = *buf.get_unchecked(idx); + match otry!(Byte::classify(x)) { + Byte::Ascii => Some(Codepoint { + bytes: unsafe_slice(buf, idx, 1), + rewind: 0, + meaning: Meaning::Whole(x as char), + }), + Byte::Start(n) => { + let avail = buf.len() - idx; + if avail >= n { + let bytes = unsafe_slice(buf, idx, n); + if !all_cont(unsafe_slice(bytes, 1, n-1)) { + return None; + } + let meaning = otry!(decode(bytes)); + Some(Codepoint { + bytes: bytes, + rewind: 0, + meaning: meaning, + }) + } else { + Some(Codepoint { + bytes: unsafe_slice(buf, idx, avail), + rewind: 0, + meaning: Meaning::Prefix(n - avail), + }) + } + }, + Byte::Cont => { + let mut start = idx; + let mut checked = 0; + loop { + if start == 0 { + // Whoops, fell off the beginning. + return Some(Codepoint { + bytes: unsafe_slice(buf, 0, idx + 1), + rewind: idx, + meaning: Meaning::Suffix, + }); + } + + start -= 1; + checked += 1; + match otry!(Byte::classify(*buf.get_unchecked(start))) { + Byte::Cont => (), + Byte::Start(n) => { + let avail = buf.len() - start; + if avail >= n { + let bytes = unsafe_slice(buf, start, n); + if checked < n { + if !all_cont(unsafe_slice(bytes, checked, n-checked)) { + return None; + } + } + let meaning = otry!(decode(bytes)); + return Some(Codepoint { + bytes: bytes, + rewind: idx - start, + meaning: meaning, + }); + } else { + return Some(Codepoint { + bytes: unsafe_slice(buf, start, avail), + rewind: idx - start, + meaning: Meaning::Prefix(n - avail), + }); + } + } + _ => return None, + } + + if idx - start >= 3 { + // We looked at 3 bytes before a continuation byte + // and didn't find a start byte. + return None; + } + } + } + } + } +} + +#[cfg(test)] +mod test; diff --git a/src/vendor/futf/src/test.rs b/src/vendor/futf/src/test.rs new file mode 100644 index 0000000000..f8e0c9387a --- /dev/null +++ b/src/vendor/futf/src/test.rs @@ -0,0 +1,270 @@ +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms.
+ +use super::{Meaning, Byte, classify, decode, all_cont}; + +use std::borrow::ToOwned; +use std::io::Write; +use std_test::Bencher; + +#[test] +fn classify_all_bytes() { + for n in 0x00..0x80 { assert_eq!(Byte::classify(n), Some(Byte::Ascii)); } + for n in 0x80..0xC0 { assert_eq!(Byte::classify(n), Some(Byte::Cont)); } + for n in 0xC0..0xE0 { assert_eq!(Byte::classify(n), Some(Byte::Start(2))); } + for n in 0xE0..0xF0 { assert_eq!(Byte::classify(n), Some(Byte::Start(3))); } + for n in 0xF0..0xF8 { assert_eq!(Byte::classify(n), Some(Byte::Start(4))); } + for n in 0xF8..0xFF { assert_eq!(Byte::classify(n), None); } + assert_eq!(Byte::classify(0xFF), None); +} + +#[test] +fn test_all_cont() { + assert!(all_cont(b"")); + assert!(all_cont(b"\x80")); + assert!(all_cont(b"\xBF")); + assert!(all_cont(b"\x80\xBF\x80\xBF")); + + assert!(!all_cont(b"z")); + assert!(!all_cont(b"\xC0\xBF")); + assert!(!all_cont(b"\xFF")); + assert!(!all_cont(b"\x80\xBFz\x80\xBF")); + assert!(!all_cont(b"\x80\xBF\xC0\x80\xBF")); + assert!(!all_cont(b"\x80\xBF\xFF\x80\xBF")); + assert!(!all_cont(b"\x80\xBF\x80\xBFz")); + assert!(!all_cont(b"\x80\xBF\x80\xBF\xC0")); + assert!(!all_cont(b"z\x80\xBF\x80\xBF")); + assert!(!all_cont(b"\xC0\x80\xBF\x80\xBF")); +} + +#[test] +fn test_decode() { + unsafe { + assert_eq!(Some(Meaning::Whole('ő')), decode(b"\xC5\x91")); + assert_eq!(Some(Meaning::Whole('\u{a66e}')), decode(b"\xEA\x99\xAE")); + assert_eq!(Some(Meaning::Whole('\u{1f4a9}')), decode(b"\xF0\x9F\x92\xA9")); + assert_eq!(Some(Meaning::Whole('\u{10ffff}')), decode(b"\xF4\x8F\xBF\xBF")); + + assert_eq!(Some(Meaning::LeadSurrogate(0x0000)), decode(b"\xED\xA0\x80")); + assert_eq!(Some(Meaning::LeadSurrogate(0x0001)), decode(b"\xED\xA0\x81")); + assert_eq!(Some(Meaning::LeadSurrogate(0x03FE)), decode(b"\xED\xAF\xBE")); + assert_eq!(Some(Meaning::LeadSurrogate(0x03FF)), decode(b"\xED\xAF\xBF")); + + assert_eq!(Some(Meaning::TrailSurrogate(0x0000)), decode(b"\xED\xB0\x80")); + assert_eq!(Some(Meaning::TrailSurrogate(0x0001)), decode(b"\xED\xB0\x81")); + assert_eq!(Some(Meaning::TrailSurrogate(0x03FE)), decode(b"\xED\xBF\xBE")); + assert_eq!(Some(Meaning::TrailSurrogate(0x03FF)), decode(b"\xED\xBF\xBF")); + + // The last 4-byte UTF-8 sequence. This would be U+1FFFFF, which is out of + // range. 
+ assert_eq!(None, decode(b"\xF7\xBF\xBF\xBF")); + + // First otherwise-valid sequence (would be U+110000) that is out of range + assert_eq!(None, decode(b"\xF4\x90\x80\x80")); + + // Overlong sequences + assert_eq!(None, decode(b"\xC0\x80")); + assert_eq!(None, decode(b"\xC1\xBF")); + assert_eq!(None, decode(b"\xE0\x80\x80")); + assert_eq!(None, decode(b"\xE0\x9F\xBF")); + assert_eq!(None, decode(b"\xF0\x80\x80\x80")); + assert_eq!(None, decode(b"\xF0\x8F\xBF\xBF")); + + // For not-overlong sequence for each sequence length + assert_eq!(Some(Meaning::Whole('\u{80}')), decode(b"\xC2\x80")); + assert_eq!(Some(Meaning::Whole('\u{800}')), decode(b"\xE0\xA0\x80")); + assert_eq!(Some(Meaning::Whole('\u{10000}')), decode(b"\xF0\x90\x80\x80")); + } +} + +static JUNK: &'static [u8] = b"\ + \xf8\x0d\x07\x25\xa6\x7b\x95\xeb\x47\x01\x7f\xee\ + \x3b\x00\x60\x57\x1d\x9e\x5d\x0a\x0b\x0a\x7c\x75\ + \x13\xa1\x82\x46\x27\x34\xe9\x52\x61\x0d\xec\x10\ + \x54\x49\x6e\x54\xdf\x7b\xe1\x31\x8c\x06\x21\x83\ + \x0f\xb5\x1f\x4c\x6a\x71\x52\x42\x74\xe7\x7b\x50\ + \x59\x1f\x6a\xd4\xff\x06\x92\x33\xc4\x34\x97\xff\ + \xcc\xb5\xc4\x00\x7b\xc3\x4a\x7f\x7e\x63\x96\x58\ + \x51\x63\x21\x54\x53\x2f\x03\x8a\x7d\x41\x79\x98\ + \x5b\xcb\xb8\x94\x6b\x73\xf3\x0c\x5a\xd7\xc4\x12\ + \x7a\x2b\x9a\x2e\x67\x62\x2a\x00\x45\x2c\xfe\x7d\ + \x8d\xd6\x51\x4e\x59\x36\x72\x1b\xae\xaa\x06\xe8\ + \x71\x1b\x85\xd3\x35\xb5\xbe\x9e\x16\x96\x72\xd8\ + \x1a\x48\xba\x4d\x55\x4f\x1b\xa2\x77\xfa\x8f\x71\ + \x58\x7d\x03\x93\xa2\x3a\x76\x51\xda\x48\xe2\x3f\ + \xeb\x8d\xda\x89\xae\xf7\xbd\x3d\xb6\x37\x97\xca\ + \x99\xcc\x4a\x8d\x62\x89\x97\xe3\xc0\xd1\x8d\xc1\ + \x26\x11\xbb\x8d\x53\x61\x4f\x76\x03\x00\x30\xd3\ + \x5f\x86\x19\x52\x9c\x3e\x99\x8c\xb7\x21\x48\x1c\ + \x85\xae\xad\xd5\x74\x00\x6c\x3e\xd0\x17\xff\x76\ + \x5c\x32\xc3\xfb\x24\x99\xd4\x4c\xa4\x1f\x66\x46\ + \xe7\x2d\x44\x56\x7d\x14\xd9\x76\x91\x37\x2f\xb7\ + \xcc\x1b\xd3\xc2"; + +#[test] +fn classify_whole() { + assert_eq!(JUNK.len(), 256); + + for &c in &['\0', '\x01', 'o', 'z', 'ő', '\u{2764}', + '\u{a66e}', '\u{1f4a9}', '\u{1f685}'] { + for idx in 0 .. JUNK.len() - 3 { + let mut buf = JUNK.to_owned(); + let ch = format!("{}", c).into_bytes(); + (&mut buf[idx..]).write_all(&ch).unwrap(); + + for j in 0 .. ch.len() { + let class = classify(&buf, idx+j).unwrap(); + assert_eq!(class.bytes, &*ch); + assert_eq!(class.rewind, j); + assert_eq!(class.meaning, Meaning::Whole(c)); + } + } + } +} + +#[test] +fn classify_surrogates() { + for &(s, b) in &[ + (Meaning::LeadSurrogate(0x0000), b"\xED\xA0\x80"), + (Meaning::LeadSurrogate(0x0001), b"\xED\xA0\x81"), + (Meaning::LeadSurrogate(0x03FE), b"\xED\xAF\xBE"), + (Meaning::LeadSurrogate(0x03FF), b"\xED\xAF\xBF"), + + (Meaning::TrailSurrogate(0x0000), b"\xED\xB0\x80"), + (Meaning::TrailSurrogate(0x0001), b"\xED\xB0\x81"), + (Meaning::TrailSurrogate(0x03FE), b"\xED\xBF\xBE"), + (Meaning::TrailSurrogate(0x03FF), b"\xED\xBF\xBF"), + ] { + for idx in 0 .. JUNK.len() - 2 { + let mut buf = JUNK.to_owned(); + (&mut buf[idx..]).write_all(b).unwrap(); + + let class = classify(&buf, idx).unwrap(); + assert_eq!(class.bytes, b); + assert_eq!(class.rewind, 0); + assert_eq!(class.meaning, s); + } + } +} + +#[test] +fn classify_prefix_suffix() { + for &c in &['ő', '\u{a66e}', '\u{1f4a9}'] { + let ch = format!("{}", c).into_bytes(); + for pfx in 1 .. ch.len() - 1 { + let mut buf = JUNK.to_owned(); + let buflen = buf.len(); + (&mut buf[buflen - pfx .. buflen]).write_all(&ch[..pfx]).unwrap(); + for j in 0 .. 
pfx { + let idx = buflen - 1 - j; + let class = classify(&buf, idx).unwrap(); + assert_eq!(class.bytes, &ch[..pfx]); + assert_eq!(class.rewind, pfx - 1 - j); + assert_eq!(class.meaning, Meaning::Prefix(ch.len() - pfx)); + } + } + for sfx in 1 .. ch.len() - 1 { + let ch_bytes = &ch[ch.len() - sfx ..]; + let mut buf = JUNK.to_owned(); + (&mut *buf).write_all(ch_bytes).unwrap(); + for j in 0 .. sfx { + let class = classify(&buf, j).unwrap(); + assert!(ch_bytes.starts_with(class.bytes)); + assert_eq!(class.rewind, j); + assert_eq!(class.meaning, Meaning::Suffix); + } + } + } +} + +#[test] +fn out_of_bounds() { + assert!(classify(b"", 0).is_none()); + assert!(classify(b"", 7).is_none()); + assert!(classify(b"aaaaaaa", 7).is_none()); +} + +#[test] +fn malformed() { + assert_eq!(None, classify(b"\xFF", 0)); + assert_eq!(None, classify(b"\xC5\xC5", 0)); + assert_eq!(None, classify(b"x\x91", 1)); + assert_eq!(None, classify(b"\x91\x91\x91\x91", 3)); + assert_eq!(None, classify(b"\x91\x91\x91\x91\x91", 4)); + assert_eq!(None, classify(b"\xEA\x91\xFF", 1)); + assert_eq!(None, classify(b"\xF0\x90\x90\xF0", 0)); + assert_eq!(None, classify(b"\xF0\x90\x90\xF0", 1)); + assert_eq!(None, classify(b"\xF0\x90\x90\xF0", 2)); + + for i in 0..4 { + // out of range: U+110000 + assert_eq!(None, classify(b"\xF4\x90\x80\x80", i)); + + // out of range: U+1FFFFF + assert_eq!(None, classify(b"\xF7\xBF\xBF\xBF", i)); + + // Overlong sequences + assert_eq!(None, classify(b"\xC0\x80", i)); + assert_eq!(None, classify(b"\xC1\xBF", i)); + assert_eq!(None, classify(b"\xE0\x80\x80", i)); + assert_eq!(None, classify(b"\xE0\x9F\xBF", i)); + assert_eq!(None, classify(b"\xF0\x80\x80\x80", i)); + assert_eq!(None, classify(b"\xF0\x8F\xBF\xBF", i)); + } +} + +static TEXT: &'static str = " + All human beings are born free and equal in dignity and rights. + They are endowed with reason and conscience and should act + towards one another in a spirit of brotherhood. + + Minden emberi lény szabadon születik és egyenlő méltósága és + joga van. Az emberek, ésszel és lelkiismerettel bírván, + egymással szemben testvéri szellemben kell hogy viseltessenek. + + เราทุกคนเกิดมาอย่างอิสระ เราทุกคนมีความคิดและความเข้าใจเป็นของเราเอง + เราทุกคนควรได้รับการปฏิบัติในทางเดียวกัน. + + 모든 인간은 태어날 때부터 자유로우며 그 존엄과 권리에 있어 + 동등하다. 인간은 천부적으로 이성과 양심을 부여받았으며 서로 + 형제애의 정신으로 행동하여야 한다. + + ro remna cu se jinzi co zifre je simdu'i be le ry. nilselsi'a + .e lei ry. selcru .i ry. se menli gi'e se sezmarde .i .ei + jeseki'ubo ry. simyzu'e ta'i le tunba + + ᏂᎦᏓ ᎠᏂᏴᏫ ᏂᎨᎫᏓᎸᎾ ᎠᎴ ᎤᏂᏠᏱ ᎤᎾᏕᎿ ᏚᏳᎧᏛ ᎨᏒᎢ. ᎨᏥᏁᎳ ᎤᎾᏓᏅᏖᏗ ᎠᎴ ᎤᏃᏟᏍᏗ + ᎠᎴ ᏌᏊ ᎨᏒ ᏧᏂᎸᏫᏍᏓᏁᏗ ᎠᎾᏟᏅᏢ ᎠᏓᏅᏙ ᎬᏗ."; + +// random +static IXES: &'static [usize] + = &[778, 156, 87, 604, 1216, 365, 884, 311, + 469, 515, 709, 162, 871, 206, 634, 442]; + +static BOUNDARY: &'static [bool] + = &[false, true, true, false, false, true, true, true, + true, false, false, true, true, true, false, false]; + +#[bench] +fn std_utf8_check(b: &mut Bencher) { + b.iter(|| { + assert!(IXES.iter().zip(BOUNDARY.iter()).all(|(&ix, &expect)| { + expect == TEXT.is_char_boundary(ix) + })); + }); +} + +// We don't expect to be as fast as is_char_boundary, because we provide more +// information. But we shouldn't be tremendously slower, either. A factor of +// 5-10 is expected on this text. 
+#[bench] +fn futf_check(b: &mut Bencher) { + b.iter(|| { + assert!(IXES.iter().zip(BOUNDARY.iter()).all(|(&ix, &expect)| { + expect == (::classify(TEXT.as_bytes(), ix).unwrap().rewind == 0) + })); + }); +} diff --git a/src/vendor/gcc/.cargo-checksum.json b/src/vendor/gcc/.cargo-checksum.json deleted file mode 100644 index 36cf2dc127..0000000000 --- a/src/vendor/gcc/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"4cc6445feac7e9a1f8f1e1c51cc3afd0cf7bb931e3c5a6f18c41258401652702",".travis.yml":"6ce870c5fe92cc982b64a0301241c2b911f3864a0cca53277ce0697b9a91810b","Cargo.toml":"16170865c061907d466ef8d0d772450a706e81dbdb99a6c5031f1531869b7899","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"b1a639560fd536f2c3ab708a8e1066b675edd4d018dfa4e5e18d0d7327d81c15","appveyor.yml":"46c77d913eaa45871296942c2cd96ef092c9dcaf19201cb5c500a5107faeb06f","src/bin/gcc-shim.rs":"d6be9137cb48b86891e7b263adbf492e1193ffe682db9ba4a88eb1079b874b58","src/com.rs":"0cb06f5db0fb70d27db0e5917ca337de6e7032119e6aabfea1bad9c719f5f34b","src/lib.rs":"d6a022eba51292e379c0ec092dfa2a931e2aa2dd3b5348740c61419a12676dd0","src/registry.rs":"3876ef9573e3bbc050aef41a684b9a510cc1a91b15ae874fe032cf4377b4d116","src/setup_config.rs":"1a3eeb11c6847c31f2a4685b62ab35c76f0b6d5a17f7ed99e9df164283a771f7","src/winapi.rs":"b8240086866595c57bc562bbf423e44fd5a286c9f25bbff8694fe4631e918518","src/windows_registry.rs":"54a4c024213dde0e0df4ed2e9e54fb4f4d72dbdc64a18cdab4c483adaf4aaeea","tests/cc_env.rs":"d92c5e3d3d43ac244e63b2cd2c93a521fcf124bf1ccf8d4c6bfa7f8333d88976","tests/support/mod.rs":"56bcfd1e2ff5ae8e581c71229444a3d96094bf689808808dd80e315bd6632083","tests/test.rs":"c1cbdff76d5912aa47732aed77aa845e989807ca3c84af3423b4c25232c72237"},"package":"120d07f202dcc3f72859422563522b66fe6463a4c513df062874daad05f85f0a"} \ No newline at end of file diff --git a/src/vendor/getopts/.cargo-checksum.json b/src/vendor/getopts/.cargo-checksum.json index 0c13fda1c1..7d497e0852 100644 --- a/src/vendor/getopts/.cargo-checksum.json +++ b/src/vendor/getopts/.cargo-checksum.json @@ -1 +1 @@ -{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"c1e953ee360e77de57f7b02f1b7880bd6a3dc22d1a69e953c2ac2c52cc52d247",".travis.yml":"f01015154ac55bebd8ff25742496135c40395959f772005bdf7c63bc9b373c12","Cargo.toml":"a027aa6d21622b42c545707ba04f78341cc28079b46da775827ab1ec37fe3ca7","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"4002d78e71c4e1fb82c77590eddb999371f40dce037d895f96e6d6df42c728d3","appveyor.yml":"da991211b72fa6f231af7adb84c9fb72f5a9131d1c0a3d47b8ceffe5a82c8542","src/lib.rs":"9512dd4ec1053c9fc61f630d869053ca50c55e0839e3ab7091246a8654423bf0","tests/smoke.rs":"26a95ac42e42b766ae752fe8531fb740fd147d5cdff352dec0763d175ce91806"},"package":"d9047cfbd08a437050b363d35ef160452c5fe8ea5187ae0a624708c91581d685"} \ No newline at end of file 
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"c1e953ee360e77de57f7b02f1b7880bd6a3dc22d1a69e953c2ac2c52cc52d247",".travis.yml":"9bd13f3af0ba30109de76084b2919d8a833ccad8d2af09d74e881ddbef62ab78","Cargo.toml":"ef98664e41cbe8418d3a2f9c1d6e7ddbde679513021e35c8ef23e6244b42c6bf","Cargo.toml.orig":"bf356109ae7c5e10bff75ed3a1f62521758bd6b354312cde45ea5d4445dc505d","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"0c9febfa2e3bd4fd773ae635d7d04db6abdd7f6ca381b5b6c2548a898a0426dc","appveyor.yml":"da991211b72fa6f231af7adb84c9fb72f5a9131d1c0a3d47b8ceffe5a82c8542","src/lib.rs":"744ff40ffeaf08fb6fdbb0159ac358d6a82bd9b4d3387f7ce998a5309800d3ce","tests/smoke.rs":"26a95ac42e42b766ae752fe8531fb740fd147d5cdff352dec0763d175ce91806"},"package":"65922871abd2f101a2eb0eaebadc66668e54a87ad9c3dd82520b5f86ede5eff9"} \ No newline at end of file diff --git a/src/vendor/getopts/.travis.yml b/src/vendor/getopts/.travis.yml index d7e3f4787a..9a68a89d9c 100644 --- a/src/vendor/getopts/.travis.yml +++ b/src/vendor/getopts/.travis.yml @@ -1,6 +1,6 @@ language: rust rust: - - 1.0.0 + - stable - beta - nightly sudo: false @@ -14,7 +14,8 @@ after_success: - travis-cargo --only nightly doc-upload env: global: - secure: by+Jo/boBPbcF5c1N6RNCA008oJm2aRFE5T0SUc3OIfTXxY08dZc0WCBJCHrplp44VjpeKRp/89Y+k1CKncIeU8LiS6ZgsKqaQcCglE2O1KS90B6FYB7+rBqT3ib25taq1nW38clnBHYHV9nz4gOElSdKGRxCcBy+efQ5ZXr2tY= + secure: "v7JYTHRJff4zKegaYmbqCZd3/dGAgRwPyLIvM9rRy92rUhv5eCQqsNMR3r9SYR/h7fiSLqLiQi06WmhY5i+pLG9q0Ga2YhqsVBskj5tCGk6NrzmnNKjQTyJ4Go/U8GXZRyknKOuU71e8A0RJVI2FbhQwFvnOp1KArJqgVU5/WL8=" + notifications: email: on_success: never diff --git a/src/vendor/getopts/Cargo.toml b/src/vendor/getopts/Cargo.toml index f84899fe81..28416ba802 100644 --- a/src/vendor/getopts/Cargo.toml +++ b/src/vendor/getopts/Cargo.toml @@ -1,16 +1,25 @@ -[package] +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) +[package] name = "getopts" -version = "0.2.14" +version = "0.2.15" authors = ["The Rust Project Developers"] -license = "MIT/Apache-2.0" +description = "getopts-like option parsing.\n" +homepage = "https://github.com/rust-lang/getopts" +documentation = "https://doc.rust-lang.org/getopts" readme = "README.md" +categories = ["command-line-interface"] +license = "MIT/Apache-2.0" repository = "https://github.com/rust-lang/getopts" -documentation = "http://doc.rust-lang.org/getopts" -homepage = "https://github.com/rust-lang/getopts" -description = """ -getopts-like option parsing. 
-""" - -[dev-dependencies] -log = "0.3" +[dev-dependencies.log] +version = "0.3" diff --git a/src/vendor/getopts/Cargo.toml.orig b/src/vendor/getopts/Cargo.toml.orig new file mode 100644 index 0000000000..0fe5c37eca --- /dev/null +++ b/src/vendor/getopts/Cargo.toml.orig @@ -0,0 +1,17 @@ +[package] + +name = "getopts" +version = "0.2.15" +authors = ["The Rust Project Developers"] +license = "MIT/Apache-2.0" +readme = "README.md" +repository = "https://github.com/rust-lang/getopts" +documentation = "https://doc.rust-lang.org/getopts" +homepage = "https://github.com/rust-lang/getopts" +description = """ +getopts-like option parsing. +""" +categories = ["command-line-interface"] + +[dev-dependencies] +log = "0.3" diff --git a/src/vendor/getopts/README.md b/src/vendor/getopts/README.md index c19f48fb06..32a89d2513 100644 --- a/src/vendor/getopts/README.md +++ b/src/vendor/getopts/README.md @@ -3,9 +3,9 @@ getopts A Rust library for option parsing for CLI utilities. -[![Build Status](https://travis-ci.org/rust-lang/getopts.svg?branch=master)](https://travis-ci.org/rust-lang/getopts) +[![Build Status](https://travis-ci.org/rust-lang-nursery/getopts.svg?branch=master)](https://travis-ci.org/rust-lang-nursery/getopts) -[Documentation](http://doc.rust-lang.org/getopts) +[Documentation](https://doc.rust-lang.org/getopts) ## Usage diff --git a/src/vendor/getopts/src/lib.rs b/src/vendor/getopts/src/lib.rs index 8f0c866fae..e27f8582be 100644 --- a/src/vendor/getopts/src/lib.rs +++ b/src/vendor/getopts/src/lib.rs @@ -92,9 +92,9 @@ //! } //! ``` -#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "http://www.rust-lang.org/favicon.ico", - html_root_url = "http://doc.rust-lang.org/getopts/")] +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://www.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/getopts/")] #![deny(missing_docs)] #![cfg_attr(test, deny(warnings))] #![cfg_attr(rust_build, feature(staged_api))] @@ -123,7 +123,8 @@ use std::result; /// A description of the options that a program can handle. pub struct Options { grps: Vec, - parsing_style : ParsingStyle + parsing_style : ParsingStyle, + long_only: bool } impl Options { @@ -131,7 +132,8 @@ impl Options { pub fn new() -> Options { Options { grps: Vec::new(), - parsing_style: ParsingStyle::FloatingFrees + parsing_style: ParsingStyle::FloatingFrees, + long_only: false } } @@ -141,11 +143,23 @@ impl Options { self } + /// Set or clear "long options only" mode. + /// + /// In "long options only" mode, short options cannot be clustered + /// together, and long options can be given with either a single + /// "-" or the customary "--". This mode also changes the meaning + /// of "-a=b"; in the ordinary mode this will parse a short option + /// "-a" with argument "=b"; whereas in long-options-only mode the + /// argument will be simply "b". + pub fn long_only(&mut self, long_only: bool) -> &mut Options { + self.long_only = long_only; + self + } + /// Create a generic option group, stating all parameters explicitly. 
pub fn opt(&mut self, short_name: &str, long_name: &str, desc: &str, hint: &str, hasarg: HasArg, occur: Occur) -> &mut Options { - let len = short_name.len(); - assert!(len == 1 || len == 0); + validate_names(short_name, long_name); self.grps.push(OptGroup { short_name: short_name.to_string(), long_name: long_name.to_string(), @@ -164,8 +178,7 @@ impl Options { /// * `desc` - Description for usage help pub fn optflag(&mut self, short_name: &str, long_name: &str, desc: &str) -> &mut Options { - let len = short_name.len(); - assert!(len == 1 || len == 0); + validate_names(short_name, long_name); self.grps.push(OptGroup { short_name: short_name.to_string(), long_name: long_name.to_string(), @@ -185,8 +198,7 @@ impl Options { /// * `desc` - Description for usage help pub fn optflagmulti(&mut self, short_name: &str, long_name: &str, desc: &str) -> &mut Options { - let len = short_name.len(); - assert!(len == 1 || len == 0); + validate_names(short_name, long_name); self.grps.push(OptGroup { short_name: short_name.to_string(), long_name: long_name.to_string(), @@ -207,8 +219,7 @@ impl Options { /// e.g. `"FILE"` for a `-o FILE` option pub fn optflagopt(&mut self, short_name: &str, long_name: &str, desc: &str, hint: &str) -> &mut Options { - let len = short_name.len(); - assert!(len == 1 || len == 0); + validate_names(short_name, long_name); self.grps.push(OptGroup { short_name: short_name.to_string(), long_name: long_name.to_string(), @@ -230,8 +241,7 @@ impl Options { /// e.g. `"FILE"` for a `-o FILE` option pub fn optmulti(&mut self, short_name: &str, long_name: &str, desc: &str, hint: &str) -> &mut Options { - let len = short_name.len(); - assert!(len == 1 || len == 0); + validate_names(short_name, long_name); self.grps.push(OptGroup { short_name: short_name.to_string(), long_name: long_name.to_string(), @@ -252,8 +262,7 @@ impl Options { /// e.g. `"FILE"` for a `-o FILE` option pub fn optopt(&mut self, short_name: &str, long_name: &str, desc: &str, hint: &str) -> &mut Options { - let len = short_name.len(); - assert!(len == 1 || len == 0); + validate_names(short_name, long_name); self.grps.push(OptGroup { short_name: short_name.to_string(), long_name: long_name.to_string(), @@ -274,8 +283,7 @@ impl Options { /// e.g. `"FILE"` for a `-o FILE` option pub fn reqopt(&mut self, short_name: &str, long_name: &str, desc: &str, hint: &str) -> &mut Options { - let len = short_name.len(); - assert!(len == 1 || len == 0); + validate_names(short_name, long_name); self.grps.push(OptGroup { short_name: short_name.to_string(), long_name: long_name.to_string(), @@ -333,17 +341,24 @@ impl Options { } else { let mut names; let mut i_arg = None; - if cur.as_bytes()[1] == b'-' { - let tail = &cur[2..curlen]; + let mut was_long = true; + if cur.as_bytes()[1] == b'-' || self.long_only { + let tail = if cur.as_bytes()[1] == b'-' { + &cur[2..curlen] + } else { + assert!(self.long_only); + &cur[1..curlen] + }; let tail_eq: Vec<&str> = tail.splitn(2, '=').collect(); if tail_eq.len() <= 1 { - names = vec!(Long(tail.to_string())); + names = vec!(Name::from_str(tail)); } else { names = - vec!(Long(tail_eq[0].to_string())); + vec!(Name::from_str(tail_eq[0])); i_arg = Some(tail_eq[1].to_string()); } } else { + was_long = false; names = Vec::new(); for (j, ch) in cur.char_indices().skip(1) { let opt = Short(ch); @@ -391,11 +406,17 @@ impl Options { vals[optid].push(Given); } Maybe => { + // Note that here we do not handle `--arg value`. 
+ // This matches GNU getopt behavior; but also + // makes sense, because if this were accepted, + // then users could only write a "Maybe" long + // option at the end of the arguments when + // FloatingFrees is in use. if !i_arg.is_none() { vals[optid] .push(Val((i_arg.clone()) .unwrap())); - } else if name_pos < names.len() || i + 1 == l || + } else if was_long || name_pos < names.len() || i + 1 == l || is_arg(&args[i + 1]) { vals[optid].push(Given); } else { @@ -436,18 +457,16 @@ } /// Derive a short one-line usage summary from a set of long options. - #[allow(deprecated)] // connect => join in 1.3 pub fn short_usage(&self, program_name: &str) -> String { let mut line = format!("Usage: {} ", program_name); line.push_str(&self.grps.iter() .map(format_option) .collect::<Vec<String>>() - .connect(" ")); + .join(" ")); line } /// Derive a usage message from a set of options. - #[allow(deprecated)] // connect => join in 1.3 pub fn usage(&self, brief: &str) -> String { let desc_sep = format!("\n{}", repeat(" ").take(24).collect::<String>()); @@ -490,7 +509,11 @@ match long_name.len() { 0 => {} _ => { - row.push_str("--"); + if self.long_only { + row.push('-'); + } else { + row.push_str("--"); + } row.push_str(&long_name); row.push(' '); } @@ -537,16 +560,27 @@ // FIXME: #5516 should be graphemes not codepoints // wrapped description - row.push_str(&desc_rows.connect(&desc_sep)); + row.push_str(&desc_rows.join(&desc_sep)); row }); format!("{}\n\nOptions:\n{}\n", brief, - rows.collect::<Vec<String>>().connect("\n")) + rows.collect::<Vec<String>>().join("\n")) } } +fn validate_names(short_name: &str, long_name: &str) { + let len = short_name.len(); + assert!(len == 1 || len == 0, + "the short_name (first argument) should be a single character, \ + or an empty string for none"); + let len = long_name.len(); + assert!(len == 0 || len > 1, + "the long_name (second argument) should be longer than a single \ + character, or an empty string for none"); +} + /// What parsing style to use when parsing arguments. #[derive(Clone, Copy, PartialEq, Eq)] pub enum ParsingStyle { @@ -669,17 +703,6 @@ impl Error for Fail { } } -/// The type of failure that occurred. -#[derive(Clone, Copy, PartialEq, Eq)] -#[allow(missing_docs)] -pub enum FailType { - ArgumentMissing_, - UnrecognizedOption_, - OptionMissing_, - OptionDuplicated_, - UnexpectedArgument_, -} - /// The result of parsing a command line with a set of options. pub type Result = result::Result<Matches, Fail>; @@ -755,6 +778,10 @@ impl Matches { fn opt_val(&self, nm: &str) -> Option<Optval> { self.opt_vals(nm).into_iter().next() } + /// Returns true if an option was defined + pub fn opt_defined(&self, nm: &str) -> bool { + find_opt(&self.opts, Name::from_str(nm)).is_some() + } /// Returns true if an option was matched.
pub fn opt_present(&self, nm: &str) -> bool { @@ -848,19 +875,19 @@ impl fmt::Display for Fail { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ArgumentMissing(ref nm) => { - write!(f, "Argument to option '{}' missing.", *nm) + write!(f, "Argument to option '{}' missing", *nm) } UnrecognizedOption(ref nm) => { - write!(f, "Unrecognized option: '{}'.", *nm) + write!(f, "Unrecognized option: '{}'", *nm) } OptionMissing(ref nm) => { - write!(f, "Required option '{}' missing.", *nm) + write!(f, "Required option '{}' missing", *nm) } OptionDuplicated(ref nm) => { - write!(f, "Option '{}' given more than once.", *nm) + write!(f, "Option '{}' given more than once", *nm) } UnexpectedArgument(ref nm) => { - write!(f, "Option '{}' does not take an argument.", *nm) + write!(f, "Option '{}' does not take an argument", *nm) } } } @@ -924,16 +951,12 @@ enum LengthLimit { /// Splits a string into substrings with possibly internal whitespace, -/// each of them at most `lim` bytes long. The substrings have leading and trailing -/// whitespace removed, and are only cut at whitespace boundaries. +/// each of them at most `lim` bytes long, if possible. The substrings +/// have leading and trailing whitespace removed, and are only cut at +/// whitespace boundaries. /// /// Note: Function was moved here from `std::str` because this module is the only place that /// uses it, and because it was too specific for a general string function. -/// -/// # Panics -/// -/// Panics during iteration if the string contains a non-whitespace -/// sequence longer than the limit. fn each_split_within<'a, F>(ss: &'a str, lim: usize, mut it: F) -> bool where F: FnMut(&'a str) -> bool { // Just for fun, let's write this as a state machine: @@ -961,9 +984,11 @@ fn each_split_within<'a, F>(ss: &'a str, lim: usize, mut it: F) (A, Cr, _) => { slice_start = i; last_start = i; B } (B, Cr, UnderLim) => { B } - (B, Cr, OverLim) if (i - last_start + 1) > lim - => panic!("word starting with {} longer than limit!", - &ss[last_start..i + 1]), + (B, Cr, OverLim) if (i - last_start + 1) > lim => { + // A single word has gone over the limit. In this + // case we just accept that the word will be too long. 
+ B + } (B, Cr, OverLim) => { *cont = it(&ss[slice_start..last_end]); slice_start = last_start; @@ -1329,7 +1354,7 @@ mod tests { fn test_optflagopt() { let long_args = vec!("--test".to_string()); let mut opts = Options::new(); - opts.optflag("t", "test", "testing"); + opts.optflagopt("t", "test", "testing", "ARG"); match opts.parse(&long_args) { Ok(ref m) => { assert!(m.opt_present("test")); @@ -1345,6 +1370,30 @@ mod tests { } _ => panic!() } + let short_args = vec!("-t".to_string(), "x".to_string()); + match opts.parse(&short_args) { + Ok(ref m) => { + assert_eq!(m.opt_str("t").unwrap(), "x"); + assert_eq!(m.opt_str("test").unwrap(), "x"); + } + _ => panic!() + } + let long_args = vec!("--test=x".to_string()); + match opts.parse(&long_args) { + Ok(ref m) => { + assert_eq!(m.opt_str("t").unwrap(), "x"); + assert_eq!(m.opt_str("test").unwrap(), "x"); + } + _ => panic!() + } + let long_args = vec!("--test".to_string(), "x".to_string()); + match opts.parse(&long_args) { + Ok(ref m) => { + assert_eq!(m.opt_str("t"), None); + assert_eq!(m.opt_str("test"), None); + } + _ => panic!() + } let no_args: Vec = vec!(); match opts.parse(&no_args) { Ok(ref m) => { @@ -1708,6 +1757,8 @@ Options: "This is a long description which won't be wrapped..+.."); // 54 opts.optflag("a", "apple", "This is a long description which _will_ be wrapped..+.."); + opts.optflag("b", "banana", + "HereWeNeedOneSingleWordThatIsLongerThanTheWrappingLengthAndThisIsIt"); let expected = "Usage: fruits @@ -1716,6 +1767,7 @@ Options: -k, --kiwi This is a long description which won't be wrapped..+.. -a, --apple This is a long description which _will_ be wrapped..+.. + -b, --banana HereWeNeedOneSingleWordThatIsLongerThanTheWrappingLengthAndThisIsIt "; let usage = opts.usage("Usage: fruits"); @@ -1812,7 +1864,15 @@ Options: debug!("generated: <<{}>>", generated_usage); assert_eq!(generated_usage, expected); } - + #[test] + fn test_nonexistant_opt() { + let mut opts = Options::new(); + opts.optflag("b", "bar", "Desc"); + let args: Vec = Vec::new(); + let matches = opts.parse(&args).unwrap(); + assert_eq!(matches.opt_defined("foo"), false); + assert_eq!(matches.opt_defined("bar"), true); + } #[test] fn test_args_with_equals() { let mut opts = Options::new(); @@ -1828,4 +1888,98 @@ Options: assert_eq!(matches.opts_str(&["o".to_string()]).unwrap(), "A=B"); assert_eq!(matches.opts_str(&["t".to_string()]).unwrap(), "C=D"); } + + #[test] + fn test_long_only_usage() { + let mut opts = Options::new(); + opts.long_only(true); + opts.optflag("k", "kiwi", "Description"); + opts.optflag("a", "apple", "Description"); + + let expected = +"Usage: fruits + +Options: + -k, -kiwi Description + -a, -apple Description +"; + + let usage = opts.usage("Usage: fruits"); + + debug!("expected: <<{}>>", expected); + debug!("generated: <<{}>>", usage); + assert!(usage == expected) + } + + #[test] + fn test_long_only_mode() { + let mut opts = Options::new(); + opts.long_only(true); + opts.optopt("a", "apple", "Description", "X"); + opts.optopt("b", "banana", "Description", "X"); + opts.optopt("c", "currant", "Description", "X"); + opts.optopt("", "durian", "Description", "X"); + opts.optopt("e", "", "Description", "X"); + opts.optopt("", "fruit", "Description", "X"); + + let args = vec!("-a", "A", "-b=B", "--c=C", "-durian", "D", "--e", "E", + "-fruit=any"); + let matches = &match opts.parse(&args) { + Ok(m) => m, + Err(e) => panic!("{}", e) + }; + assert_eq!(matches.opts_str(&["a".to_string()]).unwrap(), "A"); + 
assert_eq!(matches.opts_str(&["b".to_string()]).unwrap(), "B"); + assert_eq!(matches.opts_str(&["c".to_string()]).unwrap(), "C"); + assert_eq!(matches.opts_str(&["durian".to_string()]).unwrap(), "D"); + assert_eq!(matches.opts_str(&["e".to_string()]).unwrap(), "E"); + assert_eq!(matches.opts_str(&["fruit".to_string()]).unwrap(), "any"); + } + + #[test] + fn test_long_only_mode_no_short_parse() { + let mut opts = Options::new(); + opts.long_only(true); + opts.optflag("h", "help", "Description"); + opts.optflag("i", "ignore", "Description"); + opts.optflag("", "hi", "Description"); + + let args = vec!("-hi"); + let matches = &match opts.parse(&args) { + Ok(m) => m, + Err(e) => panic!("{}", e) + }; + assert!(matches.opt_present("hi")); + assert!(!matches.opt_present("h")); + assert!(!matches.opt_present("i")); + } + + #[test] + fn test_normal_mode_no_long_parse() { + // Like test_long_only_mode_no_short_parse, but we make sure + // that long_only can be disabled, and the right thing + // happens. + let mut opts = Options::new(); + opts.long_only(true); + opts.optflag("h", "help", "Description"); + opts.optflag("i", "ignore", "Description"); + opts.optflag("", "hi", "Description"); + opts.long_only(false); + + let args = vec!("-hi"); + let matches = &match opts.parse(&args) { + Ok(m) => m, + Err(e) => panic!("{}", e) + }; + assert!(!matches.opt_present("hi")); + assert!(matches.opt_present("h")); + assert!(matches.opt_present("i")); + } + + #[test] + #[should_panic] + fn test_long_name_too_short() { + let mut opts = Options::new(); + opts.optflag("", "a", "Oops, long option too short"); + } } diff --git a/src/vendor/handlebars/.cargo-checksum.json b/src/vendor/handlebars/.cargo-checksum.json index 1caf34d0e2..17fe10c19e 100644 --- a/src/vendor/handlebars/.cargo-checksum.json +++ b/src/vendor/handlebars/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f1c0192dc8fc292fbf8af1d3bad677e08cde66918d0b0bb275444eca0a0d7c66",".travis.yml":"bf45dc352cc63522c3025f054b13d00b531c012cb00fb8248b2604998e8ee938","CHANGELOG.md":"a1dd78c236dfee5f333f25c2e622fe693dff6f36ac416d80cf24f44106c48681","Cargo.toml":"820ee35ce812d397c4d2f077dc783272fd20511dddbe840495d6ffa030674c04","LICENSE":"63b308fad3db82dc12067a8d7ff4b672fae97d12d0b4b3bb59179e27d640b3a4","README.md":"b1cac6d0ca13bc37b63c8c75fc7c6809a7bea101561319859525f00fa83dbc21","benches/bench.rs":"06e9aa880b868737be477fb5cc3ba1294a447131d734e1dde8571501f8bf0230","examples/decorator.rs":"6eafe38b9fa8cbd446325eb2db30e1d68678bb31b1a688deb47ba1da54fbd587","examples/decorator/template.hbs":"57a7a8a4243999de126e835c11ee3fd5ea5dc5b3495df6199bbee0e883afade6","examples/error.rs":"ccf768470b3978d118875c9db27819b9770d60372352d005611faeddf24c96d5","examples/error/error.hbs":"4e8fac806d444a5bc41436fe0a51b4d7850c7645f8996db7a99a821c88622eef","examples/error/template.hbs":"67110cc252b7d74908db40aaf4c61c4e6a628596d25b7069d1f8de8fab578742","examples/partials.rs":"d9b4c861f94c8b0e8491c8ae614029d840efe415a1140414c40a5952ef8afdcc","examples/partials/base0.hbs":"3f9c9e5a27acf06ebbd53af99546165d7824cc26b3627ce4d06185f4152aa27f","examples/partials/base1.hbs":"1f32a70cbe0e992df23fb9072b11340ac38f130ff1495bb52bc75b0f040751de","examples/partials/template2.hbs":"0611352294d490b2827a85a9871aa42547d2f1a022ee67a357224546bff07bdd","examples/partials_legacy/base0.hbs":"22b2ab14ef29a1615fcd39456df5985e4a35cdaaee276a03a1f28b453e37040a","examples/partials_legacy/base1.hbs":"2161f1f3bc3802d7ce83b71a24d008c148dea5874affe5e0d87bbf69fbd45fc3","examples/partials_legacy/template2.hbs":"058b4c0cedc5b55baadd48e3e514362991fe783e56a2e345388187d22760f896","examples/render-cli.rs":"8caba13d4fde3fdbb509202e20685eba8d30c6d240e46ea2ef9a403b47ac620d","examples/render.rs":"7abddd744369c1e1452b3237144caebe0ad770bdffe5ef68f0fee03e9eb4278a","examples/render/template.hbs":"c66f4cf729af7bc9250bb0154423ceb5228e56a384b90dd95fce018699fdfffb","examples/render_cli/simple.hbs":"a2e0df51e7c1a25c4e9e1bbe0f4911c7c86200d358fc6c9195b61e89adddc970","examples/render_file.rs":"64f90b3b5f30d40df6e3f1f4d3788d1eb39854c83cb803247a94e3c4199f8935","examples/render_file/template.hbs":"c66f4cf729af7bc9250bb0154423ceb5228e56a384b90dd95fce018699fdfffb","rustfmt.toml":"8e99dabd6aadb112f9f9ed8709e7388b57bf43d19cd08342c093c870534a3f97","src/context.rs":"8eafcecc689ee10039ef6b572affccf33a0fed91238edfbca351f3dd5be8e927","src/directives/inline.rs":"c5136ce14d763e59fd72e9524af70b06f7a40fe606b0c49c41b2acec4a897114","src/directives/mod.rs":"606c25c3f2b5e7bf0c822e02b3cf503416bbe98f11b175a72ffa436e76251f5f","src/error.rs":"2b59bca2c5e9340224d024d29f79f3d3361a0d88a0a1d2325f7d9afc5704c372","src/grammar.rs":"4c5b6c238de9856c0baea61f4da34e138b7b7c0d9d71c089c6cda8f9d45626e8","src/helpers/helper_each.rs":"52c1777425cdd39479348c3db1c39b641faf4edbdecc4b16fb2feae7dce2b8d2","src/helpers/helper_if.rs":"c345ec23fe2036ee8ca698d831c79f7fc5128af0d2042f54a7d334410298a508","src/helpers/helper_log.rs":"3905aaea34df04a85ee17e9c869db6ed118bf1cd71dcd101a0c49007fc29b544","src/helpers/helper_lookup.rs":"6ce0377cef0040c429d0f6377f5321f1e16250fb8d71eb7d8019174ac791535c","src/helpers/helper_partial.rs":"8fc2f08985a9e3b94733f5b93ef94bccea4d143eb2a7786ce371be30aa91d36f","src/helpers/helper_raw.rs":"6383ff8dc868b044f5cf0ccbfbb8d81cab5feb44c27ae776237fcd350e1d51c8","src/helpers/helper_with.rs":"c4b7f3def10f41193a6b2fc29ca9501
4062e66183170209a83b6301aa4c12cc8","src/helpers/mod.rs":"5edb1652ea0cb808aca13c25a933865d58022d0884ba2136fb7afe6ad747f1d8","src/lib.rs":"f97104c49e82140e86594d2f2ce6a5a4f30a67c3377ea8087b0ac1ab3031afda","src/partial.rs":"692a61d5d9c8b5b0b04527eb29b586e9a9b6774849aeeb768866b7debc6c6525","src/registry.rs":"5fe4d889fa3079c0e36b40dcb3acac5271399a81f442146f0d59276c047bd4a7","src/render.rs":"b0484d8695ab6826d1d4d325b84d69788b005aa95a9e6a6e510632347b3192ed","src/support.rs":"7e8e1a57745d02bfdf88b1bdec8e243bb37cb17021a75454329b4fb079c28f32","src/template.rs":"5939103b44b2da0d0d21baf0ae3f5cc0b087ca77db7a4d75fb26aa54f4eec3cf"},"package":"fbba80e74e9591a5f6a4ffff6b7f9d645759a896e431cfbdc853e9184370294a"} \ No newline at end of file +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f1c0192dc8fc292fbf8af1d3bad677e08cde66918d0b0bb275444eca0a0d7c66",".travis.yml":"447bd035663ad4e57c2ba24d869cc8d1019209a9387b603f6b76342c6e5a7188","CHANGELOG.md":"c532ea5ff7d9533e8d16c004a1c2a911bc451cf8a7a4800a666089e081c490ea","Cargo.toml":"bb4f67b357a3da61fbaf9e8bbd8312439b9760da4486ad701e76fa1391c91c60","Cargo.toml.orig":"88fd574ecd3ebcf2e53bbcb3929fd479fe18562056d7d39fdda77f4edcc24803","LICENSE":"63b308fad3db82dc12067a8d7ff4b672fae97d12d0b4b3bb59179e27d640b3a4","README.md":"d1f6b6396bc0f341ff436a89194a0d6349e27a08ecfba46d6b487077c9ec8ca0","benches/bench.rs":"4d7933540520f7ee4453573a8ed497fa9ca69834845133aa4f63ebd8a780c193","examples/decorator.rs":"5cca741e7488842dfcf1d7e1b1d3e9bd5c9fc9a4b650372bd9e47d6ba11a4f0d","examples/decorator/template.hbs":"57a7a8a4243999de126e835c11ee3fd5ea5dc5b3495df6199bbee0e883afade6","examples/error.rs":"0e42911bb9889ce8a9ac592dcef24e5201a6d25e27c92dfc58ba04bbb0310a2a","examples/error/error.hbs":"4e8fac806d444a5bc41436fe0a51b4d7850c7645f8996db7a99a821c88622eef","examples/error/template.hbs":"67110cc252b7d74908db40aaf4c61c4e6a628596d25b7069d1f8de8fab578742","examples/partials.rs":"9b86eaef7a76430d4e2aff01a89ac149a1e9fca284b5df734af7958ba32ae543","examples/partials/base0.hbs":"3f9c9e5a27acf06ebbd53af99546165d7824cc26b3627ce4d06185f4152aa27f","examples/partials/base1.hbs":"1f32a70cbe0e992df23fb9072b11340ac38f130ff1495bb52bc75b0f040751de","examples/partials/template2.hbs":"0611352294d490b2827a85a9871aa42547d2f1a022ee67a357224546bff07bdd","examples/quick.rs":"552e810199545c7e89d4d06ff7d5281db23180bddfcefdd8b303681aa62848b4","examples/render-cli.rs":"9c4880e95ba8a8d5885473e64064bba9f49327fc96fb6adb283e8bb71a384cec","examples/render.rs":"72182427627c15fe3bc9f969db80ceb09de836d1500f37ea39a273171fc51024","examples/render/template.hbs":"c66f4cf729af7bc9250bb0154423ceb5228e56a384b90dd95fce018699fdfffb","examples/render_cli/simple.hbs":"a2e0df51e7c1a25c4e9e1bbe0f4911c7c86200d358fc6c9195b61e89adddc970","examples/render_file.rs":"a322762ea077810b3126d590ebb78d5b5a688bf0970de63af082df626c3217b8","examples/render_file/template.hbs":"c66f4cf729af7bc9250bb0154423ceb5228e56a384b90dd95fce018699fdfffb","rustfmt.toml":"8e99dabd6aadb112f9f9ed8709e7388b57bf43d19cd08342c093c870534a3f97","src/context.rs":"c5c022f81afdfb55a123b7e225a5d9d84f21ea7d1905bd6ff136a67d22b70353","src/directives/inline.rs":"780d5de79393ba6c1169ffce3e59beb9d90ca1dee955f6a29b72428c564f6059","src/directives/mod.rs":"57adc0dc3da578caeacb1cf30430f003f50ecba7cc6c619140f04ef109809168","src/error.rs":"7e52e786a8599040d810ae26e866a0d431a2a45782207097b47ea0be155169aa","src/grammar.rs":"df32af02608e4140a833f2bcd29047d3321c40626bbdbf455a97b487515b991c","src/helpers/helper_each.rs":"1c9a7c57b243eaf2bffd0
94c9e476690a424e4069d48ad23c01dea0ad704593d","src/helpers/helper_if.rs":"3eacbac3436fd99932400ff3435635e31e38a64b2694488f80544557e2eafa91","src/helpers/helper_log.rs":"42e00a82215553cc0ea0687c2b993cba37da94c992a751c111d3a66f1d2094f1","src/helpers/helper_lookup.rs":"15808852b658c54f064be5256169285ad7b4a9aa451d1a088206efb4d34fc5b3","src/helpers/helper_raw.rs":"c4d3f759e9626d92d7fee9da2d839daacca34b2f4ebfd4e446bbcd6fef317e07","src/helpers/helper_with.rs":"df438515a52fe641fca46ece8e4361ce34a6554bdba936209ee618ac16e063fd","src/helpers/mod.rs":"69236af69895d601bef857b6acdf5bfffbc9dd1a2af801b0cc69b25734e4ab1e","src/lib.rs":"b28c2572d509a2c1f1e6ef80af2139099208e02d194f587f6f6accbb73a10566","src/partial.rs":"52f7f8670f7141382c5d60abd1ebe02237e9809b73212f83154736e7d05d9e9b","src/registry.rs":"bd7bfcc4330fd7c48a8e5a6de2a0c2d58f716dcea521d08b189f761998a0d16c","src/render.rs":"11f68408f5a2a35d73d3d6d25e47eef3d90e1198d80147c6c609e5534a625f8c","src/support.rs":"7e8e1a57745d02bfdf88b1bdec8e243bb37cb17021a75454329b4fb079c28f32","src/template.rs":"d7aee980beddeb0484915e5fe97faba30d9e2b78b8effe11589dd4cfb2d8ead5"},"package":"fb04af2006ea09d985fef82b81e0eb25337e51b691c76403332378a53d521edc"} \ No newline at end of file diff --git a/src/vendor/handlebars/.travis.yml b/src/vendor/handlebars/.travis.yml index 9b7717bade..4ecd9b6b4d 100644 --- a/src/vendor/handlebars/.travis.yml +++ b/src/vendor/handlebars/.travis.yml @@ -3,7 +3,3 @@ rust: - nightly - beta - stable -script: - - | - cargo test && - cargo test --features partial_legacy diff --git a/src/vendor/handlebars/CHANGELOG.md b/src/vendor/handlebars/CHANGELOG.md index eceb8e7eff..28d5667e00 100644 --- a/src/vendor/handlebars/CHANGELOG.md +++ b/src/vendor/handlebars/CHANGELOG.md @@ -1,5 +1,42 @@ # Change Log +## [0.29.1](https://github.com/sunng87/handlebars-rust/compare/0.29.0...0.29.1) - 2017-09-01 + +* [Changed] Remove `debug!` logging from render to avoid conflict when + using handlebars as logging backend + +## [0.29.0](https://github.com/sunng87/handlebars-rust/compare/0.28.3...0.29.0) - 2017-08-23 + +* [Changed] Align JSON path with original JavaScript implementation + +## [0.28.3](https://github.com/sunng87/handlebars-rust/compare/0.28.2...0.28.3) - 2017-08-02 + +* [Changed] fixed support for escape, again + +## [0.28.2](https://github.com/sunng87/handlebars-rust/compare/0.28.1...0.28.2) - 2017-08-01 + +* [Changed] Fixed support for escape `\\{{`. [#170](https://github.com/sunng87/handlebars-rust/issues/170) + +## [0.28.1](https://github.com/sunng87/handlebars-rust/compare/0.28.0...0.28.1) - 2017-07-16 + +* [Changed] Mark `RenderError` with `Send` trait + +## [0.28.0](https://github.com/sunng87/handlebars-rust/compare/0.27.0...0.28.0) - 2017-07-15 + +* [Changed] Fixed performance issue discussed in [#166](https://github.com/sunng87/handlebars-rust/issues/166) +* [Added] Added error cause `RenderError` + +## [0.27.0](https://github.com/sunng87/handlebars-rust/compare/0.26.2...0.27.0) - 2017-06-03 + +* [Changed] `partial_legacy` is dropped +* [Changed] `context.navigate` now returns a `Result<&Json,RenderError>`. Error is raised when + given path cannot be not parsed. +* [Changed] removed `context::extend` because it's like to ruin your context outside the helper. +* [Changed] `RenderContext` now owns `Context`, you can host a new Context for particular block + helper. +* [Changed] Added some convenience functions to `RenderContext`. However, `RenderContext` may + still change in future release. 
+ ## [0.26.1](https://github.com/sunng87/handlebars-rust/compare/0.25.3...0.26.1) - 2017-04-23 * [Changed] Updated to Serde 1.0 diff --git a/src/vendor/handlebars/Cargo.toml b/src/vendor/handlebars/Cargo.toml index c28b6f5337..988de877f8 100644 --- a/src/vendor/handlebars/Cargo.toml +++ b/src/vendor/handlebars/Cargo.toml @@ -1,42 +1,64 @@ -[package] +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) +[package] name = "handlebars" -version = "0.26.2" +version = "0.29.1" authors = ["Ning Sun "] description = "Handlebars templating implemented in Rust." -license = "MIT" -keywords = ["handlebars", "templating", "web"] -categories = ["template-engine"] homepage = "https://github.com/sunng87/handlebars-rust" -repository = "https://github.com/sunng87/handlebars-rust" documentation = "https://docs.rs/handlebars/" readme = "README.md" +keywords = ["handlebars", "templating", "web"] +categories = ["template-engine"] +license = "MIT" +repository = "https://github.com/sunng87/handlebars-rust" +[package.metadata.release] +sign-commit = true +upload-doc = true -[lib] +[[package.metadata.release.pre-release-replacements]] +file = "CHANGELOG.md" +replace = "{{version}}" +search = "Unreleased" +[lib] name = "handlebars" path = "src/lib.rs" +[dependencies.serde] +version = "^1.0.0" -[dependencies] +[dependencies.regex] +version = "^0.2.0" -log = "^0.3.1" -quick-error = "^1.0.0" -pest = "^0.3.0" -serde = "^1.0.0" -serde_json = "^1.0.0" -regex = "^0.2.0" -lazy_static = "^0.2.2" +[dependencies.quick-error] +version = "^1.0.0" -[features] -default = ["partial4"] -partial4 = [] -partial_legacy = [] +[dependencies.lazy_static] +version = "^0.2.2" -[dev-dependencies] -env_logger = "^0.4.0" -maplit = "^0.1.3" -serde_derive = "^1.0.0" +[dependencies.log] +version = "^0.3.1" -[package.metadata.release] -sign-commit = true -upload-doc = true +[dependencies.pest] +version = "^0.3.0" + +[dependencies.serde_json] +version = "^1.0.0" +[dev-dependencies.maplit] +version = "^0.1.3" + +[dev-dependencies.env_logger] +version = "^0.4.0" + +[dev-dependencies.serde_derive] +version = "^1.0.0" diff --git a/src/vendor/handlebars/Cargo.toml.orig b/src/vendor/handlebars/Cargo.toml.orig new file mode 100644 index 0000000000..92e2a18b2c --- /dev/null +++ b/src/vendor/handlebars/Cargo.toml.orig @@ -0,0 +1,38 @@ +[package] + +name = "handlebars" +version = "0.29.1" +authors = ["Ning Sun "] +description = "Handlebars templating implemented in Rust." 
+license = "MIT" +keywords = ["handlebars", "templating", "web"] +categories = ["template-engine"] +homepage = "https://github.com/sunng87/handlebars-rust" +repository = "https://github.com/sunng87/handlebars-rust" +documentation = "https://docs.rs/handlebars/" +readme = "README.md" + +[lib] + +name = "handlebars" +path = "src/lib.rs" + +[dependencies] + +log = "^0.3.1" +quick-error = "^1.0.0" +pest = "^0.3.0" +serde = "^1.0.0" +serde_json = "^1.0.0" +regex = "^0.2.0" +lazy_static = "^0.2.2" + +[dev-dependencies] +env_logger = "^0.4.0" +maplit = "^0.1.3" +serde_derive = "^1.0.0" + +[package.metadata.release] +sign-commit = true +upload-doc = true +pre-release-replacements = [ {file="CHANGELOG.md", search="Unreleased", replace="{{version}}"} ] diff --git a/src/vendor/handlebars/README.md b/src/vendor/handlebars/README.md index 2266ca37aa..4013091165 100644 --- a/src/vendor/handlebars/README.md +++ b/src/vendor/handlebars/README.md @@ -12,6 +12,35 @@ Rust templating with [Handlebars templating language](https://handlebarsjs.com). ## Getting Started +### Quick Start + +```rust +extern crate handlebars; +#[macro_use] +extern crate serde_json; + +use handlebars::Handlebars; + +fn main() { + let mut reg = Handlebars::new(); + // render without register + println!( + "{}", + reg.template_render("Hello {{name}}", &json!({"name": "foo"})) + .unwrap() + ); + + // register template using given name + reg.register_template_string("tpl_1", "Good afternoon, {{name}}") + .unwrap(); + println!("{}", reg.render("tpl_1", &json!({"name": "foo"})).unwrap()); +} +``` + +Note that I use `unwrap` here which is not recommended in your real code. + +### Code Example + If you are not familiar with [handlebars language syntax](https://handlebarsjs.com), it is recommended to walk through their introduction first. @@ -107,11 +136,6 @@ embed you page into this parent. You can find a real example for template inheritance in `examples/partials.rs`, and templates used by this file. -From 0.23 we support Handlebars 4.0 partial syntax by -default. Original partial syntax via `block`, `partial` helpers are -still supported via feature flag `partial_legacy`. Examples can be -find in `examples/partials.rs`. - #### WebAssembly compatible You can use this handlebars implementation in your rust project that @@ -124,13 +148,9 @@ compiles to WebAssembly. Checkout my fork of javascript version. Specifically, mustache list iteration and null check doesn't work. But you can use `#each` and `#if` for same behavior. -* You will need to make your data `ToJson`-able, so we can render - it. If you were on nightly channel, we have [a syntax - extension](https://github.com/sunng87/tojson_macros) to generate - default `ToJson` implementation for you. If you use - [serde](https://github.com/serde-rs/serde), you can enable - `serde_type` feature of handlebars-rust and add `#derive[Serialize]` - for your types. +* You will need to make your data `Serializable` on serde. We don't + actually serialize data into JSON string or similar. However, we use + JSON data type system in template render process. 
### Handlebars-js features supported in Handlebars-rust diff --git a/src/vendor/handlebars/benches/bench.rs b/src/vendor/handlebars/benches/bench.rs index adaf3c79f9..fb4c3b8165 100644 --- a/src/vendor/handlebars/benches/bench.rs +++ b/src/vendor/handlebars/benches/bench.rs @@ -2,6 +2,9 @@ extern crate handlebars; extern crate serde; extern crate serde_json; +#[macro_use] +extern crate serde_derive; + extern crate test; use std::collections::BTreeMap; @@ -32,11 +35,13 @@ fn make_data() -> BTreeMap { let mut teams = Vec::new(); - for v in vec![("Jiangsu", 43u16), - ("Beijing", 27u16), - ("Guangzhou", 22u16), - ("Shandong", 12u16)] - .iter() { + for v in vec![ + ("Jiangsu", 43u16), + ("Beijing", 27u16), + ("Guangzhou", 22u16), + ("Shandong", 12u16), + ].iter() + { let (name, score) = *v; let mut t = BTreeMap::new(); t.insert("name".to_string(), to_json(&name)); @@ -56,8 +61,42 @@ fn parse_template(b: &mut test::Bencher) { #[bench] fn render_template(b: &mut test::Bencher) { let mut handlebars = Handlebars::new(); - handlebars.register_template_string("table", SOURCE).ok().expect("Invalid template format"); + handlebars + .register_template_string("table", SOURCE) + .ok() + .expect("Invalid template format"); let data = make_data(); b.iter(|| handlebars.render("table", &data).ok().unwrap()) } + +#[derive(Serialize)] +struct DataWrapper { + v: String, +} + +#[derive(Serialize)] +struct RowWrapper { + real: Vec, + dummy: Vec, +} + +#[bench] +fn large_loop_helper(b: &mut test::Bencher) { + let mut handlebars = Handlebars::new(); + handlebars + .register_template_string("test", "BEFORE\n{{#each real}}{{this.v}}{{/each}}AFTER") + .ok() + .expect("Invalid template format"); + + let real: Vec = (1..1000) + .into_iter() + .map(|i| DataWrapper { v: format!("n={}", i) }) + .collect(); + let dummy: Vec = (1..1000) + .into_iter() + .map(|i| DataWrapper { v: format!("n={}", i) }) + .collect(); + let rows = RowWrapper { real, dummy }; + b.iter(|| handlebars.render("test", &rows).ok().unwrap()); +} diff --git a/src/vendor/handlebars/examples/decorator.rs b/src/vendor/handlebars/examples/decorator.rs index ac1e289123..a62ca37eed 100644 --- a/src/vendor/handlebars/examples/decorator.rs +++ b/src/vendor/handlebars/examples/decorator.rs @@ -6,46 +6,50 @@ extern crate serde_json; extern crate serde_derive; use serde_json::value::{Value as Json, Map}; -use handlebars::{Handlebars, RenderError, RenderContext, Helper, JsonRender, Decorator, - to_json}; +use handlebars::{Handlebars, RenderError, RenderContext, Helper, JsonRender, Decorator, to_json}; // default format helper fn format_helper(h: &Helper, _: &Handlebars, rc: &mut RenderContext) -> Result<(), RenderError> { // get parameter from helper or throw an error - let param = try!(h.param(0).ok_or(RenderError::new("Param 0 is required for format helper."))); + let param = try!(h.param(0).ok_or(RenderError::new( + "Param 0 is required for format helper.", + ))); let rendered = format!("{} pts", param.value().render()); try!(rc.writer.write(rendered.into_bytes().as_ref())); Ok(()) } -fn format_decorator(d: &Decorator, - _: &Handlebars, - rc: &mut RenderContext) - -> Result<(), RenderError> { - let suffix = d.param(0).map(|v| v.value().render()).unwrap_or("".to_owned()); - rc.register_local_helper("format", - Box::new(move |h: &Helper, - _: &Handlebars, - rc: &mut RenderContext| { - // get parameter from helper or throw an error - let param = - try!(h.param(0).ok_or(RenderError::new("Param 0 is required for format helper."))); - let rendered = format!("{} {}", 
param.value().render(), suffix); - try!(rc.writer.write(rendered.into_bytes().as_ref())); - Ok(()) - })); +fn format_decorator( + d: &Decorator, + _: &Handlebars, + rc: &mut RenderContext, +) -> Result<(), RenderError> { + let suffix = d.param(0).map(|v| v.value().render()).unwrap_or( + "".to_owned(), + ); + rc.register_local_helper( + "format", + Box::new(move |h: &Helper, _: &Handlebars, rc: &mut RenderContext| { + // get parameter from helper or throw an error + let param = try!(h.param(0).ok_or(RenderError::new( + "Param 0 is required for format helper.", + ))); + let rendered = format!("{} {}", param.value().render(), suffix); + try!(rc.writer.write(rendered.into_bytes().as_ref())); + Ok(()) + }), + ); Ok(()) } // another custom helper fn rank_helper(h: &Helper, _: &Handlebars, rc: &mut RenderContext) -> Result<(), RenderError> { - let rank = try!(h.param(0) - .and_then(|v| v.value().as_u64()) - .ok_or(RenderError::new("Param 0 with u64 type is required for rank helper."))) as usize; - let teams = - try!(h.param(1) - .and_then(|v| v.value().as_array()) - .ok_or(RenderError::new("Param 1 with array type is required for rank helper"))); + let rank = try!(h.param(0).and_then(|v| v.value().as_u64()).ok_or( + RenderError::new("Param 0 with u64 type is required for rank helper."), + )) as usize; + let teams = try!(h.param(1).and_then(|v| v.value().as_array()).ok_or( + RenderError::new("Param 1 with array type is required for rank helper"), + )); let total = teams.len(); if rank == 0 { try!(rc.writer.write("champion".as_bytes())); @@ -72,38 +76,40 @@ pub fn make_data() -> Map { data.insert("year".to_string(), to_json(&"2015".to_owned())); - let teams = vec![Team { - name: "Jiangsu Suning".to_string(), - pts: 43u16, - }, - Team { - name: "Shanghai SIPG".to_string(), - pts: 39u16, - }, - Team { - name: "Hebei CFFC".to_string(), - pts: 27u16, - }, - Team { - name: "Guangzhou Evergrand".to_string(), - pts: 22u16, - }, - Team { - name: "Shandong Luneng".to_string(), - pts: 12u16, - }, - Team { - name: "Beijing Guoan".to_string(), - pts: 7u16, - }, - Team { - name: "Hangzhou Greentown".to_string(), - pts: 7u16, - }, - Team { - name: "Shanghai Shenhua".to_string(), - pts: 4u16, - }]; + let teams = vec![ + Team { + name: "Jiangsu Suning".to_string(), + pts: 43u16, + }, + Team { + name: "Shanghai SIPG".to_string(), + pts: 39u16, + }, + Team { + name: "Hebei CFFC".to_string(), + pts: 27u16, + }, + Team { + name: "Guangzhou Evergrand".to_string(), + pts: 22u16, + }, + Team { + name: "Shandong Luneng".to_string(), + pts: 12u16, + }, + Team { + name: "Beijing Guoan".to_string(), + pts: 7u16, + }, + Team { + name: "Hangzhou Greentown".to_string(), + pts: 7u16, + }, + Team { + name: "Shanghai Shenhua".to_string(), + pts: 4u16, + }, + ]; data.insert("teams".to_string(), to_json(&teams)); data.insert("engine".to_string(), to_json(&TYPES.to_owned())); @@ -117,8 +123,11 @@ fn main() { // register template from a file and assign a name to it // deal with errors - if let Err(e) = - handlebars.register_template_file("table", "./examples/decorator/template.hbs") { + if let Err(e) = handlebars.register_template_file( + "table", + "./examples/decorator/template.hbs", + ) + { panic!("{}", e); } @@ -129,6 +138,10 @@ fn main() { // make data and render it let data = make_data(); - println!("{}", - handlebars.render("table", &data).unwrap_or_else(|e| format!("{}", e))); + println!( + "{}", + handlebars.render("table", &data).unwrap_or_else( + |e| format!("{}", e), + ) + ); } diff --git 
a/src/vendor/handlebars/examples/error.rs b/src/vendor/handlebars/examples/error.rs index f5e452a469..1e84054e8f 100644 --- a/src/vendor/handlebars/examples/error.rs +++ b/src/vendor/handlebars/examples/error.rs @@ -10,20 +10,21 @@ use serde_json::value::{Value as Json, Map}; use handlebars::{Handlebars, RenderError, RenderContext, Helper, JsonRender, to_json}; fn format_helper(h: &Helper, _: &Handlebars, rc: &mut RenderContext) -> Result<(), RenderError> { - let param = try!(h.param(0).ok_or(RenderError::new("Param 0 is required for format helper."))); + let param = try!(h.param(0).ok_or(RenderError::new( + "Param 0 is required for format helper.", + ))); let rendered = format!("{} pts", param.value().render()); try!(rc.writer.write(rendered.into_bytes().as_ref())); Ok(()) } fn rank_helper(h: &Helper, _: &Handlebars, rc: &mut RenderContext) -> Result<(), RenderError> { - let rank = try!(h.param(0) - .and_then(|v| v.value().as_u64()) - .ok_or(RenderError::new("Param 0 with u64 type is required for rank helper."))) as usize; - let teams = - try!(h.param(1) - .and_then(|v| v.value().as_array()) - .ok_or(RenderError::new("Param 1 with array type is required for rank helper"))); + let rank = try!(h.param(0).and_then(|v| v.value().as_u64()).ok_or( + RenderError::new("Param 0 with u64 type is required for rank helper."), + )) as usize; + let teams = try!(h.param(1).and_then(|v| v.value().as_array()).ok_or( + RenderError::new("Param 1 with array type is required for rank helper"), + )); let total = teams.len(); if rank == 0 { try!(rc.writer.write("champion".as_bytes())); @@ -50,38 +51,40 @@ pub fn make_data() -> Map { data.insert("year".to_string(), to_json(&"2015".to_owned())); - let teams = vec![Team { - name: "Jiangsu Suning".to_string(), - pts: 43u16, - }, - Team { - name: "Shanghai SIPG".to_string(), - pts: 39u16, - }, - Team { - name: "Hebei CFFC".to_string(), - pts: 27u16, - }, - Team { - name: "Guangzhou Evergrand".to_string(), - pts: 22u16, - }, - Team { - name: "Shandong Luneng".to_string(), - pts: 12u16, - }, - Team { - name: "Beijing Guoan".to_string(), - pts: 7u16, - }, - Team { - name: "Hangzhou Greentown".to_string(), - pts: 7u16, - }, - Team { - name: "Shanghai Shenhua".to_string(), - pts: 4u16, - }]; + let teams = vec![ + Team { + name: "Jiangsu Suning".to_string(), + pts: 43u16, + }, + Team { + name: "Shanghai SIPG".to_string(), + pts: 39u16, + }, + Team { + name: "Hebei CFFC".to_string(), + pts: 27u16, + }, + Team { + name: "Guangzhou Evergrand".to_string(), + pts: 22u16, + }, + Team { + name: "Shandong Luneng".to_string(), + pts: 12u16, + }, + Team { + name: "Beijing Guoan".to_string(), + pts: 7u16, + }, + Team { + name: "Hangzhou Greentown".to_string(), + pts: 7u16, + }, + Team { + name: "Shanghai Shenhua".to_string(), + pts: 4u16, + }, + ]; data.insert("teams".to_string(), to_json(&teams)); data.insert("engine".to_string(), to_json(&TYPES.to_owned())); @@ -102,13 +105,20 @@ fn main() { println!("{}", e); } - handlebars.register_template_file("table", "./examples/error/template.hbs").ok().unwrap(); + handlebars + .register_template_file("table", "./examples/error/template.hbs") + .ok() + .unwrap(); handlebars.register_helper("format", Box::new(format_helper)); handlebars.register_helper("ranking_label", Box::new(rank_helper)); // handlebars.register_helper("format", Box::new(FORMAT_HELPER)); let data = make_data(); - println!("{}", - handlebars.render("table", &data).unwrap_or_else(|e| format!("{}", e))); + println!( + "{}", + handlebars.render("table", &data).unwrap_or_else( 
+ |e| format!("{}", e), + ) + ); } diff --git a/src/vendor/handlebars/examples/partials.rs b/src/vendor/handlebars/examples/partials.rs index 950fd677e9..ea178e2b69 100644 --- a/src/vendor/handlebars/examples/partials.rs +++ b/src/vendor/handlebars/examples/partials.rs @@ -6,72 +6,49 @@ extern crate maplit; use std::path::Path; use handlebars::Handlebars; -#[cfg(feature = "partial_legacy")] fn main() { env_logger::init().unwrap(); let mut handlebars = Handlebars::new(); - handlebars.register_template_file("template", - &Path::new("./examples/partials_legacy/template2.hbs")) - .ok() - .unwrap(); - handlebars.register_template_file("base0", &Path::new("./examples/partials_legacy/base0.hbs")) - .ok() - .unwrap(); - handlebars.register_template_file("base1", &Path::new("./examples/partials_legacy/base1.hbs")) - .ok() - .unwrap(); - - let data0 = btreemap! { - "title".to_string() => "example 0".to_string(), - "parent".to_string() => "base0".to_string() - }; - let data1 = btreemap! { - "title".to_string() => "example 1".to_string(), - "parent".to_string() => "base1".to_string() - }; - - println!("Page 0"); - println!("{}", - handlebars.render("template", &data0).unwrap_or_else(|e| format!("{}", e))); - println!("======================================================="); - - println!("Page 1"); - println!("{}", - handlebars.render("template", &data1).unwrap_or_else(|e| format!("{}", e))); -} - -#[cfg(not(feature = "partial_legacy"))] -fn main() { - env_logger::init().unwrap(); - let mut handlebars = Handlebars::new(); - - handlebars.register_template_file("template", &Path::new("./examples/partials/template2.hbs")) + handlebars + .register_template_file("template", &Path::new("./examples/partials/template2.hbs")) .ok() .unwrap(); - handlebars.register_template_file("base0", &Path::new("./examples/partials/base0.hbs")) + handlebars + .register_template_file("base0", &Path::new("./examples/partials/base0.hbs")) .ok() .unwrap(); - handlebars.register_template_file("base1", &Path::new("./examples/partials/base1.hbs")) + handlebars + .register_template_file("base1", &Path::new("./examples/partials/base1.hbs")) .ok() .unwrap(); - let data0 = btreemap! { + let data0 = + btreemap! { "title".to_string() => "example 0".to_string(), "parent".to_string() => "base0".to_string() }; - let data1 = btreemap! { + let data1 = + btreemap! { "title".to_string() => "example 1".to_string(), "parent".to_string() => "base1".to_string() }; println!("Page 0"); - println!("{}", - handlebars.render("template", &data0).unwrap_or_else(|e| format!("{}", e))); + println!( + "{}", + handlebars.render("template", &data0).unwrap_or_else( + |e| format!("{}", e), + ) + ); println!("======================================================="); println!("Page 1"); - println!("{}", - handlebars.render("template", &data1).unwrap_or_else(|e| format!("{}", e))); + println!( + "{}", + handlebars.render("template", &data1).unwrap_or_else( + |e| format!("{}", e), + ) + ); } diff --git a/src/vendor/handlebars/examples/partials_legacy/base0.hbs b/src/vendor/handlebars/examples/partials_legacy/base0.hbs deleted file mode 100644 index 0b0126dd0b..0000000000 --- a/src/vendor/handlebars/examples/partials_legacy/base0.hbs +++ /dev/null @@ -1,7 +0,0 @@ - - {{title}} - -
Derived from base0.hbs
- {{~#block page}}{{/block~}} - - diff --git a/src/vendor/handlebars/examples/partials_legacy/base1.hbs b/src/vendor/handlebars/examples/partials_legacy/base1.hbs deleted file mode 100644 index b15af963ec..0000000000 --- a/src/vendor/handlebars/examples/partials_legacy/base1.hbs +++ /dev/null @@ -1,7 +0,0 @@ - - {{title}} - -
Derived from base1.hbs
- {{~#block page}}{{/block~}} - - diff --git a/src/vendor/handlebars/examples/partials_legacy/template2.hbs b/src/vendor/handlebars/examples/partials_legacy/template2.hbs deleted file mode 100644 index 283b57f278..0000000000 --- a/src/vendor/handlebars/examples/partials_legacy/template2.hbs +++ /dev/null @@ -1,5 +0,0 @@ -{{#partial page}} -
Rendered in partial, parent is {{parent}}
-{{/partial}} -{{! remove whitespaces with ~ }} -{{~> (parent)~}} diff --git a/src/vendor/handlebars/examples/quick.rs b/src/vendor/handlebars/examples/quick.rs new file mode 100644 index 0000000000..1ae5292db9 --- /dev/null +++ b/src/vendor/handlebars/examples/quick.rs @@ -0,0 +1,20 @@ +extern crate handlebars; +#[macro_use] +extern crate serde_json; + +use handlebars::Handlebars; + +fn main() { + let mut reg = Handlebars::new(); + // render without register + println!( + "{}", + reg.template_render("Hello {{name}}", &json!({"name": "foo"})) + .unwrap() + ); + + // register template using given name + reg.register_template_string("tpl_1", "Good afternoon, {{name}}") + .unwrap(); + println!("{}", reg.render("tpl_1", &json!({"name": "foo"})).unwrap()); +} diff --git a/src/vendor/handlebars/examples/render-cli.rs b/src/vendor/handlebars/examples/render-cli.rs index f4b08dfca2..de2467ce0c 100644 --- a/src/vendor/handlebars/examples/render-cli.rs +++ b/src/vendor/handlebars/examples/render-cli.rs @@ -13,10 +13,11 @@ use handlebars::Handlebars; fn usage() -> ! { - writeln!(&mut io::stderr(), - "{}", - r#"Usage: ./render-cli template.hbs '{"json": "data"}'"#) - .ok(); + writeln!( + &mut io::stderr(), + "{}", + r#"Usage: ./render-cli template.hbs '{"json": "data"}'"# + ).ok(); process::exit(1); } @@ -40,7 +41,10 @@ fn main() { let mut handlebars = Handlebars::new(); - handlebars.register_template_file(&filename, &filename).ok().unwrap(); + handlebars + .register_template_file(&filename, &filename) + .ok() + .unwrap(); match handlebars.render(&filename, &data) { Ok(data) => { println!("{}", data); diff --git a/src/vendor/handlebars/examples/render.rs b/src/vendor/handlebars/examples/render.rs index 8637b013ec..3a867f116c 100644 --- a/src/vendor/handlebars/examples/render.rs +++ b/src/vendor/handlebars/examples/render.rs @@ -13,7 +13,9 @@ use handlebars::{Handlebars, RenderError, RenderContext, Helper, JsonRender, to_ // define a custom helper fn format_helper(h: &Helper, _: &Handlebars, rc: &mut RenderContext) -> Result<(), RenderError> { // get parameter from helper or throw an error - let param = try!(h.param(0).ok_or(RenderError::new("Param 0 is required for format helper."))); + let param = try!(h.param(0).ok_or(RenderError::new( + "Param 0 is required for format helper.", + ))); let rendered = format!("{} pts", param.value().render()); try!(rc.writer.write(rendered.into_bytes().as_ref())); Ok(()) @@ -21,13 +23,12 @@ fn format_helper(h: &Helper, _: &Handlebars, rc: &mut RenderContext) -> Result<( // another custom helper fn rank_helper(h: &Helper, _: &Handlebars, rc: &mut RenderContext) -> Result<(), RenderError> { - let rank = try!(h.param(0) - .and_then(|v| v.value().as_u64()) - .ok_or(RenderError::new("Param 0 with u64 type is required for rank helper."))) as usize; - let teams = - try!(h.param(1) - .and_then(|v| v.value().as_array()) - .ok_or(RenderError::new("Param 1 with array type is required for rank helper"))); + let rank = try!(h.param(0).and_then(|v| v.value().as_u64()).ok_or( + RenderError::new("Param 0 with u64 type is required for rank helper."), + )) as usize; + let teams = try!(h.param(1).and_then(|v| v.value().as_array()).ok_or( + RenderError::new("Param 1 with array type is required for rank helper"), + )); let total = teams.len(); if rank == 0 { try!(rc.writer.write("champion".as_bytes())); @@ -54,38 +55,40 @@ pub fn make_data() -> Map { data.insert("year".to_string(), to_json(&"2015".to_owned())); - let teams = vec![Team { - name: "Jiangsu Suning".to_string(), - pts: 43u16, 
- }, - Team { - name: "Shanghai SIPG".to_string(), - pts: 39u16, - }, - Team { - name: "Hebei CFFC".to_string(), - pts: 27u16, - }, - Team { - name: "Guangzhou Evergrand".to_string(), - pts: 22u16, - }, - Team { - name: "Shandong Luneng".to_string(), - pts: 12u16, - }, - Team { - name: "Beijing Guoan".to_string(), - pts: 7u16, - }, - Team { - name: "Hangzhou Greentown".to_string(), - pts: 7u16, - }, - Team { - name: "Shanghai Shenhua".to_string(), - pts: 4u16, - }]; + let teams = vec![ + Team { + name: "Jiangsu Suning".to_string(), + pts: 43u16, + }, + Team { + name: "Shanghai SIPG".to_string(), + pts: 39u16, + }, + Team { + name: "Hebei CFFC".to_string(), + pts: 27u16, + }, + Team { + name: "Guangzhou Evergrand".to_string(), + pts: 22u16, + }, + Team { + name: "Shandong Luneng".to_string(), + pts: 12u16, + }, + Team { + name: "Beijing Guoan".to_string(), + pts: 7u16, + }, + Team { + name: "Hangzhou Greentown".to_string(), + pts: 7u16, + }, + Team { + name: "Shanghai Shenhua".to_string(), + pts: 4u16, + }, + ]; data.insert("teams".to_string(), to_json(&teams)); data.insert("engine".to_string(), to_json(&TYPES.to_owned())); @@ -109,6 +112,10 @@ fn main() { // make data and render it let data = make_data(); - println!("{}", - handlebars.render("table", &data).unwrap_or_else(|e| format!("{}", e))); + println!( + "{}", + handlebars.render("table", &data).unwrap_or_else( + |e| format!("{}", e), + ) + ); } diff --git a/src/vendor/handlebars/examples/render_file.rs b/src/vendor/handlebars/examples/render_file.rs index 81156130ae..d271bef61a 100644 --- a/src/vendor/handlebars/examples/render_file.rs +++ b/src/vendor/handlebars/examples/render_file.rs @@ -15,7 +15,9 @@ use handlebars::{Handlebars, RenderError, RenderContext, Helper, Context, JsonRe // define a custom helper fn format_helper(h: &Helper, _: &Handlebars, rc: &mut RenderContext) -> Result<(), RenderError> { - let param = try!(h.param(0).ok_or(RenderError::new("Param 0 is required for format helper."))); + let param = try!(h.param(0).ok_or(RenderError::new( + "Param 0 is required for format helper.", + ))); let rendered = format!("{} pts", param.value().render()); try!(rc.writer.write(rendered.into_bytes().as_ref())); Ok(()) @@ -23,13 +25,12 @@ fn format_helper(h: &Helper, _: &Handlebars, rc: &mut RenderContext) -> Result<( // another custom helper fn rank_helper(h: &Helper, _: &Handlebars, rc: &mut RenderContext) -> Result<(), RenderError> { - let rank = try!(h.param(0) - .and_then(|v| v.value().as_u64()) - .ok_or(RenderError::new("Param 0 with u64 type is required for rank helper."))) as usize; - let teams = - try!(h.param(1) - .and_then(|v| v.value().as_array()) - .ok_or(RenderError::new("Param 1 with array type is required for rank helper"))); + let rank = try!(h.param(0).and_then(|v| v.value().as_u64()).ok_or( + RenderError::new("Param 0 with u64 type is required for rank helper."), + )) as usize; + let teams = try!(h.param(1).and_then(|v| v.value().as_array()).ok_or( + RenderError::new("Param 1 with array type is required for rank helper"), + )); let total = teams.len(); if rank == 0 { try!(rc.writer.write("champion".as_bytes())); @@ -56,38 +57,40 @@ pub fn make_data() -> Map { data.insert("year".to_string(), to_json(&"2015".to_owned())); - let teams = vec![Team { - name: "Jiangsu Suning".to_string(), - pts: 43u16, - }, - Team { - name: "Shanghai SIPG".to_string(), - pts: 39u16, - }, - Team { - name: "Hebei CFFC".to_string(), - pts: 27u16, - }, - Team { - name: "Guangzhou Evergrand".to_string(), - pts: 22u16, - }, - Team { - name: 
"Shandong Luneng".to_string(), - pts: 12u16, - }, - Team { - name: "Beijing Guoan".to_string(), - pts: 7u16, - }, - Team { - name: "Hangzhou Greentown".to_string(), - pts: 7u16, - }, - Team { - name: "Shanghai Shenhua".to_string(), - pts: 4u16, - }]; + let teams = vec![ + Team { + name: "Jiangsu Suning".to_string(), + pts: 43u16, + }, + Team { + name: "Shanghai SIPG".to_string(), + pts: 39u16, + }, + Team { + name: "Hebei CFFC".to_string(), + pts: 27u16, + }, + Team { + name: "Guangzhou Evergrand".to_string(), + pts: 22u16, + }, + Team { + name: "Shandong Luneng".to_string(), + pts: 12u16, + }, + Team { + name: "Beijing Guoan".to_string(), + pts: 7u16, + }, + Team { + name: "Hangzhou Greentown".to_string(), + pts: 7u16, + }, + Team { + name: "Shanghai Shenhua".to_string(), + pts: 4u16, + }, + ]; data.insert("teams".to_string(), to_json(&teams)); data.insert("engine".to_string(), to_json(&TYPES.to_owned())); diff --git a/src/vendor/handlebars/src/context.rs b/src/vendor/handlebars/src/context.rs index 2c739d46c9..72df51c6ad 100644 --- a/src/vendor/handlebars/src/context.rs +++ b/src/vendor/handlebars/src/context.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use serde::Serialize; use serde_json::value::{Value as Json, Map, to_value}; @@ -5,6 +7,7 @@ use pest::prelude::*; use std::collections::{VecDeque, BTreeMap}; use grammar::{Rdp, Rule}; +use error::RenderError; static DEFAULT_VALUE: Json = Json::Null; @@ -14,11 +17,14 @@ pub type Object = BTreeMap; /// #[derive(Debug, Clone)] pub struct Context { - data: Json, + data: Arc, } #[inline] -fn parse_json_visitor_inner<'a>(path_stack: &mut VecDeque<&'a str>, path: &'a str) { +fn parse_json_visitor_inner<'a>( + path_stack: &mut VecDeque<&'a str>, + path: &'a str, +) -> Result<(), RenderError> { let path_in = StringInput::new(path); let mut parser = Rdp::new(path_in); @@ -26,7 +32,6 @@ fn parse_json_visitor_inner<'a>(path_stack: &mut VecDeque<&'a str>, path: &'a st if parser.path() { for seg in parser.queue().iter() { match seg.rule { - Rule::path_var | Rule::path_idx | Rule::path_key => {} Rule::path_up => { path_stack.pop_back(); if let Some(p) = seg_stack.pop_back() { @@ -37,8 +42,7 @@ fn parse_json_visitor_inner<'a>(path_stack: &mut VecDeque<&'a str>, path: &'a st } } Rule::path_id | - Rule::path_raw_id | - Rule::path_num_id => { + Rule::path_raw_id => { seg_stack.push_back(seg); } _ => {} @@ -49,14 +53,19 @@ fn parse_json_visitor_inner<'a>(path_stack: &mut VecDeque<&'a str>, path: &'a st let id = &path[i.start..i.end]; path_stack.push_back(id); } + Ok(()) + } else { + Err(RenderError::new("Invalid JSON path")) } } #[inline] -fn parse_json_visitor<'a>(path_stack: &mut VecDeque<&'a str>, - base_path: &'a str, - path_context: &'a VecDeque, - relative_path: &'a str) { +fn parse_json_visitor<'a>( + path_stack: &mut VecDeque<&'a str>, + base_path: &'a str, + path_context: &'a VecDeque, + relative_path: &'a str, +) -> Result<(), RenderError> { let path_in = StringInput::new(relative_path); let mut parser = Rdp::new(path_in); @@ -78,27 +87,26 @@ fn parse_json_visitor<'a>(path_stack: &mut VecDeque<&'a str>, if path_context_depth >= 0 { if let Some(context_base_path) = path_context.get(path_context_depth as usize) { - parse_json_visitor_inner(path_stack, context_base_path); + parse_json_visitor_inner(path_stack, context_base_path)?; } else { - parse_json_visitor_inner(path_stack, base_path); + parse_json_visitor_inner(path_stack, base_path)?; } } else { - parse_json_visitor_inner(path_stack, base_path); + parse_json_visitor_inner(path_stack, base_path)?; 
} - parse_json_visitor_inner(path_stack, relative_path); + parse_json_visitor_inner(path_stack, relative_path)?; + Ok(()) + } else { + Err(RenderError::new("Invalid JSON path.")) } - // TODO: report invalid path + } -fn merge_json(base: &Json, addition: &Object) -> Json { +pub fn merge_json(base: &Json, addition: &Object) -> Json { let mut base_map = match base { &Json::Object(ref m) => m.clone(), - _ => { - let mut map = Map::new(); - map.insert("this".to_owned(), base.clone()); - map - } + _ => Map::new(), }; for (k, v) in addition.iter() { @@ -111,21 +119,14 @@ fn merge_json(base: &Json, addition: &Object) -> Json { impl Context { /// Create a context with null data pub fn null() -> Context { - Context { data: Json::Null } + Context { data: Arc::new(Json::Null) } } /// Create a context with given data - pub fn wraps(e: &T) -> Context { - Context { data: to_json(e) } - } - - /// Extend current context with another JSON object - /// If current context is a JSON object, it's identical to a normal merge - /// Otherwise, the current value will be stored in new JSON object with key `this`, and merged - /// keys are also available. - pub fn extend(&self, hash: &Object) -> Context { - let new_data = merge_json(&self.data, hash); - Context { data: new_data } + pub fn wraps(e: &T) -> Result { + to_value(e).map_err(RenderError::from).map(|d| { + Context { data: Arc::new(d) } + }) } /// Navigate the context with base path and relative path @@ -133,18 +134,19 @@ impl Context { /// and set relative path to helper argument or so. /// /// If you want to navigate from top level, set the base path to `"."` - pub fn navigate(&self, - base_path: &str, - path_context: &VecDeque, - relative_path: &str) - -> &Json { + pub fn navigate( + &self, + base_path: &str, + path_context: &VecDeque, + relative_path: &str, + ) -> Result<&Json, RenderError> { let mut path_stack: VecDeque<&str> = VecDeque::new(); - parse_json_visitor(&mut path_stack, base_path, path_context, relative_path); + parse_json_visitor(&mut path_stack, base_path, path_context, relative_path)?; let paths: Vec<&str> = path_stack.iter().map(|x| *x).collect(); - let mut data: &Json = &self.data; + let mut data: &Json = self.data.as_ref(); for p in paths.iter() { - if *p == "this" && data.as_object().and_then(|m| m.get("this")).is_none() { + if *p == "this" { continue; } data = match *data { @@ -157,15 +159,15 @@ impl Context { _ => &DEFAULT_VALUE, } } - data + Ok(data) } pub fn data(&self) -> &Json { - &self.data + self.data.as_ref() } pub fn data_mut(&mut self) -> &mut Json { - &mut self.data + Arc::make_mut(&mut self.data) } } @@ -201,9 +203,10 @@ impl JsonRender for Json { } pub fn to_json(src: &T) -> Json - where T: Serialize +where + T: Serialize, { - to_value(src).unwrap_or(Json::Null) + to_value(src).unwrap_or_default() } pub fn as_string(src: &Json) -> Option<&str> { @@ -226,7 +229,7 @@ impl JsonTruthy for Json { #[cfg(test)] mod test { use context::{self, JsonRender, Context}; - use std::collections::{VecDeque, BTreeMap}; + use std::collections::VecDeque; use serde_json::value::{Value as Json, Map}; #[test] @@ -254,9 +257,13 @@ mod test { #[test] fn test_render() { let v = "hello"; - let ctx = Context::wraps(&v.to_string()); - assert_eq!(ctx.navigate(".", &VecDeque::new(), "this").render(), - v.to_string()); + let ctx = Context::wraps(&v.to_string()).unwrap(); + assert_eq!( + ctx.navigate(".", &VecDeque::new(), "this") + .unwrap() + .render(), + v.to_string() + ); } #[test] @@ -273,30 +280,48 @@ mod test { titles: 
vec!["programmer".to_string(), "cartographier".to_string()], }; - let ctx = Context::wraps(&person); - assert_eq!(ctx.navigate(".", &VecDeque::new(), "./name/../addr/country").render(), - "China".to_string()); - assert_eq!(ctx.navigate(".", &VecDeque::new(), "addr.[country]").render(), - "China".to_string()); - assert_eq!(ctx.navigate(".", &VecDeque::new(), "addr.[\"country\"]").render(), - "China".to_string()); - assert_eq!(ctx.navigate(".", &VecDeque::new(), "addr.['country']").render(), - "China".to_string()); + let ctx = Context::wraps(&person).unwrap(); + assert_eq!( + ctx.navigate(".", &VecDeque::new(), "./name/../addr/country") + .unwrap() + .render(), + "China".to_string() + ); + assert_eq!( + ctx.navigate(".", &VecDeque::new(), "addr.[country]") + .unwrap() + .render(), + "China".to_string() + ); let v = true; - let ctx2 = Context::wraps(&v); - assert_eq!(ctx2.navigate(".", &VecDeque::new(), "this").render(), - "true".to_string()); - - assert_eq!(ctx.navigate(".", &VecDeque::new(), "titles[0]").render(), - "programmer".to_string()); - assert_eq!(ctx.navigate(".", &VecDeque::new(), "titles.[0]").render(), - "programmer".to_string()); - - assert_eq!(ctx.navigate(".", &VecDeque::new(), "titles[0]/../../age").render(), - "27".to_string()); - assert_eq!(ctx.navigate(".", &VecDeque::new(), "this.titles[0]/../../age").render(), - "27".to_string()); + let ctx2 = Context::wraps(&v).unwrap(); + assert_eq!( + ctx2.navigate(".", &VecDeque::new(), "this") + .unwrap() + .render(), + "true".to_string() + ); + + assert_eq!( + ctx.navigate(".", &VecDeque::new(), "titles.[0]") + .unwrap() + .render(), + "programmer".to_string() + ); + + assert_eq!( + ctx.navigate(".", &VecDeque::new(), "titles.[0]/../../age") + .unwrap() + .render(), + "27".to_string() + ); + assert_eq!( + ctx.navigate(".", &VecDeque::new(), "this.titles.[0]/../../age") + .unwrap() + .render(), + "27".to_string() + ); } @@ -305,50 +330,90 @@ mod test { let mut map_with_this = Map::new(); map_with_this.insert("this".to_string(), context::to_json(&"hello")); map_with_this.insert("age".to_string(), context::to_json(&5usize)); - let ctx1 = Context::wraps(&map_with_this); + let ctx1 = Context::wraps(&map_with_this).unwrap(); let mut map_without_this = Map::new(); map_without_this.insert("age".to_string(), context::to_json(&4usize)); - let ctx2 = Context::wraps(&map_without_this); - - assert_eq!(ctx1.navigate(".", &VecDeque::new(), "this").render(), - "hello".to_owned()); - assert_eq!(ctx2.navigate(".", &VecDeque::new(), "age").render(), - "4".to_owned()); + let ctx2 = Context::wraps(&map_without_this).unwrap(); + + assert_eq!( + ctx1.navigate(".", &VecDeque::new(), "this") + .unwrap() + .render(), + "[object]".to_owned() + ); + assert_eq!( + ctx2.navigate(".", &VecDeque::new(), "age") + .unwrap() + .render(), + "4".to_owned() + ); } #[test] - fn test_extend() { - let mut map = Map::new(); - map.insert("age".to_string(), context::to_json(&4usize)); - let ctx1 = Context::wraps(&map); - + fn test_merge_json() { + let map = json!({ "age": 4 }); let s = "hello".to_owned(); - let ctx2 = Context::wraps(&s); - - let mut hash = BTreeMap::new(); - hash.insert("tag".to_owned(), context::to_json(&"h1")); + let hash = + btreemap!{ + "tag".to_owned() => context::to_json(&"h1") + }; - let ctx_a1 = ctx1.extend(&hash); - assert_eq!(ctx_a1.navigate(".", &VecDeque::new(), "age").render(), + let ctx_a1 = Context::wraps(&context::merge_json(&map, &hash)).unwrap(); + assert_eq!(ctx_a1 + .navigate(".", &VecDeque::new(), "age") + .unwrap() + .render(), 
"4".to_owned()); - assert_eq!(ctx_a1.navigate(".", &VecDeque::new(), "tag").render(), + assert_eq!(ctx_a1 + .navigate(".", &VecDeque::new(), "tag") + .unwrap() + .render(), "h1".to_owned()); - let ctx_a2 = ctx2.extend(&hash); - assert_eq!(ctx_a2.navigate(".", &VecDeque::new(), "this").render(), - "hello".to_owned()); - assert_eq!(ctx_a2.navigate(".", &VecDeque::new(), "tag").render(), + let ctx_a2 = Context::wraps(&context::merge_json(&context::to_json(&s), &hash)).unwrap(); + assert_eq!(ctx_a2 + .navigate(".", &VecDeque::new(), "this") + .unwrap() + .render(), + "[object]".to_owned()); + assert_eq!(ctx_a2 + .navigate(".", &VecDeque::new(), "tag") + .unwrap() + .render(), "h1".to_owned()); } #[test] fn test_key_name_with_this() { - let m = btreemap!{ + let m = + btreemap!{ "this_name".to_string() => "the_value".to_string() }; - let ctx = Context::wraps(&m); - assert_eq!(ctx.navigate(".", &VecDeque::new(), "this_name").render(), + let ctx = Context::wraps(&m).unwrap(); + assert_eq!(ctx.navigate(".", &VecDeque::new(), "this_name") + .unwrap() + .render(), "the_value".to_string()); } + + use serde::{Serialize, Serializer}; + use serde::ser::Error as SerdeError; + + struct UnserializableType {} + + impl Serialize for UnserializableType { + fn serialize(&self, _: S) -> Result + where + S: Serializer, + { + Err(SerdeError::custom("test")) + } + } + + #[test] + fn test_serialize_error() { + let d = UnserializableType {}; + assert!(Context::wraps(&d).is_err()); + } } diff --git a/src/vendor/handlebars/src/directives/inline.rs b/src/vendor/handlebars/src/directives/inline.rs index afb798b9cf..fa434f616f 100644 --- a/src/vendor/handlebars/src/directives/inline.rs +++ b/src/vendor/handlebars/src/directives/inline.rs @@ -1,26 +1,30 @@ use directives::DirectiveDef; use registry::Registry; -use render::{RenderError, RenderContext, Directive}; +use render::{RenderContext, Directive}; +use error::RenderError; #[derive(Clone, Copy)] pub struct InlineDirective; fn get_name<'a>(d: &'a Directive) -> Result<&'a str, RenderError> { d.param(0) - .ok_or_else(|| RenderError::new("Param required for directive \"inline\"")) + .ok_or_else(|| { + RenderError::new("Param required for directive \"inline\"") + }) .and_then(|v| { - v.value() - .as_str() - .ok_or_else(|| RenderError::new("inline name must be string")) - }) + v.value().as_str().ok_or_else(|| { + RenderError::new("inline name must be string") + }) + }) } impl DirectiveDef for InlineDirective { fn call(&self, d: &Directive, _: &Registry, rc: &mut RenderContext) -> Result<(), RenderError> { let name = try!(get_name(d)); - let template = - try!(d.template().ok_or_else(|| RenderError::new("inline should have a block"))); + let template = try!(d.template().ok_or_else(|| { + RenderError::new("inline should have a block") + })); rc.set_partial(name.to_owned(), template.clone()); @@ -41,19 +45,18 @@ mod test { #[test] fn test_inline() { - let t0 = - Template::compile("{{#*inline \"hello\"}}the hello world inline partial.{{/inline}}" - .to_string()) - .ok() - .unwrap(); + let t0 = Template::compile( + "{{#*inline \"hello\"}}the hello world inline partial.{{/inline}}".to_string(), + ).ok() + .unwrap(); let hbs = Registry::new(); let mut sw = StringWriter::new(); - let mut ctx = Context::null(); + let ctx = Context::null(); let mut hlps = HashMap::new(); - let mut rc = RenderContext::new(&mut ctx, &mut hlps, &mut sw); + let mut rc = RenderContext::new(ctx, &mut hlps, &mut sw); t0.elements[0].eval(&hbs, &mut rc).unwrap(); 
assert!(rc.get_partial(&"hello".to_owned()).is_some()); diff --git a/src/vendor/handlebars/src/directives/mod.rs b/src/vendor/handlebars/src/directives/mod.rs index 81e693fe56..df2950dc66 100644 --- a/src/vendor/handlebars/src/directives/mod.rs +++ b/src/vendor/handlebars/src/directives/mod.rs @@ -1,5 +1,6 @@ -use render::{RenderContext, RenderError, Directive}; +use render::{RenderContext, Directive}; use registry::Registry; +use error::RenderError; pub use self::inline::INLINE_DIRECTIVE; @@ -20,8 +21,8 @@ pub use self::inline::INLINE_DIRECTIVE; /// fn update_data(_: &Decorator, _: &Handlebars, rc: &mut RenderContext) /// -> Result<(), RenderError> { /// // modify json object -/// let mut ctx_ref = rc.context_mut(); -/// if let Some(ref mut m) = ctx_ref.data_mut().as_object_mut() { +/// let mut data = rc.context_mut().data_mut(); +/// if let Some(ref mut m) = data.as_object_mut() { /// m.insert("hello".to_string(), to_json(&"world".to_owned())); /// } /// Ok(()) @@ -53,9 +54,13 @@ pub trait DirectiveDef: Send + Sync { } /// implement DirectiveDef for bare function so we can use function as directive -impl Fn(&'b Directive, &'c Registry, &'d mut RenderContext) - -> Result<(), RenderError>> DirectiveDef for F { - fn call(&self, d: &Directive, r: &Registry, rc: &mut RenderContext) -> Result<(), RenderError>{ +impl< + F: Send + + Sync + + for<'b, 'c, 'd, 'e> Fn(&'b Directive, &'c Registry, &'d mut RenderContext) + -> Result<(), RenderError>, +> DirectiveDef for F { + fn call(&self, d: &Directive, r: &Registry, rc: &mut RenderContext) -> Result<(), RenderError> { (*self)(d, r, rc) } } @@ -66,135 +71,182 @@ mod inline; mod test { use registry::Registry; use context::{self, as_string, Context}; - use render::{RenderContext, RenderError, Directive, Helper}; + use render::{RenderContext, Directive, Helper}; + use error::RenderError; #[test] fn test_register_decorator() { let mut handlebars = Registry::new(); - handlebars.register_template_string("t0", "{{*foo}}".to_string()).unwrap(); + handlebars + .register_template_string("t0", "{{*foo}}".to_string()) + .unwrap(); - let data = btreemap! { + let data = + btreemap! { "hello".to_string() => "world".to_string() }; assert!(handlebars.render("t0", &data).is_err()); - handlebars.register_decorator("foo", - Box::new(|_: &Directive, - _: &Registry, - _: &mut RenderContext| - -> Result<(), RenderError> { - Ok(()) - })); + handlebars.register_decorator( + "foo", + Box::new(|_: &Directive, + _: &Registry, + _: &mut RenderContext| + -> Result<(), RenderError> { + Ok(()) + }), + ); assert_eq!(handlebars.render("t0", &data).ok().unwrap(), "".to_string()); } #[test] fn test_update_data_with_decorator() { let mut handlebars = Registry::new(); - handlebars.register_template_string("t0", "{{hello}}{{*foo}}{{hello}}".to_string()) + handlebars + .register_template_string("t0", "{{hello}}{{*foo}}{{hello}}".to_string()) .unwrap(); - let data = btreemap! { + let data = + btreemap! 
{ "hello".to_string() => "world".to_string() }; - handlebars.register_decorator("foo", - Box::new(|_: &Directive, - _: &Registry, - rc: &mut RenderContext| - -> Result<(), RenderError> { - // modify json object - let mut ctx_ref = rc.context_mut(); - if let Some(ref mut m) = ctx_ref.data_mut().as_object_mut().as_mut() { - m.insert("hello".to_string(), context::to_json(&"war".to_owned())); - } - Ok(()) - })); - - assert_eq!(handlebars.render("t0", &data).ok().unwrap(), - "worldwar".to_string()); + handlebars.register_decorator( + "foo", + Box::new(|_: &Directive, + _: &Registry, + rc: &mut RenderContext| + -> Result<(), RenderError> { + // modify json object + let mut ctx_ref = rc.context_mut(); + let mut data = ctx_ref.data_mut(); + + if let Some(ref mut m) = data.as_object_mut().as_mut() { + m.insert("hello".to_string(), context::to_json(&"war".to_owned())); + } + + Ok(()) + }), + ); + + assert_eq!( + handlebars.render("t0", &data).ok().unwrap(), + "worldwar".to_string() + ); let data2 = 0; - handlebars.register_decorator("bar", - Box::new(|d: &Directive, - _: &Registry, - rc: &mut RenderContext| - -> Result<(), RenderError> { - // modify value - let v = d.param(0) - .map(|v| Context::wraps(v.value())) - .unwrap_or(Context::null()); - *rc.context_mut() = v; - Ok(()) - })); - handlebars.register_template_string("t1", "{{this}}{{*bar 1}}{{this}}".to_string()) + handlebars.register_decorator( + "bar", + Box::new(|d: &Directive, + _: &Registry, + rc: &mut RenderContext| + -> Result<(), RenderError> { + // modify value + let v = d.param(0) + .and_then(|v| Context::wraps(v.value()).ok()) + .unwrap_or(Context::null()); + *rc.context_mut() = v; + Ok(()) + }), + ); + handlebars + .register_template_string("t1", "{{this}}{{*bar 1}}{{this}}".to_string()) .unwrap(); - assert_eq!(handlebars.render("t1", &data2).ok().unwrap(), - "01".to_string()); - - handlebars.register_template_string("t2", - "{{this}}{{*bar \"string_literal\"}}{{this}}" - .to_string()) + assert_eq!( + handlebars.render("t1", &data2).ok().unwrap(), + "01".to_string() + ); + + handlebars + .register_template_string( + "t2", + "{{this}}{{*bar \"string_literal\"}}{{this}}".to_string(), + ) .unwrap(); - assert_eq!(handlebars.render("t2", &data2).ok().unwrap(), - "0string_literal".to_string()); + assert_eq!( + handlebars.render("t2", &data2).ok().unwrap(), + "0string_literal".to_string() + ); - handlebars.register_template_string("t3", "{{this}}{{*bar}}{{this}}".to_string()).unwrap(); - assert_eq!(handlebars.render("t3", &data2).ok().unwrap(), - "0".to_string()); + handlebars + .register_template_string("t3", "{{this}}{{*bar}}{{this}}".to_string()) + .unwrap(); + assert_eq!( + handlebars.render("t3", &data2).ok().unwrap(), + "0".to_string() + ); } #[test] fn test_local_helper_with_decorator() { let mut handlebars = Registry::new(); - handlebars.register_template_string("t0", - "{{distance 4.5}},{{*foo \"miles\"}}{{distance 10.1}},{{*bar}}{{distance 3.4}}" - .to_string()) - .unwrap(); - - handlebars.register_helper("distance", - Box::new(|h: &Helper, - _: &Registry, - rc: &mut RenderContext| - -> Result<(), RenderError> { - let s = format!("{}m", - h.param(0).map(|v| v.value()).unwrap_or(&context::to_json(&0))); - try!(rc.writer().write(s.into_bytes().as_ref())); - Ok(()) - })); - handlebars.register_decorator("foo", - Box::new(|d: &Directive, - _: &Registry, - rc: &mut RenderContext| - -> Result<(), RenderError> { - let new_unit = d.param(0) - .and_then(|v| as_string(v.value())) - .unwrap_or("") - .to_owned(); - let new_helper = move |h: 
&Helper, - _: &Registry, - rc: &mut RenderContext| - -> Result<(), RenderError> { - let s = format!("{}{}", - h.param(0).map(|v| v.value()).unwrap_or(&context::to_json(&0)), - new_unit); + handlebars + .register_template_string( + "t0", + "{{distance 4.5}},{{*foo \"miles\"}}{{distance 10.1}},{{*bar}}{{distance 3.4}}" + .to_string(), + ) + .unwrap(); + + handlebars.register_helper( + "distance", + Box::new(|h: &Helper, + _: &Registry, + rc: &mut RenderContext| + -> Result<(), RenderError> { + let s = format!( + "{}m", + h.param(0).map(|v| v.value()).unwrap_or( + &context::to_json(&0), + ) + ); try!(rc.writer().write(s.into_bytes().as_ref())); Ok(()) - }; - - rc.register_local_helper("distance", Box::new(new_helper)); - Ok(()) - })); - handlebars.register_decorator("bar", - Box::new(|_: &Directive, - _: &Registry, - rc: &mut RenderContext| - -> Result<(), RenderError> { - rc.unregister_local_helper("distance"); - Ok(()) - })); - assert_eq!(handlebars.render("t0", &0).ok().unwrap(), - "4.5m,10.1miles,3.4m".to_owned()); + }), + ); + handlebars.register_decorator( + "foo", + Box::new(|d: &Directive, + _: &Registry, + rc: &mut RenderContext| + -> Result<(), RenderError> { + let new_unit = d.param(0) + .and_then(|v| as_string(v.value())) + .unwrap_or("") + .to_owned(); + let new_helper = move |h: &Helper, + _: &Registry, + rc: &mut RenderContext| + -> Result<(), RenderError> { + let s = format!( + "{}{}", + h.param(0).map(|v| v.value()).unwrap_or( + &context::to_json(&0), + ), + new_unit + ); + try!(rc.writer().write(s.into_bytes().as_ref())); + Ok(()) + }; + + rc.register_local_helper("distance", Box::new(new_helper)); + Ok(()) + }), + ); + handlebars.register_decorator( + "bar", + Box::new(|_: &Directive, + _: &Registry, + rc: &mut RenderContext| + -> Result<(), RenderError> { + rc.unregister_local_helper("distance"); + Ok(()) + }), + ); + assert_eq!( + handlebars.render("t0", &0).ok().unwrap(), + "4.5m,10.1miles,3.4m".to_owned() + ); } } diff --git a/src/vendor/handlebars/src/error.rs b/src/vendor/handlebars/src/error.rs index 7706839a5a..f8502c2c58 100644 --- a/src/vendor/handlebars/src/error.rs +++ b/src/vendor/handlebars/src/error.rs @@ -2,8 +2,84 @@ use std::io::Error as IOError; use std::error::Error; use std::fmt; +use serde_json::error::Error as SerdeError; + use template::Parameter; -use render::RenderError; + +/// Error when rendering data on template. +#[derive(Debug)] +pub struct RenderError { + pub desc: String, + pub template_name: Option, + pub line_no: Option, + pub column_no: Option, + cause: Option>, +} + +impl fmt::Display for RenderError { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + match (self.line_no, self.column_no) { + (Some(line), Some(col)) => { + write!( + f, + "Error rendering \"{}\" line {}, col {}: {}", + self.template_name.as_ref().unwrap_or( + &"Unnamed template".to_owned(), + ), + line, + col, + self.desc + ) + } + _ => write!(f, "{}", self.desc), + } + + } +} + +impl Error for RenderError { + fn description(&self) -> &str { + &self.desc[..] 
+ } + + fn cause(&self) -> Option<&Error> { + self.cause.as_ref().map(|e| &**e as &Error) + } +} + +impl From for RenderError { + fn from(e: IOError) -> RenderError { + RenderError::with(e) + } +} + +impl From for RenderError { + fn from(e: SerdeError) -> RenderError { + RenderError::with(e) + } +} + +impl RenderError { + pub fn new>(desc: T) -> RenderError { + RenderError { + desc: desc.as_ref().to_owned(), + template_name: None, + line_no: None, + column_no: None, + cause: None, + } + } + + pub fn with(cause: E) -> RenderError + where + E: Error + Send + 'static, + { + let mut e = RenderError::new(cause.description()); + e.cause = Some(Box::new(cause)); + + e + } +} quick_error! { /// Template parsing error @@ -75,12 +151,16 @@ impl fmt::Display for TemplateError { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { match (self.line_no, self.column_no) { (Some(line), Some(col)) => { - write!(f, - "Template \"{}\" line {}, col {}: {}", - self.template_name.as_ref().unwrap_or(&"Unnamed template".to_owned()), - line, - col, - self.reason) + write!( + f, + "Template \"{}\" line {}, col {}: {}", + self.template_name.as_ref().unwrap_or( + &"Unnamed template".to_owned(), + ), + line, + col, + self.reason + ) } _ => write!(f, "{}", self.reason), } diff --git a/src/vendor/handlebars/src/grammar.rs b/src/vendor/handlebars/src/grammar.rs index 8d67079494..b2039f9fd0 100644 --- a/src/vendor/handlebars/src/grammar.rs +++ b/src/vendor/handlebars/src/grammar.rs @@ -1,6 +1,5 @@ use pest::prelude::*; -#[cfg(feature="partial_legacy")] impl_rdp! { grammar! { whitespace = _{ [" "]|["\t"]|["\n"]|["\r"] } @@ -20,139 +19,18 @@ impl_rdp! { null_literal = { ["null"] } boolean_literal = { ["true"]|["false"] } - number_literal = @{ ["-"]? ~ ['0'..'9']+ ~ ["."]? ~ ['0'..'9']* ~ (["E"] ~ ["-"]? ~ ['0'..'9']+)? } + number_literal = @{ ["-"]? ~ ['0'..'9']+ ~ ["."]? ~ ['0'..'9']* + ~ (["E"] ~ ["-"]? ~ ['0'..'9']+)? } string_literal = @{ ["\""] ~ (!["\""] ~ (["\\\""] | any))* ~ ["\""] } array_literal = { ["["] ~ literal? ~ ([","] ~ literal)* ~ ["]"] } - object_literal = { ["{"] ~ (string_literal ~ [":"] ~ literal)? ~ ([","] ~ string_literal ~ [":"] ~ literal)* ~ ["}"] } + object_literal = { ["{"] ~ (string_literal ~ [":"] ~ literal)? + ~ ([","] ~ string_literal ~ [":"] ~ literal)* ~ ["}"] } -// FIXME: a[0], a["b] - symbol_char = _{ ['a'..'z']|['A'..'Z']|['0'..'9']|["_"]|["."]|["@"]|["$"]|["-"]|["<"]|[">"] } + symbol_char = _{['a'..'z']|['A'..'Z']|['0'..'9']|["-"]|["_"]|['\u{80}'..'\u{7ff}']|['\u{800}'..'\u{ffff}']|['\u{10000}'..'\u{10ffff}']} path_char = _{ ["/"] } - identifier = @{ symbol_char ~ ( symbol_char | path_char )* } - reference = @{ identifier ~ (["["] ~ (string_literal|['0'..'9']+) ~ ["]"])* ~ ["-"]* ~ reference* } - name = _{ subexpression | reference } - - param = { !["as"] ~ (literal | reference | subexpression) } - hash = { identifier ~ ["="] ~ param } - block_param = { ["as"] ~ ["|"] ~ identifier ~ identifier? ~ ["|"]} - exp_line = _{ identifier ~ (hash|param)* ~ block_param?} - partial_exp_line = _{ name ~ (hash|param)* } - - subexpression = { ["("] ~ name ~ (hash|param)* ~ [")"] } - - pre_whitespace_omitter = { ["~"] } - pro_whitespace_omitter = { ["~"] } - escape = { ["\\"] } - - expression = { !escape ~ !invert_tag ~ ["{{"] ~ pre_whitespace_omitter? ~ name ~ - pro_whitespace_omitter? ~ ["}}"] } - - html_expression = { !escape ~ ["{{{"] ~ pre_whitespace_omitter? ~ name ~ - pro_whitespace_omitter? 
~ ["}}}"] } - - helper_expression = { !escape ~ !invert_tag ~ ["{{"] ~ pre_whitespace_omitter? ~ exp_line ~ - pro_whitespace_omitter? ~ ["}}"] } - - directive_expression = { !escape ~ ["{{"] ~ pre_whitespace_omitter? ~ ["*"] ~ exp_line ~ - pro_whitespace_omitter? ~ ["}}"] } - partial_expression = { !escape ~ ["{{"] ~ pre_whitespace_omitter? ~ [">"] ~ partial_exp_line ~ - pro_whitespace_omitter? ~ ["}}"] } - - invert_tag_item = { ["else"]|["^"] } - invert_tag = { !escape ~ ["{{"] ~ pre_whitespace_omitter? ~ invert_tag_item - ~ pro_whitespace_omitter? ~ ["}}"]} - - helper_block_start = { !escape ~ ["{{"] ~ pre_whitespace_omitter? ~ ["#"] ~ exp_line ~ - pro_whitespace_omitter? ~ ["}}"] } - helper_block_end = { !escape ~ ["{{"] ~ pre_whitespace_omitter? ~ ["/"] ~ name ~ - pro_whitespace_omitter? ~ ["}}"] } - helper_block = _{ helper_block_start ~ template ~ - (invert_tag ~ template)? ~ - helper_block_end } - - directive_block_start = { !escape ~ ["{{"] ~ pre_whitespace_omitter? ~ ["#"] ~ ["*"] ~ exp_line ~ - pro_whitespace_omitter? ~ ["}}"] } - directive_block_end = { !escape ~ ["{{"] ~ pre_whitespace_omitter? ~ ["/"] ~ name ~ - pro_whitespace_omitter? ~ ["}}"] } - directive_block = _{ directive_block_start ~ template ~ - directive_block_end } - - partial_block_start = { !escape ~ ["{{"] ~ pre_whitespace_omitter? ~ ["#"] ~ [">"] ~ partial_exp_line ~ - pro_whitespace_omitter? ~ ["}}"] } - partial_block_end = { !escape ~ ["{{"] ~ pre_whitespace_omitter? ~ ["/"] ~ name ~ - pro_whitespace_omitter? ~ ["}}"] } - partial_block = _{ partial_block_start ~ template ~ partial_block_end } - - raw_block_start = { !escape ~ ["{{{{"] ~ pre_whitespace_omitter? ~ exp_line ~ - pro_whitespace_omitter? ~ ["}}}}"] } - raw_block_end = { !escape ~ ["{{{{"] ~ pre_whitespace_omitter? ~ ["/"] ~ name ~ - pro_whitespace_omitter? ~ ["}}}}"] } - raw_block = _{ raw_block_start ~ raw_block_text ~ raw_block_end } - - hbs_comment = { !escape ~ ["{{!"] ~ (!["}}"] ~ any)* ~ ["}}"] } - - template = { ( - raw_text | - expression | - html_expression | - helper_expression | - helper_block | - raw_block | - hbs_comment | - directive_expression | - directive_block )* - } - - parameter = _{ param ~ eoi } - handlebars = _{ template ~ eoi } - -// json path visitor - path_ident = _{ ['a'..'z']|['A'..'Z']|['0'..'9']|["_"]|["@"]|["$"]|["<"]|[">"]|["-"]} - path_id = { path_ident+ } - path_num_id = { ['0'..'9']+ } - path_raw_id = { (path_ident|["/"])* } - path_sep = _{ ["/"] | ["."] } - path_up = { [".."] } - path_var = { path_id } - path_key = { ["["] ~ (["\""]|["'"])? ~ path_raw_id ~ (["\""]|["'"])? ~ ["]"] } - path_idx = { ["["] ~ path_num_id ~ ["]"]} - path_item = _{ path_up|path_var } - path = _{ ["./"]? ~ path_item ~ ((path_sep ~ path_item) | (path_sep? ~ (path_key | path_idx)))* ~ eoi } - } -} - -#[cfg(not(feature="partial_legacy"))] -impl_rdp! { - grammar! { - whitespace = _{ [" "]|["\t"]|["\n"]|["\r"] } - - raw_text = @{ ( ["\\{{{{"]? ~ ["\\{{"]? ~ !["{{"] ~ any )+ } - raw_block_text = @{ ( !["{{{{"] ~ any )* } - -// Note: this is not full and strict json literal definition, just for tokenize string, -// array and object types which may contains whitespace. We will use a real json parser -// for real json processing - literal = { string_literal | - array_literal | - object_literal | - number_literal | - null_literal | - boolean_literal } - - null_literal = { ["null"] } - boolean_literal = { ["true"]|["false"] } - number_literal = @{ ["-"]? ~ ['0'..'9']+ ~ ["."]? ~ ['0'..'9']* ~ (["E"] ~ ["-"]? ~ ['0'..'9']+)? 
} - string_literal = @{ ["\""] ~ (!["\""] ~ (["\\\""] | any))* ~ ["\""] } - array_literal = { ["["] ~ literal? ~ ([","] ~ literal)* ~ ["]"] } - object_literal = { ["{"] ~ (string_literal ~ [":"] ~ literal)? ~ ([","] ~ string_literal ~ [":"] ~ literal)* ~ ["}"] } - -// FIXME: a[0], a["b] - symbol_char = _{ ['a'..'z']|['A'..'Z']|['0'..'9']|["_"]|["."]|["@"]|["$"]|["-"] } - path_char = _{ ["/"] } - - identifier = @{ symbol_char ~ ( symbol_char | path_char )* } - reference = @{ identifier ~ (["["] ~ (string_literal|['0'..'9']+) ~ ["]"])* ~ ["-"]* ~ reference* } + identifier = @{ symbol_char+ } + reference = @{ ["@"]? ~ path_inline } name = _{ subexpression | reference } param = { !["as"] ~ (literal | reference | subexpression) } @@ -178,8 +56,8 @@ impl_rdp! { directive_expression = { !escape ~ ["{{"] ~ pre_whitespace_omitter? ~ ["*"] ~ exp_line ~ pro_whitespace_omitter? ~ ["}}"] } - partial_expression = { !escape ~ ["{{"] ~ pre_whitespace_omitter? ~ [">"] ~ partial_exp_line ~ - pro_whitespace_omitter? ~ ["}}"] } + partial_expression = { !escape ~ ["{{"] ~ pre_whitespace_omitter? ~ [">"] ~ partial_exp_line + ~ pro_whitespace_omitter? ~ ["}}"] } invert_tag_item = { ["else"]|["^"] } invert_tag = { !escape ~ ["{{"] ~ pre_whitespace_omitter? ~ invert_tag_item ~ pro_whitespace_omitter? ~ ["}}"]} @@ -191,15 +69,15 @@ impl_rdp! { (invert_tag ~ template)? ~ helper_block_end } - directive_block_start = { !escape ~ ["{{"] ~ pre_whitespace_omitter? ~ ["#"] ~ ["*"] ~ exp_line ~ - pro_whitespace_omitter? ~ ["}}"] } + directive_block_start = { !escape ~ ["{{"] ~ pre_whitespace_omitter? ~ ["#"] ~ ["*"] + ~ exp_line ~ pro_whitespace_omitter? ~ ["}}"] } directive_block_end = { !escape ~ ["{{"] ~ pre_whitespace_omitter? ~ ["/"] ~ name ~ pro_whitespace_omitter? ~ ["}}"] } directive_block = _{ directive_block_start ~ template ~ directive_block_end } - partial_block_start = { !escape ~ ["{{"] ~ pre_whitespace_omitter? ~ ["#"] ~ [">"] ~ partial_exp_line ~ - pro_whitespace_omitter? ~ ["}}"] } + partial_block_start = { !escape ~ ["{{"] ~ pre_whitespace_omitter? ~ ["#"] ~ [">"] + ~ partial_exp_line ~ pro_whitespace_omitter? ~ ["}}"] } partial_block_end = { !escape ~ ["{{"] ~ pre_whitespace_omitter? ~ ["/"] ~ name ~ pro_whitespace_omitter? ~ ["}}"] } partial_block = _{ partial_block_start ~ template ~ partial_block_end } @@ -229,27 +107,31 @@ impl_rdp! { parameter = _{ param ~ eoi } handlebars = _{ template ~ eoi } -// json path visitor - path_ident = _{ ['a'..'z']|['A'..'Z']|['0'..'9']|["_"]|["@"]|["$"]|["<"]|[">"]|["-"]} - path_id = { path_ident+ } - path_num_id = { ['0'..'9']+ } - path_raw_id = { (path_ident|["/"])* } + // json path visitor + // Disallowed chars: Whitespace ! " # % & ' ( ) * + , . / ; < = > @ [ \ ] ^ ` { | } ~ + + path_id = { symbol_char+ } + + path_raw_id = { (!["]"] ~ any)* } path_sep = _{ ["/"] | ["."] } path_up = { [".."] } - path_var = { path_id } - path_key = { ["["] ~ (["\""]|["'"])? ~ path_raw_id ~ (["\""]|["'"])? ~ ["]"] } - path_idx = { ["["] ~ path_num_id ~ ["]"]} - path_item = _{ path_up|path_var } - path = _{ ["./"]? ~ path_item ~ ((path_sep ~ path_item) | (path_sep? ~ (path_key | path_idx)))* ~ eoi } + path_key = _{ ["["] ~ path_raw_id ~ ["]"] } + path_item = _{ path_up|path_id|path_current|path_key } + path_current = { ["this"] | ["."] } + + path_inline = _{ path_item ~ (path_sep ~ path_item)* } + path = _{ path_inline ~ eoi } } } #[test] fn test_raw_text() { - let s = vec!["
helloworld
", - "hello\\{{world}}", - "hello\\{{#if world}}nice\\{{/if}}", - "hello \\{{{{raw}}}}hello\\{{{{/raw}}}}"]; + let s = vec![ + "
helloworld
", + "hello\\{{world}}", + "hello\\{{#if world}}nice\\{{/if}}", + "hello \\{{{{raw}}}}hello\\{{{{/raw}}}}", + ]; for i in s.iter() { let mut rdp = Rdp::new(StringInput::new(i)); assert!(rdp.raw_text()); @@ -266,16 +148,18 @@ fn test_raw_block_text() { #[test] fn test_reference() { - let s = vec!["a", - "abc", - "../a", - "a.b", - "@abc", - "a[\"abc\"]", - "aBc[\"abc\"]", - "abc[0][\"nice\"]", - "some-name", - "this.[0].ok"]; + let s = vec![ + "a", + "abc", + "../a", + "a.b", + "@abc", + "a.[abc]", + "aBc.[abc]", + "abc.[0].[nice]", + "some-name", + "this.[0].ok", + ]; for i in s.iter() { let mut rdp = Rdp::new(StringInput::new(i)); assert!(rdp.reference()); @@ -305,7 +189,12 @@ fn test_param() { #[test] fn test_hash() { - let s = vec!["hello=world", "hello=\"world\"", "hello=(world)", "hello=(world 0)"]; + let s = vec![ + "hello=world", + "hello=\"world\"", + "hello=(world)", + "hello=(world 0)", + ]; for i in s.iter() { let mut rdp = Rdp::new(StringInput::new(i)); assert!(rdp.hash()); @@ -315,14 +204,16 @@ fn test_hash() { #[test] fn test_json_literal() { - let s = vec!["\"json string\"", - "\"quot: \\\"\"", - "[]", - "[\"hello\"]", - "[1,2,3,4,true]", - "{\"hello\": \"world\"}", - "{}", - "{\"a\":1, \"b\":2 }"]; + let s = vec![ + "\"json string\"", + "\"quot: \\\"\"", + "[]", + "[\"hello\"]", + "[1,2,3,4,true]", + "{\"hello\": \"world\"}", + "{}", + "{\"a\":1, \"b\":2 }", + ]; for i in s.iter() { let mut rdp = Rdp::new(StringInput::new(i)); assert!(rdp.literal()); @@ -362,17 +253,19 @@ fn test_expression() { #[test] fn test_helper_expression() { - let s = vec!["{{exp 1}}", - "{{exp \"literal\"}}", - "{{exp ref}}", - "{{exp (sub)}}", - "{{exp (sub 123)}}", - "{{exp []}}", - "{{exp {}}}", - "{{exp key=1}}", - "{{exp key=ref}}", - "{{exp key=(sub)}}", - "{{exp key=(sub 0)}}"]; + let s = vec![ + "{{exp 1}}", + "{{exp \"literal\"}}", + "{{exp ref}}", + "{{exp (sub)}}", + "{{exp (sub 123)}}", + "{{exp []}}", + "{{exp {}}}", + "{{exp key=1}}", + "{{exp key=ref}}", + "{{exp key=(sub)}}", + "{{exp key=(sub 0)}}", + ]; for i in s.iter() { let mut rdp = Rdp::new(StringInput::new(i)); assert!(rdp.helper_expression()); @@ -404,16 +297,18 @@ fn test_html_expression() { #[test] fn test_helper_start() { - let s = vec!["{{#if hello}}", - "{{#if (hello)}}", - "{{#if hello=world}}", - "{{#if hello hello=world}}", - "{{#if []}}", - "{{#if {}}}", - "{{#if}}", - "{{~#if hello~}}", - "{{#each people as |person|}}", - "{{#each-obj obj as |key val|}}"]; + let s = vec![ + "{{#if hello}}", + "{{#if (hello)}}", + "{{#if hello=world}}", + "{{#if hello hello=world}}", + "{{#if []}}", + "{{#if {}}}", + "{{#if}}", + "{{~#if hello~}}", + "{{#each people as |person|}}", + "{{#each-obj obj as |key val|}}", + ]; for i in s.iter() { let mut rdp = Rdp::new(StringInput::new(i)); assert!(rdp.helper_block_start()); @@ -433,16 +328,18 @@ fn test_helper_end() { #[test] fn test_helper_block() { - let s = vec!["{{#if hello}}hello{{/if}}", - "{{#if true}}hello{{/if}}", - "{{#if nice ok=1}}hello{{/if}}", - "{{#if}}hello{{else}}world{{/if}}", - "{{#if}}hello{{^}}world{{/if}}", - "{{#if}}{{#if}}hello{{/if}}{{/if}}", - "{{#if}}hello{{~else}}world{{/if}}", - "{{#if}}hello{{else~}}world{{/if}}", - "{{#if}}hello{{~^~}}world{{/if}}", - "{{#if}}{{/if}}"]; + let s = vec![ + "{{#if hello}}hello{{/if}}", + "{{#if true}}hello{{/if}}", + "{{#if nice ok=1}}hello{{/if}}", + "{{#if}}hello{{else}}world{{/if}}", + "{{#if}}hello{{^}}world{{/if}}", + "{{#if}}{{#if}}hello{{/if}}{{/if}}", + "{{#if}}hello{{~else}}world{{/if}}", + 
"{{#if}}hello{{else~}}world{{/if}}", + "{{#if}}hello{{~^~}}world{{/if}}", + "{{#if}}{{/if}}", + ]; for i in s.iter() { let mut rdp = Rdp::new(StringInput::new(i)); assert!(rdp.helper_block()); @@ -452,8 +349,10 @@ fn test_helper_block() { #[test] fn test_raw_block() { - let s = vec!["{{{{if hello}}}}good {{hello}}{{{{/if}}}}", - "{{{{if hello}}}}{{#if nice}}{{/if}}{{{{/if}}}}"]; + let s = vec![ + "{{{{if hello}}}}good {{hello}}{{{{/if}}}}", + "{{{{if hello}}}}{{#if nice}}{{/if}}{{{{/if}}}}", + ]; for i in s.iter() { let mut rdp = Rdp::new(StringInput::new(i)); assert!(rdp.raw_block()); @@ -473,17 +372,25 @@ fn test_block_param() { #[test] fn test_path() { - let s = vec!["a", - "a.b.c.d", - "a[0][1][2]", - "a[\"abc\"]", - "a/v/c.d.s", - "a[0]/b/c/../d", - "a[\"bbc\"]/b/c/../d", - "../a/b[0][1]", - "./this[0][1]/this/../a", - "./this_name", - "./goo[/bar]"]; + let s = vec![ + "a", + "a.b.c.d", + "a.[0].[1].[2]", + "a.[abc]", + "a/v/c.d.s", + "a.[0]/b/c/../d", + "a.[bb c]/b/c/../d", + "a.[0].[#hello]", + "../a/b.[0].[1]", + "./this.[0]/[1]/this/../a", + "./this_name", + "./goo/[/bar]", + "a.[你好]", + "a.[10].[#comment]", + "a.[]", // empty key + "././[/foo]", + "[foo]", + ]; for i in s.iter() { let mut rdp = Rdp::new(StringInput::new(i)); assert!(rdp.path()); @@ -503,9 +410,11 @@ fn test_directive_expression() { #[test] fn test_directive_block() { - let s = vec!["{{#* inline}}something{{/inline}}", - "{{~#* inline}}hello{{/inline}}", - "{{#* inline \"partialname\"}}something{{/inline}}"]; + let s = vec![ + "{{#* inline}}something{{/inline}}", + "{{~#* inline}}hello{{/inline}}", + "{{#* inline \"partialname\"}}something{{/inline}}", + ]; for i in s.iter() { let mut rdp = Rdp::new(StringInput::new(i)); assert!(rdp.directive_block()); @@ -515,7 +424,12 @@ fn test_directive_block() { #[test] fn test_partial_expression() { - let s = vec!["{{> hello}}", "{{> (hello)}}", "{{~> hello a}}", "{{> hello a=1}}"]; + let s = vec![ + "{{> hello}}", + "{{> (hello)}}", + "{{~> hello a}}", + "{{> hello a=1}}", + ]; for i in s.iter() { let mut rdp = Rdp::new(StringInput::new(i)); assert!(rdp.partial_expression()); diff --git a/src/vendor/handlebars/src/helpers/helper_each.rs b/src/vendor/handlebars/src/helpers/helper_each.rs index fe23074e18..57b78bcbfc 100644 --- a/src/vendor/handlebars/src/helpers/helper_each.rs +++ b/src/vendor/handlebars/src/helpers/helper_each.rs @@ -5,22 +5,26 @@ use serde_json::value::Value as Json; use helpers::HelperDef; use registry::Registry; use context::{JsonTruthy, to_json}; -use render::{Renderable, RenderContext, RenderError, Helper}; +use render::{Renderable, RenderContext, Helper}; +use error::RenderError; #[derive(Clone, Copy)] pub struct EachHelper; impl HelperDef for EachHelper { fn call(&self, h: &Helper, r: &Registry, rc: &mut RenderContext) -> Result<(), RenderError> { - let value = - try!(h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"each\""))); + let value = try!(h.param(0).ok_or_else(|| { + RenderError::new("Param not found for helper \"each\"") + })); let template = h.template(); match template { Some(t) => { rc.promote_local_vars(); - let local_path_root = value.path_root().map(|p| format!("{}/{}", rc.get_path(), p)); + let local_path_root = value.path_root().map( + |p| format!("{}/{}", rc.get_path(), p), + ); debug!("each value {:?}", value.value()); let rendered = match (value.value().is_truthy(), value.value()) { @@ -38,7 +42,7 @@ impl HelperDef for EachHelper { if let Some(inner_path) = value.path() { let new_path = - 
format!("{}/{}.[{}]", local_rc.get_path(), inner_path, i); + format!("{}/{}/[{}]", local_rc.get_path(), inner_path, i); debug!("each path {:?}", new_path); local_rc.set_path(new_path.clone()); } @@ -46,7 +50,7 @@ impl HelperDef for EachHelper { if let Some(block_param) = h.block_param() { let mut map = BTreeMap::new(); map.insert(block_param.to_string(), to_json(&list[i])); - local_rc.push_block_context(&map); + local_rc.push_block_context(&map)?; } try!(t.render(r, &mut local_rc)); @@ -77,7 +81,7 @@ impl HelperDef for EachHelper { if let Some(inner_path) = value.path() { let new_path = - format!("{}/{}.[{}]", local_rc.get_path(), inner_path, k); + format!("{}/{}/[{}]", local_rc.get_path(), inner_path, k); local_rc.set_path(new_path); } @@ -85,7 +89,7 @@ impl HelperDef for EachHelper { let mut map = BTreeMap::new(); map.insert(bp_key.to_string(), to_json(k)); map.insert(bp_val.to_string(), to_json(obj.get(k).unwrap())); - local_rc.push_block_context(&map); + local_rc.push_block_context(&map)?; } try!(t.render(r, &mut local_rc)); @@ -108,7 +112,9 @@ impl HelperDef for EachHelper { Ok(()) } _ => { - Err(RenderError::new(format!("Param type is not iterable: {:?}", template))) + Err(RenderError::new( + format!("Param type is not iterable: {:?}", template), + )) } }; @@ -134,14 +140,28 @@ mod test { #[test] fn test_each() { let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", "{{#each this}}{{@first}}|{{@last}}|{{@index}}:{{this}}|{{/each}}").is_ok()); - assert!(handlebars.register_template_string("t1", - "{{#each this}}{{@first}}|{{@key}}:{{this}}|{{/each}}") - .is_ok()); + assert!( + handlebars + .register_template_string( + "t0", + "{{#each this}}{{@first}}|{{@last}}|{{@index}}:{{this}}|{{/each}}", + ) + .is_ok() + ); + assert!( + handlebars + .register_template_string( + "t1", + "{{#each this}}{{@first}}|{{@key}}:{{this}}|{{/each}}", + ) + .is_ok() + ); let r0 = handlebars.render("t0", &vec![1u16, 2u16, 3u16]); - assert_eq!(r0.ok().unwrap(), - "true|false|0:1|false|false|1:2|false|true|2:3|".to_string()); + assert_eq!( + r0.ok().unwrap(), + "true|false|0:1|false|false|1:2|false|true|2:3|".to_string() + ); let mut m: BTreeMap = BTreeMap::new(); m.insert("ftp".to_string(), 21); @@ -162,9 +182,11 @@ mod test { // previously, to access the parent in an each block, // a user would need to specify ../../b, as the path // that is computed includes the array index: ./a.c.[0] - assert!(handlebars.register_template_string("t0", - "{{#each a.c}} d={{d}} b={{../a.a}} {{/each}}") - .is_ok()); + assert!( + handlebars + .register_template_string("t0", "{{#each a.c}} d={{d}} b={{../a.a}} {{/each}}") + .is_ok() + ); let r1 = handlebars.render("t0", &data); assert_eq!(r1.ok().unwrap(), " d=100 b=99 d=200 b=99 ".to_string()); @@ -177,7 +199,14 @@ mod test { let data = Json::from_str(json_str).unwrap(); let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", "{{#each a}}{{#each b}}{{d}}:{{../c}}{{/each}}{{/each}}").is_ok()); + assert!( + handlebars + .register_template_string( + "t0", + "{{#each a}}{{#each b}}{{d}}:{{../c}}{{/each}}{{/each}}", + ) + .is_ok() + ); let r1 = handlebars.render("t0", &data); assert_eq!(r1.ok().unwrap(), "100:200".to_string()); @@ -191,7 +220,14 @@ mod test { let data = Json::from_str(json_str).unwrap(); let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", "{{#each b}}{{#if ../a}}{{#each this}}{{this}}{{/each}}{{/if}}{{/each}}").is_ok()); + assert!( + handlebars + 
.register_template_string( + "t0", + "{{#each b}}{{#if ../a}}{{#each this}}{{this}}{{/each}}{{/if}}{{/each}}", + ) + .is_ok() + ); let r1 = handlebars.render("t0", &data); assert_eq!(r1.ok().unwrap(), "12345".to_string()); @@ -201,8 +237,11 @@ mod test { #[test] fn test_nested_array() { let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", "{{#each this.[0]}}{{this}}{{/each}}") - .is_ok()); + assert!( + handlebars + .register_template_string("t0", "{{#each this.[0]}}{{this}}{{/each}}") + .is_ok() + ); let r0 = handlebars.render("t0", &(vec![vec![1, 2, 3]])); @@ -212,25 +251,30 @@ mod test { #[test] fn test_empty_key() { let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", - "{{#each this}}{{@key}}-{{value}}\n{{/each}}") - .is_ok()); - - let r0 = handlebars.render("t0", - &({ - let mut rv = BTreeMap::new(); - rv.insert("foo".to_owned(), { - let mut rv = BTreeMap::new(); - rv.insert("value".to_owned(), "bar".to_owned()); - rv - }); - rv.insert("".to_owned(), { - let mut rv = BTreeMap::new(); - rv.insert("value".to_owned(), "baz".to_owned()); - rv - }); - rv - })) + assert!( + handlebars + .register_template_string("t0", "{{#each this}}{{@key}}-{{value}}\n{{/each}}") + .is_ok() + ); + + let r0 = handlebars + .render( + "t0", + &({ + let mut rv = BTreeMap::new(); + rv.insert("foo".to_owned(), { + let mut rv = BTreeMap::new(); + rv.insert("value".to_owned(), "bar".to_owned()); + rv + }); + rv.insert("".to_owned(), { + let mut rv = BTreeMap::new(); + rv.insert("value".to_owned(), "baz".to_owned()); + rv + }); + rv + }), + ) .unwrap(); let mut r0_sp: Vec<_> = r0.split('\n').collect(); @@ -242,15 +286,20 @@ mod test { #[test] fn test_each_else() { let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", "{{#each a}}1{{else}}empty{{/each}}") - .is_ok()); - let m1 = btreemap! { + assert!( + handlebars + .register_template_string("t0", "{{#each a}}1{{else}}empty{{/each}}") + .is_ok() + ); + let m1 = + btreemap! { "a".to_string() => Vec::::new(), }; let r0 = handlebars.render("t0", &m1).unwrap(); assert_eq!(r0, "empty"); - let m2 = btreemap!{ + let m2 = + btreemap!{ "b".to_string() => Vec::::new() }; let r1 = handlebars.render("t0", &m2).unwrap(); @@ -260,9 +309,13 @@ mod test { #[test] fn test_block_param() { let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", "{{#each a as |i|}}{{i}}{{/each}}") - .is_ok()); - let m1 = btreemap! { + assert!( + handlebars + .register_template_string("t0", "{{#each a as |i|}}{{i}}{{/each}}") + .is_ok() + ); + let m1 = + btreemap! { "a".to_string() => vec![1,2,3,4,5] }; let r0 = handlebars.render("t0", &m1).unwrap(); @@ -274,7 +327,8 @@ mod test { let mut handlebars = Registry::new(); assert!(handlebars.register_template_string("t0", "{{#each this as |k v|}}{{#with k as |inner_k|}}{{inner_k}}{{/with}}:{{v}}|{{/each}}").is_ok()); - let m = btreemap!{ + let m = + btreemap!{ "ftp".to_string() => 21, "http".to_string() => 80 }; @@ -285,9 +339,17 @@ mod test { #[test] fn test_nested_each_with_path_ups() { let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", "{{#each a.b}}{{#each c}}{{../../d}}{{/each}}{{/each}}").is_ok()); - - let data = btreemap! { + assert!( + handlebars + .register_template_string( + "t0", + "{{#each a.b}}{{#each c}}{{../../d}}{{/each}}{{/each}}", + ) + .is_ok() + ); + + let data = + btreemap! { "a".to_string() => to_json(&btreemap! 
{ "b".to_string() => vec![btreemap!{"c".to_string() => vec![1]}] }), @@ -302,11 +364,35 @@ mod test { fn test_nested_each_with_path_up_this() { let mut handlebars = Registry::new(); assert!(handlebars.register_template_string("t0", "{{#each variant}}{{#each ../typearg}}{{#if @first}}template<{{/if}}{{this}}{{#if @last}}>{{else}},{{/if}}{{/each}}{{/each}}").is_ok()); - let data = btreemap! { + let data = + btreemap! { "typearg".to_string() => vec!["T".to_string()], "variant".to_string() => vec!["1".to_string(), "2".to_string()] }; let r0 = handlebars.render("t0", &data); assert_eq!(r0.ok().unwrap(), "templatetemplate".to_string()); } + + #[test] + fn test_key_iteration_with_unicode() { + let mut handlebars = Registry::new(); + assert!( + handlebars + .register_template_string("t0", "{{#each this}}{{@key}}: {{this}}\n{{/each}}") + .is_ok() + ); + let data = json!({ + "normal": 1, + "你好": 2, + "#special key": 3, + "😂": 4, + "me.dot.key": 5 + }); + let r0 = handlebars.render("t0", &data).ok().unwrap(); + assert!(r0.contains("normal: 1")); + assert!(r0.contains("你好: 2")); + assert!(r0.contains("#special key: 3")); + assert!(r0.contains("😂: 4")); + assert!(r0.contains("me.dot.key: 5")); + } } diff --git a/src/vendor/handlebars/src/helpers/helper_if.rs b/src/vendor/handlebars/src/helpers/helper_if.rs index 65e5d516ff..d3ee1e0cad 100644 --- a/src/vendor/handlebars/src/helpers/helper_if.rs +++ b/src/vendor/handlebars/src/helpers/helper_if.rs @@ -1,7 +1,8 @@ use helpers::HelperDef; use registry::Registry; use context::JsonTruthy; -use render::{Renderable, RenderContext, RenderError, Helper}; +use render::{Renderable, RenderContext, Helper}; +use error::RenderError; #[derive(Clone, Copy)] pub struct IfHelper { @@ -10,8 +11,9 @@ pub struct IfHelper { impl HelperDef for IfHelper { fn call(&self, h: &Helper, r: &Registry, rc: &mut RenderContext) -> Result<(), RenderError> { - let param = - try!(h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"if\""))); + let param = try!(h.param(0).ok_or_else(|| { + RenderError::new("Param not found for helper \"if\"") + })); let mut value = param.value().is_truthy(); @@ -40,10 +42,16 @@ mod test { #[test] fn test_if() { let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", "{{#if this}}hello{{/if}}").is_ok()); - assert!(handlebars.register_template_string("t1", - "{{#unless this}}hello{{else}}world{{/unless}}") - .is_ok()); + assert!( + handlebars + .register_template_string("t0", "{{#if this}}hello{{/if}}") + .is_ok() + ); + assert!( + handlebars + .register_template_string("t1", "{{#unless this}}hello{{else}}world{{/unless}}") + .is_ok() + ); let r0 = handlebars.render("t0", &true); assert_eq!(r0.ok().unwrap(), "hello".to_string()); @@ -62,9 +70,19 @@ mod test { let mut handlebars = Registry::new(); handlebars.register_helper("with", Box::new(WITH_HELPER)); - assert!(handlebars.register_template_string("t0", "{{#if a.c.d}}hello {{a.b}}{{/if}}") - .is_ok()); - assert!(handlebars.register_template_string("t1", "{{#with a}}{{#if c.d}}hello {{../a.b}}{{/if}}{{/with}}").is_ok()); + assert!( + handlebars + .register_template_string("t0", "{{#if a.c.d}}hello {{a.b}}{{/if}}") + .is_ok() + ); + assert!( + handlebars + .register_template_string( + "t1", + "{{#with a}}{{#if c.d}}hello {{../a.b}}{{/if}}{{/with}}", + ) + .is_ok() + ); let r0 = handlebars.render("t0", &data); assert_eq!(r0.ok().unwrap(), "hello 99".to_string()); diff --git a/src/vendor/handlebars/src/helpers/helper_log.rs 
b/src/vendor/handlebars/src/helpers/helper_log.rs index acb397229b..503deed6dd 100644 --- a/src/vendor/handlebars/src/helpers/helper_log.rs +++ b/src/vendor/handlebars/src/helpers/helper_log.rs @@ -1,19 +1,23 @@ use helpers::HelperDef; use registry::Registry; use context::JsonRender; -use render::{RenderContext, RenderError, Helper}; +use render::{RenderContext, Helper}; +use error::RenderError; #[derive(Clone, Copy)] pub struct LogHelper; impl HelperDef for LogHelper { fn call(&self, h: &Helper, _: &Registry, _: &mut RenderContext) -> Result<(), RenderError> { - let param = - try!(h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"log\""))); + let param = try!(h.param(0).ok_or_else(|| { + RenderError::new("Param not found for helper \"log\"") + })); - info!("{}: {}", - param.path().unwrap_or(&"".to_owned()), - param.value().render()); + info!( + "{}: {}", + param.path().unwrap_or(&"".to_owned()), + param.value().render() + ); Ok(()) } diff --git a/src/vendor/handlebars/src/helpers/helper_lookup.rs b/src/vendor/handlebars/src/helpers/helper_lookup.rs index 64c5215cdb..d36aee1f01 100644 --- a/src/vendor/handlebars/src/helpers/helper_lookup.rs +++ b/src/vendor/handlebars/src/helpers/helper_lookup.rs @@ -3,7 +3,8 @@ use serde_json::value::Value as Json; use helpers::HelperDef; use registry::Registry; use context::JsonRender; -use render::{RenderContext, RenderError, Helper}; +use render::{RenderContext, Helper}; +use error::RenderError; #[derive(Clone, Copy)] pub struct LookupHelper; @@ -20,17 +21,17 @@ impl HelperDef for LookupHelper { let null = Json::Null; let value = match collection_value.value() { &Json::Array(ref v) => { - index.value() + index + .value() .as_u64() .and_then(|u| Some(u as usize)) .and_then(|u| v.get(u)) .unwrap_or(&null) } &Json::Object(ref m) => { - index.value() - .as_str() - .and_then(|k| m.get(k)) - .unwrap_or(&null) + index.value().as_str().and_then(|k| m.get(k)).unwrap_or( + &null, + ) } _ => &null, }; @@ -51,17 +52,28 @@ mod test { #[test] fn test_lookup() { let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", "{{#each v1}}{{lookup ../../v2 @index}}{{/each}}").is_ok()); - assert!(handlebars.register_template_string("t1", - "{{#each v1}}{{lookup ../../v2 1}}{{/each}}") - .is_ok()); - assert!(handlebars.register_template_string("t2", "{{lookup kk \"a\"}}").is_ok()); + assert!( + handlebars + .register_template_string("t0", "{{#each v1}}{{lookup ../../v2 @index}}{{/each}}") + .is_ok() + ); + assert!( + handlebars + .register_template_string("t1", "{{#each v1}}{{lookup ../../v2 1}}{{/each}}") + .is_ok() + ); + assert!( + handlebars + .register_template_string("t2", "{{lookup kk \"a\"}}") + .is_ok() + ); let mut m: BTreeMap> = BTreeMap::new(); m.insert("v1".to_string(), vec![1u16, 2u16, 3u16]); m.insert("v2".to_string(), vec![9u16, 8u16, 7u16]); - let m2 = btreemap!{ + let m2 = + btreemap!{ "kk".to_string() => btreemap!{"a".to_string() => "world".to_string()} }; diff --git a/src/vendor/handlebars/src/helpers/helper_partial.rs b/src/vendor/handlebars/src/helpers/helper_partial.rs deleted file mode 100644 index cb8a220ced..0000000000 --- a/src/vendor/handlebars/src/helpers/helper_partial.rs +++ /dev/null @@ -1,198 +0,0 @@ -use std::collections::BTreeMap; -use std::iter::FromIterator; - -use helpers::HelperDef; -use registry::Registry; -use context::JsonRender; -use render::{Renderable, RenderContext, RenderError, Helper}; - -#[derive(Clone, Copy)] -pub struct IncludeHelper; - -#[derive(Clone, Copy)] -pub struct 
BlockHelper; - -#[derive(Clone, Copy)] -pub struct PartialHelper; - -impl HelperDef for IncludeHelper { - fn call(&self, h: &Helper, r: &Registry, rc: &mut RenderContext) -> Result<(), RenderError> { - let template = try!(h.params() - .get(0) - .ok_or(RenderError::new("Param not found for helper")) - .and_then(|ref t| { - t.path() - .or(Some(&t.value().render())) - .ok_or(RenderError::new("Invalid template name to include")) - .and_then(|p| if rc.is_current_template(p) { - Err(RenderError::new("Cannot include self in >")) - } else { - Ok(r.get_template(&p)) - }) - })); - - let context_param = h.params().get(1).and_then(|p| p.path()); - let old_path = match context_param { - Some(p) => { - let old_path = rc.get_path().clone(); - rc.promote_local_vars(); - let new_path = format!("{}/{}", old_path, p); - rc.set_path(new_path); - Some(old_path) - } - None => None, - }; - - let result = match template { - Some(t) => { - if h.hash().is_empty() { - t.render(r, rc) - } else { - let hash_ctx = BTreeMap::from_iter(h.hash().iter().map(|(k, v)| { - (k.clone(), - v.value().clone()) - })); - let mut local_rc = rc.derive(); - - { - let mut ctx_ref = local_rc.context_mut(); - *ctx_ref = ctx_ref.extend(&hash_ctx); - } - t.render(r, &mut local_rc) - } - } - None => Err(RenderError::new("Template not found.")), - }; - - if let Some(path) = old_path { - rc.set_path(path); - rc.demote_local_vars(); - } - - result - } -} - -impl HelperDef for BlockHelper { - fn call(&self, h: &Helper, r: &Registry, rc: &mut RenderContext) -> Result<(), RenderError> { - let param = try!(h.param(0).ok_or_else(|| RenderError::new("Param not found for helper"))); - - if let Some(partial_path) = param.path() { - let partial_template = rc.get_partial(partial_path); - - match partial_template { - Some(partial_template) => partial_template.render(r, rc), - None => h.template().unwrap().render(r, rc), - } - } else { - Err(RenderError::new("Do not use literal here, use template name directly.")) - } - } -} - -impl HelperDef for PartialHelper { - fn call(&self, h: &Helper, _: &Registry, rc: &mut RenderContext) -> Result<(), RenderError> { - let param = try!(h.param(0).ok_or_else(|| RenderError::new("Param not found for helper"))); - - if let Some(partial_path) = param.path() { - rc.set_partial(partial_path.to_owned(), h.template().unwrap().clone()); - } - - Ok(()) - } -} - -pub static INCLUDE_HELPER: IncludeHelper = IncludeHelper; -pub static BLOCK_HELPER: BlockHelper = BlockHelper; -pub static PARTIAL_HELPER: PartialHelper = PartialHelper; - -#[cfg(test)] -mod test { - use registry::Registry; - use std::collections::BTreeMap; - - #[test] - fn test() { - let t0 = "

{{#block title}}default{{/block}}

"; - let t1 = "{{#partial title}}{{this}}{{/partial}}{{> t0}}"; - let t2 = "{{> t0}}

{{this}}

"; - - let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", t0).is_ok()); - assert!(handlebars.register_template_string("t1", t1).is_ok()); - assert!(handlebars.register_template_string("t2", t2).is_ok()); - - let r0 = handlebars.render("t1", &true); - assert_eq!(r0.ok().unwrap(), "

true

".to_string()); - - let r1 = handlebars.render("t2", &true); - assert_eq!(r1.ok().unwrap(), "

default

true

".to_string()); - } - - #[test] - fn test_context() { - let t0 = "

{{> (body) data}}

"; - let t1 = "

{{this}}

"; - - let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", t0).is_ok()); - assert!(handlebars.register_template_string("t1", t1).is_ok()); - - let mut map: BTreeMap = BTreeMap::new(); - map.insert("body".into(), "t1".into()); - map.insert("data".into(), "hello".into()); - - let r0 = handlebars.render("t0", &map); - assert_eq!(r0.ok().unwrap(), "

hello

".to_string()); - } - - #[test] - fn test_partial_hash_context() { - let t0 = "

{{> t1 hello=\"world\"}}

"; - let t1 = "

{{data}}

{{hello}}

"; - - let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", t0).is_ok()); - assert!(handlebars.register_template_string("t1", t1).is_ok()); - - let mut map: BTreeMap = BTreeMap::new(); - map.insert("data".into(), "hello".into()); - - let r0 = handlebars.render("t0", &map); - assert_eq!(r0.ok().unwrap(), - "

hello

world

".to_string()); - } - - #[test] - fn test_inline_partial() { - let t0 = "{{#partial title}}hello {{name}}{{/partial}}

include partial: {{#block title}}{{/block}}

"; - let t1 = "{{#block none_partial}}Partial not found{{/block}}"; - - let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", t0).is_ok()); - assert!(handlebars.register_template_string("t1", t1).is_ok()); - - let mut map: BTreeMap = BTreeMap::new(); - map.insert("name".into(), "world".into()); - - let r0 = handlebars.render("t0", &map); - assert_eq!(r0.ok().unwrap(), - "

include partial: hello world

".to_string()); - - let r1 = handlebars.render("t1", &map); - assert_eq!(r1.ok().unwrap(), "Partial not found".to_string()); - } - - #[test] - fn test_include_self() { - let t0 = "

{{> t0}}

"; - let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", t0).is_ok()); - - let map: BTreeMap = BTreeMap::new(); - - let r0 = handlebars.render("t0", &map); - assert!(r0.is_err()); - } - -} diff --git a/src/vendor/handlebars/src/helpers/helper_raw.rs b/src/vendor/handlebars/src/helpers/helper_raw.rs index e017820a1e..ae1fcc8da0 100644 --- a/src/vendor/handlebars/src/helpers/helper_raw.rs +++ b/src/vendor/handlebars/src/helpers/helper_raw.rs @@ -1,6 +1,7 @@ use helpers::HelperDef; use registry::Registry; -use render::{Renderable, RenderContext, RenderError, Helper}; +use render::{Renderable, RenderContext, Helper}; +use error::RenderError; #[derive(Clone, Copy)] pub struct RawHelper; @@ -25,7 +26,11 @@ mod test { #[test] fn test_raw_helper() { let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", "a{{{{raw}}}}{{content}}{{else}}hello{{{{/raw}}}}").is_ok()); + assert!( + handlebars + .register_template_string("t0", "a{{{{raw}}}}{{content}}{{else}}hello{{{{/raw}}}}") + .is_ok() + ); let r = handlebars.render("t0", &()); assert_eq!(r.ok().unwrap(), "a{{content}}{{else}}hello"); diff --git a/src/vendor/handlebars/src/helpers/helper_with.rs b/src/vendor/handlebars/src/helpers/helper_with.rs index c22ca913af..a040a8870e 100644 --- a/src/vendor/handlebars/src/helpers/helper_with.rs +++ b/src/vendor/handlebars/src/helpers/helper_with.rs @@ -3,15 +3,17 @@ use std::collections::BTreeMap; use helpers::HelperDef; use registry::Registry; use context::{JsonTruthy, to_json}; -use render::{Renderable, RenderContext, RenderError, Helper}; +use render::{Renderable, RenderContext, Helper}; +use error::RenderError; #[derive(Clone, Copy)] pub struct WithHelper; impl HelperDef for WithHelper { fn call(&self, h: &Helper, r: &Registry, rc: &mut RenderContext) -> Result<(), RenderError> { - let param = - try!(h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"with\""))); + let param = try!(h.param(0).ok_or_else(|| { + RenderError::new("Param not found for helper \"with\"") + })); rc.promote_local_vars(); @@ -34,7 +36,7 @@ impl HelperDef for WithHelper { if let Some(block_param) = h.block_param() { let mut map = BTreeMap::new(); map.insert(block_param.to_string(), to_json(param.value())); - local_rc.push_block_context(&map); + local_rc.push_block_context(&map)?; } } @@ -95,14 +97,21 @@ mod test { }; let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", "{{#with addr}}{{city}}{{/with}}") - .is_ok()); - assert!(handlebars.register_template_string("t1", - "{{#with notfound}}hello{{else}}world{{/with}}") - .is_ok()); - assert!(handlebars.register_template_string("t2", - "{{#with addr/country}}{{this}}{{/with}}") - .is_ok()); + assert!( + handlebars + .register_template_string("t0", "{{#with addr}}{{city}}{{/with}}") + .is_ok() + ); + assert!( + handlebars + .register_template_string("t1", "{{#with notfound}}hello{{else}}world{{/with}}") + .is_ok() + ); + assert!( + handlebars + .register_template_string("t2", "{{#with addr/country}}{{this}}{{/with}}") + .is_ok() + ); let r0 = handlebars.render("t0", &person); assert_eq!(r0.ok().unwrap(), "Beijing".to_string()); @@ -129,13 +138,24 @@ mod test { }; let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", - "{{#with addr as |a|}}{{a.city}}{{/with}}") - .is_ok()); - assert!(handlebars.register_template_string("t1", "{{#with notfound as |c|}}hello{{else}}world{{/with}}").is_ok()); - 
assert!(handlebars.register_template_string("t2", - "{{#with addr/country as |t|}}{{t}}{{/with}}") - .is_ok()); + assert!( + handlebars + .register_template_string("t0", "{{#with addr as |a|}}{{a.city}}{{/with}}") + .is_ok() + ); + assert!( + handlebars + .register_template_string( + "t1", + "{{#with notfound as |c|}}hello{{else}}world{{/with}}", + ) + .is_ok() + ); + assert!( + handlebars + .register_template_string("t2", "{{#with addr/country as |t|}}{{t}}{{/with}}") + .is_ok() + ); let r0 = handlebars.render("t0", &person); assert_eq!(r0.ok().unwrap(), "Beijing".to_string()); @@ -176,9 +196,30 @@ mod test { let people = vec![person, person2]; let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", "{{#each this}}{{#with addr}}{{city}}{{/with}}{{/each}}").is_ok()); - assert!(handlebars.register_template_string("t1", "{{#each this}}{{#with addr}}{{../age}}{{/with}}{{/each}}").is_ok()); - assert!(handlebars.register_template_string("t2", "{{#each this}}{{#with addr}}{{@../index}}{{/with}}{{/each}}").is_ok()); + assert!( + handlebars + .register_template_string( + "t0", + "{{#each this}}{{#with addr}}{{city}}{{/with}}{{/each}}", + ) + .is_ok() + ); + assert!( + handlebars + .register_template_string( + "t1", + "{{#each this}}{{#with addr}}{{../age}}{{/with}}{{/each}}", + ) + .is_ok() + ); + assert!( + handlebars + .register_template_string( + "t2", + "{{#each this}}{{#with addr}}{{@../index}}{{/with}}{{/each}}", + ) + .is_ok() + ); let r0 = handlebars.render("t0", &people); assert_eq!(r0.ok().unwrap(), "BeijingBeijing".to_string()); @@ -193,10 +234,16 @@ mod test { #[test] fn test_path_up() { let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", - "{{#with a}}{{#with b}}{{../../d}}{{/with}}{{/with}}") - .is_ok()); - let data = btreemap! { + assert!( + handlebars + .register_template_string( + "t0", + "{{#with a}}{{#with b}}{{../../d}}{{/with}}{{/with}}", + ) + .is_ok() + ); + let data = + btreemap! { "a".to_string() => to_json(&btreemap! 
{ "b".to_string() => vec![btreemap!{"c".to_string() => vec![1]}] }), diff --git a/src/vendor/handlebars/src/helpers/mod.rs b/src/vendor/handlebars/src/helpers/mod.rs index 7c69fb9393..3a3e38b500 100644 --- a/src/vendor/handlebars/src/helpers/mod.rs +++ b/src/vendor/handlebars/src/helpers/mod.rs @@ -1,13 +1,12 @@ -use render::{RenderContext, RenderError, Helper}; +use render::{RenderContext, Helper}; use registry::Registry; +use error::RenderError; pub use self::helper_if::{IF_HELPER, UNLESS_HELPER}; pub use self::helper_each::EACH_HELPER; pub use self::helper_with::WITH_HELPER; pub use self::helper_lookup::LOOKUP_HELPER; pub use self::helper_raw::RAW_HELPER; -#[cfg(feature="partial_legacy")] -pub use self::helper_partial::{INCLUDE_HELPER, BLOCK_HELPER, PARTIAL_HELPER}; pub use self::helper_log::LOG_HELPER; /// Helper Definition @@ -51,8 +50,13 @@ pub trait HelperDef: Send + Sync { } /// implement HelperDef for bare function so we can use function as helper -impl Fn(&'b Helper, &'c Registry, &'d mut RenderContext) -> Result<(), RenderError>> HelperDef for F { - fn call(&self, h: &Helper, r: &Registry, rc: &mut RenderContext) -> Result<(), RenderError>{ +impl< + F: Send + + Sync + + for<'b, 'c, 'd, 'e> Fn(&'b Helper, &'c Registry, &'d mut RenderContext) + -> Result<(), RenderError>, +> HelperDef for F { + fn call(&self, h: &Helper, r: &Registry, rc: &mut RenderContext) -> Result<(), RenderError> { (*self)(h, r, rc) } } @@ -62,8 +66,6 @@ mod helper_each; mod helper_with; mod helper_lookup; mod helper_raw; -#[cfg(feature="partial_legacy")] -mod helper_partial; mod helper_log; // pub type HelperDef = for <'a, 'b, 'c> Fn<(&'a Context, &'b Helper, &'b Registry, &'c mut RenderContext), Result>; @@ -80,17 +82,19 @@ mod test { use context::JsonRender; use helpers::HelperDef; use registry::Registry; - use render::{RenderContext, RenderError, Renderable, Helper}; + use render::{RenderContext, Renderable, Helper}; + use error::RenderError; #[derive(Clone, Copy)] struct MetaHelper; impl HelperDef for MetaHelper { - fn call(&self, - h: &Helper, - r: &Registry, - rc: &mut RenderContext) - -> Result<(), RenderError> { + fn call( + &self, + h: &Helper, + r: &Registry, + rc: &mut RenderContext, + ) -> Result<(), RenderError> { let v = h.param(0).unwrap(); if !h.is_block() { @@ -109,8 +113,16 @@ mod test { #[test] fn test_meta_helper() { let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", "{{foo this}}").is_ok()); - assert!(handlebars.register_template_string("t1", "{{#bar this}}nice{{/bar}}").is_ok()); + assert!( + handlebars + .register_template_string("t0", "{{foo this}}") + .is_ok() + ); + assert!( + handlebars + .register_template_string("t1", "{{#bar this}}nice{{/bar}}") + .is_ok() + ); let meta_helper = MetaHelper; handlebars.register_helper("helperMissing", Box::new(meta_helper)); @@ -126,32 +138,34 @@ mod test { #[test] fn test_helper_for_subexpression() { let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t2", "{{foo value=(bar 0)}}").is_ok()); - - handlebars.register_helper("helperMissing", - Box::new(|h: &Helper, - _: &Registry, - rc: &mut RenderContext| - -> Result<(), RenderError> { - let output = format!("{}{}", - h.name(), - h.param(0).unwrap().value()); - try!(rc.writer.write(output.into_bytes().as_ref())); - Ok(()) - })); - handlebars.register_helper("foo", - Box::new(|h: &Helper, - _: &Registry, - rc: &mut RenderContext| - -> Result<(), RenderError> { - let output = format!("{}", - h.hash_get("value") - .unwrap() - 
.value() - .render()); - try!(rc.writer.write(output.into_bytes().as_ref())); - Ok(()) - })); + assert!( + handlebars + .register_template_string("t2", "{{foo value=(bar 0)}}") + .is_ok() + ); + + handlebars.register_helper( + "helperMissing", + Box::new(|h: &Helper, + _: &Registry, + rc: &mut RenderContext| + -> Result<(), RenderError> { + let output = format!("{}{}", h.name(), h.param(0).unwrap().value()); + try!(rc.writer.write(output.into_bytes().as_ref())); + Ok(()) + }), + ); + handlebars.register_helper( + "foo", + Box::new(|h: &Helper, + _: &Registry, + rc: &mut RenderContext| + -> Result<(), RenderError> { + let output = format!("{}", h.hash_get("value").unwrap().value().render()); + try!(rc.writer.write(output.into_bytes().as_ref())); + Ok(()) + }), + ); let mut data = BTreeMap::new(); // handlebars should never try to lookup this value because diff --git a/src/vendor/handlebars/src/lib.rs b/src/vendor/handlebars/src/lib.rs index 6fbfd2968b..41b848a04b 100644 --- a/src/vendor/handlebars/src/lib.rs +++ b/src/vendor/handlebars/src/lib.rs @@ -294,9 +294,9 @@ extern crate serde; extern crate serde_json; pub use self::template::Template; -pub use self::error::{TemplateError, TemplateFileError, TemplateRenderError}; +pub use self::error::{RenderError, TemplateError, TemplateFileError, TemplateRenderError}; pub use self::registry::{EscapeFn, no_escape, html_escape, Registry as Handlebars}; -pub use self::render::{Renderable, Evaluable, RenderError, RenderContext, Helper, ContextJson, +pub use self::render::{Renderable, Evaluable, RenderContext, Helper, ContextJson, Directive as Decorator}; pub use self::helpers::HelperDef; pub use self::directives::DirectiveDef as DecoratorDef; @@ -311,5 +311,4 @@ mod helpers; mod context; mod support; mod directives; -#[cfg(not(feature="partial_legacy"))] mod partial; diff --git a/src/vendor/handlebars/src/partial.rs b/src/vendor/handlebars/src/partial.rs index 5e85d7d9a8..bf649ba4a1 100644 --- a/src/vendor/handlebars/src/partial.rs +++ b/src/vendor/handlebars/src/partial.rs @@ -2,12 +2,15 @@ use std::collections::BTreeMap; use std::iter::FromIterator; use registry::Registry; -use render::{RenderError, RenderContext, Directive, Evaluable, Renderable}; +use context::{Context, merge_json}; +use render::{RenderContext, Directive, Evaluable, Renderable}; +use error::RenderError; -pub fn expand_partial(d: &Directive, - r: &Registry, - rc: &mut RenderContext) - -> Result<(), RenderError> { +pub fn expand_partial( + d: &Directive, + r: &Registry, + rc: &mut RenderContext, +) -> Result<(), RenderError> { // try eval inline partials first if let Some(t) = d.template() { @@ -39,19 +42,16 @@ pub fn expand_partial(d: &Directive, } let hash = d.hash(); - let r = if hash.is_empty() { + if hash.is_empty() { t.render(r, &mut local_rc) } else { - let hash_ctx = - BTreeMap::from_iter(hash.iter().map(|(k, v)| (k.clone(), v.value().clone()))); - { - let mut ctx_ref = local_rc.context_mut(); - *ctx_ref = ctx_ref.extend(&hash_ctx); - } - t.render(r, &mut local_rc) - }; - - r + let hash_ctx = BTreeMap::from_iter( + d.hash().iter().map(|(k, v)| (k.clone(), v.value().clone())), + ); + let partial_context = merge_json(local_rc.evaluate(".")?, &hash_ctx); + let mut partial_rc = local_rc.with_context(Context::wraps(&partial_context)?); + t.render(r, &mut partial_rc) + } } None => Ok(()), } @@ -65,29 +65,80 @@ mod test { #[test] fn test() { let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("t0", "{{> t1}}").is_ok()); - 
assert!(handlebars.register_template_string("t1", "{{this}}").is_ok()); - assert!(handlebars.register_template_string("t2", "{{#> t99}}not there{{/t99}}").is_ok()); - assert!(handlebars.register_template_string("t3", "{{#*inline \"t31\"}}{{this}}{{/inline}}{{> t31}}").is_ok()); - assert!(handlebars.register_template_string("t4", "{{#> t5}}{{#*inline \"nav\"}}navbar{{/inline}}{{/t5}}").is_ok()); - assert!(handlebars.register_template_string("t5", "include {{> nav}}").is_ok()); - assert!(handlebars.register_template_string("t6", "{{> t1 a}}").is_ok()); - assert!(handlebars.register_template_string("t7", "{{#*inline \"t71\"}}{{a}}{{/inline}}{{> t71 a=\"world\"}}").is_ok()); + assert!( + handlebars + .register_template_string("t0", "{{> t1}}") + .is_ok() + ); + assert!( + handlebars + .register_template_string("t1", "{{this}}") + .is_ok() + ); + assert!( + handlebars + .register_template_string("t2", "{{#> t99}}not there{{/t99}}") + .is_ok() + ); + assert!( + handlebars + .register_template_string("t3", "{{#*inline \"t31\"}}{{this}}{{/inline}}{{> t31}}") + .is_ok() + ); + assert!( + handlebars + .register_template_string( + "t4", + "{{#> t5}}{{#*inline \"nav\"}}navbar{{/inline}}{{/t5}}", + ) + .is_ok() + ); + assert!( + handlebars + .register_template_string("t5", "include {{> nav}}") + .is_ok() + ); + assert!( + handlebars + .register_template_string("t6", "{{> t1 a}}") + .is_ok() + ); + assert!( + handlebars + .register_template_string( + "t7", + "{{#*inline \"t71\"}}{{a}}{{/inline}}{{> t71 a=\"world\"}}", + ) + .is_ok() + ); assert!(handlebars.register_template_string("t8", "{{a}}").is_ok()); - assert!(handlebars.register_template_string("t9", "{{> t8 a=2}}").is_ok()); + assert!( + handlebars + .register_template_string("t9", "{{> t8 a=2}}") + .is_ok() + ); assert_eq!(handlebars.render("t0", &1).ok().unwrap(), "1".to_string()); - assert_eq!(handlebars.render("t2", &1).ok().unwrap(), - "not there".to_string()); + assert_eq!( + handlebars.render("t2", &1).ok().unwrap(), + "not there".to_string() + ); assert_eq!(handlebars.render("t3", &1).ok().unwrap(), "1".to_string()); - assert_eq!(handlebars.render("t4", &1).ok().unwrap(), - "include navbar".to_string()); - assert_eq!(handlebars.render("t6", &btreemap!{"a".to_string() => "2".to_string()}) - .ok() - .unwrap(), - "2".to_string()); - assert_eq!(handlebars.render("t7", &1).ok().unwrap(), - "world".to_string()); + assert_eq!( + handlebars.render("t4", &1).ok().unwrap(), + "include navbar".to_string() + ); + assert_eq!( + handlebars + .render("t6", &btreemap!{"a".to_string() => "2".to_string()}) + .ok() + .unwrap(), + "2".to_string() + ); + assert_eq!( + handlebars.render("t7", &1).ok().unwrap(), + "world".to_string() + ); assert_eq!(handlebars.render("t9", &1).ok().unwrap(), "2".to_string()); } @@ -122,10 +173,46 @@ mod test { let two_partial = "--- two ---"; let mut handlebars = Registry::new(); - assert!(handlebars.register_template_string("template", main_template).is_ok()); - assert!(handlebars.register_template_string("two", two_partial).is_ok()); + assert!( + handlebars + .register_template_string("template", main_template) + .is_ok() + ); + assert!( + handlebars + .register_template_string("two", two_partial) + .is_ok() + ); let r0 = handlebars.render("template", &true); assert_eq!(r0.ok().unwrap(), "one--- two ---three--- two ---"); } + + #[test] + fn test_hash_context_outscope() { + let main_template = "In: {{> p a=2}} Out: {{a}}"; + let p_partial = "{{a}}"; + + let mut handlebars = Registry::new(); + assert!( + handlebars + 
.register_template_string("template", main_template) + .is_ok() + ); + assert!(handlebars.register_template_string("p", p_partial).is_ok()); + + let r0 = handlebars.render("template", &true); + assert_eq!(r0.ok().unwrap(), "In: 2 Out: "); + } + + #[test] + fn test_nested_partial_scope() { + let t = "{{#*inline \"pp\"}}{{a}} {{b}}{{/inline}}{{#each c}}{{> pp a=2}}{{/each}}"; + let data = json!({"c": [{"b": true}, {"b": false}]}); + + let mut handlebars = Registry::new(); + assert!(handlebars.register_template_string("t", t).is_ok()); + let r0 = handlebars.render("t", &data); + assert_eq!(r0.ok().unwrap(), "2 true2 false"); + } } diff --git a/src/vendor/handlebars/src/registry.rs b/src/vendor/handlebars/src/registry.rs index fb01843ea1..930a8ca267 100644 --- a/src/vendor/handlebars/src/registry.rs +++ b/src/vendor/handlebars/src/registry.rs @@ -8,12 +8,12 @@ use serde::Serialize; use regex::{Regex, Captures}; use template::Template; -use render::{Renderable, RenderError, RenderContext}; +use render::{Renderable, RenderContext}; use context::Context; use helpers::{self, HelperDef}; use directives::{self, DirectiveDef}; use support::str::StringWriter; -use error::{TemplateError, TemplateFileError, TemplateRenderError}; +use error::{RenderError, TemplateError, TemplateFileError, TemplateRenderError}; lazy_static!{ @@ -30,15 +30,15 @@ pub type EscapeFn = Box String + Send + Sync>; /// The default *escape fn* replaces the characters `&"<>` /// with the equivalent html / xml entities. pub fn html_escape(data: &str) -> String { - DEFAULT_REPLACE.replace_all(data, |cap: &Captures| { + DEFAULT_REPLACE + .replace_all(data, |cap: &Captures| { match cap.get(0).map(|m| m.as_str()) { - Some("<") => "<", - Some(">") => ">", - Some("\"") => """, - Some("&") => "&", - _ => unreachable!(), - } - .to_owned() + Some("<") => "<", + Some(">") => ">", + Some("\"") => """, + Some("&") => "&", + _ => unreachable!(), + }.to_owned() }) .into_owned() } @@ -73,24 +73,6 @@ impl Registry { r.setup_builtins() } - #[cfg(feature="partial_legacy")] - fn setup_builtins(mut self) -> Registry { - self.register_helper("if", Box::new(helpers::IF_HELPER)); - self.register_helper("unless", Box::new(helpers::UNLESS_HELPER)); - self.register_helper("each", Box::new(helpers::EACH_HELPER)); - self.register_helper("with", Box::new(helpers::WITH_HELPER)); - self.register_helper("lookup", Box::new(helpers::LOOKUP_HELPER)); - self.register_helper("raw", Box::new(helpers::RAW_HELPER)); - self.register_helper(">", Box::new(helpers::INCLUDE_HELPER)); - self.register_helper("block", Box::new(helpers::BLOCK_HELPER)); - self.register_helper("partial", Box::new(helpers::PARTIAL_HELPER)); - self.register_helper("log", Box::new(helpers::LOG_HELPER)); - - self.register_decorator("inline", Box::new(directives::INLINE_DIRECTIVE)); - self - } - - #[cfg(not(feature = "partial_legacy"))] fn setup_builtins(mut self) -> Registry { self.register_helper("if", Box::new(helpers::IF_HELPER)); self.register_helper("unless", Box::new(helpers::UNLESS_HELPER)); @@ -117,14 +99,18 @@ impl Registry { /// Register a template string /// /// Returns `TemplateError` if there is syntax error on parsing template. 
- pub fn register_template_string(&mut self, - name: &str, - tpl_str: S) - -> Result<(), TemplateError> - where S: AsRef + pub fn register_template_string( + &mut self, + name: &str, + tpl_str: S, + ) -> Result<(), TemplateError> + where + S: AsRef, { - try!(Template::compile_with_name(tpl_str, name.to_owned(), self.source_map) - .and_then(|t| Ok(self.templates.insert(name.to_string(), t)))); + try!( + Template::compile_with_name(tpl_str, name.to_owned(), self.source_map) + .and_then(|t| Ok(self.templates.insert(name.to_string(), t))) + ); Ok(()) } @@ -133,31 +119,37 @@ impl Registry { /// A named partial will be added to the registry. It will overwrite template with /// same name. Currently registered partial is just identical to template. pub fn register_partial(&mut self, name: &str, partial_str: S) -> Result<(), TemplateError> - where S: AsRef + where + S: AsRef, { self.register_template_string(name, partial_str) } /// Register a template from a path - pub fn register_template_file

<P>(&mut self, - name: &str, - tpl_path: P) - -> Result<(), TemplateFileError> - where P: AsRef<Path> + pub fn register_template_file<P>
( + &mut self, + name: &str, + tpl_path: P, + ) -> Result<(), TemplateFileError> + where + P: AsRef, { - let mut file = - try!(File::open(tpl_path).map_err(|e| TemplateFileError::IOError(e, name.to_owned()))); + let mut file = try!(File::open(tpl_path).map_err(|e| { + TemplateFileError::IOError(e, name.to_owned()) + })); self.register_template_source(name, &mut file) } /// Register a template from `std::io::Read` source - pub fn register_template_source(&mut self, - name: &str, - tpl_source: &mut Read) - -> Result<(), TemplateFileError> { + pub fn register_template_source( + &mut self, + name: &str, + tpl_source: &mut Read, + ) -> Result<(), TemplateFileError> { let mut buf = String::new(); - try!(tpl_source.read_to_string(&mut buf) - .map_err(|e| TemplateFileError::IOError(e, name.to_owned()))); + try!(tpl_source.read_to_string(&mut buf).map_err(|e| { + TemplateFileError::IOError(e, name.to_owned()) + })); try!(self.register_template_string(name, buf)); Ok(()) } @@ -168,24 +160,28 @@ impl Registry { } /// register a helper - pub fn register_helper(&mut self, - name: &str, - def: Box) - -> Option> { + pub fn register_helper( + &mut self, + name: &str, + def: Box, + ) -> Option> { self.helpers.insert(name.to_string(), def) } /// register a decorator - pub fn register_decorator(&mut self, - name: &str, - def: Box) - -> Option> { + pub fn register_decorator( + &mut self, + name: &str, + def: Box, + ) -> Option> { self.directives.insert(name.to_string(), def) } /// Register a new *escape fn* to be used from now on by this registry. - pub fn register_escape_fn String + Send + Sync>(&mut self, - escape_fn: F) { + pub fn register_escape_fn String + Send + Sync>( + &mut self, + escape_fn: F, + ) { self.escape_fn = Box::new(escape_fn); } @@ -232,7 +228,8 @@ impl Registry { /// /// Returns rendered string or an struct with error information pub fn render(&self, name: &str, data: &T) -> Result - where T: Serialize + where + T: Serialize, { let mut writer = StringWriter::new(); { @@ -244,25 +241,28 @@ impl Registry { /// Render a registered template and write some data to the `std::io::Write` pub fn renderw(&self, name: &str, data: &T, writer: &mut Write) -> Result<(), RenderError> - where T: Serialize + where + T: Serialize, { self.get_template(&name.to_string()) .ok_or(RenderError::new(format!("Template not found: {}", name))) .and_then(|t| { - let mut ctx = Context::wraps(data); + let ctx = try!(Context::wraps(data)); let mut local_helpers = HashMap::new(); - let mut render_context = RenderContext::new(&mut ctx, &mut local_helpers, writer); + let mut render_context = RenderContext::new(ctx, &mut local_helpers, writer); render_context.root_template = t.name.clone(); t.render(self, &mut render_context) }) } /// render a template string using current registry without register it - pub fn template_render(&self, - template_string: &str, - data: &T) - -> Result - where T: Serialize + pub fn template_render( + &self, + template_string: &str, + data: &T, + ) -> Result + where + T: Serialize, { let mut writer = StringWriter::new(); { @@ -272,27 +272,33 @@ impl Registry { } /// render a template string using current registry without register it - pub fn template_renderw(&self, - template_string: &str, - data: &T, - writer: &mut Write) - -> Result<(), TemplateRenderError> - where T: Serialize + pub fn template_renderw( + &self, + template_string: &str, + data: &T, + writer: &mut Write, + ) -> Result<(), TemplateRenderError> + where + T: Serialize, { let tpl = try!(Template::compile(template_string)); - let 
mut ctx = Context::wraps(data); + let ctx = try!(Context::wraps(data)); let mut local_helpers = HashMap::new(); - let mut render_context = RenderContext::new(&mut ctx, &mut local_helpers, writer); - tpl.render(self, &mut render_context).map_err(TemplateRenderError::from) + let mut render_context = RenderContext::new(ctx, &mut local_helpers, writer); + tpl.render(self, &mut render_context).map_err( + TemplateRenderError::from, + ) } /// render a template source using current registry without register it - pub fn template_renderw2(&self, - template_source: &mut Read, - data: &T, - writer: &mut Write) - -> Result<(), TemplateRenderError> - where T: Serialize + pub fn template_renderw2( + &self, + template_source: &mut Read, + data: &T, + writer: &mut Write, + ) -> Result<(), TemplateRenderError> + where + T: Serialize, { let mut tpl_str = String::new(); try!(template_source.read_to_string(&mut tpl_str).map_err(|e| { @@ -305,21 +311,21 @@ impl Registry { #[cfg(test)] mod test { use registry::Registry; - use render::{RenderContext, Renderable, RenderError, Helper}; + use render::{RenderContext, Renderable, Helper}; use helpers::HelperDef; use support::str::StringWriter; - #[cfg(feature = "partial_legacy")] - use error::TemplateRenderError; + use error::RenderError; #[derive(Clone, Copy)] struct DummyHelper; impl HelperDef for DummyHelper { - fn call(&self, - h: &Helper, - r: &Registry, - rc: &mut RenderContext) - -> Result<(), RenderError> { + fn call( + &self, + h: &Helper, + r: &Registry, + rc: &mut RenderContext, + ) -> Result<(), RenderError> { try!(h.template().unwrap().render(r, rc)); Ok(()) } @@ -345,10 +351,6 @@ mod test { r.register_helper("dummy", Box::new(DUMMY_HELPER)); // built-in helpers plus 1 - #[cfg(feature = "partial_legacy")] - assert_eq!(r.helpers.len(), 10 + 1); - - #[cfg(not(feature = "partial_legacy"))] assert_eq!(r.helpers.len(), 7 + 1); } @@ -373,7 +375,8 @@ mod test { let input = String::from("\"<>&"); - r.register_template_string("test", String::from("{{this}}")).unwrap(); + r.register_template_string("test", String::from("{{this}}")) + .unwrap(); assert_eq!(""<>&", r.render("test", &input).unwrap()); @@ -387,40 +390,12 @@ mod test { } #[test] - #[cfg(feature="partial_legacy")] - fn test_template_render() { - let mut r = Registry::new(); - - assert!(r.register_template_string("index", "

").is_ok()); - - assert_eq!("

".to_string(), - r.template_render("{{> index}}", &{}).unwrap()); + fn test_escape() { + let r = Registry::new(); + let data = json!({ + "hello": "world" + }); - assert_eq!("hello world".to_string(), - r.template_render("hello {{this}}", &"world".to_string()).unwrap()); - - let mut sw = StringWriter::new(); - - { - r.template_renderw("{{> index}}", &{}, &mut sw).unwrap(); - } - - assert_eq!("

".to_string(), sw.to_string()); - - // fail for template error - match r.template_render("{{ hello", &{}).unwrap_err() { - TemplateRenderError::TemplateError(_) => {} - _ => { - panic!(); - } - } - - // fail to render error - match r.template_render("{{> notfound}}", &{}).unwrap_err() { - TemplateRenderError::RenderError(_) => {} - _ => { - panic!(); - } - } + assert_eq!("{{hello}}", r.template_render(r"\\{{hello}}", &data).unwrap()); } } diff --git a/src/vendor/handlebars/src/render.rs b/src/vendor/handlebars/src/render.rs index 703513c262..5af124c9d8 100644 --- a/src/vendor/handlebars/src/render.rs +++ b/src/vendor/handlebars/src/render.rs @@ -1,12 +1,10 @@ use std::collections::{HashMap, BTreeMap, VecDeque}; -use std::error; use std::fmt; use std::rc::Rc; use std::io::Write; -use std::io::Error as IOError; use serde::Serialize; -use serde_json::value::{Value as Json}; +use serde_json::value::Value as Json; use template::{Template, TemplateElement, Parameter, HelperTemplate, TemplateMapping, BlockParam, Directive as DirectiveTemplate}; @@ -15,58 +13,9 @@ use registry::Registry; use context::{Context, JsonRender}; use helpers::HelperDef; use support::str::StringWriter; -#[cfg(not(feature="partial_legacy"))] +use error::RenderError; use partial; -/// Error when rendering data on template. -#[derive(Debug, Clone)] -pub struct RenderError { - pub desc: String, - pub template_name: Option, - pub line_no: Option, - pub column_no: Option, -} - -impl fmt::Display for RenderError { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - match (self.line_no, self.column_no) { - (Some(line), Some(col)) => { - write!(f, - "Error rendering \"{}\" line {}, col {}: {}", - self.template_name.as_ref().unwrap_or(&"Unnamed template".to_owned()), - line, - col, - self.desc) - } - _ => write!(f, "{}", self.desc), - } - - } -} - -impl error::Error for RenderError { - fn description(&self) -> &str { - &self.desc[..] 
- } -} - -impl From for RenderError { - fn from(_: IOError) -> RenderError { - RenderError::new("IO Error") - } -} - -impl RenderError { - pub fn new>(desc: T) -> RenderError { - RenderError { - desc: desc.as_ref().to_owned(), - template_name: None, - line_no: None, - column_no: None, - } - } -} - /// The context of a render call /// /// this context stores information of a render and a writer where generated @@ -81,7 +30,7 @@ pub struct RenderContext<'a> { default_var: Json, block_context: VecDeque, /// the context - context: &'a mut Context, + context: Context, /// the `Write` where page is generated pub writer: &'a mut Write, /// current template name @@ -93,10 +42,11 @@ pub struct RenderContext<'a> { impl<'a> RenderContext<'a> { /// Create a render context from a `Write` - pub fn new(ctx: &'a mut Context, - local_helpers: &'a mut HashMap>>, - w: &'a mut Write) - -> RenderContext<'a> { + pub fn new( + ctx: Context, + local_helpers: &'a mut HashMap>>, + w: &'a mut Write, + ) -> RenderContext<'a> { RenderContext { partials: HashMap::new(), path: ".".to_string(), @@ -126,7 +76,25 @@ impl<'a> RenderContext<'a> { disable_escape: self.disable_escape, local_helpers: self.local_helpers, - context: self.context, + context: self.context.clone(), + writer: self.writer, + } + } + + pub fn with_context(&mut self, ctx: Context) -> RenderContext { + RenderContext { + partials: self.partials.clone(), + path: ".".to_owned(), + local_path_root: VecDeque::new(), + local_variables: self.local_variables.clone(), + current_template: self.current_template.clone(), + root_template: self.root_template.clone(), + default_var: self.default_var.clone(), + block_context: VecDeque::new(), + + disable_escape: self.disable_escape, + local_helpers: self.local_helpers, + context: ctx, writer: self.writer, } } @@ -174,10 +142,7 @@ impl<'a> RenderContext<'a> { new_key.push_str("@../"); new_key.push_str(&key[1..]); - let v = self.local_variables - .get(key) - .unwrap() - .clone(); + let v = self.local_variables.get(key).unwrap().clone(); new_map.insert(new_key, v); } self.local_variables = new_map; @@ -191,10 +156,7 @@ impl<'a> RenderContext<'a> { new_key.push('@'); new_key.push_str(&key[4..]); - let v = self.local_variables - .get(key) - .unwrap() - .clone(); + let v = self.local_variables.get(key).unwrap().clone(); new_map.insert(new_key, v); } } @@ -209,46 +171,51 @@ impl<'a> RenderContext<'a> { self.writer } - pub fn push_block_context(&mut self, ctx: &T) - where T: Serialize + pub fn push_block_context(&mut self, ctx: &T) -> Result<(), RenderError> + where + T: Serialize, { - self.block_context.push_front(Context::wraps(ctx)); + let r = self.block_context.push_front(Context::wraps(ctx)?); + Ok(r) } pub fn pop_block_context(&mut self) { self.block_context.pop_front(); } - pub fn evaluate_in_block_context(&self, local_path: &str) -> Option<&Json> { + pub fn evaluate_in_block_context( + &self, + local_path: &str, + ) -> Result, RenderError> { for bc in self.block_context.iter() { - let v = bc.navigate(".", &self.local_path_root, local_path); + let v = bc.navigate(".", &self.local_path_root, local_path)?; if !v.is_null() { - return Some(v); + return Ok(Some(v)); } } - None + Ok(None) } pub fn is_current_template(&self, p: &str) -> bool { - self.current_template - .as_ref() - .map(|s| s == p) - .unwrap_or(false) + self.current_template.as_ref().map(|s| s == p).unwrap_or( + false, + ) } pub fn context(&self) -> &Context { - self.context + &self.context } pub fn context_mut(&mut self) -> &mut Context { - self.context + &mut 
self.context } - pub fn register_local_helper(&mut self, - name: &str, - def: Box) - -> Option>> { + pub fn register_local_helper( + &mut self, + name: &str, + def: Box, + ) -> Option>> { self.local_helpers.insert(name.to_string(), Rc::new(def)) } @@ -259,20 +226,34 @@ impl<'a> RenderContext<'a> { pub fn get_local_helper(&self, name: &str) -> Option>> { self.local_helpers.get(name).map(|r| r.clone()) } + + pub fn evaluate(&self, path: &str) -> Result<&Json, RenderError> { + self.context.navigate( + self.get_path(), + self.get_local_path_root(), + path, + ) + } + + pub fn evaluate_absolute(&self, path: &str) -> Result<&Json, RenderError> { + self.context.navigate(".", &VecDeque::new(), path) + } } impl<'a> fmt::Debug for RenderContext<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - write!(f, - "partials: {:?}, path: {:?}, local_variables: {:?}, current_template: {:?}, \ + write!( + f, + "partials: {:?}, path: {:?}, local_variables: {:?}, current_template: {:?}, \ root_template: {:?}, disable_escape: {:?}, local_path_root: {:?}", - self.partials, - self.path, - self.local_variables, - self.current_template, - self.root_template, - self.disable_escape, - self.local_path_root) + self.partials, + self.path, + self.local_variables, + self.current_template, + self.root_template, + self.disable_escape, + self.local_path_root + ) } } @@ -293,7 +274,9 @@ impl ContextJson { /// Return root level of this path if any pub fn path_root(&self) -> Option<&str> { - self.path.as_ref().and_then(|p| p.split(|c| c == '.' || c == '/').nth(0)) + self.path.as_ref().and_then(|p| { + p.split(|c| c == '.' || c == '/').nth(0) + }) } /// Returns the value @@ -314,10 +297,11 @@ pub struct Helper<'a> { } impl<'a, 'b> Helper<'a> { - fn from_template(ht: &'a HelperTemplate, - registry: &Registry, - rc: &'b mut RenderContext) - -> Result, RenderError> { + fn from_template( + ht: &'a HelperTemplate, + registry: &Registry, + rc: &'b mut RenderContext, + ) -> Result, RenderError> { let mut evaluated_params = Vec::new(); for p in ht.params.iter() { let r = try!(p.expand(registry, rc)); @@ -331,14 +315,14 @@ impl<'a, 'b> Helper<'a> { } Ok(Helper { - name: &ht.name, - params: evaluated_params, - hash: evaluated_hash, - block_param: &ht.block_param, - template: &ht.template, - inverse: &ht.inverse, - block: ht.block, - }) + name: &ht.name, + params: evaluated_params, + hash: evaluated_hash, + block_param: &ht.block_param, + template: &ht.template, + inverse: &ht.inverse, + block: ht.block, + }) } /// Returns helper name @@ -428,7 +412,8 @@ impl<'a, 'b> Helper<'a> { /// Return block param pair (for example |key, val|) if any pub fn block_param_pair(&self) -> Option<(&str, &str)> { if let Some(BlockParam::Pair((Parameter::Name(ref s1), Parameter::Name(ref s2)))) = - *self.block_param { + *self.block_param + { Some((s1, s2)) } else { None @@ -445,10 +430,11 @@ pub struct Directive<'a> { } impl<'a, 'b> Directive<'a> { - fn from_template(dt: &'a DirectiveTemplate, - registry: &Registry, - rc: &'b mut RenderContext) - -> Result, RenderError> { + fn from_template( + dt: &'a DirectiveTemplate, + registry: &Registry, + rc: &'b mut RenderContext, + ) -> Result, RenderError> { let name = try!(dt.name.expand_as_name(registry, rc)); let mut evaluated_params = Vec::new(); @@ -464,11 +450,11 @@ impl<'a, 'b> Directive<'a> { } Ok(Directive { - name: name, - params: evaluated_params, - hash: evaluated_hash, - template: &dt.template, - }) + name: name, + params: evaluated_params, + hash: evaluated_hash, + template: 
&dt.template, + }) } /// Returns helper name @@ -528,10 +514,11 @@ pub trait Evaluable { impl Parameter { - pub fn expand_as_name(&self, - registry: &Registry, - rc: &mut RenderContext) - -> Result { + pub fn expand_as_name( + &self, + registry: &Registry, + rc: &mut RenderContext, + ) -> Result { match self { &Parameter::Name(ref name) => Ok(name.to_owned()), &Parameter::Subexpression(ref t) => { @@ -551,38 +538,44 @@ impl Parameter { } } - pub fn expand(&self, - registry: &Registry, - rc: &mut RenderContext) - -> Result { + pub fn expand( + &self, + registry: &Registry, + rc: &mut RenderContext, + ) -> Result { match self { &Parameter::Name(ref name) => { - Ok(rc.get_local_var(&name).map_or_else(|| { - ContextJson { - path: Some(name.to_owned()), - value: rc.evaluate_in_block_context(name).map_or_else(|| {rc.context().navigate(rc.get_path(), rc.get_local_path_root(), name).clone()}, |v| v.clone()), - } - - }, - |v| { - ContextJson { - path: None, - value: v.clone(), - } - })) + let local_value = rc.get_local_var(&name); + if let Some(value) = local_value { + Ok(ContextJson { + path: Some(name.to_owned()), + value: value.clone(), + }) + } else { + let block_context_value = rc.evaluate_in_block_context(name)?; + let value = if block_context_value.is_none() { + rc.evaluate(name)? + } else { + block_context_value.unwrap() + }; + Ok(ContextJson { + path: Some(name.to_owned()), + value: value.clone(), + }) + } } &Parameter::Literal(ref j) => { Ok(ContextJson { - path: None, - value: j.clone(), - }) + path: None, + value: j.clone(), + }) } &Parameter::Subexpression(_) => { let text_value = try!(self.expand_as_name(registry, rc)); Ok(ContextJson { - path: None, - value: Json::String(text_value), - }) + path: None, + value: Json::String(text_value), + }) } } } @@ -645,7 +638,6 @@ impl Evaluable for Template { impl Renderable for TemplateElement { fn render(&self, registry: &Registry, rc: &mut RenderContext) -> Result<(), RenderError> { - debug!("rendering {:?}, {:?}", self, rc); match *self { RawString(ref v) => { try!(rc.writer.write(v.clone().into_bytes().as_ref())); @@ -675,22 +667,26 @@ impl Renderable for TemplateElement { if let Some(ref d) = rc.get_local_helper(&ht.name) { d.call(&helper, registry, rc) } else { - registry.get_helper(&ht.name) + registry + .get_helper(&ht.name) .or(registry.get_helper(if ht.block { - "blockHelperMissing" - } else { - "helperMissing" - })) - .ok_or(RenderError::new(format!("Helper not defined: {:?}", ht.name))) + "blockHelperMissing" + } else { + "helperMissing" + })) + .ok_or(RenderError::new( + format!("Helper not defined: {:?}", ht.name), + )) .and_then(|d| d.call(&helper, registry, rc)) } } DirectiveExpression(_) | DirectiveBlock(_) => self.eval(registry, rc), - #[cfg(not(feature="partial_legacy"))] - PartialExpression(ref dt) | PartialBlock(ref dt) => { - Directive::from_template(dt, registry, rc) - .and_then(|di| partial::expand_partial(&di, registry, rc)) + PartialExpression(ref dt) | + PartialBlock(ref dt) => { + Directive::from_template(dt, registry, rc).and_then(|di| { + partial::expand_partial(&di, registry, rc) + }) } _ => Ok(()), } @@ -706,7 +702,9 @@ impl Evaluable for TemplateElement { match registry.get_decorator(&di.name) { Some(d) => (**d).call(&di, registry, rc), None => { - Err(RenderError::new(format!("Directive not defined: {:?}", dt.name))) + Err(RenderError::new( + format!("Directive not defined: {:?}", dt.name), + )) } } }) @@ -720,10 +718,10 @@ impl Evaluable for TemplateElement { fn test_raw_string() { let r = Registry::new(); let 
mut sw = StringWriter::new(); - let mut ctx = Context::null(); + let ctx = Context::null(); let mut hlps = HashMap::new(); { - let mut rc = RenderContext::new(&mut ctx, &mut hlps, &mut sw); + let mut rc = RenderContext::new(ctx, &mut hlps, &mut sw); let raw_string = RawString("

hello world

".to_string()); raw_string.render(&r, &mut rc).ok().unwrap(); @@ -739,10 +737,10 @@ fn test_expression() { let mut m: HashMap = HashMap::new(); let value = "

".to_string(); m.insert("hello".to_string(), value); - let mut ctx = Context::wraps(&m); + let ctx = Context::wraps(&m).unwrap(); { - let mut rc = RenderContext::new(&mut ctx, &mut hlps, &mut sw); + let mut rc = RenderContext::new(ctx, &mut hlps, &mut sw); let element = Expression(Parameter::Name("hello".into())); element.render(&r, &mut rc).ok().unwrap(); @@ -759,10 +757,10 @@ fn test_html_expression() { let mut m: HashMap = HashMap::new(); let value = "world"; m.insert("hello".to_string(), value.to_string()); - let mut ctx = Context::wraps(&m); + let ctx = Context::wraps(&m).unwrap(); { - let mut rc = RenderContext::new(&mut ctx, &mut hlps, &mut sw); + let mut rc = RenderContext::new(ctx, &mut hlps, &mut sw); let element = HTMLExpression(Parameter::Name("hello".into())); element.render(&r, &mut rc).ok().unwrap(); } @@ -778,12 +776,12 @@ fn test_template() { let mut m: HashMap = HashMap::new(); let value = "world".to_string(); m.insert("hello".to_string(), value); - let mut ctx = Context::wraps(&m); + let ctx = Context::wraps(&m).unwrap(); { - let mut rc = RenderContext::new(&mut ctx, &mut hlps, &mut sw); + let mut rc = RenderContext::new(ctx, &mut hlps, &mut sw); let mut elements: Vec = Vec::new(); let e1 = RawString("

".to_string()); @@ -813,22 +811,28 @@ fn test_template() { fn test_render_context_promotion_and_demotion() { use context::to_json; let mut sw = StringWriter::new(); - let mut ctx = Context::null(); + let ctx = Context::null(); let mut hlps = HashMap::new(); - let mut render_context = RenderContext::new(&mut ctx, &mut hlps, &mut sw); + let mut render_context = RenderContext::new(ctx, &mut hlps, &mut sw); render_context.set_local_var("@index".to_string(), to_json(&0)); render_context.promote_local_vars(); - assert_eq!(render_context.get_local_var(&"@../index".to_string()).unwrap(), - &to_json(&0)); + assert_eq!( + render_context + .get_local_var(&"@../index".to_string()) + .unwrap(), + &to_json(&0) + ); render_context.demote_local_vars(); - assert_eq!(render_context.get_local_var(&"@index".to_string()).unwrap(), - &to_json(&0)); + assert_eq!( + render_context.get_local_var(&"@index".to_string()).unwrap(), + &to_json(&0) + ); } #[test] @@ -842,9 +846,12 @@ fn test_render_subexpression() { m.insert("const".to_string(), "truthy".to_string()); { - if let Err(e) = r.template_renderw("

{{#if (const)}}{{(hello)}}{{/if}}

", - &m, - &mut sw) { + if let Err(e) = r.template_renderw( + "

{{#if (const)}}{{(hello)}}{{/if}}

", + &m, + &mut sw, + ) + { panic!("{}", e); } } @@ -855,22 +862,22 @@ fn test_render_subexpression() { #[test] fn test_render_subexpression_issue_115() { let mut r = Registry::new(); - r.register_helper("format", - Box::new(|h: &Helper, - _: &Registry, - rc: &mut RenderContext| - -> Result<(), RenderError> { - rc.writer - .write(format!("{}", - h.param(0) - .unwrap() - .value() - .render()) - .into_bytes() - .as_ref()) - .map(|_| ()) - .map_err(RenderError::from) - })); + r.register_helper( + "format", + Box::new(|h: &Helper, + _: &Registry, + rc: &mut RenderContext| + -> Result<(), RenderError> { + rc.writer + .write( + format!("{}", h.param(0).unwrap().value().render()) + .into_bytes() + .as_ref(), + ) + .map(|_| ()) + .map_err(RenderError::from) + }), + ); let mut sw = StringWriter::new(); let mut m: HashMap = HashMap::new(); @@ -891,8 +898,10 @@ fn test_render_error_line_no() { let m: HashMap = HashMap::new(); let name = "invalid_template"; - assert!(r.register_template_string(name, "

\n{{#if true}}\n {{#each}}{{/each}}\n{{/if}}") - .is_ok()); + assert!( + r.register_template_string(name, "

\n{{#if true}}\n {{#each}}{{/each}}\n{{/if}}") + .is_ok() + ); if let Err(e) = r.render(name, &m) { assert_eq!(e.line_no.unwrap(), 3); @@ -904,12 +913,19 @@ fn test_render_error_line_no() { } #[test] -#[cfg(not(feature="partial_legacy"))] fn test_partial_failback_render() { let mut r = Registry::new(); - assert!(r.register_template_string("parent", "{{> layout}}").is_ok()); - assert!(r.register_template_string("child", "{{#*inline \"layout\"}}content{{/inline}}{{#> parent}}{{> seg}}{{/parent}}").is_ok()); + assert!( + r.register_template_string("parent", "{{> layout}}") + .is_ok() + ); + assert!( + r.register_template_string( + "child", + "{{#*inline \"layout\"}}content{{/inline}}{{#> parent}}{{> seg}}{{/parent}}", + ).is_ok() + ); assert!(r.register_template_string("seg", "1234").is_ok()); let r = r.render("child", &true).expect("should work"); @@ -920,11 +936,17 @@ fn test_partial_failback_render() { fn test_key_with_slash() { let mut r = Registry::new(); - assert!(r.register_template_string("t", "{{#each .}}{{@key}}: {{this}}\n{{/each}}").is_ok()); + assert!( + r.register_template_string("t", "{{#each .}}{{@key}}: {{this}}\n{{/each}}") + .is_ok() + ); - let r = r.render("t", &json!({ + let r = r.render( + "t", + &json!({ "/foo": "bar" - })).expect("should work"); + }), + ).expect("should work"); assert_eq!(r, "/foo: bar\n"); } diff --git a/src/vendor/handlebars/src/template.rs b/src/vendor/handlebars/src/template.rs index 4cd1da9742..385cf9c39f 100644 --- a/src/vendor/handlebars/src/template.rs +++ b/src/vendor/handlebars/src/template.rs @@ -116,7 +116,9 @@ impl Parameter { pub fn parse(s: &str) -> Result { let mut parser = Rdp::new(StringInput::new(s)); if !parser.parameter() { - return Err(TemplateError::of(TemplateErrorReason::InvalidParam(s.to_owned()))); + return Err(TemplateError::of( + TemplateErrorReason::InvalidParam(s.to_owned()), + )); } let mut it = parser.queue().iter().peekable(); @@ -133,6 +135,10 @@ impl Template { } } + fn unescape_tags(txt: &str) -> String { + txt.replace(r"\\{{", "{{") + } + fn push_element(&mut self, e: TemplateElement, line: usize, col: usize) { self.elements.push(e); if let Some(ref mut maps) = self.mapping { @@ -145,17 +151,18 @@ impl Template { } #[inline] - fn parse_subexpression<'a>(source: &'a str, - it: &mut Peekable>>, - limit: usize) - -> Result { + fn parse_subexpression<'a>( + source: &'a str, + it: &mut Peekable>>, + limit: usize, + ) -> Result { let espec = try!(Template::parse_expression(source, it.by_ref(), limit)); if let Parameter::Name(name) = espec.name { Ok(Parameter::Subexpression(Subexpression { - name: name, - params: espec.params, - hash: espec.hash, - })) + name: name, + params: espec.params, + hash: espec.hash, + })) } else { // line/col no Err(TemplateError::of(TemplateErrorReason::NestedSubexpression)) @@ -163,16 +170,19 @@ impl Template { } #[inline] - fn parse_name<'a>(source: &'a str, - it: &mut Peekable>>, - _: usize) - -> Result { + fn parse_name<'a>( + source: &'a str, + it: &mut Peekable>>, + _: usize, + ) -> Result { let name_node = it.next().unwrap(); match name_node.rule { Rule::identifier | Rule::reference | Rule::invert_tag_item => { - Ok(Parameter::Name(source[name_node.start..name_node.end].to_owned())) + Ok(Parameter::Name( + source[name_node.start..name_node.end].to_owned(), + )) } Rule::subexpression => { Template::parse_subexpression(source, it.by_ref(), name_node.end) @@ -182,10 +192,11 @@ impl Template { } #[inline] - fn parse_param<'a>(source: &'a str, - it: &mut Peekable>>, - _: usize) - -> Result { 
+ fn parse_param<'a>( + source: &'a str, + it: &mut Peekable>>, + _: usize, + ) -> Result { let mut param = it.next().unwrap(); if param.rule == Rule::param { param = it.next().unwrap(); @@ -201,7 +212,11 @@ impl Template { } } Rule::subexpression => { - try!(Template::parse_subexpression(source, it.by_ref(), param.end)) + try!(Template::parse_subexpression( + source, + it.by_ref(), + param.end, + )) } _ => unreachable!(), }; @@ -222,10 +237,11 @@ impl Template { } #[inline] - fn parse_hash<'a>(source: &'a str, - it: &mut Peekable>>, - limit: usize) - -> Result<(String, Parameter), TemplateError> { + fn parse_hash<'a>( + source: &'a str, + it: &mut Peekable>>, + limit: usize, + ) -> Result<(String, Parameter), TemplateError> { let name = it.next().unwrap(); // identifier let key = source[name.start..name.end].to_owned(); @@ -235,33 +251,37 @@ impl Template { } #[inline] - fn parse_block_param<'a>(source: &'a str, - it: &mut Peekable>>, - limit: usize) - -> Result { + fn parse_block_param<'a>( + source: &'a str, + it: &mut Peekable>>, + limit: usize, + ) -> Result { let p1_name = it.next().unwrap(); // identifier let p1 = source[p1_name.start..p1_name.end].to_owned(); let p2 = it.peek().and_then(|p2_name| if p2_name.end <= limit { - Some(source[p2_name.start..p2_name.end].to_owned()) - } else { - None - }); + Some(source[p2_name.start..p2_name.end].to_owned()) + } else { + None + }); if p2.is_some() { it.next(); - Ok(BlockParam::Pair((Parameter::Name(p1), Parameter::Name(p2.unwrap())))) + Ok(BlockParam::Pair( + (Parameter::Name(p1), Parameter::Name(p2.unwrap())), + )) } else { Ok(BlockParam::Single(Parameter::Name(p1))) } } #[inline] - fn parse_expression<'a>(source: &'a str, - it: &mut Peekable>>, - limit: usize) - -> Result { + fn parse_expression<'a>( + source: &'a str, + it: &mut Peekable>>, + limit: usize, + ) -> Result { let mut params: Vec = Vec::new(); let mut hashes: BTreeMap = BTreeMap::new(); let mut omit_pre_ws = false; @@ -309,13 +329,13 @@ impl Template { } } Ok(ExpressionSpec { - name: name, - params: params, - hash: hashes, - block_param: block_param, - omit_pre_ws: omit_pre_ws, - omit_pro_ws: omit_pro_ws, - }) + name: name, + params: params, + hash: hashes, + block_param: block_param, + omit_pre_ws: omit_pre_ws, + omit_pro_ws: omit_pro_ws, + }) } #[inline] @@ -344,7 +364,10 @@ impl Template { if !parser.handlebars() { let (_, pos) = parser.expected(); let (line_no, col_no) = parser.input().line_col(pos); - return Err(TemplateError::of(TemplateErrorReason::InvalidSyntax).at(line_no, col_no)); + return Err(TemplateError::of(TemplateErrorReason::InvalidSyntax).at( + line_no, + col_no, + )); } let mut it = parser.queue().iter().peekable(); @@ -354,17 +377,26 @@ impl Template { if token.rule != Rule::template { if token.start != prev_end && !omit_pro_ws && token.rule != Rule::raw_text && - token.rule != Rule::raw_block_text { + token.rule != Rule::raw_block_text + { let (line_no, col_no) = parser.input().line_col(prev_end); if token.rule == Rule::raw_block_end { let text = &source[prev_end..token.start]; let mut t = Template::new(mapping); - t.push_element(RawString(text.to_owned()), line_no, col_no); + t.push_element( + RawString(Template::unescape_tags(text)), + line_no, + col_no, + ); template_stack.push_front(t); } else { let text = &source[prev_end..token.start]; let mut t = template_stack.front_mut().unwrap(); - t.push_element(RawString(text.to_owned()), line_no, col_no); + t.push_element( + RawString(Template::unescape_tags(text)), + line_no, + col_no, + ); } } } @@ 
-380,7 +412,7 @@ impl Template { text = text.trim_left(); } let mut t = template_stack.front_mut().unwrap(); - t.push_element(RawString(text.to_owned()), line_no, col_no); + t.push_element(RawString(Template::unescape_tags(text)), line_no, col_no); } Rule::helper_block_start | Rule::raw_block_start | @@ -445,7 +477,7 @@ impl Template { text = text.trim_left(); } let mut t = Template::new(mapping); - t.push_element(RawString(text.to_owned()), line_no, col_no); + t.push_element(RawString(Template::unescape_tags(text)), line_no, col_no); template_stack.push_front(t); } Rule::expression | @@ -519,9 +551,14 @@ impl Template { let t = template_stack.front_mut().unwrap(); t.elements.push(HelperBlock(h)); } else { - return Err(TemplateError::of( - TemplateErrorReason::MismatchingClosedHelper( - h.name, close_tag_name)).at(line_no, col_no)); + return Err( + TemplateError::of( + TemplateErrorReason::MismatchingClosedHelper( + h.name, + close_tag_name, + ), + ).at(line_no, col_no), + ); } } Rule::directive_block_end | @@ -538,9 +575,14 @@ impl Template { t.elements.push(PartialBlock(d)); } } else { - return Err(TemplateError::of( - TemplateErrorReason::MismatchingClosedDirective( - d.name, close_tag_name)).at(line_no, col_no)); + return Err( + TemplateError::of( + TemplateErrorReason::MismatchingClosedDirective( + d.name, + close_tag_name, + ), + ).at(line_no, col_no), + ); } } _ => unreachable!(), @@ -569,10 +611,11 @@ impl Template { } } - pub fn compile_with_name>(source: S, - name: String, - mapping: bool) - -> Result { + pub fn compile_with_name>( + source: S, + name: String, + mapping: bool, + ) -> Result { match Template::compile2(source, mapping) { Ok(mut t) => { t.name = Some(name); @@ -597,6 +640,28 @@ pub enum TemplateElement { Comment(String), } +#[test] +fn test_parse_escaped_tag_raw_string() { + let source = r"foo \\{{bar}}"; + let t = Template::compile(source.to_string()).ok().unwrap(); + assert_eq!(t.elements.len(), 1); + assert_eq!( + *t.elements.get(0).unwrap(), + RawString("foo {{bar}}".to_string()) + ); +} + +#[test] +fn test_parse_escaped_block_raw_string() { + let source = r"\\{{{{foo}}}} bar"; + let t = Template::compile(source.to_string()).ok().unwrap(); + assert_eq!(t.elements.len(), 1); + assert_eq!( + *t.elements.get(0).unwrap(), + RawString("{{{{foo}}}} bar".to_string()) + ); +} + #[test] fn test_parse_template() { let source = "

{{title}} 你好

{{{content}}} @@ -607,22 +672,21 @@ fn test_parse_template() { assert_eq!(t.elements.len(), 10); assert_eq!(*t.elements.get(0).unwrap(), RawString("

".to_string())); - assert_eq!(*t.elements.get(1).unwrap(), - Expression(Parameter::Name("title".to_string()))); + assert_eq!( + *t.elements.get(1).unwrap(), + Expression(Parameter::Name("title".to_string())) + ); - assert_eq!(*t.elements.get(3).unwrap(), - HTMLExpression(Parameter::Name("content".to_string()))); + assert_eq!( + *t.elements.get(3).unwrap(), + HTMLExpression(Parameter::Name("content".to_string())) + ); match *t.elements.get(5).unwrap() { HelperBlock(ref h) => { assert_eq!(h.name, "if".to_string()); assert_eq!(h.params.len(), 1); - assert_eq!(h.template - .as_ref() - .unwrap() - .elements - .len(), - 1); + assert_eq!(h.template.as_ref().unwrap().elements.len(), 1); } _ => { panic!("Helper expected here."); @@ -644,12 +708,7 @@ fn test_parse_template() { HelperBlock(ref h) => { assert_eq!(h.name, "unless".to_string()); assert_eq!(h.params.len(), 1); - assert_eq!(h.inverse - .as_ref() - .unwrap() - .elements - .len(), - 1); + assert_eq!(h.inverse.as_ref().unwrap().elements.len(), 1); } _ => { panic!("Helper expression here"); @@ -664,8 +723,10 @@ fn test_parse_error() { let t = Template::compile(source.to_string()); - assert_eq!(t.unwrap_err(), - TemplateError::of(TemplateErrorReason::InvalidSyntax).at(4, 5)); + assert_eq!( + t.unwrap_err(), + TemplateError::of(TemplateErrorReason::InvalidSyntax).at(4, 5) + ); } #[test] @@ -750,22 +811,20 @@ fn test_white_space_omitter() { assert_eq!(t.elements[1], Expression(Parameter::Name("world".into()))); assert_eq!(t.elements[2], RawString("!".to_string())); - let t2 = Template::compile("{{#if true}}1 {{~ else ~}} 2 {{~/if}}".to_string()).ok().unwrap(); + let t2 = Template::compile("{{#if true}}1 {{~ else ~}} 2 {{~/if}}".to_string()) + .ok() + .unwrap(); assert_eq!(t2.elements.len(), 1); match t2.elements[0] { HelperBlock(ref h) => { - assert_eq!(h.template - .as_ref() - .unwrap() - .elements - [0], - RawString("1".to_string())); - assert_eq!(h.inverse - .as_ref() - .unwrap() - .elements - [0], - RawString("2".to_string())); + assert_eq!( + h.template.as_ref().unwrap().elements[0], + RawString("1".to_string()) + ); + assert_eq!( + h.inverse.as_ref().unwrap().elements[0], + RawString("2".to_string()) + ); } _ => unreachable!(), } @@ -802,8 +861,10 @@ fn test_raw_helper() { assert_eq!(h.name, "raw".to_owned()); if let Some(ref ht) = h.template { assert_eq!(ht.elements.len(), 1); - assert_eq!(*ht.elements.get(0).unwrap(), - RawString("good{{night}}".to_owned())); + assert_eq!( + *ht.elements.get(0).unwrap(), + RawString("good{{night}}".to_owned()) + ); } else { panic!("helper template not found"); } @@ -827,8 +888,10 @@ fn test_literal_parameter_parser() { Ok(t) => { if let HelperExpression(ref ht) = t.elements[0] { assert_eq!(ht.params[0], Parameter::Literal(Json::U64(1))); - assert_eq!(ht.hash["name"], - Parameter::Literal(Json::String("value".to_owned()))); + assert_eq!( + ht.hash["name"], + Parameter::Literal(Json::String("value".to_owned())) + ); assert_eq!(ht.hash["valid"], Parameter::Literal(Json::Boolean(false))); assert_eq!(ht.hash["ref"], Parameter::Name("someref".to_owned())); } @@ -844,8 +907,10 @@ fn test_literal_parameter_parser() { Ok(t) => { if let HelperExpression(ref ht) = t.elements[0] { assert_eq!(ht.params[0], Parameter::Literal(Json::U64(1))); - assert_eq!(ht.hash["name"], - Parameter::Literal(Json::String("value".to_owned()))); + assert_eq!( + ht.hash["name"], + Parameter::Literal(Json::String("value".to_owned())) + ); assert_eq!(ht.hash["valid"], Parameter::Literal(Json::Bool(false))); assert_eq!(ht.hash["ref"], 
Parameter::Name("someref".to_owned())); } @@ -873,13 +938,11 @@ fn test_template_mapping() { #[test] fn test_whitespace_elements() { - let c = Template::compile(" {{elem}}\n\t{{#if true}} \ - {{/if}}\n{{{{raw}}}} {{{{/raw}}}}\n{{{{raw}}}}{{{{/raw}}}}\n"); - assert_eq!(c.ok() - .unwrap() - .elements - .len(), - 9); + let c = Template::compile( + " {{elem}}\n\t{{#if true}} \ + {{/if}}\n{{{{raw}}}} {{{{/raw}}}}\n{{{{raw}}}}{{{{/raw}}}}\n", + ); + assert_eq!(c.ok().unwrap().elements.len(), 9); } #[test] @@ -903,7 +966,8 @@ fn test_block_param() { Ok(t) => { if let HelperBlock(ref ht) = t.elements[0] { if let Some(BlockParam::Pair((Parameter::Name(ref n1), - Parameter::Name(ref n2)))) = ht.block_param { + Parameter::Name(ref n2)))) = ht.block_param + { assert_eq!(n1, "key"); assert_eq!(n2, "val"); } else { @@ -918,7 +982,6 @@ fn test_block_param() { } #[test] -#[cfg(not(feature="partial_legacy"))] fn test_directive() { match Template::compile("hello {{* ssh}} world") { Err(e) => panic!("{}", e), @@ -940,36 +1003,40 @@ fn test_directive() { } } - match Template::compile("{{#*inline \"hello\"}}expand to hello{{/inline}}{{> hello}}") { + match Template::compile( + "{{#*inline \"hello\"}}expand to hello{{/inline}}{{> hello}}", + ) { Err(e) => panic!("{}", e), Ok(t) => { if let DirectiveBlock(ref db) = t.elements[0] { assert_eq!(db.name, Parameter::Name("inline".to_owned())); - assert_eq!(db.params[0], - Parameter::Literal(Json::String("hello".to_owned()))); - assert_eq!(db.template - .as_ref() - .unwrap() - .elements - [0], - TemplateElement::RawString("expand to hello".to_owned())); + assert_eq!( + db.params[0], + Parameter::Literal(Json::String("hello".to_owned())) + ); + assert_eq!( + db.template.as_ref().unwrap().elements[0], + TemplateElement::RawString("expand to hello".to_owned()) + ); } } } - match Template::compile("{{#> layout \"hello\"}}expand to hello{{/layout}}{{> hello}}") { + match Template::compile( + "{{#> layout \"hello\"}}expand to hello{{/layout}}{{> hello}}", + ) { Err(e) => panic!("{}", e), Ok(t) => { if let PartialBlock(ref db) = t.elements[0] { assert_eq!(db.name, Parameter::Name("layout".to_owned())); - assert_eq!(db.params[0], - Parameter::Literal(Json::String("hello".to_owned()))); - assert_eq!(db.template - .as_ref() - .unwrap() - .elements - [0], - TemplateElement::RawString("expand to hello".to_owned())); + assert_eq!( + db.params[0], + Parameter::Literal(Json::String("hello".to_owned())) + ); + assert_eq!( + db.template.as_ref().unwrap().elements[0], + TemplateElement::RawString("expand to hello".to_owned()) + ); } } } diff --git a/src/vendor/html-diff/.cargo-checksum.json b/src/vendor/html-diff/.cargo-checksum.json new file mode 100644 index 0000000000..ff2c61b699 --- /dev/null +++ b/src/vendor/html-diff/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".travis.yml":"9888d016778d0f68a79eb90f1d0af6a966562e4ee3135cd47b8dac2ef5dab5cf","Cargo.toml":"eb7d72869e90f446010f2f350323c97ddd6d37eb8a4a3a8222eb42bf97102e24","Cargo.toml.orig":"184b4dcbf701943c92aabb1907afedb2914ab9d3e7fe758d08b5f64a9434cca3","LICENSE":"d64fe3199be0c90d1f88b363e6b567d5812f64c01accc8957e71381598a3d670","README.md":"b157f868aaad98a7d0616fa5c46c7e0cf69514b2493949b7a15da98dc156bff1","src/lib.rs":"182d2f851904424247325b7eedd507795036a1673684120581a2e99fb06e4508","src/main.rs":"538328e195cb6725abf01c82b15017fe863ee9c039559e935ef2ef40cdec06f1","test_files/basic.html":"18a3a4a909bf50032a6ab2d315a8e1af0c65378dc808923586c7bc6ba55a64b8","test_files/basic.stdout":"0ddc11528b1c8e901313b99e9111210c8e45c8ca8d2cb7ea2fdf723f05703d07","test_files/basic_compare.html":"cc97d35ed1f086121b36717374cffe3b34133402849d12c7ac7540aef84a36f1","tests/test_files.rs":"c5fe66be9fc553fa6d5d3bc946a9d05e60372a30e97b8afea65b303730c24645"},"package":"5298d63081a642508fce965740ddb03a386c5d81bf1fef0579a815cf49cb8c68"} \ No newline at end of file diff --git a/src/vendor/html-diff/.cargo-ok b/src/vendor/html-diff/.cargo-ok new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/vendor/html-diff/.travis.yml b/src/vendor/html-diff/.travis.yml new file mode 100644 index 0000000000..fe46cd2df6 --- /dev/null +++ b/src/vendor/html-diff/.travis.yml @@ -0,0 +1,19 @@ +language: rust +matrix: + include: + - os: linux + rust: stable + dist: trusty + - os: linux + rust: nightly + dist: trusty + - os: osx + rust: stable + - os: osx + rust: nightly +script: + - rustc --version + - cargo install clippy || touch clippy_failed + - RUST_BACKTRACE=1 cargo build + - if [ ! -f clippy_failed ]; then cargo clippy; fi + - RUST_BACKTRACE=1 cargo test diff --git a/src/vendor/html-diff/Cargo.toml b/src/vendor/html-diff/Cargo.toml new file mode 100644 index 0000000000..557dd4de8e --- /dev/null +++ b/src/vendor/html-diff/Cargo.toml @@ -0,0 +1,25 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "html-diff" +version = "0.0.4" +authors = ["Guillaume Gomez "] +description = "Library detect HTML diffs" +categories = ["html", "diff"] +license = "MIT" +repository = "https://github.com/GuillaumeGomez/html-diff-rs" + +[[bin]] +name = "html_diff" +[dependencies.kuchiki] +version = "0.5.1" diff --git a/src/vendor/html-diff/Cargo.toml.orig b/src/vendor/html-diff/Cargo.toml.orig new file mode 100644 index 0000000000..d12af69bb4 --- /dev/null +++ b/src/vendor/html-diff/Cargo.toml.orig @@ -0,0 +1,16 @@ +[package] +name = "html-diff" +version = "0.0.4" +authors = ["Guillaume Gomez "] + +description = "Library detect HTML diffs" +repository = "https://github.com/GuillaumeGomez/html-diff-rs" +license = "MIT" + +categories = ["html", "diff"] + +[dependencies] +kuchiki = "0.5.1" + +[[bin]] +name = "html_diff" diff --git a/src/vendor/html-diff/LICENSE b/src/vendor/html-diff/LICENSE new file mode 100644 index 0000000000..8bd1bcc08c --- /dev/null +++ b/src/vendor/html-diff/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Guillaume Gomez + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/src/vendor/html-diff/README.md b/src/vendor/html-diff/README.md new file mode 100644 index 0000000000..22703cd956 --- /dev/null +++ b/src/vendor/html-diff/README.md @@ -0,0 +1,11 @@ +# html-diff-rs + +This crate is aim to get differences between two HTML contents. 
+ +## Run it + +To run it, just do: + +```bash +> cargo run -- file1.html file2.html +``` diff --git a/src/vendor/html-diff/src/lib.rs b/src/vendor/html-diff/src/lib.rs new file mode 100644 index 0000000000..e5ef71dde4 --- /dev/null +++ b/src/vendor/html-diff/src/lib.rs @@ -0,0 +1,442 @@ +extern crate kuchiki; + +use kuchiki::traits::*; +use kuchiki::{ElementData, NodeDataRef, NodeRef}; + +use std::collections::HashMap; + +#[derive(Debug, Clone, PartialEq)] +pub struct ElementInformation { + pub element_name: String, + pub element_content: String, + pub path: String, +} + +impl ElementInformation { + fn new(content: &T, path: &[String]) -> ElementInformation { + ElementInformation { + element_name: content.name(), + element_content: content.output(), + path: path.join("/"), + } + } + + fn from_path(path: &[String]) -> ElementInformation { + ElementInformation { + element_name: String::new(), + element_content: String::new(), + path: path.join("/"), + } + } +} + +trait ToOutput { + fn output(&self) -> String; + fn name(&self) -> String; +} + +impl ToOutput for NodeDataRef { + fn output(&self) -> String { + format!("<{0} {1}>{2}", + (*self).name.local, + (*(*self).attributes.borrow()).map + .iter() + .map(|(k, v)| format!("\"{}\"=\"{}\"", k.local, v)) + .collect::>() + .join(" "), + self.text_contents()) + } + + fn name(&self) -> String { + format!("{}", (*self).name.local) + } +} + +impl ToOutput for NodeRef { + fn output(&self) -> String { + self.to_string() + } + + fn name(&self) -> String { + if let Some(s) = self.to_string().split(' ').next().unwrap().split('>').next() { + if s.is_empty() { + String::new() + } else { + s[1..].to_owned() + } + } else { + String::new() + } + } +} + +/// Contains the kind of difference and some information. +#[derive(Debug, Clone)] +pub enum Difference { + /// Different node types at the same place (text vs data for example). + NodeType { + elem: ElementInformation, + opposite_elem: ElementInformation, + }, + /// Different node names (`div` vs `ul` for example). + NodeName { + elem: ElementInformation, + opposite_elem: ElementInformation, + }, + /// Different attributes for two nodes. + NodeAttributes { + elem: ElementInformation, + elem_attributes: HashMap, + opposite_elem: ElementInformation, + opposite_elem_attributes: HashMap, + }, + /// Different text content for two nodes. + NodeText { + elem: ElementInformation, + elem_text: String, + opposite_elem: ElementInformation, + opposite_elem_text: String, + }, + /// If an element isn't present in one of the two sides. + NotPresent { + elem: Option, + opposite_elem: Option, + }, +} + +impl Difference { + pub fn is_node_type(&self) -> bool { + match *self { + Difference::NodeType { .. } => true, + _ => false, + } + } + + pub fn is_node_name(&self) -> bool { + match *self { + Difference::NodeName { .. } => true, + _ => false, + } + } + + pub fn is_node_attributes(&self) -> bool { + match *self { + Difference::NodeAttributes { .. } => true, + _ => false, + } + } + + pub fn is_node_text(&self) -> bool { + match *self { + Difference::NodeText { .. } => true, + _ => false, + } + } + + pub fn is_not_present(&self) -> bool { + match *self { + Difference::NotPresent { .. 
} => true, + _ => false, + } + } +} + +impl ToString for Difference { + fn to_string(&self) -> String { + match *self { + Difference::NodeType { ref elem, ref opposite_elem } => { + format!("{} => [Types differ]: expected \"{}\", found \"{}\"", + elem.path, elem.element_name, opposite_elem.element_name) + } + Difference::NodeName { ref elem, ref opposite_elem } => { + format!("{} => [Tags differ]: expected \"{}\", found \"{}\"", + elem.path, elem.element_name, opposite_elem.element_name) + } + Difference::NodeAttributes { ref elem, + ref elem_attributes, + ref opposite_elem_attributes, + .. } => { + format!("{} => [Attributes differ in \"{}\"]: expected \"{:?}\", found \"{:?}\"", + elem.path, elem.element_name, elem_attributes, opposite_elem_attributes) + } + Difference::NodeText { ref elem, ref elem_text, ref opposite_elem_text, .. } => { + format!("{} => [Texts differ]: expected {:?}, found {:?}", + elem.path, elem_text, opposite_elem_text) + } + Difference::NotPresent { ref elem, ref opposite_elem } => { + if let Some(ref elem) = *elem { + format!("{} => [One element is missing]: expected {:?}", + elem.path, elem.element_name) + } else if let Some(ref elem) = *opposite_elem { + format!("{} => [Unexpected element \"{}\"]: found {:?}", + elem.path, elem.element_name, elem.element_content) + } else { + unreachable!() + } + } + } + } +} + +fn check_elements(elem1: &NodeDataRef, + elem2: &NodeDataRef, + path: &[String]) -> Option { + let e1: &ElementData = &*elem1; + let e2: &ElementData = &*elem2; + if e1.name != e2.name { + Some(Difference::NodeName { + elem: ElementInformation::new(elem1, path), + opposite_elem: ElementInformation::new(elem2, path), + }) + } else { + let all_attributes_match = + (*e1.attributes.borrow()).map.iter().any(|(k, v)| { + (*e2.attributes.borrow()).map.get(k) != Some(v) + }); + if all_attributes_match { + Some(Difference::NodeAttributes { + elem: ElementInformation::new(elem1, path), + elem_attributes: { + let map = &(*e1.attributes.borrow()).map; + let mut result = HashMap::with_capacity(map.len()); + for (k, v) in map { + result.insert(format!("{}", k.local), v.clone()); + } + result + }, + opposite_elem: ElementInformation::new(elem2, path), + opposite_elem_attributes: { + let map = &(*e2.attributes.borrow()).map; + let mut result = HashMap::with_capacity(map.len()); + for (k, v) in map { + result.insert(format!("{}", k.local), v.clone()); + } + result + }, + }) + } else { + None + } + } +} + +fn check_if_comment_or_empty_text(e: &NodeRef) -> bool { + e.as_comment().is_none() && + if let Some(t) = e.as_text() { + !t.borrow().trim().is_empty() + } else { + true + } +} + +fn go_through_tree(element1: &NodeRef, element2: &NodeRef, + path: &mut Vec) -> Vec { + let mut differences = Vec::new(); + let mut pos = 0; + let mut it1 = element1.children().filter(|e| check_if_comment_or_empty_text(e)); + let mut it2 = element2.children().filter(|e| check_if_comment_or_empty_text(e)); + loop { + let (element1, element2) = (it1.next(), it2.next()); + if let Some(diff) = match (&element1, &element2) { + (&Some(ref element1), &Some(ref element2)) => { + match (element1.clone().into_element_ref(), element2.clone().into_element_ref()) { + (Some(e1), Some(e2)) => check_elements(&e1, &e2, path), + (None, None) => { + match (element1.as_text(), element2.as_text()) { + (Some(t1), Some(t2)) => { + if t1 != t2 { + Some(Difference::NodeText { + elem: ElementInformation::from_path(path), + elem_text: t1.borrow().clone(), + opposite_elem: ElementInformation::from_path(path), + 
opposite_elem_text: t2.borrow().clone(), + }) + } else { + None + } + } + (None, None) => None, + _ => { + Some(Difference::NodeType { + elem: ElementInformation::new(element1, path), + opposite_elem: ElementInformation::new(element2, path), + }) + } + } + } + _ => { + Some(Difference::NodeType { + elem: ElementInformation::new(element1, path), + opposite_elem: ElementInformation::new(element2, path), + }) + } + } + } + (&Some(ref elem1), &None) => { + Some(Difference::NotPresent { + elem: Some(ElementInformation::new(elem1, path)), + opposite_elem: None, + }) + } + (&None, &Some(ref elem2)) => { + Some(Difference::NotPresent { + elem: None, + opposite_elem: Some(ElementInformation::new(elem2, path)), + }) + } + (&None, &None) => break, + } { + // need to add parent content + differences.push(diff); + continue + } + let need_pop = if let Some(ref elem) = element1 { + if let Some(elem) = elem.as_element() { + path.push(format!("{}[{}]", elem.name.local, pos)); + pos += 1; + true + } else { + false + } + } else { + false + }; + differences.extend_from_slice(&go_through_tree(&element1.unwrap(), + &element2.unwrap(), + path)); + if need_pop { + path.pop(); + } + } + differences +} + +/// Take two html content strings in output, returns a `Vec` containing the differences (if any). +pub fn get_differences(content1: &str, content2: &str) -> Vec { + go_through_tree(&kuchiki::parse_html().one(content1), &kuchiki::parse_html().one(content2), + &mut vec![String::new()]) +} + +#[test] +fn basic_diff() { + let original = "
"; + let other = "

"; + + let differences = get_differences(original, other); + assert_eq!(differences.len(), 1, "{:?}", differences); + assert_eq!(differences[0].is_node_name(), true, "{:?}", differences[0]); +} + +// Test if we stop correctly at first difference and don't go down. +#[test] +fn children_diff() { + let original = "

"; + let other = "

"; + + let differences = get_differences(original, other); + assert_eq!(differences.len(), 1, "{:?}", differences); + assert_eq!(differences[0].is_node_name(), true, "{:?}", differences[0]); +} + +#[test] +fn check_attributes_order() { + let original = "

"; + let other = "

"; + + let differences = get_differences(original, other); + assert_eq!(differences.len(), 0, "{:?}", differences); +} + +#[test] +fn check_attributes_missing() { + let original = "

"; + let other = "

"; + + let differences = get_differences(original, other); + assert_eq!(differences.len(), 1, "{:?}", differences); + assert_eq!(differences[0].is_node_attributes(), true, "{:?}", differences[0]); + match differences[0] { + Difference::NodeAttributes { ref elem_attributes, + ref opposite_elem_attributes, + .. } => { + let mut attributes = HashMap::new(); + attributes.insert("class".to_owned(), "foo".to_owned()); + assert_eq!(attributes, *opposite_elem_attributes, + "{:?}/{:?}", opposite_elem_attributes, attributes); + attributes.insert("id".to_owned(), "g".to_owned()); + assert_eq!(attributes, *elem_attributes, + "{:?}/{:?}", elem_attributes, attributes); + } + _ => unreachable!(), + } +} + +#[test] +fn check_child_below() { + let original = "
"; + let other = "
"; + + let differences = get_differences(original, other); + assert_eq!(differences.len(), 1, "{:?}", differences); + assert_eq!(differences[0].is_not_present(), true, "{:?}", differences[0]); +} + +#[test] +fn test_path() { + let original = "
"; + let other = "
"; + + let differences = get_differences(original, other); + assert_eq!(differences.len(), 1, "{:?}", differences); + assert_eq!(differences[0].is_not_present(), true, "{:?}", differences[0]); + match differences[0] { + Difference::NotPresent { ref elem, ref opposite_elem } => { + assert_eq!(elem.is_none(), true, "{:?}", elem); + assert_eq!(opposite_elem.is_some(), true, "{:?}", opposite_elem); + assert_eq!(*opposite_elem, + Some(ElementInformation { + element_name: "d".to_owned(), + element_content: "".to_owned(), + path: "/html[0]/body[1]/div[0]/b[2]/c[0]".to_owned(), + }), + "{:?}", opposite_elem); + } + _ => unreachable!(), + } + assert_eq!(differences[0].is_not_present(), true, "{:?}", differences[0]); +} + +#[test] +fn test_issue_6() { + let a = "\nchunky bacon\n\n\n\ + chunky bacon\n\n\nchunky bacon\ + \n\n\nchunky bacon\n\n\nchunky bacon\n\n\n\ + hello world!\n\n\nhello \ + world!\n\n\nsuperchunky bacon\n\n\nsuperchunky bacon\n\n\nsubchunky bacon\n\n\nsubchunky bacon\n\n\nchunky bacon\n\n\ + \n“chunky bacon”\n\n\n\ + “chunky bacon”\n\n\n‘chunky bacon’\n\n\n‘chunky bacon’\n\n\n\\$sqrt(4) = 2\ + \\$\n\n\n\\($C = \\alpha + \\beta Y^{\\gamma} + \\epsilon$\\)\n\n\ + \nchunky bacon\n\n\nmonospace bold italic phrase\ + and letters\n"; + let b = "

chunky bacon

"; + let _ = get_differences(a, b); +} + +#[test] +fn test_attributes() { + let a = r#""#; + let b = r#""#; + let differences = get_differences(a, b); + assert_eq!(differences.len(), 1); + + let a = r#""#; + let b = r#""#; + let differences = get_differences(a, b); + assert_eq!(differences.len(), 0); +} diff --git a/src/vendor/html-diff/src/main.rs b/src/vendor/html-diff/src/main.rs new file mode 100644 index 0000000000..c0b638da2e --- /dev/null +++ b/src/vendor/html-diff/src/main.rs @@ -0,0 +1,43 @@ +extern crate html_diff; + +use std::env; +use std::fs::File; +use std::io::{self, Read}; +use std::path::Path; + +fn get_file_content>(p: &P) -> io::Result { + let mut f = File::open(p)?; + let mut buffer = String::with_capacity(1000); + f.read_to_string(&mut buffer)?; + Ok(buffer) +} + +fn print_error(arg: &str, v: io::Result) { + if let Err(err) = v { + println!("\"{}\": error: {}", arg, err); + } +} + +fn main() { + let args: Vec = env::args().skip(1).collect(); + if args.len() & 1 != 0 { + println!("Need to pass an even number of HTML files"); + return + } + for args in args.chunks(2) { + let arg1 = &args[0]; + let arg2 = &args[1]; + match (get_file_content(&arg1), get_file_content(&arg2)) { + (Ok(content1), Ok(content2)) => { + let differences = html_diff::get_differences(&content1, &content2); + for diff in differences { + println!("=> {}", diff.to_string()); + } + } + (x, y) => { + print_error(arg1, x); + print_error(arg2, y); + } + } + } +} diff --git a/src/vendor/html-diff/test_files/basic.html b/src/vendor/html-diff/test_files/basic.html new file mode 100644 index 0000000000..62af9fd119 --- /dev/null +++ b/src/vendor/html-diff/test_files/basic.html @@ -0,0 +1,7 @@ + + +
<div>foooo</div>
+ + + + diff --git a/src/vendor/html-diff/test_files/basic.stdout b/src/vendor/html-diff/test_files/basic.stdout new file mode 100644 index 0000000000..c69b0ecc1c --- /dev/null +++ b/src/vendor/html-diff/test_files/basic.stdout @@ -0,0 +1,3 @@ +=> /html[0]/body[1]/div[0] => [Texts differ]: expected "foooo", found "foooo!?" +=> /html[0]/body[1] => [Tags differ]: expected "foo", found "p" +=> /html[0]/body[1] => [Unexpected element "foo"]: found "" diff --git a/src/vendor/html-diff/test_files/basic_compare.html b/src/vendor/html-diff/test_files/basic_compare.html new file mode 100644 index 0000000000..246184450c --- /dev/null +++ b/src/vendor/html-diff/test_files/basic_compare.html @@ -0,0 +1,8 @@ + + +
<div>foooo!?</div>
+

<p>some text</p>

+ + + + diff --git a/src/vendor/html-diff/tests/test_files.rs b/src/vendor/html-diff/tests/test_files.rs new file mode 100644 index 0000000000..f6b69209fe --- /dev/null +++ b/src/vendor/html-diff/tests/test_files.rs @@ -0,0 +1,61 @@ +extern crate html_diff; + +use std::io::Read; +use std::fs::{self, File}; +use std::path::Path; + +fn read_file>(p: P) -> String { + let mut f = File::open(p).expect("read_file::open failed"); + let mut content = String::new(); + f.read_to_string(&mut content).expect("read_file::read_to_end failed"); + content +} + +fn run_test(path: &str) -> bool { + let path_len = path.len(); + let original = read_file(path); + let compare_path = &format!("{}_compare.html", &path[..path_len - 5]); + let compare = read_file(&compare_path); + let expected_out = read_file(&format!("{}.stdout", &path[..path_len - 5])) + .split('\n') + .filter(|s| !s.trim().is_empty()) + .map(|s| s.to_owned()) + .collect::>() + .join("\n"); + let differences = html_diff::get_differences(&original, &compare); + let mut out = Vec::new(); + for diff in differences { + out.push(format!("=> {}", diff.to_string())); + } + let out = out.join("\n"); + if out != expected_out { + println!("comparison between {:?} and {:?} failed.\nGot: {:?}\nExpected: {:?}", + path, compare_path, out, expected_out); + false + } else { + true + } +} + +fn visit_test_dir>(dir: &P) -> usize { + let mut failures = 0; + for entry in fs::read_dir(dir).expect("read_dir failed") { + let entry = entry.expect("cannot get entry value"); + let path = entry.path(); + if path.is_file() { + let path_s = path.to_str().expect("to_str failed"); + if path_s.ends_with("_compare.html") || !path_s.ends_with(".html") { + continue + } + if !run_test(path_s) { + failures += 1; + } + } + } + failures +} + +#[test] +fn test_files() { + assert_eq!(visit_test_dir(&"test_files"), 0); +} diff --git a/src/vendor/html5ever/.cargo-checksum.json b/src/vendor/html5ever/.cargo-checksum.json new file mode 100644 index 0000000000..d1f29edf2b --- /dev/null +++ b/src/vendor/html5ever/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","Cargo.toml":"d19819a8a7990a5daac8aae608eb5ebbca3f5a51ee4461313c9223b22ea3e913","benches/tokenizer.rs":"c26e9161bfbb96a3648b26e7e01052a6feab4f8334f87d78e432ff121534b34c","build.rs":"d4bc31f8af1b4798aee17860f2524d7f04bae0d74eea0fe1563c03daf4806aec","data/bench/lipsum-zh.html":"fafe3ed44b07f9cdb0a3ca5c010a3a73cc2534b64605593e71cc70a8cf8e27e4","data/bench/lipsum.html":"f37900ddb4ee463b9aa3191297bc1a4320b94bc72ee630de01882b50eb241740","data/bench/medium-fragment.html":"d21ab12c5e538ae48af6df7ee7d7939c6fe3b2d1e36b5874a3bbb957847ddba0","data/bench/small-fragment.html":"3825226a96ac919b0a69ae98e8547a5430214b16e56497570d382720af2bfca3","data/bench/strong.html":"5adb31981cca062df929353c60dfa1c7fb91170b5a389a13c8be6555a4cf107d","data/bench/tiny-fragment.html":"9cc2d58507945020d4d206738c8f166a618dc15161609935ab1975b913967edf","data/test/ignore":"01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b","examples/capi/tokenize.c":"ca0d98155c275f0d25ef7ce6735dec9d7cedb89f54416a406f53f10c616656bf","examples/html2html.rs":"361db15963171b48d0818af17c8adeeaa51fbd38c630eb1f984f7deaf55cb7cd","examples/noop-tokenize.rs":"b5b4ed4d32c0f43cfb0cfe245e275cb8f19a54c43ba3a669578f296e6339f3db","examples/noop-tree-builder.rs":"d3085ddb88d5d6865fb0c3fd93c7ec3e0d3cbae7434f250d068915ac88b3f238","examples/print-rcdom.rs":"b264fee44e6ea1321504bcb2536ae8c086176f0e6fe01ce12a2c0c94408580dc","examples/print-tree-actions.rs":"a3548a7e62e43ad455e774fd5edf870e40db921d55b60ed653a3388e7a060bcd","examples/tokenize.rs":"c1d51886e277c1d1b2b338c3f5e3969e835d298baa70cb66ea2f8cc9b5fd87a9","macros/match_token.rs":"4c2a8faaccfb7bbee3a769240dedacf3396b633ad9317f00edc2e4e2d83f0f91","src/driver.rs":"08209edfdf0c0d08f7b1fbc3d6f3004597599da6e23e5b3c902c6d70533bdbd0","src/lib.rs":"0d793d0c84b86999103cb73b8d784d1faabe2a663588a0c053ece2ad3622a52d","src/macros.rs":"c5feda45a822c57a269e092e161cc9905663c119903bab405728f3752f544316","src/serialize/mod.rs":"a1a979ea0928eff8202707e289a83c159cbc4091cfe35c206243fc60c3ca2ca4","src/tokenizer/char_ref/mod.rs":"b22f42b28ac9af5749097dd4a6e65e8797170c19cbe537da220f1eee715c9f36","src/tokenizer/interface.rs":"f730ffddcfa124c8ecdbec98694c1d0540084274e9169c823f96c05092e4f601","src/tokenizer/mod.rs":"cc42f54e2c7ebc022da908d6fa9f7f5a7350304ca189564d28b74f834f843a05","src/tokenizer/states.rs":"49375599bcae0f1779261ced957faf7223847818580b4f7c7dee5135a7646098","src/tree_builder/actions.rs":"601fe98453d596e12cb421ba30b6aa574469c0615c8fe51e23413c63b31e6c53","src/tree_builder/data.rs":"78dffd4663ceda12db080fa96b3b295af9f9ec71ad29bd0811f81798aeee70a2","src/tree_builder/mod.rs":"777eca650a321b5b5c78dcb45430f690ab2d841db7524ee79e015173ec444635","src/tree_builder/rules.rs":"99b1f9393c9069659a056ece5e33a58728d240e21de0631b83b6f678e72d47a8","src/tree_builder/tag_sets.rs":"981e1e9172eb5b1e6398c674b8c2161d0fa3bb4b1ea7bd0cb366c1b30a1fc44e","src/tree_builder/types.rs":"7183b788427b0cefb2a2f4829ee3b7b715065d1713a2e0c9b5cba20ad07788e7","src/util/str.rs":"a21c85d3874698f88132dfc8e5c32609a18a1ef37d838d25bc1a4420bdc11c95","tests/foreach_html5lib_test/mod.rs":"9dbeafc7526f8f144aaa5a25a98521ef38f427884e95a3d22f8640c4fbd0e879","tests/serializer.rs":"f4e7c12cf281cff66156f5a99ef37d2ff522660c47a9ffa672453f1414abe8a8","tests/tokenizer.rs":"2a0c0731cdae8220922fb89da9d95bef265ac1120e0d8681f36930c0a4131859","tests/tree_builder.rs":"30f08aae36ef4ec7388b69c7c5f7fe8909e206cc71c946bd104a134d46de7838"},"package":"a49d5001dd1bddf042ea41ed4e0a671d50b1bf187e66
b349d7ec613bdce4ad90"} \ No newline at end of file diff --git a/src/vendor/html5ever/.cargo-ok b/src/vendor/html5ever/.cargo-ok new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/vendor/html5ever/Cargo.toml b/src/vendor/html5ever/Cargo.toml new file mode 100644 index 0000000000..67448b962d --- /dev/null +++ b/src/vendor/html5ever/Cargo.toml @@ -0,0 +1,47 @@ +[package] + +name = "html5ever" +version = "0.18.0" +authors = [ "The html5ever Project Developers" ] +license = "MIT / Apache-2.0" +repository = "https://github.com/servo/html5ever" +description = "High-performance browser-grade HTML5 parser" +documentation = "https://docs.rs/html5ever" +build = "build.rs" + +[lib] +name = "html5ever" + +doctest = true + +[[test]] +name = "tree_builder" +harness = false + +[[test]] +name = "tokenizer" +harness = false + +[[test]] +name = "serializer" + +[[bench]] +name = "tokenizer" +harness = false + +[features] +unstable = ["markup5ever/unstable"] +heap_size = ["markup5ever/heap_size"] + +[dependencies] +log = "0.3" +mac = "0.1" +markup5ever = { version = "0.3", path = "../markup5ever" } + +[dev-dependencies] +rustc-serialize = "0.3.15" +rustc-test = "0.1.3" + +[build-dependencies] +quote = "0.3.3" +syn = { version = "0.11", features = ["full", "visit"] } diff --git a/src/vendor/html5ever/benches/tokenizer.rs b/src/vendor/html5ever/benches/tokenizer.rs new file mode 100644 index 0000000000..4b620e05e0 --- /dev/null +++ b/src/vendor/html5ever/benches/tokenizer.rs @@ -0,0 +1,162 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +extern crate test; +extern crate html5ever; + +use std::{fs, env, cmp}; +use std::path::PathBuf; +use std::default::Default; + +use test::{black_box, Bencher, TestDesc, TestDescAndFn}; +use test::{DynTestName, DynBenchFn, TDynBenchFn}; +use test::ShouldPanic::No; + +use html5ever::tokenizer::{BufferQueue, TokenSink, Token, Tokenizer, TokenizerOpts, TokenSinkResult}; +use html5ever::tendril::*; + +struct Sink; + +impl TokenSink for Sink { + type Handle = (); + + fn process_token(&mut self, token: Token, _line_number: u64) -> TokenSinkResult<()> { + // Don't use the token, but make sure we don't get + // optimized out entirely. + black_box(token); + TokenSinkResult::Continue + } +} + +// This could almost be the TokenSink too, but it's not +// mut within run(). +struct Bench { + input: Vec, + clone_only: bool, + opts: TokenizerOpts, +} + +/// All tendrils in Bench.input are owned. +unsafe impl Send for Bench {} + +impl Bench { + fn new(name: &str, size: Option, clone_only: bool, + opts: TokenizerOpts) -> Bench { + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("data/bench/"); + path.push(name); + let mut file = fs::File::open(&path).ok().expect("can't open file"); + + // Read the file and treat it as an infinitely repeating sequence of characters. + let mut file_input = ByteTendril::new(); + file.read_to_tendril(&mut file_input).ok().expect("can't read file"); + let file_input: StrTendril = file_input.try_reinterpret().unwrap(); + let size = size.unwrap_or(file_input.len()); + let mut stream = file_input.chars().cycle(); + + // Break the input into chunks of 1024 chars (= a few kB). + // This simulates reading from the network. 
+ let mut input = vec![]; + let mut total = 0usize; + while total < size { + // The by_ref() call is important, otherwise we get wrong results! + // See rust-lang/rust#18045. + let sz = cmp::min(1024, size - total); + input.push(stream.by_ref().take(sz).collect::().to_tendril()); + total += sz; + } + + Bench { + input: input, + clone_only: clone_only, + opts: opts, + } + } +} + +impl TDynBenchFn for Bench { + fn run(&self, bh: &mut Bencher) { + bh.iter(|| { + let input = self.input.clone(); + if self.clone_only { + // Because the tokenizer consumes its buffers, we need + // to clone inside iter(). We can benchmark this + // separately and subtract it out. + // + // See rust-lang/rust#18043. + black_box(input); + } else { + let mut tok = Tokenizer::new(Sink, self.opts.clone()); + let mut buffer = BufferQueue::new(); + for buf in input.into_iter() { + buffer.push_back(buf); + let _ = tok.feed(&mut buffer); + } + let _ = tok.feed(&mut buffer); + tok.end(); + } + }); + } +} + +fn make_bench(name: &str, size: Option, clone_only: bool, + opts: TokenizerOpts) -> TestDescAndFn { + TestDescAndFn { + desc: TestDesc { + name: DynTestName([ + "tokenize ".to_string(), + name.to_string(), + size.map_or("".to_string(), |s| format!(" size {:7}", s)), + (if clone_only { " (clone only)" } else { "" }).to_string(), + (if opts.exact_errors { " (exact errors)" } else { "" }).to_string(), + ].concat().to_string()), + ignore: false, + should_panic: No, + }, + testfn: DynBenchFn(Box::new(Bench::new(name, size, clone_only, opts))), + } +} + +fn tests() -> Vec { + let mut tests = vec!(make_bench("lipsum.html", Some(1024*1024), true, Default::default())); + + let mut opts_vec = vec!(Default::default()); + if env::var("BENCH_EXACT_ERRORS").is_ok() { + opts_vec.push(TokenizerOpts { + exact_errors: true, + .. Default::default() + }); + } + + for opts in opts_vec.iter() { + for &file in ["lipsum.html", "lipsum-zh.html", "strong.html"].iter() { + for &sz in [1024, 1024*1024].iter() { + tests.push(make_bench(file, Some(sz), false, opts.clone())); + } + } + + for &file in ["tiny-fragment.html", "small-fragment.html", "medium-fragment.html"].iter() { + tests.push(make_bench(file, None, false, opts.clone())); + } + + if env::var("BENCH_UNCOMMITTED").is_ok() { + // Not checked into the repo, so don't include by default. + for &file in ["sina.com.cn.html", "wikipedia.html"].iter() { + let name = format!("uncommitted/{}", file); + tests.push(make_bench(&name, None, false, opts.clone())); + } + } + } + tests +} + +fn main() { + let args: Vec<_> = env::args().collect(); + test::test_main(&args, tests()); +} diff --git a/src/vendor/html5ever/build.rs b/src/vendor/html5ever/build.rs new file mode 100644 index 0000000000..9eb8eea147 --- /dev/null +++ b/src/vendor/html5ever/build.rs @@ -0,0 +1,28 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#[macro_use] extern crate quote; +extern crate syn; + +use std::env; +use std::path::Path; + +#[path = "macros/match_token.rs"] +mod match_token; + +fn main() { + let manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); + + let rules_rs = Path::new(&manifest_dir).join("src/tree_builder/rules.rs"); + match_token::expand_match_tokens( + &rules_rs, + &Path::new(&env::var("OUT_DIR").unwrap()).join("rules.rs")); + + println!("cargo:rerun-if-changed={}", rules_rs.display()); +} diff --git a/src/vendor/html5ever/data/bench/lipsum-zh.html b/src/vendor/html5ever/data/bench/lipsum-zh.html new file mode 100644 index 0000000000..1efe2fa520 --- /dev/null +++ b/src/vendor/html5ever/data/bench/lipsum-zh.html @@ -0,0 +1,19 @@ +甀 曒檃檑 糲蘥蠩 櫋瀩, 嗢 剆坲姏 齸圞趲 葠蜄蛖 砎粁 擙樲橚 噅尰崺 廘榙榾 誙 煘煓, 腶 敔耜 逯郹酟 蝪蝩覤 顲鱭鸋, 趍 櫱瀯灂 碄碆碃 矠筸 砫粍 耜僇鄗 搋朠楟 溔 齝囃 槏 鼏噳墺 滭滹漇, 骱 翀胲胵 蝑蝞蝢 鑅鷖 + +痯 荾莯 驧鬤鸕 梪涫湴, 踙 黈龠懱 椼毸溠 蠬襱覾 滱漮, 耜僇鄗 沀皯竻 饇馦 蒏 斠 墐墆墏 艎艑蔉 貕貔 廑憀慡 嫬廙彯 鳻嶬 跿, 飹勫嫢 熤熡磎 慛 賗跿, 灂瀿 綧 摿斠榱 橀槶澉 碄碆碃 鯦鯢鯡 踾踶輵 鍌鍗鍷 溿 滭滹, 綧 藙藨 蝪蝩覤 渮湸湤, 輗 鰝鰨 犌犐瑆 櫞氌瀙 鵳齖齘 塝 寁崏 摨敹暯 檌檒濦 滭滹漇, 撖 輈鄍 婸媥媕 漦澌潬, 膣 姛帡恦 莃荶衒 昢炾 + +儮嬼懫 馦騧騜 覛谼貆 墏壾 鋱, 緦 豥趍 翍脝艴 絟缾臮 摲 輴郺 篧糑縒 獧瞝瞣 袀豇貣, 廞 鶄鵳 肒芅邥 泏狔狑 覛谼貆 儋圚墝 滭滹漇 鰝鰨 蔰, 忁曨曣 蝪蝩覤 埱娵徖 萴葂 跬, 緷 巂鞪 晛桼桾 踥踕踛 翣聜蒢 虥諰諨 箄縴儳 磼簎 殠, 銇 烺焆琀 鱐鱍鱕 垽娭屔 齫儽, 蒮 靮傿 烍烚珜 蒝蒧蓏 璈皞緪 圪妀 綧 溮煡煟 轛轝酅 濷瓂癚, 篧糑縒 谾踘遳 讘麡 腶, 鯦鯢鯡 邆錉霋 鋱 蛚袲 鋱鋟鋈 瀷瀹藶 騉鬵 嗢 + +蝺 鰔鶟 濇燖燏 梪涫湴 齫儽戃, 馺 髬魆 齴讘麡 袟袘觕, 甀瞂硾 鍹餳駷 邆錉霋 曮禷 瑽 虰豖 瀿犨皫 蜬蝁蜠 檹瀔濼 榯, 獝瘝磈 輣鋄銶 抏旲 諃 褌 緳廞徲 轞騹鼚 瘵瘲 媥媕 踙 簎艜薤 鸙讟钃 + +滘 鐩闤鞿 轞騹鼚 絟缾臮 碃稘, 鮥鴮 輗 渳湥牋 獿譿躐 趉軨鄇 鋑鋡髬 嶜憃撊 磑 棳棔 滜溙 蔏 烺焆琀 鱐鱍鱕 撌斳暩 緅 彃慔 釢髟偛 礯籔羻 + +鏾鐇闠 擙樲橚 塓塕 慔 笢笣 壾 婸媥媕 奫嫮嫳, 愄揎揇 趡趛踠 瑽 秎穾, 腤萰 蛃袚觙 玝甿虮 濆澓澋 魦 綧 瘱瘵瘲 擙樲橚 瞵瞷矰 璈皞, 腠腶舝 翣聜蒢 魵 潧潣, 慖摲摓 橍殧澞 蟷蠉蟼 摮 嗢嗂 誙賗跿 磏磑禠 蝩覤 穊 鷕黰戄 鼀齕櫌 殔湝 緦, 緁 瘱瘵瘲 鸃鼞欘 窞綆腤 嗼嗹 輷邆 壿 櫱瀯灂 鶭黮齥 鏙闛颾, 眊砎粁 硻禂稢 薢蟌 鋈, 榎榯槄 墂嫫嵾 毄滱漮 豥 髟偛 + +掭掝 暲 瞵瞷矰 鬄鵊鵙 鍎鞚韕, 齞齝囃 脬舑莕 蔍 嫳嫬 絼綒 縸縩薋 毊灚襳 珝砯砨 嵧 裌覅詵 崸嵀惉 慛 碞碠 蒮 橁橖澭 摨敹暯 罫蓱蒆 嵥嵧 蟷蠉 滆 櫱瀯灂 鶟儹巏 瘑睯碫 + +滈 簎艜薤 廑憀慡 鑴鱱爧 屼汆, 歅 彔抳 鏾鐇闠 桏毢涒 垽娭屔 磝磢磭 袟袘觕 鍌鍗鍷 鋈 氠洷, 棳棔 雈靮傿 臡虈觿 氃濈瀄 槄 橀槶澉 麷劻穋 嘽 簅縭, 狑玝 垥娀庣 僤凘墈 岯岪弨 摲, 馺骱魡 抩枎殀 迗俀侹 蓪 錛鍆 蔰 暯樧 璸瓁穟 瘑睯碫 濍燂犝, 犵艿邔 獧瞝瞣 馻噈嫶 蝢褗 僣, 嬨嶵 壿 蠝襭譸 痑祣筇 觛詏貁 蜙 珶珸珿 濷瓂癚 箑箖 嗼嗹墋 峷敊浭 阰刲 鄜, 柦柋牬 寁崏庲 礯籔羻 鋍鞎 鉾 跠跬 蜸 勯噚嶢 礌簨繖 醳鏻鐆 + +蟷蠉蟼 熩熝犚 摓 髽鮛 顤鰩鷎 駍駔鳿 鸃鼞欘 褅 牬玾 殍涾烰 誽賚賧 鴸鼢曘 搋朠 殟 蟼襛 溔 嶵嶯幯 蒘蝆蜪, 蟣襋 溿煔煃 銇韎餀 蹸蹪鏂 摮 踸躽 踣 廦廥彋 鼀齕櫌, 靾鞂 虥諰諨 婸媥媕 毄滱漮 魆 蒛 裧頖 鍆錌雔 枅杺枙 堔埧娾, 蓂蓌蓖 噾噿嚁 洷炟砏 砎粁 鋱, 嬼懫 杍肜阰 麷劻穋 蔊蓴蔖 豥 + +暕 忀瀸蘌 褣諝趥 髽鮛 滍 噾噿 顤鰩鷎 逯郹酟 樏殣氀 煻獌 蚔趵郚 枲柊氠 鄃鈌鈅 暕, 禖穊稯 鄨鎷闒 鏾鐇闠 蒝蒧 誙 賌輈鄍 鶊鵱鶆 毊灚襳 珋疧 滘 瀗犡礝 簻臗藱 駔鳿 磑, 墐 圩芰敔 婂崥崣 溹溦滜 鍗鍷 diff --git a/src/vendor/html5ever/data/bench/lipsum.html b/src/vendor/html5ever/data/bench/lipsum.html new file mode 100644 index 0000000000..27dc14aff9 --- /dev/null +++ b/src/vendor/html5ever/data/bench/lipsum.html @@ -0,0 +1,40 @@ +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Integer eu arcu varius, fringilla nisi non, pulvinar lorem. Nam et sollicitudin nisi, eget tempus sapien. Suspendisse ac libero velit. Proin semper lacinia posuere. Morbi sollicitudin lacinia urna, eget aliquet risus lobortis sit amet. Fusce rhoncus sodales mauris, a rutrum erat elementum id. Integer nec sapien sit amet nisl convallis vehicula eu eu augue. Etiam nec elit ac nibh lacinia porta. Integer dapibus feugiat magna, eget varius ante vestibulum vel. Vestibulum vitae felis quis est tristique varius quis eget libero. Nullam tincidunt magna eros, nec luctus ante pretium at. Aenean laoreet justo vitae risus fringilla convallis. In malesuada scelerisque lorem, sed luctus tortor varius at. Morbi odio ligula, commodo eu sodales vitae, bibendum eget leo. In odio est, laoreet sit amet eleifend at, placerat in elit. + +Nullam ac viverra elit. Vestibulum et massa vel justo bibendum imperdiet. Donec elementum vitae nibh sit amet pellentesque. 
Ut id fringilla sem, in tincidunt quam. In a dui dignissim, gravida magna in, porta ante. Integer adipiscing porta risus. Nulla facilisi. Cras erat leo, tempor a ligula in, posuere ullamcorper nulla. Maecenas id auctor elit, imperdiet sagittis augue. Curabitur consectetur suscipit lorem porta sollicitudin. Etiam turpis orci, eleifend eu felis in, placerat consequat est. Sed ultrices, tellus ut volutpat venenatis, metus lectus malesuada diam, id ornare risus lectus sed massa. Vivamus mauris diam, lobortis ut interdum eget, porta a elit. Suspendisse potenti. + +Donec tincidunt nisi sed mollis feugiat. Mauris ultricies risus non eros feugiat tempor. In aliquam ut nunc id tempor. Curabitur vel elit dolor. Mauris ullamcorper tortor ac nisl feugiat, quis gravida nisl ullamcorper. Pellentesque a ligula quis erat rutrum sollicitudin in a metus. Aliquam ligula massa, cursus in libero a, blandit feugiat tortor. In ac auctor lorem. Ut faucibus leo nec egestas tristique. + +Nulla adipiscing consectetur odio, a iaculis eros aliquam at. Nullam dapibus ac ante et convallis. Phasellus tempor arcu velit. Donec adipiscing neque eu molestie mattis. Vestibulum id elit fringilla, ultrices orci eu, rhoncus purus. Mauris ornare nisi massa, et luctus tortor tincidunt vel. Maecenas eu ultrices enim, et varius est. Integer ipsum nunc, suscipit eu dapibus ac, ornare vitae sapien. Vestibulum posuere, nulla sed dictum tempus, magna metus commodo turpis, a aliquet orci tellus eu lectus. Mauris nulla magna, malesuada vitae iaculis ut, facilisis varius sem. In tristique sapien urna, et tristique dolor lacinia non. Suspendisse eu tincidunt eros. Pellentesque dignissim elit vitae purus auctor, non malesuada dolor scelerisque. + +Cras commodo tortor at risus ornare euismod a et risus. Sed rutrum, justo vel mollis condimentum, mi elit consectetur mi, non ultricies quam orci mollis sapien. Donec tincidunt, lacus molestie porttitor elementum, enim ligula hendrerit lacus, quis porttitor magna velit sed nisi. Quisque pretium eros id sem posuere consequat id sit amet nunc. Fusce pulvinar commodo ipsum, quis congue tellus faucibus eu. Sed bibendum dolor vitae ante porttitor pretium. Integer id malesuada eros, sed tristique metus. Nunc vitae turpis eu risus sodales vestibulum quis ut magna. In eget metus elit. Donec gravida libero risus, eget tempus erat varius eu. Vestibulum id dignissim sapien. Fusce pretium posuere lacus. Aliquam ac arcu sollicitudin, lacinia tellus vitae, pellentesque tortor. Mauris viverra velit ac lacus egestas sagittis. Duis auctor interdum tincidunt. Aenean eu ullamcorper sapien, sit amet sollicitudin magna. + +Nam vel lorem a quam sollicitudin fringilla sit amet quis nibh. Quisque commodo molestie augue. Vivamus ut erat aliquet, gravida ante at, suscipit arcu. Fusce nulla massa, lobortis vel dictum non, vehicula ac lorem. Etiam blandit sodales urna, at aliquet libero dapibus a. Cras odio mauris, porta at enim vitae, aliquam tincidunt libero. Praesent at tortor eu eros cursus consequat vel non elit. Mauris risus urna, sagittis eget turpis eu, malesuada semper nisl. Nunc posuere placerat ligula, in tristique urna pharetra et. Duis consectetur mauris nulla. Etiam auctor tincidunt molestie. Fusce eu faucibus diam, nec fermentum felis. Curabitur non lacinia quam, non luctus neque. Morbi sed ultrices diam. + +Fusce accumsan nisl sed nibh fringilla euismod. In ut arcu cursus erat imperdiet porttitor. Pellentesque tempus, nisi quis viverra convallis, eros sem dapibus magna, ut aliquet quam urna vitae dolor. 
Aenean id tortor turpis. Etiam lacinia arcu lorem, in consectetur arcu placerat sed. Duis non est ornare, dictum mi sit amet, cursus nunc. Suspendisse at venenatis massa. Etiam eget lorem diam. Donec tristique sapien at scelerisque porta. Aenean ornare ligula sed nibh gravida, vel commodo erat ultrices. Donec id enim purus. Vivamus malesuada tristique sapien id tempus. Morbi nec nunc dolor. + +Aliquam molestie turpis cursus blandit blandit. Integer imperdiet ullamcorper arcu, a fermentum nisi. Cras hendrerit quam id mollis elementum. Etiam ut erat ac leo posuere aliquet eget non tortor. Nam vel velit sed dui tincidunt gravida eget eget risus. Suspendisse adipiscing sed nulla vel molestie. Aliquam suscipit, sem sed volutpat sagittis, magna enim feugiat erat, pharetra feugiat magna neque a ante. Duis at metus eget leo congue molestie. Vivamus id massa ornare, rutrum ante nec, ullamcorper lacus. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Vestibulum lobortis arcu eu arcu hendrerit convallis. Integer mollis velit at ante consequat, eu pharetra erat venenatis. Integer tincidunt sit amet massa vel hendrerit. Morbi malesuada facilisis augue sed congue. Phasellus porttitor vel mi eu imperdiet. Aenean tincidunt, massa et tristique mollis, nisl metus vulputate est, quis sollicitudin metus ipsum vel felis. + +Suspendisse nec feugiat dui. Proin nec lorem semper, dignissim leo et, suscipit turpis. In posuere sem ut blandit scelerisque. Fusce vel ultricies augue, adipiscing pretium lacus. Mauris ac dui non odio convallis pellentesque. Curabitur posuere nec odio ut sodales. Morbi varius risus lacinia, convallis mauris in, tristique turpis. + +Vivamus hendrerit justo augue, et molestie ligula aliquam ac. Nunc nec vehicula felis. Donec quam lacus, commodo sollicitudin aliquet eu, aliquam ut leo. Donec vulputate arcu urna, in molestie orci faucibus non. Praesent ut ullamcorper ante. Quisque sollicitudin libero in arcu gravida, quis scelerisque tortor volutpat. Nulla ornare mi ac odio sagittis rutrum. Sed quis sagittis felis. Praesent bibendum orci sed risus elementum, malesuada posuere massa condimentum. Sed velit nunc, pulvinar eu feugiat at, ultrices eu odio. Mauris lacinia ut odio eget ornare. Nullam commodo mollis lorem, ac vehicula justo tristique a. + +Morbi est ipsum, egestas a urna sed, aliquet tempus ipsum. In eget fermentum libero. Nullam a sodales dui. Nam imperdiet condimentum luctus. Morbi bibendum at nulla sed aliquam. Quisque nibh nibh, sollicitudin non ullamcorper commodo, viverra non metus. Suspendisse eleifend turpis massa. Cras tortor metus, rutrum sit amet tellus a, sodales suscipit eros. Sed in vulputate ligula. Integer posuere velit sed nisl tristique suscipit. Quisque bibendum eleifend enim in sollicitudin. Phasellus tincidunt orci pretium, molestie felis eu, sodales metus. + +Vestibulum consectetur orci ut blandit aliquet. Sed posuere cursus lacus vestibulum posuere. Phasellus ut risus sem. Vivamus et purus non felis pellentesque lacinia. Phasellus aliquam, diam eget vestibulum lobortis, purus tortor porttitor eros, vitae auctor lorem velit a turpis. Integer eu metus vel nisi porta lobortis sollicitudin eget arcu. Maecenas ac blandit dolor. In et sapien ornare, dignissim nulla quis, tempor odio. + +Ut nec quam ligula. Ut euismod, nisi nec iaculis faucibus, nisi arcu dignissim neque, a fringilla dolor tellus ut arcu. Curabitur iaculis rhoncus orci sed fermentum. Cras augue elit, eleifend sodales pellentesque ac, varius bibendum nulla. 
Etiam id diam non purus porta lobortis. Cras fringilla metus in ipsum laoreet placerat. Integer vel quam nec libero varius mattis in non nibh. + +Pellentesque adipiscing feugiat neque, vitae imperdiet dui. Duis pharetra elit a dictum laoreet. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Nulla vulputate malesuada nisi, vel egestas nulla mollis ut. Nunc faucibus pharetra leo ac ultricies. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus in odio a magna convallis molestie ut at mauris. Morbi bibendum id dui id imperdiet. Curabitur volutpat et erat quis venenatis. Integer tincidunt et felis sed rutrum. Donec vitae porttitor enim. Sed nisi nunc, auctor ac ullamcorper quis, eleifend id metus. + +Morbi felis est, tincidunt at eros at, interdum tempor tortor. Nam et semper metus. Vivamus lacinia pulvinar magna, a lacinia ligula condimentum vitae. Donec vitae ullamcorper diam. Aenean auctor mollis tincidunt. Mauris hendrerit eros quis nulla posuere, non mattis tellus venenatis. Fusce et ligula nec arcu consequat pulvinar. Maecenas sagittis odio justo, at ultrices velit aliquet quis. In hac habitasse platea dictumst. Suspendisse viverra nunc vitae lectus bibendum, vel pretium arcu pretium. Curabitur iaculis condimentum magna ac rutrum. Aenean placerat massa nunc, id vehicula magna vulputate eget. Integer dignissim nunc in enim bibendum consequat vitae id leo. Mauris quis aliquam quam. Suspendisse vel fringilla purus. Mauris sodales dui vitae lacus pellentesque tincidunt a eget nunc. + +Nullam imperdiet vestibulum magna nec dictum. Vestibulum scelerisque vestibulum congue. Phasellus fermentum pulvinar elit, eget fringilla arcu vestibulum sed. Mauris pretium nulla in consectetur cursus. Cras malesuada est vulputate hendrerit bibendum. Aenean a tristique diam, ac convallis ipsum. Nunc ac justo ut ante tristique pulvinar. Donec ornare leo sed iaculis rutrum. Integer tincidunt vestibulum massa scelerisque accumsan. Maecenas malesuada, orci at tincidunt faucibus, ipsum velit condimentum odio, vitae cursus risus justo vel orci. Interdum et malesuada fames ac ante ipsum primis in faucibus. Vivamus eu tincidunt leo. Nam a faucibus ipsum, in convallis ligula. Fusce urna lorem, iaculis ut pharetra a, laoreet a mauris. Maecenas molestie justo enim, vitae tincidunt nulla dictum quis. + +Ut ac purus ut velit feugiat tincidunt nec sit amet lorem. Mauris nulla sapien, rhoncus a condimentum et, tincidunt ut enim. Nullam eu rhoncus ante. Proin eget erat est. Vivamus suscipit fringilla metus, ut scelerisque urna. Vivamus id porta nibh, ac tincidunt nisl. Vivamus commodo tincidunt turpis a molestie. Phasellus nec interdum enim. Cras accumsan tristique massa. + +Cras vitae blandit dolor. Sed purus sem, pharetra sed orci eu, fermentum porttitor magna. Morbi dictum gravida sodales. Pellentesque varius non quam in ullamcorper. Sed in mauris sit amet sapien tempus gravida. Aliquam suscipit nulla a risus ullamcorper, et pharetra leo pharetra. Pellentesque neque lectus, molestie et eros id, consequat sagittis arcu. Nullam suscipit ipsum id lacus tincidunt sollicitudin. Fusce eget leo non massa tempor scelerisque ut a enim. Vestibulum a elementum ligula. Aliquam vehicula semper nibh nec imperdiet. Interdum et malesuada fames ac ante ipsum primis in faucibus. Etiam pretium ante eget lectus rutrum auctor. + +Sed pharetra quam metus. Aenean ac rutrum arcu. Donec sit amet pharetra nulla, vitae porttitor eros. 
Nullam accumsan cursus dolor, ut sodales magna tincidunt quis. Quisque egestas pellentesque velit id fringilla. Duis vel nisi libero. Vivamus ultrices ligula vel tempor lacinia. Cras dictum ut nunc vel suscipit. Duis convallis tortor varius consectetur tempor. Maecenas sed pharetra quam. Nunc malesuada risus justo, et vehicula quam placerat at. Vestibulum non orci eu felis viverra convallis. + +Nulla accumsan ultrices ligula, id commodo odio interdum sed. Fusce sit amet varius tortor. Integer non mattis eros. Curabitur vulputate massa non ante lacinia sodales. Aenean a feugiat ligula. Fusce ultricies molestie lectus auctor dignissim. Duis eu lorem feugiat, varius quam vel, volutpat magna. Pellentesque nec nisl ut lorem interdum condimentum scelerisque eu purus. Vestibulum porttitor elementum lectus quis lobortis. Vestibulum non sem ultricies, elementum risus non, aliquet ipsum. Phasellus pellentesque lacinia purus et tristique. Aenean lacinia, mi vel rutrum dapibus, nibh lacus hendrerit velit, ac faucibus massa erat sodales dui. Etiam in enim varius, auctor risus vel, blandit quam. + diff --git a/src/vendor/html5ever/data/bench/medium-fragment.html b/src/vendor/html5ever/data/bench/medium-fragment.html new file mode 100644 index 0000000000..570bef2ffe --- /dev/null +++ b/src/vendor/html5ever/data/bench/medium-fragment.html @@ -0,0 +1,24 @@ +

History[edit]

+

By early 1992 the search was on for a good byte-stream encoding of multi-byte character sets. The draft ISO 10646 standard contained a non-required annex called UTF-1 + that provided a byte-stream encoding of its 32-bit code points. This +encoding was not satisfactory on performance grounds, but did introduce +the notion that bytes in the range of 0–127 continue representing the +ASCII characters in UTF, thereby providing backward compatibility with +ASCII.

+

In July 1992, the X/Open committee XoJIG was looking for a better encoding. Dave Prosser of Unix System Laboratories + submitted a proposal for one that had faster implementation +characteristics and introduced the improvement that 7-bit ASCII +characters would only represent themselves; all multibyte +sequences would include only bytes where the high bit was set. This +original proposal, FSS-UTF (File System Safe UCS Transformation Format), + was similar in concept to UTF-8, but lacked the crucial property of self-synchronization.[7][8]

+

In August 1992, this proposal was circulated by an IBM X/Open representative to interested parties. Ken Thompson of the Plan 9 operating system group at Bell Labs + then made a small but crucial modification to the encoding, making it +very slightly less bit-efficient than the previous proposal but allowing + it to be self-synchronizing, + meaning that it was no longer necessary to read from the beginning of +the string to find code point boundaries. Thompson's design was outlined + on September 2, 1992, on a placemat in a New Jersey diner with Rob Pike. In the following days, Pike and Thompson implemented it and updated Plan 9 to use it throughout, and then communicated their success back to X/Open.[7]

+

UTF-8 was first officially presented at the USENIX conference in San Diego, from January 25 to 29, 1993.

+

Google reported that in 2008 UTF-8 (misleadingly labelled "Unicode") became the most common encoding for HTML files.[9][10]

+

Description[edit]

diff --git a/src/vendor/html5ever/data/bench/small-fragment.html b/src/vendor/html5ever/data/bench/small-fragment.html new file mode 100644 index 0000000000..a0b9643e5c --- /dev/null +++ b/src/vendor/html5ever/data/bench/small-fragment.html @@ -0,0 +1,7 @@ +

In July 1992, the X/Open committee XoJIG was looking for a better encoding. Dave Prosser of Unix System Laboratories + submitted a proposal for one that had faster implementation +characteristics and introduced the improvement that 7-bit ASCII +characters would only represent themselves; all multibyte +sequences would include only bytes where the high bit was set. This +original proposal, FSS-UTF (File System Safe UCS Transformation Format), + was similar in concept to UTF-8, but lacked the crucial property of self-synchronization. diff --git a/src/vendor/html5ever/data/bench/strong.html b/src/vendor/html5ever/data/bench/strong.html new file mode 100644 index 0000000000..0ef665e5d7 --- /dev/null +++ b/src/vendor/html5ever/data/bench/strong.html @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/src/vendor/html5ever/data/bench/tiny-fragment.html b/src/vendor/html5ever/data/bench/tiny-fragment.html new file mode 100644 index 0000000000..7ce535433d --- /dev/null +++ b/src/vendor/html5ever/data/bench/tiny-fragment.html @@ -0,0 +1 @@ +

Hello, world!

diff --git a/src/vendor/html5ever/data/test/ignore b/src/vendor/html5ever/data/test/ignore new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/src/vendor/html5ever/data/test/ignore @@ -0,0 +1 @@ + diff --git a/src/vendor/html5ever/examples/capi/tokenize.c b/src/vendor/html5ever/examples/capi/tokenize.c new file mode 100644 index 0000000000..8c8cdd4649 --- /dev/null +++ b/src/vendor/html5ever/examples/capi/tokenize.c @@ -0,0 +1,74 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#include + +#include "html5ever.h" + +void put_str(const char *x) { + fputs(x, stdout); +} + +void put_buf(struct h5e_buf text) { + fwrite(text.data, text.len, 1, stdout); +} + +void do_chars(void *user, struct h5e_buf text) { + put_str("CHARS : "); + put_buf(text); + put_str("\n"); +} + +void do_start_tag(void *user, struct h5e_buf name, int self_closing, size_t num_attrs) { + put_str("TAG : <"); + put_buf(name); + if (self_closing) { + putchar('/'); + } + put_str(">\n"); +} + +void do_tag_attr(void *user, struct h5e_buf name, struct h5e_buf value) { + put_str(" ATTR: "); + put_buf(name); + put_str("=\""); + put_buf(value); + put_str("\"\n"); +} + +void do_end_tag(void *user, struct h5e_buf name) { + put_str("TAG : \n"); +} + +struct h5e_token_ops ops = { + .do_chars = do_chars, + .do_start_tag = do_start_tag, + .do_tag_attr = do_tag_attr, + .do_end_tag = do_end_tag, +}; + +struct h5e_token_sink sink = { + .ops = &ops, + .user = NULL, +}; + +int main(int argc, char *argv[]) { + if (argc < 2) { + printf("Usage: %s 'HTML fragment'\n", argv[0]); + return 1; + } + + struct h5e_tokenizer *tok = h5e_tokenizer_new(&sink); + h5e_tokenizer_feed(tok, h5e_buf_from_cstr(argv[1])); + h5e_tokenizer_end(tok); + h5e_tokenizer_free(tok); + return 0; +} diff --git a/src/vendor/html5ever/examples/html2html.rs b/src/vendor/html5ever/examples/html2html.rs new file mode 100644 index 0000000000..a3eba1bb6c --- /dev/null +++ b/src/vendor/html5ever/examples/html2html.rs @@ -0,0 +1,49 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Parse and re-serialize a HTML5 document. +//! +//! This is meant to produce the exact same output (ignoring stderr) as +//! +//! java -classpath htmlparser-1.4.jar nu.validator.htmlparser.tools.HTML2HTML +//! +//! where htmlparser-1.4.jar comes from http://about.validator.nu/htmlparser/ + +extern crate html5ever; + +use std::io::{self, Write}; +use std::default::Default; + + +use html5ever::{parse_document, serialize}; +use html5ever::driver::ParseOpts; +use html5ever::rcdom::RcDom; +use html5ever::tendril::TendrilSink; +use html5ever::tree_builder::TreeBuilderOpts; + +fn main() { + let opts = ParseOpts { + tree_builder: TreeBuilderOpts { + drop_doctype: true, + ..Default::default() + }, + ..Default::default() + }; + let stdin = io::stdin(); + let dom = parse_document(RcDom::default(), opts) + .from_utf8() + .read_from(&mut stdin.lock()) + .unwrap(); + + // The validator.nu HTML2HTML always prints a doctype at the very beginning. 
+ io::stdout().write_all(b"\n") + .ok().expect("writing DOCTYPE failed"); + serialize(&mut io::stdout(), &dom.document, Default::default()) + .ok().expect("serialization failed"); +} diff --git a/src/vendor/html5ever/examples/noop-tokenize.rs b/src/vendor/html5ever/examples/noop-tokenize.rs new file mode 100644 index 0000000000..c134f485ef --- /dev/null +++ b/src/vendor/html5ever/examples/noop-tokenize.rs @@ -0,0 +1,43 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Run a single benchmark once. For use with profiling tools. + +extern crate html5ever; + +use std::io; +use std::default::Default; + +use html5ever::tokenizer::{BufferQueue, TokenSinkResult, TokenSink, Token, Tokenizer}; +use html5ever::tendril::*; + +struct Sink(Vec); + +impl TokenSink for Sink { + type Handle = (); + + fn process_token(&mut self, token: Token, _line_number: u64) -> TokenSinkResult<()> { + // Don't use the token, but make sure we don't get + // optimized out entirely. + self.0.push(token); + TokenSinkResult::Continue + } +} + +fn main() { + let mut chunk = ByteTendril::new(); + io::stdin().read_to_tendril(&mut chunk).unwrap(); + let mut input = BufferQueue::new(); + input.push_back(chunk.try_reinterpret().unwrap()); + + let mut tok = Tokenizer::new(Sink(Vec::new()), Default::default()); + let _ = tok.feed(&mut input); + assert!(input.is_empty()); + tok.end(); +} diff --git a/src/vendor/html5ever/examples/noop-tree-builder.rs b/src/vendor/html5ever/examples/noop-tree-builder.rs new file mode 100644 index 0000000000..fffc5be61d --- /dev/null +++ b/src/vendor/html5ever/examples/noop-tree-builder.rs @@ -0,0 +1,112 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
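
As a minimal sketch of the `TokenSink` trait exercised by `noop-tokenize.rs` above (the `TagCounter` type here is purely illustrative and not part of the vendored crate or this patch), a sink can do real work in `process_token`, for example counting start tags instead of discarding tokens:

```rust
extern crate html5ever;

use std::io;

use html5ever::tokenizer::{BufferQueue, Token, TokenSink, TokenSinkResult, Tokenizer};
use html5ever::tokenizer::{StartTag, TagToken};
use html5ever::tendril::*;

// Illustrative sink: counts start tags seen by the tokenizer.
struct TagCounter {
    start_tags: usize,
}

impl TokenSink for TagCounter {
    type Handle = ();

    fn process_token(&mut self, token: Token, _line_number: u64) -> TokenSinkResult<()> {
        // Only start tags are of interest; every other token is ignored.
        if let TagToken(ref tag) = token {
            if tag.kind == StartTag {
                self.start_tags += 1;
            }
        }
        TokenSinkResult::Continue
    }
}

fn main() {
    // Same driving pattern as noop-tokenize.rs: read stdin into a tendril
    // and feed it to the tokenizer through a BufferQueue.
    let mut chunk = ByteTendril::new();
    io::stdin().read_to_tendril(&mut chunk).unwrap();
    let mut input = BufferQueue::new();
    input.push_back(chunk.try_reinterpret().unwrap());

    let mut tok = Tokenizer::new(TagCounter { start_tags: 0 }, Default::default());
    let _ = tok.feed(&mut input);
    tok.end();

    // `Tokenizer::sink` is a public field; the driver module later in this
    // patch reaches into it the same way.
    println!("start tags: {}", tok.sink.start_tags);
}
```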
+ +#[macro_use] extern crate html5ever; + +use std::io; +use std::default::Default; +use std::collections::HashMap; +use std::borrow::Cow; + +use html5ever::{Attribute, QualName, ExpandedName}; +use html5ever::parse_document; +use html5ever::tree_builder::{TreeSink, QuirksMode, NodeOrText, ElementFlags}; +use html5ever::tendril::*; + +struct Sink { + next_id: usize, + names: HashMap, +} + +impl Sink { + fn get_id(&mut self) -> usize { + let id = self.next_id; + self.next_id += 2; + id + } +} + +impl TreeSink for Sink { + type Handle = usize; + type Output = Self; + fn finish(self) -> Self { self } + + fn get_document(&mut self) -> usize { + 0 + } + + fn get_template_contents(&mut self, target: &usize) -> usize { + if let Some(expanded_name!(html "template")) = self.names.get(&target).map(|n| n.expanded()) { + target + 1 + } else { + panic!("not a template element") + } + } + + fn same_node(&self, x: &usize, y: &usize) -> bool { + x == y + } + + fn same_tree(&self, _x: &usize, _y: &usize) -> bool { + true + } + + fn elem_name(&self, target: &usize) -> ExpandedName { + self.names.get(target).expect("not an element").expanded() + } + + fn create_element(&mut self, name: QualName, _: Vec, _: ElementFlags) -> usize { + let id = self.get_id(); + self.names.insert(id, name); + id + } + + fn create_comment(&mut self, _text: StrTendril) -> usize { + self.get_id() + } + + #[allow(unused_variables)] + fn create_pi(&mut self, target: StrTendril, value: StrTendril) -> usize { + unimplemented!() + } + + fn has_parent_node(&self, _node: &usize) -> bool { + // `node` will have a parent unless a script moved it, and we're + // not running scripts. Therefore we can aways return true. + true + } + + fn append_before_sibling(&mut self, + _sibling: &usize, + _new_node: NodeOrText) { } + + fn parse_error(&mut self, _msg: Cow<'static, str>) { } + fn set_quirks_mode(&mut self, _mode: QuirksMode) { } + fn append(&mut self, _parent: &usize, _child: NodeOrText) { } + + fn append_doctype_to_document(&mut self, _: StrTendril, _: StrTendril, _: StrTendril) { } + fn add_attrs_if_missing(&mut self, target: &usize, _attrs: Vec) { + assert!(self.names.contains_key(&target), "not an element"); + } + fn remove_from_parent(&mut self, _target: &usize) { } + fn reparent_children(&mut self, _node: &usize, _new_parent: &usize) { } + fn mark_script_already_started(&mut self, _node: &usize) { } +} + +fn main() { + let sink = Sink { + next_id: 1, + names: HashMap::new(), + }; + let stdin = io::stdin(); + parse_document(sink, Default::default()) + .from_utf8() + .read_from(&mut stdin.lock()) + .unwrap(); +} diff --git a/src/vendor/html5ever/examples/print-rcdom.rs b/src/vendor/html5ever/examples/print-rcdom.rs new file mode 100644 index 0000000000..9869bac1ba --- /dev/null +++ b/src/vendor/html5ever/examples/print-rcdom.rs @@ -0,0 +1,77 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#[macro_use] extern crate html5ever; + +use std::io; +use std::iter::repeat; +use std::default::Default; +use std::string::String; + +use html5ever::parse_document; +use html5ever::rcdom::{NodeData, RcDom, Handle}; +use html5ever::tendril::TendrilSink; + +// This is not proper HTML serialization, of course. 
+ +fn walk(indent: usize, handle: Handle) { + let node = handle; + // FIXME: don't allocate + print!("{}", repeat(" ").take(indent).collect::()); + match node.data { + NodeData::Document + => println!("#Document"), + + NodeData::Doctype { ref name, ref public_id, ref system_id } + => println!("", name, public_id, system_id), + + NodeData::Text { ref contents } + => println!("#text: {}", escape_default(&contents.borrow())), + + NodeData::Comment { ref contents } + => println!("", escape_default(contents)), + + NodeData::Element { ref name, ref attrs, .. } => { + assert!(name.ns == ns!(html)); + print!("<{}", name.local); + for attr in attrs.borrow().iter() { + assert!(attr.name.ns == ns!()); + print!(" {}=\"{}\"", attr.name.local, attr.value); + } + println!(">"); + } + + NodeData::ProcessingInstruction { .. } => unreachable!() + } + + for child in node.children.borrow().iter() { + walk(indent+4, child.clone()); + } +} + +// FIXME: Copy of str::escape_default from std, which is currently unstable +pub fn escape_default(s: &str) -> String { + s.chars().flat_map(|c| c.escape_default()).collect() +} + +fn main() { + let stdin = io::stdin(); + let dom = parse_document(RcDom::default(), Default::default()) + .from_utf8() + .read_from(&mut stdin.lock()) + .unwrap(); + walk(0, dom.document); + + if !dom.errors.is_empty() { + println!("\nParse errors:"); + for err in dom.errors.into_iter() { + println!(" {}", err); + } + } +} diff --git a/src/vendor/html5ever/examples/print-tree-actions.rs b/src/vendor/html5ever/examples/print-tree-actions.rs new file mode 100644 index 0000000000..50c31ae9a0 --- /dev/null +++ b/src/vendor/html5ever/examples/print-tree-actions.rs @@ -0,0 +1,169 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
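
The `walk` routine in `print-rcdom.rs` above shows the general traversal pattern for an `RcDom`: match on `NodeData` and recurse through `children`. As a rough illustrative variation (the `collect_hrefs` helper is hypothetical, not part of the crate), the same pattern can gather attribute values instead of pretty-printing the tree:

```rust
extern crate html5ever;

use std::io;

use html5ever::parse_document;
use html5ever::rcdom::{Handle, NodeData, RcDom};
use html5ever::tendril::TendrilSink;

// Recursively collect every `href` attribute value in the tree.
fn collect_hrefs(handle: &Handle, out: &mut Vec<String>) {
    if let NodeData::Element { ref attrs, .. } = handle.data {
        for attr in attrs.borrow().iter() {
            // Attribute names are interned atoms; compare the local part as &str.
            if &*attr.name.local == "href" {
                out.push(attr.value.to_string());
            }
        }
    }
    for child in handle.children.borrow().iter() {
        collect_hrefs(child, out);
    }
}

fn main() {
    let stdin = io::stdin();
    let dom = parse_document(RcDom::default(), Default::default())
        .from_utf8()
        .read_from(&mut stdin.lock())
        .unwrap();

    let mut hrefs = Vec::new();
    collect_hrefs(&dom.document, &mut hrefs);
    for href in hrefs {
        println!("{}", href);
    }
}
```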
+ +#[macro_use] extern crate html5ever; + +use std::io; +use std::default::Default; +use std::collections::HashMap; +use std::borrow::Cow; + +use html5ever::{QualName, ExpandedName, Attribute}; +use html5ever::tree_builder::{TreeSink, QuirksMode, NodeOrText, AppendNode, AppendText, ElementFlags}; +use html5ever::parse_document; +use html5ever::tendril::*; + +struct Sink { + next_id: usize, + names: HashMap, +} + +impl Sink { + fn get_id(&mut self) -> usize { + let id = self.next_id; + self.next_id += 2; + id + } +} + +impl TreeSink for Sink { + type Handle = usize; + type Output = Self; + fn finish(self) -> Self { self } + + fn parse_error(&mut self, msg: Cow<'static, str>) { + println!("Parse error: {}", msg); + } + + fn get_document(&mut self) -> usize { + 0 + } + + fn get_template_contents(&mut self, target: &usize) -> usize { + if let Some(expanded_name!(html "template")) = self.names.get(target).map(|n| n.expanded()) { + target + 1 + } else { + panic!("not a template element") + } + } + + fn set_quirks_mode(&mut self, mode: QuirksMode) { + println!("Set quirks mode to {:?}", mode); + } + + fn same_node(&self, x: &usize, y: &usize) -> bool { + x == y + } + + fn elem_name(&self, target: &usize) -> ExpandedName { + self.names.get(target).expect("not an element").expanded() + } + + fn create_element(&mut self, name: QualName, _: Vec, _: ElementFlags) -> usize { + let id = self.get_id(); + println!("Created {:?} as {}", name, id); + self.names.insert(id, name); + id + } + + fn create_comment(&mut self, text: StrTendril) -> usize { + let id = self.get_id(); + println!("Created comment \"{}\" as {}", escape_default(&text), id); + id + } + + #[allow(unused_variables)] + fn create_pi(&mut self, target: StrTendril, value: StrTendril) -> usize { + unimplemented!() + } + + fn has_parent_node(&self, _node: &usize) -> bool { + // `node` will have a parent unless a script moved it, and we're + // not running scripts. Therefore we can aways return true + true + } + + fn append(&mut self, parent: &usize, child: NodeOrText) { + match child { + AppendNode(n) + => println!("Append node {} to {}", n, parent), + AppendText(t) + => println!("Append text to {}: \"{}\"", parent, escape_default(&t)), + } + } + + fn append_before_sibling(&mut self, + sibling: &usize, + new_node: NodeOrText) { + match new_node { + AppendNode(n) + => println!("Append node {} before {}", n, sibling), + AppendText(t) + => println!("Append text before {}: \"{}\"", sibling, escape_default(&t)), + } + } + + fn append_doctype_to_document(&mut self, + name: StrTendril, + public_id: StrTendril, + system_id: StrTendril) { + println!("Append doctype: {} {} {}", name, public_id, system_id); + } + + fn add_attrs_if_missing(&mut self, target: &usize, attrs: Vec) { + assert!(self.names.contains_key(target), "not an element"); + println!("Add missing attributes to {}:", target); + for attr in attrs.into_iter() { + println!(" {:?} = {}", attr.name, attr.value); + } + } + + fn associate_with_form(&mut self, _target: &usize, _form: &usize) { + // No form owner support. 
Since same_tree always returns + // true we cannot be sure that this associate_with_form call is + // valid + } + + fn remove_from_parent(&mut self, target: &usize) { + println!("Remove {} from parent", target); + } + + fn reparent_children(&mut self, node: &usize, new_parent: &usize) { + println!("Move children from {} to {}", node, new_parent); + } + + fn mark_script_already_started(&mut self, node: &usize) { + println!("Mark script {} as already started", node); + } + + fn set_current_line(&mut self, line_number: u64) { + println!("Set current line to {}", line_number); + } + + fn pop(&mut self, elem: &usize) { + println!("Popped element {}", elem); + } +} + +// FIXME: Copy of str::escape_default from std, which is currently unstable +pub fn escape_default(s: &str) -> String { + s.chars().flat_map(|c| c.escape_default()).collect() +} + +fn main() { + let sink = Sink { + next_id: 1, + names: HashMap::new(), + }; + let stdin = io::stdin(); + parse_document(sink, Default::default()) + .from_utf8() + .read_from(&mut stdin.lock()) + .unwrap(); +} diff --git a/src/vendor/html5ever/examples/tokenize.rs b/src/vendor/html5ever/examples/tokenize.rs new file mode 100644 index 0000000000..13c039d5d7 --- /dev/null +++ b/src/vendor/html5ever/examples/tokenize.rs @@ -0,0 +1,98 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +extern crate html5ever; + +use std::io; +use std::default::Default; + +use html5ever::tokenizer::{TokenSink, Tokenizer, Token, TokenizerOpts, ParseError, TokenSinkResult}; +use html5ever::tokenizer::{CharacterTokens, NullCharacterToken, TagToken, StartTag, EndTag}; +use html5ever::tokenizer::BufferQueue; +use html5ever::tendril::*; + +#[derive(Copy, Clone)] +struct TokenPrinter { + in_char_run: bool, +} + +impl TokenPrinter { + fn is_char(&mut self, is_char: bool) { + match (self.in_char_run, is_char) { + (false, true ) => print!("CHAR : \""), + (true, false) => println!("\""), + _ => (), + } + self.in_char_run = is_char; + } + + fn do_char(&mut self, c: char) { + self.is_char(true); + print!("{}", c.escape_default().collect::()); + } +} + +impl TokenSink for TokenPrinter { + type Handle = (); + + fn process_token(&mut self, token: Token, _line_number: u64) -> TokenSinkResult<()> { + match token { + CharacterTokens(b) => { + for c in b.chars() { + self.do_char(c); + } + } + NullCharacterToken => self.do_char('\0'), + TagToken(tag) => { + self.is_char(false); + // This is not proper HTML serialization, of course. 
+ match tag.kind { + StartTag => print!("TAG : <\x1b[32m{}\x1b[0m", tag.name), + EndTag => print!("TAG : <\x1b[31m/{}\x1b[0m", tag.name), + } + for attr in tag.attrs.iter() { + print!(" \x1b[36m{}\x1b[0m='\x1b[34m{}\x1b[0m'", + attr.name.local, attr.value); + } + if tag.self_closing { + print!(" \x1b[31m/\x1b[0m"); + } + println!(">"); + } + ParseError(err) => { + self.is_char(false); + println!("ERROR: {}", err); + } + _ => { + self.is_char(false); + println!("OTHER: {:?}", token); + } + } + TokenSinkResult::Continue + } +} + +fn main() { + let mut sink = TokenPrinter { + in_char_run: false, + }; + let mut chunk = ByteTendril::new(); + io::stdin().read_to_tendril(&mut chunk).unwrap(); + let mut input = BufferQueue::new(); + input.push_back(chunk.try_reinterpret().unwrap()); + + let mut tok = Tokenizer::new(sink, TokenizerOpts { + profile: true, + .. Default::default() + }); + let _ = tok.feed(&mut input); + assert!(input.is_empty()); + tok.end(); + sink.is_char(false); +} diff --git a/src/vendor/html5ever/macros/match_token.rs b/src/vendor/html5ever/macros/match_token.rs new file mode 100644 index 0000000000..4f02e80ee8 --- /dev/null +++ b/src/vendor/html5ever/macros/match_token.rs @@ -0,0 +1,482 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/*! + +Implements the `match_token!()` macro for use by the HTML tree builder +in `src/tree_builder/rules.rs`. + + +## Example + +```rust +match_token!(token { + CommentToken(text) => 1, + + tag @ => 2, + + => 3, + +
=> else, + + tag @ => 4, + + token => 5, +}) +``` + + +## Syntax + +Because of the simplistic parser, the macro invocation must +start with exactly `match_token!(token {` (with whitespace as specified) +and end with exactly `})`. + +The left-hand side of each match arm is an optional `name @` binding, followed by + + - an ordinary Rust pattern that starts with an identifier or an underscore, or + + - a sequence of HTML tag names as identifiers, each inside "<...>" or "" + to match an open or close tag respectively, or + + - a "wildcard tag" "<_>" or "" to match all open tags or all close tags + respectively. + +The right-hand side is either an expression or the keyword `else`. + +Note that this syntax does not support guards or pattern alternation like +`Foo | Bar`. This is not a fundamental limitation; it's done for implementation +simplicity. + + +## Semantics + +Ordinary Rust patterns match as usual. If present, the `name @` binding has +the usual meaning. + +A sequence of named tags matches any of those tags. A single sequence can +contain both open and close tags. If present, the `name @` binding binds (by +move) the `Tag` struct, not the outer `Token`. That is, a match arm like + +```rust +tag @ => ... +``` + +expands to something like + +```rust +TagToken(tag @ Tag { name: local_name!("html"), kind: StartTag }) +| TagToken(tag @ Tag { name: local_name!("head"), kind: StartTag }) => ... +``` + +A wildcard tag matches any tag of the appropriate kind, *unless* it was +previously matched with an `else` right-hand side (more on this below). + +The expansion of this macro reorders code somewhat, to satisfy various +restrictions arising from moves. However it provides the semantics of in-order +matching, by enforcing the following restrictions on its input: + + - The last pattern must be a variable or the wildcard "_". In other words + it must match everything. + + - Otherwise, ordinary Rust patterns and specific-tag patterns cannot appear + after wildcard tag patterns. + + - No tag name may appear more than once. + + - A wildcard tag pattern may not occur in the same arm as any other tag. + "<_> => ..." and "<_> => ..." are both forbidden. + + - The right-hand side "else" may only appear with specific-tag patterns. + It means that these specific tags should be handled by the last, + catch-all case arm, rather than by any wildcard tag arm. This situation + is common in the HTML5 syntax. +*/ + +use quote::{ToTokens, Tokens}; +use std::collections::HashSet; +use std::fs::File; +use std::io::{Read, Write}; +use std::path::Path; +use std::slice; +use syn; + +pub fn expand_match_tokens(from: &Path, to: &Path) { + let mut source = String::new(); + File::open(from).unwrap().read_to_string(&mut source).unwrap(); + let tts = syn::parse_token_trees(&source).expect("Parsing rules.rs module"); + let mut tokens = Tokens::new(); + tokens.append_all(expand_tts(&tts)); + let code = tokens.to_string().replace("{ ", "{\n").replace(" }", "\n}"); + File::create(to).unwrap().write_all(code.as_bytes()).unwrap(); +} + +fn expand_tts(tts: &[syn::TokenTree]) -> Vec { + use syn::*; + + let mut expanded = Vec::new(); + let mut tts = tts.iter(); + while let Some(tt) = tts.next() { + match *tt { + TokenTree::Token(Token::Ident(ref ident)) if ident == "match_token" => { + let start = tts.clone(); + if let Some(&TokenTree::Token(Token::Not)) = tts.next() { + if let Some(&TokenTree::Delimited(Delimited { ref tts, .. 
})) = tts.next() { + let (to_be_matched, arms) = parse_match_token_macro(tts); + let tokens = expand_match_token_macro(to_be_matched, arms); + let tts = syn::parse_token_trees(&tokens.to_string()) + .expect("parsing macro expansion as token trees"); + expanded.extend(tts); + continue + } + } + tts = start + } + TokenTree::Token(_) => { + expanded.push(tt.clone()) + } + TokenTree::Delimited(Delimited { delim, ref tts }) => { + expanded.push(TokenTree::Delimited(Delimited { + delim: delim, + tts: expand_tts(tts), + })) + } + } + } + expanded +} + +fn parse_match_token_macro(tts: &[syn::TokenTree]) -> (&syn::Ident, Vec) { + use syn::TokenTree::Delimited; + use syn::DelimToken::Brace; + + let mut tts = tts.iter(); + let ident = if let Some(&syn::TokenTree::Token(syn::Token::Ident(ref ident))) = tts.next() { + ident + } else { + panic!("expected ident") + }; + + let block = if let Some(&Delimited(syn::Delimited { delim: Brace, ref tts })) = tts.next() { + tts + } else { + panic!("expected one {} block") + }; + assert_eq!(tts.len(), 0); + + let mut tts = block.iter(); + let mut arms = Vec::new(); + while tts.len() > 0 { + arms.push(parse_arm(&mut tts)) + } + (ident, arms) +} + +#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)] +enum TagKind { + StartTag, + EndTag, +} + +/// A single tag, as may appear in an LHS. +/// +/// `name` is `None` for wildcards. +#[derive(PartialEq, Eq, Hash, Clone, Debug)] +struct Tag { + kind: TagKind, + name: Option, +} + +/// Left-hand side of a pattern-match arm. +#[derive(Debug)] +enum LHS { + Pattern(Tokens), + Tags(Vec), +} + +/// Right-hand side of a pattern-match arm. +#[derive(Debug)] +enum RHS { + Else, + Expression(Tokens), +} + +/// A whole arm, including optional outer `name @` binding. +#[derive(Debug)] +struct Arm { + binding: Option, + lhs: LHS, + rhs: RHS, +} + +fn parse_arm(tts: &mut slice::Iter) -> Arm { + Arm { + binding: parse_binding(tts), + lhs: parse_lhs(tts), + rhs: parse_rhs(tts), + } +} + +fn parse_binding(tts: &mut slice::Iter) -> Option { + let start = tts.clone(); + if let (Some(&syn::TokenTree::Token(syn::Token::Ident(ref ident))), + Some(&syn::TokenTree::Token(syn::Token::At))) = (tts.next(), tts.next()) { + Some(ident.clone()) + } else { + *tts = start; + None + } +} + +fn consume_if_present(tts: &mut slice::Iter, expected: syn::Token) -> bool { + if let Some(&syn::TokenTree::Token(ref first)) = tts.as_slice().first() { + if *first == expected { + tts.next(); + return true + } + } + false +} + +fn parse_lhs(tts: &mut slice::Iter) -> LHS { + if consume_if_present(tts, syn::Token::Lt) { + let mut tags = Vec::new(); + loop { + tags.push(Tag { + kind: if consume_if_present(tts, syn::Token::BinOp(syn::BinOpToken::Slash)) { + TagKind::EndTag + } else { + TagKind::StartTag + }, + name: if consume_if_present(tts, syn::Token::Underscore) { + None + } else { + if let Some(&syn::TokenTree::Token(syn::Token::Ident(ref ident))) = tts.next() { + Some(ident.clone()) + } else { + panic!("expected identifier (tag name)") + } + } + }); + assert!(consume_if_present(tts, syn::Token::Gt), "expected '>' closing a tag pattern"); + if !consume_if_present(tts, syn::Token::Lt) { + break + } + } + assert!(consume_if_present(tts, syn::Token::FatArrow)); + LHS::Tags(tags) + } else { + let mut pattern = Tokens::new(); + for tt in tts { + if let &syn::TokenTree::Token(syn::Token::FatArrow) = tt { + return LHS::Pattern(pattern) + } + tt.to_tokens(&mut pattern) + } + panic!("did not find =>") + } +} + +fn parse_rhs(tts: &mut slice::Iter) -> RHS { + use 
syn::DelimToken::Brace; + let start = tts.clone(); + let first = tts.next(); + let after_first = tts.clone(); + let second = tts.next(); + if let (Some(&syn::TokenTree::Token(syn::Token::Ident(ref ident))), + Some(&syn::TokenTree::Token(syn::Token::Comma))) = (first, second) { + if ident == "else" { + return RHS::Else + } + } + let mut expression = Tokens::new(); + if let Some(&syn::TokenTree::Delimited(syn::Delimited { delim: Brace, .. })) = first { + first.to_tokens(&mut expression); + *tts = after_first; + consume_if_present(tts, syn::Token::Comma); + } else { + *tts = start; + for tt in tts { + tt.to_tokens(&mut expression); + if let &syn::TokenTree::Token(syn::Token::Comma) = tt { + break + } + } + } + RHS::Expression(expression) +} + +fn expand_match_token_macro(to_be_matched: &syn::Ident, mut arms: Vec) -> Tokens { + // Handle the last arm specially at the end. + let last_arm = arms.pop().unwrap(); + + // Tags we've seen, used for detecting duplicates. + let mut seen_tags: HashSet = HashSet::new(); + + // Case arms for wildcard matching. We collect these and + // emit them later. + let mut wildcards_patterns: Vec = Vec::new(); + let mut wildcards_expressions: Vec = Vec::new(); + + // Tags excluded (by an 'else' RHS) from wildcard matching. + let mut wild_excluded_patterns: Vec = Vec::new(); + + let mut arms_code = Vec::new(); + + for Arm { binding, lhs, rhs } in arms { + // Build Rust syntax for the `name @` binding, if any. + let binding = match binding { + Some(ident) => quote!(#ident @), + None => quote!(), + }; + + match (lhs, rhs) { + (LHS::Pattern(_), RHS::Else) => panic!("'else' may not appear with an ordinary pattern"), + + // ordinary pattern => expression + (LHS::Pattern(pat), RHS::Expression(expr)) => { + if !wildcards_patterns.is_empty() { + panic!("ordinary patterns may not appear after wildcard tags {:?} {:?}", pat, expr); + } + arms_code.push(quote!(#binding #pat => #expr)) + } + + // ... => else + (LHS::Tags(tags), RHS::Else) => { + for tag in tags { + if !seen_tags.insert(tag.clone()) { + panic!("duplicate tag"); + } + if tag.name.is_none() { + panic!("'else' may not appear with a wildcard tag"); + } + wild_excluded_patterns.push(make_tag_pattern(&Tokens::new(), tag)); + } + } + + // <_> => expression + // ... => expression + (LHS::Tags(tags), RHS::Expression(expr)) => { + // Is this arm a tag wildcard? + // `None` if we haven't processed the first tag yet. + let mut wildcard = None; + for tag in tags { + if !seen_tags.insert(tag.clone()) { + panic!("duplicate tag"); + } + + match tag.name { + // + Some(_) => { + if !wildcards_patterns.is_empty() { + panic!("specific tags may not appear after wildcard tags"); + } + + if wildcard == Some(true) { + panic!("wildcard tags must appear alone"); + } + + if wildcard.is_some() { + // Push the delimeter `|` if it's not the first tag. + arms_code.push(quote!( | )) + } + arms_code.push(make_tag_pattern(&binding, tag)); + + wildcard = Some(false); + } + + // <_> + None => { + if wildcard.is_some() { + panic!("wildcard tags must appear alone"); + } + wildcard = Some(true); + wildcards_patterns.push(make_tag_pattern(&binding, tag)); + wildcards_expressions.push(expr.clone()); + } + } + } + + match wildcard { + None => panic!("[internal macro error] tag arm with no tags"), + Some(false) => arms_code.push(quote!( => #expr)), + Some(true) => {} // codegen for wildcards is deferred + } + } + } + } + + // Time to process the last, catch-all arm. 
We will generate something like + // + // last_arm_token => { + // let enable_wildcards = match last_arm_token { + // TagToken(Tag { kind: EndTag, name: local_name!("body"), .. }) => false, + // TagToken(Tag { kind: EndTag, name: local_name!("html"), .. }) => false, + // // ... + // _ => true, + // }; + // + // match (enable_wildcards, last_arm_token) { + // (true, TagToken(name @ Tag { kind: StartTag, .. })) + // => ..., // wildcard action for start tags + // + // (true, TagToken(name @ Tag { kind: EndTag, .. })) + // => ..., // wildcard action for end tags + // + // (_, token) => ... // using the pattern from that last arm + // } + // } + + let Arm { binding, lhs, rhs } = last_arm; + + let (last_pat, last_expr) = match (binding, lhs, rhs) { + (Some(_), _, _) => panic!("the last arm cannot have an @-binding"), + (None, LHS::Tags(_), _) => panic!("the last arm cannot have tag patterns"), + (None, _, RHS::Else) => panic!("the last arm cannot use 'else'"), + (None, LHS::Pattern(p), RHS::Expression(e)) => (p, e) + }; + + quote! { + match #to_be_matched { + #( + #arms_code + )* + last_arm_token => { + let enable_wildcards = match last_arm_token { + #( + #wild_excluded_patterns => false, + )* + _ => true, + }; + match (enable_wildcards, last_arm_token) { + #( + (true, #wildcards_patterns) => #wildcards_expressions + )* + (_, #last_pat) => #last_expr + } + } + } + } +} + +fn make_tag_pattern(binding: &Tokens, tag: Tag) -> Tokens { + let kind = match tag.kind { + TagKind::StartTag => quote!(::tokenizer::StartTag), + TagKind::EndTag => quote!(::tokenizer::EndTag), + }; + let name_field = if let Some(name) = tag.name { + let name = name.to_string(); + quote!(name: local_name!(#name),) + } else { + quote!() + }; + quote! { + ::tree_builder::types::TagToken(#binding ::tokenizer::Tag { kind: #kind, #name_field .. }) + } +} diff --git a/src/vendor/html5ever/src/driver.rs b/src/vendor/html5ever/src/driver.rs new file mode 100644 index 0000000000..20d49074b6 --- /dev/null +++ b/src/vendor/html5ever/src/driver.rs @@ -0,0 +1,134 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! High-level interface to the parser. + +use {Attribute, QualName}; +use buffer_queue::BufferQueue; +use tokenizer::{Tokenizer, TokenizerOpts, TokenizerResult}; +use tree_builder::{TreeBuilderOpts, TreeBuilder, TreeSink, create_element}; + +use std::borrow::Cow; + +use tendril; +use tendril::StrTendril; +use tendril::stream::{TendrilSink, Utf8LossyDecoder}; + +/// All-encompassing options struct for the parser. +#[derive(Clone, Default)] +pub struct ParseOpts { + /// Tokenizer options. + pub tokenizer: TokenizerOpts, + + /// Tree builder options. + pub tree_builder: TreeBuilderOpts, +} + +/// Parse an HTML document +/// +/// The returned value implements `tendril::TendrilSink` +/// so that Unicode input may be provided incrementally, +/// or all at once with the `one` method. +/// +/// If your input is bytes, use `Parser::from_utf8` or `Parser::from_bytes`. 
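
Before the definition that follows, a minimal usage sketch of the API just documented, mirroring the `from_utf8` test at the bottom of this file (`parse_bytes` is an illustrative helper, not an API of the crate):

```rust
extern crate html5ever;

use html5ever::rcdom::RcDom;
use html5ever::tendril::TendrilSink;
use html5ever::{parse_document, ParseOpts};

fn parse_bytes(input: &[u8]) -> RcDom {
    // Feed the whole input at once; `one` drives `process` + `finish` for us.
    parse_document(RcDom::default(), ParseOpts::default())
        .from_utf8()
        .one(input)
}

fn main() {
    let dom = parse_bytes(b"<!DOCTYPE html><title>Test</title>");
    // RcDom records recoverable parse errors rather than failing outright.
    println!("parse errors: {}", dom.errors.len());
}
```

Input can also be supplied incrementally through the `TendrilSink` methods instead of all at once with `one`.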
+pub fn parse_document(sink: Sink, opts: ParseOpts) -> Parser where Sink: TreeSink { + let tb = TreeBuilder::new(sink, opts.tree_builder); + let tok = Tokenizer::new(tb, opts.tokenizer); + Parser { tokenizer: tok, input_buffer: BufferQueue::new() } +} + +/// Parse an HTML fragment +/// +/// The returned value implements `tendril::TendrilSink` +/// so that Unicode input may be provided incrementally, +/// or all at once with the `one` method. +/// +/// If your input is bytes, use `Parser::from_utf8` or `Parser::from_bytes`. +pub fn parse_fragment(mut sink: Sink, opts: ParseOpts, + context_name: QualName, context_attrs: Vec) + -> Parser + where Sink: TreeSink { + let context_elem = create_element(&mut sink, context_name, context_attrs); + parse_fragment_for_element(sink, opts, context_elem, None) +} + +/// Like `parse_fragment`, but with an existing context element +/// and optionally a form element. +pub fn parse_fragment_for_element(sink: Sink, opts: ParseOpts, + context_element: Sink::Handle, + form_element: Option) + -> Parser + where Sink: TreeSink { + let tb = TreeBuilder::new_for_fragment(sink, context_element, form_element, opts.tree_builder); + let tok_opts = TokenizerOpts { + initial_state: Some(tb.tokenizer_state_for_context_elem()), + .. opts.tokenizer + }; + let tok = Tokenizer::new(tb, tok_opts); + Parser { tokenizer: tok, input_buffer: BufferQueue::new() } +} + +/// An HTML parser, +/// ready to receive Unicode input through the `tendril::TendrilSink` trait’s methods. +pub struct Parser where Sink: TreeSink { + pub tokenizer: Tokenizer>, + pub input_buffer: BufferQueue, +} + +impl TendrilSink for Parser { + fn process(&mut self, t: StrTendril) { + self.input_buffer.push_back(t); + // FIXME: Properly support somehow. + while let TokenizerResult::Script(_) = self.tokenizer.feed(&mut self.input_buffer) {} + } + + // FIXME: Is it too noisy to report every character decoding error? + fn error(&mut self, desc: Cow<'static, str>) { + self.tokenizer.sink.sink.parse_error(desc) + } + + type Output = Sink::Output; + + fn finish(mut self) -> Self::Output { + // FIXME: Properly support somehow. + while let TokenizerResult::Script(_) = self.tokenizer.feed(&mut self.input_buffer) {} + assert!(self.input_buffer.is_empty()); + self.tokenizer.end(); + self.tokenizer.sink.sink.finish() + } +} + +impl Parser { + /// Wrap this parser into a `TendrilSink` that accepts UTF-8 bytes. + /// + /// Use this when your input is bytes that are known to be in the UTF-8 encoding. + /// Decoding is lossy, like `String::from_utf8_lossy`. + pub fn from_utf8(self) -> Utf8LossyDecoder { + Utf8LossyDecoder::new(self) + } +} + +#[cfg(test)] +mod tests { + use rcdom::RcDom; + use serialize::serialize; + use tendril::TendrilSink; + use super::*; + + #[test] + fn from_utf8() { + let dom = parse_document(RcDom::default(), ParseOpts::default()) + .from_utf8() + .one("Test".as_bytes()); + let mut serialized = Vec::new(); + serialize(&mut serialized, &dom.document, Default::default()).unwrap(); + assert_eq!(String::from_utf8(serialized).unwrap().replace(" ", ""), + "<html><head><title>Test"); + } +} diff --git a/src/vendor/html5ever/src/lib.rs b/src/vendor/html5ever/src/lib.rs new file mode 100644 index 0000000000..4b8029d981 --- /dev/null +++ b/src/vendor/html5ever/src/lib.rs @@ -0,0 +1,35 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![crate_name="html5ever"] +#![crate_type="dylib"] + +#![cfg_attr(test, deny(warnings))] +#![allow(unused_parens)] + +#[macro_use] extern crate log; +#[macro_use] extern crate markup5ever; +#[macro_use] extern crate mac; + +pub use markup5ever::*; +pub use driver::{ParseOpts, parse_document, parse_fragment, Parser}; + +pub use serialize::serialize; + +#[macro_use] +mod macros; + +mod util { + pub mod str; +} + +pub mod serialize; +pub mod tokenizer; +pub mod tree_builder; +pub mod driver; diff --git a/src/vendor/html5ever/src/macros.rs b/src/vendor/html5ever/src/macros.rs new file mode 100644 index 0000000000..33dc80d7f0 --- /dev/null +++ b/src/vendor/html5ever/src/macros.rs @@ -0,0 +1,33 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +macro_rules! unwrap_or_else { + ($opt:expr, $else_block:block) => { + match $opt { + None => $else_block, + Some(x) => x, + } + } +} + +macro_rules! unwrap_or_return { + ($opt:expr, $retval:expr) => { + unwrap_or_else!($opt, { return $retval }) + } +} + +macro_rules! time { + ($e:expr) => {{ + let now = ::std::time::Instant::now(); + let result = $e; + let d = now.elapsed(); + let dt = d.as_secs() * 1_000_000_000 + u64::from(d.subsec_nanos()); + (result, dt) + }} +} diff --git a/src/vendor/html5ever/src/serialize/mod.rs b/src/vendor/html5ever/src/serialize/mod.rs new file mode 100644 index 0000000000..1d69f295b5 --- /dev/null +++ b/src/vendor/html5ever/src/serialize/mod.rs @@ -0,0 +1,221 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub use markup5ever::serialize::{Serialize, Serializer, TraversalScope, AttrRef}; +use std::io::{self, Write}; +use std::default::Default; + +use {LocalName, QualName}; + +pub fn serialize(writer: Wr, node: &T, opts: SerializeOpts) -> io::Result<()> +where Wr: Write, T: Serialize { + let mut ser = HtmlSerializer::new(writer, opts); + node.serialize(&mut ser, opts.traversal_scope) +} + +#[derive(Copy, Clone)] +pub struct SerializeOpts { + /// Is scripting enabled? + pub scripting_enabled: bool, + + /// Serialize the root node? 
Default: ChildrenOnly + pub traversal_scope: TraversalScope, +} + +impl Default for SerializeOpts { + fn default() -> SerializeOpts { + SerializeOpts { + scripting_enabled: true, + traversal_scope: TraversalScope::ChildrenOnly, + } + } +} + +struct ElemInfo { + html_name: Option, + ignore_children: bool, + processed_first_child: bool, +} + +struct HtmlSerializer { + writer: Wr, + opts: SerializeOpts, + stack: Vec, +} + +fn tagname(name: &QualName) -> LocalName { + match name.ns { + ns!(html) | ns!(mathml) | ns!(svg) => (), + ref ns => { + // FIXME(#122) + warn!("node with weird namespace {:?}", ns); + } + } + + name.local.clone() +} + +impl HtmlSerializer { + fn new(writer: Wr, opts: SerializeOpts) -> Self { + HtmlSerializer { + writer: writer, + opts: opts, + stack: vec!(ElemInfo { + html_name: None, + ignore_children: false, + processed_first_child: false, + }), + } + } + + fn parent(&mut self) -> &mut ElemInfo { + self.stack.last_mut().expect("no parent ElemInfo") + } + + fn write_escaped(&mut self, text: &str, attr_mode: bool) -> io::Result<()> { + for c in text.chars() { + try!(match c { + '&' => self.writer.write_all(b"&"), + '\u{00A0}' => self.writer.write_all(b" "), + '"' if attr_mode => self.writer.write_all(b"""), + '<' if !attr_mode => self.writer.write_all(b"<"), + '>' if !attr_mode => self.writer.write_all(b">"), + c => self.writer.write_fmt(format_args!("{}", c)), + }); + } + Ok(()) + } +} + +impl Serializer for HtmlSerializer { + fn start_elem<'a, AttrIter>(&mut self, name: QualName, attrs: AttrIter) -> io::Result<()> + where AttrIter: Iterator> { + let html_name = match name.ns { + ns!(html) => Some(name.local.clone()), + _ => None, + }; + + if self.parent().ignore_children { + self.stack.push(ElemInfo { + html_name: html_name, + ignore_children: true, + processed_first_child: false, + }); + return Ok(()); + } + + try!(self.writer.write_all(b"<")); + try!(self.writer.write_all(tagname(&name).as_bytes())); + for (name, value) in attrs { + try!(self.writer.write_all(b" ")); + + match name.ns { + ns!() => (), + ns!(xml) => try!(self.writer.write_all(b"xml:")), + ns!(xmlns) => { + if name.local != local_name!("xmlns") { + try!(self.writer.write_all(b"xmlns:")); + } + } + ns!(xlink) => try!(self.writer.write_all(b"xlink:")), + ref ns => { + // FIXME(#122) + warn!("attr with weird namespace {:?}", ns); + try!(self.writer.write_all(b"unknown_namespace:")); + } + } + + try!(self.writer.write_all(name.local.as_bytes())); + try!(self.writer.write_all(b"=\"")); + try!(self.write_escaped(value, true)); + try!(self.writer.write_all(b"\"")); + } + try!(self.writer.write_all(b">")); + + let ignore_children = name.ns == ns!(html) && match name.local { + local_name!("area") | local_name!("base") | local_name!("basefont") | local_name!("bgsound") | local_name!("br") + | local_name!("col") | local_name!("embed") | local_name!("frame") | local_name!("hr") | local_name!("img") + | local_name!("input") | local_name!("keygen") | local_name!("link") + | local_name!("meta") | local_name!("param") | local_name!("source") | local_name!("track") | local_name!("wbr") + => true, + _ => false, + }; + + self.parent().processed_first_child = true; + + self.stack.push(ElemInfo { + html_name: html_name, + ignore_children: ignore_children, + processed_first_child: false, + }); + + Ok(()) + } + + fn end_elem(&mut self, name: QualName) -> io::Result<()> { + let info = self.stack.pop().expect("no ElemInfo"); + if info.ignore_children { + return Ok(()); + } + + try!(self.writer.write_all(b"") + } + + fn 
write_text(&mut self, text: &str) -> io::Result<()> { + let prepend_lf = text.starts_with("\n") && { + let parent = self.parent(); + !parent.processed_first_child && match parent.html_name { + Some(local_name!("pre")) | Some(local_name!("textarea")) | Some(local_name!("listing")) => true, + _ => false, + } + }; + + if prepend_lf { + try!(self.writer.write_all(b"\n")); + } + + let escape = match self.parent().html_name { + Some(local_name!("style")) | Some(local_name!("script")) | Some(local_name!("xmp")) + | Some(local_name!("iframe")) | Some(local_name!("noembed")) | Some(local_name!("noframes")) + | Some(local_name!("plaintext")) => false, + + Some(local_name!("noscript")) => !self.opts.scripting_enabled, + + _ => true, + }; + + if escape { + self.write_escaped(text, false) + } else { + self.writer.write_all(text.as_bytes()) + } + } + + fn write_comment(&mut self, text: &str) -> io::Result<()> { + try!(self.writer.write_all(b"") + } + + fn write_doctype(&mut self, name: &str) -> io::Result<()> { + try!(self.writer.write_all(b"") + } + + fn write_processing_instruction(&mut self, target: &str, data: &str) -> io::Result<()> { + try!(self.writer.write_all(b"") + } +} diff --git a/src/vendor/html5ever/src/tokenizer/char_ref/mod.rs b/src/vendor/html5ever/src/tokenizer/char_ref/mod.rs new file mode 100644 index 0000000000..be7fbe3a17 --- /dev/null +++ b/src/vendor/html5ever/src/tokenizer/char_ref/mod.rs @@ -0,0 +1,436 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::{Tokenizer, TokenSink}; +use buffer_queue::BufferQueue; +use data; +use tendril::StrTendril; +use util::str::{is_ascii_alnum}; + +use std::char::from_u32; +use std::borrow::Cow::Borrowed; + +pub use self::Status::*; +use self::State::*; + +//§ tokenizing-character-references +pub struct CharRef { + /// The resulting character(s) + pub chars: [char; 2], + + /// How many slots in `chars` are valid? + pub num_chars: u8, +} + +pub enum Status { + Stuck, + Progress, + Done, +} + +#[derive(Debug)] +enum State { + Begin, + Octothorpe, + Numeric(u32), // base + NumericSemicolon, + Named, + BogusName, +} + +pub struct CharRefTokenizer { + state: State, + addnl_allowed: Option, + result: Option, + + num: u32, + num_too_big: bool, + seen_digit: bool, + hex_marker: Option, + + name_buf_opt: Option, + name_match: Option<(u32, u32)>, + name_len: usize, +} + +impl CharRefTokenizer { + // NB: We assume that we have an additional allowed character iff we're + // tokenizing in an attribute value. + pub fn new(addnl_allowed: Option) -> CharRefTokenizer { + CharRefTokenizer { + state: Begin, + addnl_allowed: addnl_allowed, + result: None, + num: 0, + num_too_big: false, + seen_digit: false, + hex_marker: None, + name_buf_opt: None, + name_match: None, + name_len: 0, + } + } + + // A CharRefTokenizer can only tokenize one character reference, + // so this method consumes the tokenizer. 
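
Stepping back to the public serializer interface defined in `src/serialize/mod.rs` above: a minimal end-to-end sketch (illustrative only, not part of the patch) that parses a fragment into an `RcDom` and writes it back out with the documented `SerializeOpts` defaults:

```rust
extern crate html5ever;

use html5ever::rcdom::RcDom;
use html5ever::serialize::{serialize, SerializeOpts, TraversalScope};
use html5ever::tendril::TendrilSink;
use html5ever::{parse_document, ParseOpts};

fn main() {
    let dom: RcDom = parse_document(RcDom::default(), ParseOpts::default())
        .from_utf8()
        .one(&b"<p>Hello, world!</p>"[..]);

    // These are the documented defaults, written out for clarity:
    // serialize only the document's children, with scripting enabled.
    let opts = SerializeOpts {
        scripting_enabled: true,
        traversal_scope: TraversalScope::ChildrenOnly,
    };

    let mut out = Vec::new();
    serialize(&mut out, &dom.document, opts).expect("serialization failed");
    println!("{}", String::from_utf8_lossy(&out));
}
```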
+ pub fn get_result(self) -> CharRef { + self.result.expect("get_result called before done") + } + + fn name_buf<'t>(&'t self) -> &'t StrTendril { + self.name_buf_opt.as_ref() + .expect("name_buf missing in named character reference") + } + + fn name_buf_mut<'t>(&'t mut self) -> &'t mut StrTendril { + self.name_buf_opt.as_mut() + .expect("name_buf missing in named character reference") + } + + fn finish_none(&mut self) -> Status { + self.result = Some(CharRef { + chars: ['\0', '\0'], + num_chars: 0, + }); + Done + } + + fn finish_one(&mut self, c: char) -> Status { + self.result = Some(CharRef { + chars: [c, '\0'], + num_chars: 1, + }); + Done + } +} + +impl CharRefTokenizer { + pub fn step( + &mut self, + tokenizer: &mut Tokenizer, + input: &mut BufferQueue) + -> Status { + if self.result.is_some() { + return Done; + } + + debug!("char ref tokenizer stepping in state {:?}", self.state); + match self.state { + Begin => self.do_begin(tokenizer, input), + Octothorpe => self.do_octothorpe(tokenizer, input), + Numeric(base) => self.do_numeric(tokenizer, input, base), + NumericSemicolon => self.do_numeric_semicolon(tokenizer, input), + Named => self.do_named(tokenizer, input), + BogusName => self.do_bogus_name(tokenizer, input), + } + } + + fn do_begin( + &mut self, + tokenizer: &mut Tokenizer, + input: &mut BufferQueue) + -> Status { + match unwrap_or_return!(tokenizer.peek(input), Stuck) { + '\t' | '\n' | '\x0C' | ' ' | '<' | '&' + => self.finish_none(), + c if Some(c) == self.addnl_allowed + => self.finish_none(), + + '#' => { + tokenizer.discard_char(input); + self.state = Octothorpe; + Progress + } + + _ => { + self.state = Named; + self.name_buf_opt = Some(StrTendril::new()); + Progress + } + } + } + + fn do_octothorpe( + &mut self, + tokenizer: &mut Tokenizer, + input: &mut BufferQueue) + -> Status { + let c = unwrap_or_return!(tokenizer.peek(input), Stuck); + match c { + 'x' | 'X' => { + tokenizer.discard_char(input); + self.hex_marker = Some(c); + self.state = Numeric(16); + } + + _ => { + self.hex_marker = None; + self.state = Numeric(10); + } + } + Progress + } + + fn do_numeric( + &mut self, + tokenizer: &mut Tokenizer, + input: &mut BufferQueue, + base: u32) + -> Status { + let c = unwrap_or_return!(tokenizer.peek(input), Stuck); + match c.to_digit(base) { + Some(n) => { + tokenizer.discard_char(input); + self.num = self.num.wrapping_mul(base); + if self.num > 0x10FFFF { + // We might overflow, and the character is definitely invalid. + // We still parse digits and semicolon, but don't use the result. 
+ self.num_too_big = true; + } + self.num = self.num.wrapping_add(n); + self.seen_digit = true; + Progress + } + + None if !self.seen_digit => self.unconsume_numeric(tokenizer, input), + + None => { + self.state = NumericSemicolon; + Progress + } + } + } + + fn do_numeric_semicolon( + &mut self, + tokenizer: &mut Tokenizer, + input: &mut BufferQueue) + -> Status { + match unwrap_or_return!(tokenizer.peek(input), Stuck) { + ';' => tokenizer.discard_char(input), + _ => tokenizer.emit_error(Borrowed("Semicolon missing after numeric character reference")), + }; + self.finish_numeric(tokenizer) + } + + fn unconsume_numeric( + &mut self, + tokenizer: &mut Tokenizer, + input: &mut BufferQueue) + -> Status { + let mut unconsume = StrTendril::from_char('#'); + match self.hex_marker { + Some(c) => unconsume.push_char(c), + None => (), + } + + input.push_front(unconsume); + tokenizer.emit_error(Borrowed("Numeric character reference without digits")); + self.finish_none() + } + + fn finish_numeric(&mut self, tokenizer: &mut Tokenizer) -> Status { + fn conv(n: u32) -> char { + from_u32(n).expect("invalid char missed by error handling cases") + } + + let (c, error) = match self.num { + n if (n > 0x10FFFF) || self.num_too_big => ('\u{fffd}', true), + 0x00 | 0xD800...0xDFFF => ('\u{fffd}', true), + + 0x80...0x9F => match data::C1_REPLACEMENTS[(self.num - 0x80) as usize] { + Some(c) => (c, true), + None => (conv(self.num), true), + }, + + 0x01...0x08 | 0x0B | 0x0D...0x1F | 0x7F | 0xFDD0...0xFDEF + => (conv(self.num), true), + + n if (n & 0xFFFE) == 0xFFFE + => (conv(n), true), + + n => (conv(n), false), + }; + + if error { + let msg = format_if!(tokenizer.opts.exact_errors, + "Invalid numeric character reference", + "Invalid numeric character reference value 0x{:06X}", self.num); + tokenizer.emit_error(msg); + } + + self.finish_one(c) + } + + fn do_named( + &mut self, + tokenizer: &mut Tokenizer, + input: &mut BufferQueue) + -> Status { + let c = unwrap_or_return!(tokenizer.get_char(input), Stuck); + self.name_buf_mut().push_char(c); + match data::NAMED_ENTITIES.get(&self.name_buf()[..]) { + // We have either a full match or a prefix of one. + Some(&m) => { + if m.0 != 0 { + // We have a full match, but there might be a longer one to come. + self.name_match = Some(m); + self.name_len = self.name_buf().len(); + } + // Otherwise we just have a prefix match. + Progress + } + + // Can't continue the match. + None => self.finish_named(tokenizer, input, Some(c)), + } + } + + fn emit_name_error(&mut self, tokenizer: &mut Tokenizer) { + let msg = format_if!(tokenizer.opts.exact_errors, + "Invalid character reference", + "Invalid character reference &{}", self.name_buf()); + tokenizer.emit_error(msg); + } + + fn unconsume_name(&mut self, input: &mut BufferQueue) { + input.push_front(self.name_buf_opt.take().unwrap()); + } + + fn finish_named(&mut self, + tokenizer: &mut Tokenizer, + input: &mut BufferQueue, + end_char: Option) -> Status { + match self.name_match { + None => { + match end_char { + Some(c) if is_ascii_alnum(c) => { + // Keep looking for a semicolon, to determine whether + // we emit a parse error. + self.state = BogusName; + return Progress; + } + + // Check length because &; is not a parse error. + Some(';') if self.name_buf().len() > 1 + => self.emit_name_error(tokenizer), + + _ => (), + } + self.unconsume_name(input); + self.finish_none() + } + + Some((c1, c2)) => { + // We have a complete match, but we may have consumed + // additional characters into self.name_buf. 
Usually + // at least one, but several in cases like + // + // ¬ => match for U+00AC + // ¬i => valid prefix for ¬in + // ¬it => can't continue match + + let name_len = self.name_len; + assert!(name_len > 0); + let last_matched = self.name_buf()[name_len-1..].chars().next().unwrap(); + + // There might not be a next character after the match, if + // we had a full match and then hit EOF. + let next_after = if name_len == self.name_buf().len() { + None + } else { + Some(self.name_buf()[name_len..].chars().next().unwrap()) + }; + + // "If the character reference is being consumed as part of an + // attribute, and the last character matched is not a U+003B + // SEMICOLON character (;), and the next character is either a + // U+003D EQUALS SIGN character (=) or an alphanumeric ASCII + // character, then, for historical reasons, all the characters + // that were matched after the U+0026 AMPERSAND character (&) + // must be unconsumed, and nothing is returned. However, if + // this next character is in fact a U+003D EQUALS SIGN + // character (=), then this is a parse error" + + let unconsume_all = match (self.addnl_allowed, last_matched, next_after) { + (_, ';', _) => false, + (Some(_), _, Some('=')) => { + tokenizer.emit_error(Borrowed("Equals sign after character reference in attribute")); + true + } + (Some(_), _, Some(c)) if is_ascii_alnum(c) => true, + _ => { + tokenizer.emit_error(Borrowed("Character reference does not end with semicolon")); + false + } + }; + + if unconsume_all { + self.unconsume_name(input); + self.finish_none() + } else { + input.push_front(StrTendril::from_slice(&self.name_buf()[name_len..])); + self.result = Some(CharRef { + chars: [from_u32(c1).unwrap(), from_u32(c2).unwrap()], + num_chars: if c2 == 0 { 1 } else { 2 }, + }); + Done + } + } + } + } + + fn do_bogus_name( + &mut self, + tokenizer: &mut Tokenizer, + input: &mut BufferQueue) + -> Status { + let c = unwrap_or_return!(tokenizer.get_char(input), Stuck); + self.name_buf_mut().push_char(c); + match c { + _ if is_ascii_alnum(c) => return Progress, + ';' => self.emit_name_error(tokenizer), + _ => () + } + self.unconsume_name(input); + self.finish_none() + } + + pub fn end_of_file( + &mut self, + tokenizer: &mut Tokenizer, + input: &mut BufferQueue) { + while self.result.is_none() { + match self.state { + Begin => drop(self.finish_none()), + + Numeric(_) if !self.seen_digit + => drop(self.unconsume_numeric(tokenizer, input)), + + Numeric(_) | NumericSemicolon => { + tokenizer.emit_error(Borrowed("EOF in numeric character reference")); + self.finish_numeric(tokenizer); + } + + Named => drop(self.finish_named(tokenizer, input, None)), + + BogusName => { + self.unconsume_name(input); + self.finish_none(); + } + + Octothorpe => { + input.push_front(StrTendril::from_slice("#")); + tokenizer.emit_error(Borrowed("EOF after '#' in character reference")); + self.finish_none(); + } + } + } + } +} diff --git a/src/vendor/html5ever/src/tokenizer/interface.rs b/src/vendor/html5ever/src/tokenizer/interface.rs new file mode 100644 index 0000000000..041701b533 --- /dev/null +++ b/src/vendor/html5ever/src/tokenizer/interface.rs @@ -0,0 +1,110 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
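Editorial note on the character-reference tokenizer that ends just above: its finish_numeric step compresses several WHATWG rules into one match. The sketch below restates that mapping as a standalone function for clarity; it is not part of the vendored file. The names map_numeric_reference and c1_replacement are invented for this sketch, and c1_replacement merely stands in for the library's 32-entry data::C1_REPLACEMENTS table (only two sample rows shown).

use std::char::from_u32;

// Stand-in for data::C1_REPLACEMENTS; the real table has 32 entries
// covering the C1 range 0x80..=0x9F (Windows-1252 mappings).
fn c1_replacement(n: u32) -> Option<char> {
    match n {
        0x80 => Some('\u{20AC}'), // EURO SIGN
        0x99 => Some('\u{2122}'), // TRADE MARK SIGN
        _ => None,
    }
}

/// Returns the character to emit and whether a parse error is reported,
/// mirroring the arms of finish_numeric() above.
fn map_numeric_reference(num: u32, num_too_big: bool) -> (char, bool) {
    match num {
        // Out of range (or overflowed while parsing digits): U+FFFD, error.
        n if n > 0x10FFFF || num_too_big => ('\u{FFFD}', true),
        // NUL and surrogates: U+FFFD, error.
        0x00 | 0xD800...0xDFFF => ('\u{FFFD}', true),
        // C1 controls: remapped via the replacement table when possible.
        0x80...0x9F => match c1_replacement(num) {
            Some(c) => (c, true),
            None => (from_u32(num).unwrap(), true),
        },
        // Other controls and noncharacters: passed through but flagged.
        0x01...0x08 | 0x0B | 0x0D...0x1F | 0x7F | 0xFDD0...0xFDEF
            => (from_u32(num).unwrap(), true),
        n if (n & 0xFFFE) == 0xFFFE => (from_u32(n).unwrap(), true),
        // Everything else is emitted as-is with no error.
        n => (from_u32(n).unwrap(), false),
    }
}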
+
+use LocalName;
+use interface::Attribute;
+use std::borrow::Cow;
+use tendril::StrTendril;
+use tokenizer::states;
+
+pub use self::TagKind::{StartTag, EndTag};
+pub use self::Token::{DoctypeToken, TagToken, CommentToken, CharacterTokens};
+pub use self::Token::{NullCharacterToken, EOFToken, ParseError};
+
+/// A `DOCTYPE` token.
+// FIXME: already exists in Servo DOM
+#[derive(PartialEq, Eq, Clone, Debug)]
+pub struct Doctype {
+    pub name: Option<StrTendril>,
+    pub public_id: Option<StrTendril>,
+    pub system_id: Option<StrTendril>,
+    pub force_quirks: bool,
+}
+
+impl Doctype {
+    pub fn new() -> Doctype {
+        Doctype {
+            name: None,
+            public_id: None,
+            system_id: None,
+            force_quirks: false,
+        }
+    }
+}
+
+#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)]
+pub enum TagKind {
+    StartTag,
+    EndTag,
+}
+
+/// A tag token.
+#[derive(PartialEq, Eq, Clone, Debug)]
+pub struct Tag {
+    pub kind: TagKind,
+    pub name: LocalName,
+    pub self_closing: bool,
+    pub attrs: Vec<Attribute>,
+}
+
+impl Tag {
+    /// Are the tags equivalent when we don't care about attribute order?
+    /// Also ignores the self-closing flag.
+    pub fn equiv_modulo_attr_order(&self, other: &Tag) -> bool {
+        if (self.kind != other.kind) || (self.name != other.name) {
+            return false;
+        }
+
+        let mut self_attrs = self.attrs.clone();
+        let mut other_attrs = other.attrs.clone();
+        self_attrs.sort();
+        other_attrs.sort();
+
+        self_attrs == other_attrs
+    }
+}
+
+#[derive(PartialEq, Eq, Debug)]
+pub enum Token {
+    DoctypeToken(Doctype),
+    TagToken(Tag),
+    CommentToken(StrTendril),
+    CharacterTokens(StrTendril),
+    NullCharacterToken,
+    EOFToken,
+    ParseError(Cow<'static, str>),
+}
+
+#[derive(Debug, PartialEq)]
+#[must_use]
+pub enum TokenSinkResult<Handle> {
+    Continue,
+    Script(Handle),
+    Plaintext,
+    RawData(states::RawKind)
+}
+
+/// Types which can receive tokens from the tokenizer.
+pub trait TokenSink {
+    type Handle;
+
+    /// Process a token.
+    fn process_token(&mut self, token: Token, line_number: u64) -> TokenSinkResult<Self::Handle>;
+
+    // Signal sink that tokenization reached the end.
+    fn end(&mut self) {}
+
+    /// Used in the markup declaration open state. By default, this always
+    /// returns false and thus all CDATA sections are tokenized as bogus
+    /// comments.
+    /// https://html.spec.whatwg.org/multipage/#markup-declaration-open-state
+    fn adjusted_current_node_present_but_not_in_html_namespace(&self) -> bool {
+        false
+    }
+}
diff --git a/src/vendor/html5ever/src/tokenizer/mod.rs b/src/vendor/html5ever/src/tokenizer/mod.rs
new file mode 100644
index 0000000000..45a3e4a521
--- /dev/null
+++ b/src/vendor/html5ever/src/tokenizer/mod.rs
@@ -0,0 +1,1565 @@
+// Copyright 2014-2017 The html5ever Project Developers. See the
+// COPYRIGHT file at the top-level directory of this distribution.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The HTML5 tokenizer.
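Editorial note: the TokenSink trait defined in interface.rs above is the whole consumer-facing surface of this tokenizer. A minimal sketch of wiring it up, not part of the vendored file; it assumes crate-internal paths (external users would import via html5ever::tokenizer), and PrintSink and print_tokens are names made up for the example.

use tendril::StrTendril;
use tokenizer::{BufferQueue, Token, TokenSink, TokenSinkResult, Tokenizer, TokenizerOpts};

// A sink that just prints every token with the line it was seen on.
struct PrintSink;

impl TokenSink for PrintSink {
    type Handle = ();

    fn process_token(&mut self, token: Token, line_number: u64) -> TokenSinkResult<()> {
        println!("line {}: {:?}", line_number, token);
        TokenSinkResult::Continue
    }
}

// Drive the tokenizer over one chunk of input and flush it at EOF.
fn print_tokens(html: &str) {
    let mut tok = Tokenizer::new(PrintSink, TokenizerOpts::default());
    let mut input = BufferQueue::new();
    input.push_back(StrTendril::from(html));
    let _ = tok.feed(&mut input);
    tok.end();
}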
+ +pub use self::interface::{Doctype, TagKind, StartTag, EndTag, Tag}; +pub use self::interface::{Token, DoctypeToken, TagToken, CommentToken}; +pub use self::interface::{CharacterTokens, NullCharacterToken, EOFToken, ParseError}; +pub use self::interface::{TokenSink, TokenSinkResult}; + +use self::states::{Rcdata, Rawtext, ScriptData, ScriptDataEscaped}; +use self::states::{Escaped, DoubleEscaped}; +use self::states::{Unquoted, SingleQuoted, DoubleQuoted}; +use self::states::{DoctypeIdKind, Public, System}; + +use self::char_ref::{CharRef, CharRefTokenizer}; + +use util::str::lower_ascii_letter; + +use std::ascii::AsciiExt; +use std::mem::replace; +use std::default::Default; +use std::borrow::Cow::{self, Borrowed}; +use std::collections::BTreeMap; + +use {LocalName, QualName, Attribute, SmallCharSet}; +use tendril::StrTendril; +pub use buffer_queue::{BufferQueue, SetResult, FromSet, NotFromSet}; + +pub mod states; +mod interface; +mod char_ref; + +pub enum ProcessResult { + Continue, + Suspend, + Script(Handle) +} + +#[must_use] +pub enum TokenizerResult { + Done, + Script(Handle) +} + +fn option_push(opt_str: &mut Option, c: char) { + match *opt_str { + Some(ref mut s) => s.push_char(c), + None => *opt_str = Some(StrTendril::from_char(c)), + } +} + +/// Tokenizer options, with an impl for `Default`. +#[derive(Clone)] +pub struct TokenizerOpts { + /// Report all parse errors described in the spec, at some + /// performance penalty? Default: false + pub exact_errors: bool, + + /// Discard a `U+FEFF BYTE ORDER MARK` if we see one at the beginning + /// of the stream? Default: true + pub discard_bom: bool, + + /// Keep a record of how long we spent in each state? Printed + /// when `end()` is called. Default: false + pub profile: bool, + + /// Initial state override. Only the test runner should use + /// a non-`None` value! + pub initial_state: Option, + + /// Last start tag. Only the test runner should use a + /// non-`None` value! + /// + /// FIXME: Can't use Tendril because we want TokenizerOpts + /// to be Send. + pub last_start_tag_name: Option, +} + +impl Default for TokenizerOpts { + fn default() -> TokenizerOpts { + TokenizerOpts { + exact_errors: false, + discard_bom: true, + profile: false, + initial_state: None, + last_start_tag_name: None, + } + } +} + +/// The HTML tokenizer. +pub struct Tokenizer { + /// Options controlling the behavior of the tokenizer. + opts: TokenizerOpts, + + /// Destination for tokens we emit. + pub sink: Sink, + + /// The abstract machine state as described in the spec. + state: states::State, + + /// Are we at the end of the file, once buffers have been processed + /// completely? This affects whether we will wait for lookahead or not. + at_eof: bool, + + /// Tokenizer for character references, if we're tokenizing + /// one at the moment. + char_ref_tokenizer: Option>, + + /// Current input character. Just consumed, may reconsume. + current_char: char, + + /// Should we reconsume the current input character? + reconsume: bool, + + /// Did we just consume \r, translating it to \n? In that case we need + /// to ignore the next character if it's \n. + ignore_lf: bool, + + /// Discard a U+FEFF BYTE ORDER MARK if we see one? Only done at the + /// beginning of the stream. + discard_bom: bool, + + /// Current tag kind. + current_tag_kind: TagKind, + + /// Current tag name. + current_tag_name: StrTendril, + + /// Current tag is self-closing? + current_tag_self_closing: bool, + + /// Current tag attributes. 
+ current_tag_attrs: Vec, + + /// Current attribute name. + current_attr_name: StrTendril, + + /// Current attribute value. + current_attr_value: StrTendril, + + /// Current comment. + current_comment: StrTendril, + + /// Current doctype token. + current_doctype: Doctype, + + /// Last start tag name, for use in checking "appropriate end tag". + last_start_tag_name: Option, + + /// The "temporary buffer" mentioned in the spec. + temp_buf: StrTendril, + + /// Record of how many ns we spent in each state, if profiling is enabled. + state_profile: BTreeMap, + + /// Record of how many ns we spent in the token sink. + time_in_sink: u64, + + /// Track current line + current_line: u64, +} + +impl Tokenizer { + /// Create a new tokenizer which feeds tokens to a particular `TokenSink`. + pub fn new(sink: Sink, mut opts: TokenizerOpts) -> Tokenizer { + let start_tag_name = opts.last_start_tag_name.take() + .map(|s| LocalName::from(&*s)); + let state = opts.initial_state.unwrap_or(states::Data); + let discard_bom = opts.discard_bom; + Tokenizer { + opts: opts, + sink: sink, + state: state, + char_ref_tokenizer: None, + at_eof: false, + current_char: '\0', + reconsume: false, + ignore_lf: false, + discard_bom: discard_bom, + current_tag_kind: StartTag, + current_tag_name: StrTendril::new(), + current_tag_self_closing: false, + current_tag_attrs: vec!(), + current_attr_name: StrTendril::new(), + current_attr_value: StrTendril::new(), + current_comment: StrTendril::new(), + current_doctype: Doctype::new(), + last_start_tag_name: start_tag_name, + temp_buf: StrTendril::new(), + state_profile: BTreeMap::new(), + time_in_sink: 0, + current_line: 1, + } + } + + /// Feed an input string into the tokenizer. + pub fn feed(&mut self, input: &mut BufferQueue) -> TokenizerResult { + if input.is_empty() { + return TokenizerResult::Done; + } + + if self.discard_bom { + if let Some(c) = input.peek() { + if c == '\u{feff}' { + input.next(); + } + } else { + return TokenizerResult::Done; + } + }; + + self.run(input) + } + + pub fn set_plaintext_state(&mut self) { + self.state = states::Plaintext; + } + + fn process_token(&mut self, token: Token) -> TokenSinkResult { + if self.opts.profile { + let (ret, dt) = time!(self.sink.process_token(token, self.current_line)); + self.time_in_sink += dt; + ret + } else { + self.sink.process_token(token, self.current_line) + } + } + + fn process_token_and_continue(&mut self, token: Token) { + assert!(matches!(self.process_token(token), TokenSinkResult::Continue)); + } + + //§ preprocessing-the-input-stream + // Get the next input character, which might be the character + // 'c' that we already consumed from the buffers. + fn get_preprocessed_char( + &mut self, + mut c: char, + input: &mut BufferQueue) + -> Option { + if self.ignore_lf { + self.ignore_lf = false; + if c == '\n' { + c = unwrap_or_return!(input.next(), None); + } + } + + if c == '\r' { + self.ignore_lf = true; + c = '\n'; + } + + if c == '\n' { + self.current_line += 1; + } + + if self.opts.exact_errors && match c as u32 { + 0x01...0x08 | 0x0B | 0x0E...0x1F | 0x7F...0x9F | 0xFDD0...0xFDEF => true, + n if (n & 0xFFFE) == 0xFFFE => true, + _ => false, + } { + let msg = format!("Bad character {}", c); + self.emit_error(Cow::Owned(msg)); + } + + debug!("got character {}", c); + self.current_char = c; + Some(c) + } + + //§ tokenization + // Get the next input character, if one is available. 
+ fn get_char(&mut self, input: &mut BufferQueue) -> Option { + if self.reconsume { + self.reconsume = false; + Some(self.current_char) + } else { + input.next().and_then(|c| self.get_preprocessed_char(c, input)) + } + } + + fn pop_except_from(&mut self, input: &mut BufferQueue, set: SmallCharSet) -> Option { + // Bail to the slow path for various corner cases. + // This means that `FromSet` can contain characters not in the set! + // It shouldn't matter because the fallback `FromSet` case should + // always do the same thing as the `NotFromSet` case. + if self.opts.exact_errors || self.reconsume || self.ignore_lf { + return self.get_char(input).map(|x| FromSet(x)); + } + + let d = input.pop_except_from(set); + debug!("got characters {:?}", d); + match d { + Some(FromSet(c)) => self.get_preprocessed_char(c, input).map(|x| FromSet(x)), + + // NB: We don't set self.current_char for a run of characters not + // in the set. It shouldn't matter for the codepaths that use + // this. + _ => d + } + } + + // Check if the next characters are an ASCII case-insensitive match. See + // BufferQueue::eat. + // + // NB: this doesn't do input stream preprocessing or set the current input + // character. + fn eat( + &mut self, + input: &mut BufferQueue, + pat: &str, + eq: fn(&u8, &u8) -> bool) + -> Option { + input.push_front(replace(&mut self.temp_buf, StrTendril::new())); + match input.eat(pat, eq) { + None if self.at_eof => Some(false), + None => { + while let Some(c) = input.next() { + self.temp_buf.push_char(c); + } + None + }, + Some(matched) => Some(matched), + } + } + + /// Run the state machine for as long as we can. + fn run(&mut self, input: &mut BufferQueue) -> TokenizerResult { + if self.opts.profile { + loop { + let state = self.state; + let old_sink = self.time_in_sink; + let (run, mut dt) = time!(self.step(input)); + dt -= (self.time_in_sink - old_sink); + let new = match self.state_profile.get_mut(&state) { + Some(x) => { + *x += dt; + false + } + None => true, + }; + if new { + // do this here because of borrow shenanigans + self.state_profile.insert(state, dt); + } + match run { + ProcessResult::Continue => (), + ProcessResult::Suspend => break, + ProcessResult::Script(node) => return TokenizerResult::Script(node), + } + } + } else { + loop { + match self.step(input) { + ProcessResult::Continue => (), + ProcessResult::Suspend => break, + ProcessResult::Script(node) => return TokenizerResult::Script(node), + } + } + } + TokenizerResult::Done + } + + fn bad_char_error(&mut self) { + let msg = format_if!( + self.opts.exact_errors, + "Bad character", + "Saw {} in state {:?}", self.current_char, self.state); + self.emit_error(msg); + } + + fn bad_eof_error(&mut self) { + let msg = format_if!( + self.opts.exact_errors, + "Unexpected EOF", + "Saw EOF in state {:?}", self.state); + self.emit_error(msg); + } + + fn emit_char(&mut self, c: char) { + self.process_token_and_continue(match c { + '\0' => NullCharacterToken, + _ => CharacterTokens(StrTendril::from_char(c)), + }); + } + + // The string must not contain '\0'! 
+ fn emit_chars(&mut self, b: StrTendril) { + self.process_token_and_continue(CharacterTokens(b)); + } + + fn emit_current_tag(&mut self) -> ProcessResult { + self.finish_attribute(); + + let name = LocalName::from(&*self.current_tag_name); + self.current_tag_name.clear(); + + match self.current_tag_kind { + StartTag => { + self.last_start_tag_name = Some(name.clone()); + } + EndTag => { + if !self.current_tag_attrs.is_empty() { + self.emit_error(Borrowed("Attributes on an end tag")); + } + if self.current_tag_self_closing { + self.emit_error(Borrowed("Self-closing end tag")); + } + } + } + + let token = TagToken(Tag { kind: self.current_tag_kind, + name: name, + self_closing: self.current_tag_self_closing, + attrs: replace(&mut self.current_tag_attrs, vec!()), + }); + + match self.process_token(token) { + TokenSinkResult::Continue => ProcessResult::Continue, + TokenSinkResult::Plaintext => { + self.state = states::Plaintext; + ProcessResult::Continue + }, + TokenSinkResult::Script(node) => { + self.state = states::Data; + ProcessResult::Script(node) + }, + TokenSinkResult::RawData(kind) => { + self.state = states::RawData(kind); + ProcessResult::Continue + } + } + } + + fn emit_temp_buf(&mut self) { + // FIXME: Make sure that clearing on emit is spec-compatible. + let buf = replace(&mut self.temp_buf, StrTendril::new()); + self.emit_chars(buf); + } + + fn clear_temp_buf(&mut self) { + // Do this without a new allocation. + self.temp_buf.clear(); + } + + fn emit_current_comment(&mut self) { + let comment = replace(&mut self.current_comment, StrTendril::new()); + self.process_token_and_continue(CommentToken(comment)); + } + + fn discard_tag(&mut self) { + self.current_tag_name.clear(); + self.current_tag_self_closing = false; + self.current_tag_attrs = vec!(); + } + + fn create_tag(&mut self, kind: TagKind, c: char) { + self.discard_tag(); + self.current_tag_name.push_char(c); + self.current_tag_kind = kind; + } + + fn have_appropriate_end_tag(&self) -> bool { + match self.last_start_tag_name.as_ref() { + Some(last) => + (self.current_tag_kind == EndTag) + && (*self.current_tag_name == **last), + None => false, + } + } + + fn create_attribute(&mut self, c: char) { + self.finish_attribute(); + + self.current_attr_name.push_char(c); + } + + fn finish_attribute(&mut self) { + if self.current_attr_name.len() == 0 { + return; + } + + // Check for a duplicate attribute. + // FIXME: the spec says we should error as soon as the name is finished. + // FIXME: linear time search, do we care? + let dup = { + let name = &*self.current_attr_name; + self.current_tag_attrs.iter().any(|a| &*a.name.local == name) + }; + + if dup { + self.emit_error(Borrowed("Duplicate attribute")); + self.current_attr_name.clear(); + self.current_attr_value.clear(); + } else { + let name = LocalName::from(&*self.current_attr_name); + self.current_attr_name.clear(); + self.current_tag_attrs.push(Attribute { + // The tree builder will adjust the namespace if necessary. + // This only happens in foreign elements. 
+ name: QualName::new(None, ns!(), name), + value: replace(&mut self.current_attr_value, StrTendril::new()), + }); + } + } + + fn emit_current_doctype(&mut self) { + let doctype = replace(&mut self.current_doctype, Doctype::new()); + self.process_token_and_continue(DoctypeToken(doctype)); + } + + fn doctype_id<'a>(&'a mut self, kind: DoctypeIdKind) -> &'a mut Option { + match kind { + Public => &mut self.current_doctype.public_id, + System => &mut self.current_doctype.system_id, + } + } + + fn clear_doctype_id(&mut self, kind: DoctypeIdKind) { + let id = self.doctype_id(kind); + match *id { + Some(ref mut s) => s.clear(), + None => *id = Some(StrTendril::new()), + } + } + + fn consume_char_ref(&mut self, addnl_allowed: Option) { + // NB: The char ref tokenizer assumes we have an additional allowed + // character iff we're tokenizing in an attribute value. + self.char_ref_tokenizer = Some(Box::new(CharRefTokenizer::new(addnl_allowed))); + } + + fn emit_eof(&mut self) { + self.process_token_and_continue(EOFToken); + } + + fn peek(&mut self, input: &BufferQueue) -> Option { + if self.reconsume { + Some(self.current_char) + } else { + input.peek() + } + } + + fn discard_char(&mut self, input: &mut BufferQueue) { + let c = self.get_char(input); + assert!(c.is_some()); + } + + fn emit_error(&mut self, error: Cow<'static, str>) { + self.process_token_and_continue(ParseError(error)); + } +} +//§ END + +// Shorthand for common state machine behaviors. +macro_rules! shorthand ( + ( $me:ident : emit $c:expr ) => ( $me.emit_char($c); ); + ( $me:ident : create_tag $kind:ident $c:expr ) => ( $me.create_tag($kind, $c); ); + ( $me:ident : push_tag $c:expr ) => ( $me.current_tag_name.push_char($c); ); + ( $me:ident : discard_tag ) => ( $me.discard_tag(); ); + ( $me:ident : discard_char $input:expr ) => ( $me.discard_char($input); ); + ( $me:ident : push_temp $c:expr ) => ( $me.temp_buf.push_char($c); ); + ( $me:ident : emit_temp ) => ( $me.emit_temp_buf(); ); + ( $me:ident : clear_temp ) => ( $me.clear_temp_buf(); ); + ( $me:ident : create_attr $c:expr ) => ( $me.create_attribute($c); ); + ( $me:ident : push_name $c:expr ) => ( $me.current_attr_name.push_char($c); ); + ( $me:ident : push_value $c:expr ) => ( $me.current_attr_value.push_char($c); ); + ( $me:ident : append_value $c:expr ) => ( $me.current_attr_value.push_tendril($c); ); + ( $me:ident : push_comment $c:expr ) => ( $me.current_comment.push_char($c); ); + ( $me:ident : append_comment $c:expr ) => ( $me.current_comment.push_slice($c); ); + ( $me:ident : emit_comment ) => ( $me.emit_current_comment(); ); + ( $me:ident : clear_comment ) => ( $me.current_comment.clear(); ); + ( $me:ident : create_doctype ) => ( $me.current_doctype = Doctype::new(); ); + ( $me:ident : push_doctype_name $c:expr ) => ( option_push(&mut $me.current_doctype.name, $c); ); + ( $me:ident : push_doctype_id $k:ident $c:expr ) => ( option_push($me.doctype_id($k), $c); ); + ( $me:ident : clear_doctype_id $k:ident ) => ( $me.clear_doctype_id($k); ); + ( $me:ident : force_quirks ) => ( $me.current_doctype.force_quirks = true; ); + ( $me:ident : emit_doctype ) => ( $me.emit_current_doctype(); ); + ( $me:ident : error ) => ( $me.bad_char_error(); ); + ( $me:ident : error_eof ) => ( $me.bad_eof_error(); ); +); + +// Tracing of tokenizer actions. This adds significant bloat and compile time, +// so it's behind a cfg flag. +#[cfg(trace_tokenizer)] +macro_rules! 
sh_trace ( ( $me:ident : $($cmds:tt)* ) => ({ + debug!(" {:s}", stringify!($($cmds)*)); + shorthand!($me:expr : $($cmds)*); +})); + +#[cfg(not(trace_tokenizer))] +macro_rules! sh_trace ( ( $me:ident : $($cmds:tt)* ) => ( shorthand!($me: $($cmds)*) ) ); + +// A little DSL for sequencing shorthand actions. +macro_rules! go ( + // A pattern like $($cmd:tt)* ; $($rest:tt)* causes parse ambiguity. + // We have to tell the parser how much lookahead we need. + + ( $me:ident : $a:tt ; $($rest:tt)* ) => ({ sh_trace!($me: $a); go!($me: $($rest)*); }); + ( $me:ident : $a:tt $b:tt ; $($rest:tt)* ) => ({ sh_trace!($me: $a $b); go!($me: $($rest)*); }); + ( $me:ident : $a:tt $b:tt $c:tt ; $($rest:tt)* ) => ({ sh_trace!($me: $a $b $c); go!($me: $($rest)*); }); + ( $me:ident : $a:tt $b:tt $c:tt $d:tt ; $($rest:tt)* ) => ({ sh_trace!($me: $a $b $c $d); go!($me: $($rest)*); }); + + // These can only come at the end. + + ( $me:ident : to $s:ident ) => ({ $me.state = states::$s; return ProcessResult::Continue; }); + ( $me:ident : to $s:ident $k1:expr ) => ({ $me.state = states::$s($k1); return ProcessResult::Continue; }); + ( $me:ident : to $s:ident $k1:ident $k2:expr ) => ({ $me.state = states::$s($k1($k2)); return ProcessResult::Continue; }); + + ( $me:ident : reconsume $s:ident ) => ({ $me.reconsume = true; go!($me: to $s); }); + ( $me:ident : reconsume $s:ident $k1:expr ) => ({ $me.reconsume = true; go!($me: to $s $k1); }); + ( $me:ident : reconsume $s:ident $k1:ident $k2:expr ) => ({ $me.reconsume = true; go!($me: to $s $k1 $k2); }); + + ( $me:ident : consume_char_ref ) => ({ $me.consume_char_ref(None); return ProcessResult::Continue; }); + ( $me:ident : consume_char_ref $addnl:expr ) => ({ $me.consume_char_ref(Some($addnl)); return ProcessResult::Continue; }); + + // We have a default next state after emitting a tag, but the sink can override. + ( $me:ident : emit_tag $s:ident ) => ({ + $me.state = states::$s; + return $me.emit_current_tag(); + }); + + ( $me:ident : eof ) => ({ $me.emit_eof(); return ProcessResult::Suspend; }); + + // If nothing else matched, it's a single command + ( $me:ident : $($cmd:tt)+ ) => ( sh_trace!($me: $($cmd)+); ); + + // or nothing. + ( $me:ident : ) => (()); +); + +macro_rules! go_match ( ( $me:ident : $x:expr, $($pats:pat),+ => $($cmds:tt)* ) => ( + match $x { + $($pats)|+ => go!($me: $($cmds)*), + _ => (), + } +)); + +// This is a macro because it can cause early return +// from the function where it is used. +macro_rules! get_char ( ($me:expr, $input:expr) => ( + unwrap_or_return!($me.get_char($input), ProcessResult::Suspend) +)); + +macro_rules! peek ( ($me:expr, $input:expr) => ( + unwrap_or_return!($me.peek($input), ProcessResult::Suspend) +)); + +macro_rules! pop_except_from ( ($me:expr, $input:expr, $set:expr) => ( + unwrap_or_return!($me.pop_except_from($input, $set), ProcessResult::Suspend) +)); + +macro_rules! eat ( ($me:expr, $input:expr, $pat:expr) => ( + unwrap_or_return!($me.eat($input, $pat, u8::eq_ignore_ascii_case), ProcessResult::Suspend) +)); + +macro_rules! eat_exact ( ($me:expr, $input:expr, $pat:expr) => ( + unwrap_or_return!($me.eat($input, $pat, u8::eq), ProcessResult::Suspend) +)); + +impl Tokenizer { + // Run the state machine for a while. + // Return true if we should be immediately re-invoked + // (this just simplifies control flow vs. break / continue). 
+ fn step(&mut self, input: &mut BufferQueue) -> ProcessResult { + if self.char_ref_tokenizer.is_some() { + return self.step_char_ref_tokenizer(input); + } + + debug!("processing in state {:?}", self.state); + match self.state { + //§ data-state + states::Data => loop { + match pop_except_from!(self, input, small_char_set!('\r' '\0' '&' '<' '\n')) { + FromSet('\0') => go!(self: error; emit '\0'), + FromSet('&') => go!(self: consume_char_ref), + FromSet('<') => go!(self: to TagOpen), + FromSet(c) => go!(self: emit c), + NotFromSet(b) => self.emit_chars(b), + } + }, + + //§ rcdata-state + states::RawData(Rcdata) => loop { + match pop_except_from!(self, input, small_char_set!('\r' '\0' '&' '<' '\n')) { + FromSet('\0') => go!(self: error; emit '\u{fffd}'), + FromSet('&') => go!(self: consume_char_ref), + FromSet('<') => go!(self: to RawLessThanSign Rcdata), + FromSet(c) => go!(self: emit c), + NotFromSet(b) => self.emit_chars(b), + } + }, + + //§ rawtext-state + states::RawData(Rawtext) => loop { + match pop_except_from!(self, input, small_char_set!('\r' '\0' '<' '\n')) { + FromSet('\0') => go!(self: error; emit '\u{fffd}'), + FromSet('<') => go!(self: to RawLessThanSign Rawtext), + FromSet(c) => go!(self: emit c), + NotFromSet(b) => self.emit_chars(b), + } + }, + + //§ script-data-state + states::RawData(ScriptData) => loop { + match pop_except_from!(self, input, small_char_set!('\r' '\0' '<' '\n')) { + FromSet('\0') => go!(self: error; emit '\u{fffd}'), + FromSet('<') => go!(self: to RawLessThanSign ScriptData), + FromSet(c) => go!(self: emit c), + NotFromSet(b) => self.emit_chars(b), + } + }, + + //§ script-data-escaped-state + states::RawData(ScriptDataEscaped(Escaped)) => loop { + match pop_except_from!(self, input, small_char_set!('\r' '\0' '-' '<' '\n')) { + FromSet('\0') => go!(self: error; emit '\u{fffd}'), + FromSet('-') => go!(self: emit '-'; to ScriptDataEscapedDash Escaped), + FromSet('<') => go!(self: to RawLessThanSign ScriptDataEscaped Escaped), + FromSet(c) => go!(self: emit c), + NotFromSet(b) => self.emit_chars(b), + } + }, + + //§ script-data-double-escaped-state + states::RawData(ScriptDataEscaped(DoubleEscaped)) => loop { + match pop_except_from!(self, input, small_char_set!('\r' '\0' '-' '<' '\n')) { + FromSet('\0') => go!(self: error; emit '\u{fffd}'), + FromSet('-') => go!(self: emit '-'; to ScriptDataEscapedDash DoubleEscaped), + FromSet('<') => go!(self: emit '<'; to RawLessThanSign ScriptDataEscaped DoubleEscaped), + FromSet(c) => go!(self: emit c), + NotFromSet(b) => self.emit_chars(b), + } + }, + + //§ plaintext-state + states::Plaintext => loop { + match pop_except_from!(self, input, small_char_set!('\r' '\0' '\n')) { + FromSet('\0') => go!(self: error; emit '\u{fffd}'), + FromSet(c) => go!(self: emit c), + NotFromSet(b) => self.emit_chars(b), + } + }, + + //§ tag-open-state + states::TagOpen => loop { match get_char!(self, input) { + '!' => go!(self: clear_temp; to MarkupDeclarationOpen), + '/' => go!(self: to EndTagOpen), + '?' 
=> go!(self: error; clear_comment; push_comment '?'; to BogusComment), + c => match lower_ascii_letter(c) { + Some(cl) => go!(self: create_tag StartTag cl; to TagName), + None => go!(self: error; emit '<'; reconsume Data), + } + }}, + + //§ end-tag-open-state + states::EndTagOpen => loop { match get_char!(self, input) { + '>' => go!(self: error; to Data), + '\0' => go!(self: error; clear_comment; push_comment '\u{fffd}'; to BogusComment), + c => match lower_ascii_letter(c) { + Some(cl) => go!(self: create_tag EndTag cl; to TagName), + None => go!(self: error; clear_comment; push_comment c; to BogusComment), + } + }}, + + //§ tag-name-state + states::TagName => loop { match get_char!(self, input) { + '\t' | '\n' | '\x0C' | ' ' + => go!(self: to BeforeAttributeName), + '/' => go!(self: to SelfClosingStartTag), + '>' => go!(self: emit_tag Data), + '\0' => go!(self: error; push_tag '\u{fffd}'), + c => go!(self: push_tag (c.to_ascii_lowercase())), + }}, + + //§ script-data-escaped-less-than-sign-state + states::RawLessThanSign(ScriptDataEscaped(Escaped)) => loop { match get_char!(self, input) { + '/' => go!(self: clear_temp; to RawEndTagOpen ScriptDataEscaped Escaped), + c => match lower_ascii_letter(c) { + Some(cl) => go!(self: clear_temp; push_temp cl; emit '<'; emit c; + to ScriptDataEscapeStart DoubleEscaped), + None => go!(self: emit '<'; reconsume RawData ScriptDataEscaped Escaped), + } + }}, + + //§ script-data-double-escaped-less-than-sign-state + states::RawLessThanSign(ScriptDataEscaped(DoubleEscaped)) => loop { match get_char!(self, input) { + '/' => go!(self: clear_temp; emit '/'; to ScriptDataDoubleEscapeEnd), + _ => go!(self: reconsume RawData ScriptDataEscaped DoubleEscaped), + }}, + + //§ rcdata-less-than-sign-state rawtext-less-than-sign-state script-data-less-than-sign-state + // otherwise + states::RawLessThanSign(kind) => loop { match get_char!(self, input) { + '/' => go!(self: clear_temp; to RawEndTagOpen kind), + '!' 
if kind == ScriptData => go!(self: emit '<'; emit '!'; to ScriptDataEscapeStart Escaped), + _ => go!(self: emit '<'; reconsume RawData kind), + }}, + + //§ rcdata-end-tag-open-state rawtext-end-tag-open-state script-data-end-tag-open-state script-data-escaped-end-tag-open-state + states::RawEndTagOpen(kind) => loop { + let c = get_char!(self, input); + match lower_ascii_letter(c) { + Some(cl) => go!(self: create_tag EndTag cl; push_temp c; to RawEndTagName kind), + None => go!(self: emit '<'; emit '/'; reconsume RawData kind), + } + }, + + //§ rcdata-end-tag-name-state rawtext-end-tag-name-state script-data-end-tag-name-state script-data-escaped-end-tag-name-state + states::RawEndTagName(kind) => loop { + let c = get_char!(self, input); + if self.have_appropriate_end_tag() { + match c { + '\t' | '\n' | '\x0C' | ' ' + => go!(self: to BeforeAttributeName), + '/' => go!(self: to SelfClosingStartTag), + '>' => go!(self: emit_tag Data), + _ => (), + } + } + + match lower_ascii_letter(c) { + Some(cl) => go!(self: push_tag cl; push_temp c), + None => go!(self: discard_tag; emit '<'; emit '/'; emit_temp; reconsume RawData kind), + } + }, + + //§ script-data-double-escape-start-state + states::ScriptDataEscapeStart(DoubleEscaped) => loop { + let c = get_char!(self, input); + match c { + '\t' | '\n' | '\x0C' | ' ' | '/' | '>' => { + let esc = if &*self.temp_buf == "script" { DoubleEscaped } else { Escaped }; + go!(self: emit c; to RawData ScriptDataEscaped esc); + } + _ => match lower_ascii_letter(c) { + Some(cl) => go!(self: push_temp cl; emit c), + None => go!(self: reconsume RawData ScriptDataEscaped Escaped), + } + } + }, + + //§ script-data-escape-start-state + states::ScriptDataEscapeStart(Escaped) => loop { match get_char!(self, input) { + '-' => go!(self: emit '-'; to ScriptDataEscapeStartDash), + _ => go!(self: reconsume RawData ScriptData), + }}, + + //§ script-data-escape-start-dash-state + states::ScriptDataEscapeStartDash => loop { match get_char!(self, input) { + '-' => go!(self: emit '-'; to ScriptDataEscapedDashDash Escaped), + _ => go!(self: reconsume RawData ScriptData), + }}, + + //§ script-data-escaped-dash-state script-data-double-escaped-dash-state + states::ScriptDataEscapedDash(kind) => loop { match get_char!(self, input) { + '-' => go!(self: emit '-'; to ScriptDataEscapedDashDash kind), + '<' => { + if kind == DoubleEscaped { go!(self: emit '<'); } + go!(self: to RawLessThanSign ScriptDataEscaped kind); + } + '\0' => go!(self: error; emit '\u{fffd}'; to RawData ScriptDataEscaped kind), + c => go!(self: emit c; to RawData ScriptDataEscaped kind), + }}, + + //§ script-data-escaped-dash-dash-state script-data-double-escaped-dash-dash-state + states::ScriptDataEscapedDashDash(kind) => loop { match get_char!(self, input) { + '-' => go!(self: emit '-'), + '<' => { + if kind == DoubleEscaped { go!(self: emit '<'); } + go!(self: to RawLessThanSign ScriptDataEscaped kind); + } + '>' => go!(self: emit '>'; to RawData ScriptData), + '\0' => go!(self: error; emit '\u{fffd}'; to RawData ScriptDataEscaped kind), + c => go!(self: emit c; to RawData ScriptDataEscaped kind), + }}, + + //§ script-data-double-escape-end-state + states::ScriptDataDoubleEscapeEnd => loop { + let c = get_char!(self, input); + match c { + '\t' | '\n' | '\x0C' | ' ' | '/' | '>' => { + let esc = if &*self.temp_buf == "script" { Escaped } else { DoubleEscaped }; + go!(self: emit c; to RawData ScriptDataEscaped esc); + } + _ => match lower_ascii_letter(c) { + Some(cl) => go!(self: push_temp cl; emit c), + None => 
go!(self: reconsume RawData ScriptDataEscaped DoubleEscaped), + } + } + }, + + //§ before-attribute-name-state + states::BeforeAttributeName => loop { match get_char!(self, input) { + '\t' | '\n' | '\x0C' | ' ' => (), + '/' => go!(self: to SelfClosingStartTag), + '>' => go!(self: emit_tag Data), + '\0' => go!(self: error; create_attr '\u{fffd}'; to AttributeName), + c => match lower_ascii_letter(c) { + Some(cl) => go!(self: create_attr cl; to AttributeName), + None => { + go_match!(self: c, + '"' , '\'' , '<' , '=' => error); + go!(self: create_attr c; to AttributeName); + } + } + }}, + + //§ attribute-name-state + states::AttributeName => loop { match get_char!(self, input) { + '\t' | '\n' | '\x0C' | ' ' + => go!(self: to AfterAttributeName), + '/' => go!(self: to SelfClosingStartTag), + '=' => go!(self: to BeforeAttributeValue), + '>' => go!(self: emit_tag Data), + '\0' => go!(self: error; push_name '\u{fffd}'), + c => match lower_ascii_letter(c) { + Some(cl) => go!(self: push_name cl), + None => { + go_match!(self: c, + '"' , '\'' , '<' => error); + go!(self: push_name c); + } + } + }}, + + //§ after-attribute-name-state + states::AfterAttributeName => loop { match get_char!(self, input) { + '\t' | '\n' | '\x0C' | ' ' => (), + '/' => go!(self: to SelfClosingStartTag), + '=' => go!(self: to BeforeAttributeValue), + '>' => go!(self: emit_tag Data), + '\0' => go!(self: error; create_attr '\u{fffd}'; to AttributeName), + c => match lower_ascii_letter(c) { + Some(cl) => go!(self: create_attr cl; to AttributeName), + None => { + go_match!(self: c, + '"' , '\'' , '<' => error); + go!(self: create_attr c; to AttributeName); + } + } + }}, + + //§ before-attribute-value-state + // Use peek so we can handle the first attr character along with the rest, + // hopefully in the same zero-copy buffer. 
+ states::BeforeAttributeValue => loop { match peek!(self, input) { + '\t' | '\n' | '\r' | '\x0C' | ' ' => go!(self: discard_char input), + '"' => go!(self: discard_char input; to AttributeValue DoubleQuoted), + '\'' => go!(self: discard_char input; to AttributeValue SingleQuoted), + '\0' => go!(self: discard_char input; error; push_value '\u{fffd}'; to AttributeValue Unquoted), + '>' => go!(self: discard_char input; error; emit_tag Data), + _ => go!(self: to AttributeValue Unquoted), + }}, + + //§ attribute-value-(double-quoted)-state + states::AttributeValue(DoubleQuoted) => loop { + match pop_except_from!(self, input, small_char_set!('\r' '"' '&' '\0' '\n')) { + FromSet('"') => go!(self: to AfterAttributeValueQuoted), + FromSet('&') => go!(self: consume_char_ref '"'), + FromSet('\0') => go!(self: error; push_value '\u{fffd}'), + FromSet(c) => go!(self: push_value c), + NotFromSet(ref b) => go!(self: append_value b), + } + }, + + //§ attribute-value-(single-quoted)-state + states::AttributeValue(SingleQuoted) => loop { + match pop_except_from!(self, input, small_char_set!('\r' '\'' '&' '\0' '\n')) { + FromSet('\'') => go!(self: to AfterAttributeValueQuoted), + FromSet('&') => go!(self: consume_char_ref '\''), + FromSet('\0') => go!(self: error; push_value '\u{fffd}'), + FromSet(c) => go!(self: push_value c), + NotFromSet(ref b) => go!(self: append_value b), + } + }, + + //§ attribute-value-(unquoted)-state + states::AttributeValue(Unquoted) => loop { + match pop_except_from!(self, input, small_char_set!('\r' '\t' '\n' '\x0C' ' ' '&' '>' '\0')) { + FromSet('\t') | FromSet('\n') | FromSet('\x0C') | FromSet(' ') + => go!(self: to BeforeAttributeName), + FromSet('&') => go!(self: consume_char_ref '>'), + FromSet('>') => go!(self: emit_tag Data), + FromSet('\0') => go!(self: error; push_value '\u{fffd}'), + FromSet(c) => { + go_match!(self: c, + '"' , '\'' , '<' , '=' , '`' => error); + go!(self: push_value c); + } + NotFromSet(ref b) => go!(self: append_value b), + } + }, + + //§ after-attribute-value-(quoted)-state + states::AfterAttributeValueQuoted => loop { match get_char!(self, input) { + '\t' | '\n' | '\x0C' | ' ' + => go!(self: to BeforeAttributeName), + '/' => go!(self: to SelfClosingStartTag), + '>' => go!(self: emit_tag Data), + _ => go!(self: error; reconsume BeforeAttributeName), + }}, + + //§ self-closing-start-tag-state + states::SelfClosingStartTag => loop { match get_char!(self, input) { + '>' => { + self.current_tag_self_closing = true; + go!(self: emit_tag Data); + } + _ => go!(self: error; reconsume BeforeAttributeName), + }}, + + //§ comment-start-state + states::CommentStart => loop { match get_char!(self, input) { + '-' => go!(self: to CommentStartDash), + '\0' => go!(self: error; push_comment '\u{fffd}'; to Comment), + '>' => go!(self: error; emit_comment; to Data), + c => go!(self: push_comment c; to Comment), + }}, + + //§ comment-start-dash-state + states::CommentStartDash => loop { match get_char!(self, input) { + '-' => go!(self: to CommentEnd), + '\0' => go!(self: error; append_comment "-\u{fffd}"; to Comment), + '>' => go!(self: error; emit_comment; to Data), + c => go!(self: push_comment '-'; push_comment c; to Comment), + }}, + + //§ comment-state + states::Comment => loop { match get_char!(self, input) { + '-' => go!(self: to CommentEndDash), + '\0' => go!(self: error; push_comment '\u{fffd}'), + c => go!(self: push_comment c), + }}, + + //§ comment-end-dash-state + states::CommentEndDash => loop { match get_char!(self, input) { + '-' => go!(self: to 
CommentEnd), + '\0' => go!(self: error; append_comment "-\u{fffd}"; to Comment), + c => go!(self: push_comment '-'; push_comment c; to Comment), + }}, + + //§ comment-end-state + states::CommentEnd => loop { match get_char!(self, input) { + '>' => go!(self: emit_comment; to Data), + '\0' => go!(self: error; append_comment "--\u{fffd}"; to Comment), + '!' => go!(self: error; to CommentEndBang), + '-' => go!(self: error; push_comment '-'), + c => go!(self: error; append_comment "--"; push_comment c; to Comment), + }}, + + //§ comment-end-bang-state + states::CommentEndBang => loop { match get_char!(self, input) { + '-' => go!(self: append_comment "--!"; to CommentEndDash), + '>' => go!(self: emit_comment; to Data), + '\0' => go!(self: error; append_comment "--!\u{fffd}"; to Comment), + c => go!(self: append_comment "--!"; push_comment c; to Comment), + }}, + + //§ doctype-state + states::Doctype => loop { match get_char!(self, input) { + '\t' | '\n' | '\x0C' | ' ' + => go!(self: to BeforeDoctypeName), + _ => go!(self: error; reconsume BeforeDoctypeName), + }}, + + //§ before-doctype-name-state + states::BeforeDoctypeName => loop { match get_char!(self, input) { + '\t' | '\n' | '\x0C' | ' ' => (), + '\0' => go!(self: error; create_doctype; push_doctype_name '\u{fffd}'; to DoctypeName), + '>' => go!(self: error; create_doctype; force_quirks; emit_doctype; to Data), + c => go!(self: create_doctype; push_doctype_name (c.to_ascii_lowercase()); + to DoctypeName), + }}, + + //§ doctype-name-state + states::DoctypeName => loop { match get_char!(self, input) { + '\t' | '\n' | '\x0C' | ' ' + => go!(self: clear_temp; to AfterDoctypeName), + '>' => go!(self: emit_doctype; to Data), + '\0' => go!(self: error; push_doctype_name '\u{fffd}'), + c => go!(self: push_doctype_name (c.to_ascii_lowercase())), + }}, + + //§ after-doctype-name-state + states::AfterDoctypeName => loop { + if eat!(self, input, "public") { + go!(self: to AfterDoctypeKeyword Public); + } else if eat!(self, input, "system") { + go!(self: to AfterDoctypeKeyword System); + } else { + match get_char!(self, input) { + '\t' | '\n' | '\x0C' | ' ' => (), + '>' => go!(self: emit_doctype; to Data), + _ => go!(self: error; force_quirks; to BogusDoctype), + } + } + }, + + //§ after-doctype-public-keyword-state after-doctype-system-keyword-state + states::AfterDoctypeKeyword(kind) => loop { match get_char!(self, input) { + '\t' | '\n' | '\x0C' | ' ' + => go!(self: to BeforeDoctypeIdentifier kind), + '"' => go!(self: error; clear_doctype_id kind; to DoctypeIdentifierDoubleQuoted kind), + '\'' => go!(self: error; clear_doctype_id kind; to DoctypeIdentifierSingleQuoted kind), + '>' => go!(self: error; force_quirks; emit_doctype; to Data), + _ => go!(self: error; force_quirks; to BogusDoctype), + }}, + + //§ before-doctype-public-identifier-state before-doctype-system-identifier-state + states::BeforeDoctypeIdentifier(kind) => loop { match get_char!(self, input) { + '\t' | '\n' | '\x0C' | ' ' => (), + '"' => go!(self: clear_doctype_id kind; to DoctypeIdentifierDoubleQuoted kind), + '\'' => go!(self: clear_doctype_id kind; to DoctypeIdentifierSingleQuoted kind), + '>' => go!(self: error; force_quirks; emit_doctype; to Data), + _ => go!(self: error; force_quirks; to BogusDoctype), + }}, + + //§ doctype-public-identifier-(double-quoted)-state doctype-system-identifier-(double-quoted)-state + states::DoctypeIdentifierDoubleQuoted(kind) => loop { match get_char!(self, input) { + '"' => go!(self: to AfterDoctypeIdentifier kind), + '\0' => go!(self: error; 
push_doctype_id kind '\u{fffd}'), + '>' => go!(self: error; force_quirks; emit_doctype; to Data), + c => go!(self: push_doctype_id kind c), + }}, + + //§ doctype-public-identifier-(single-quoted)-state doctype-system-identifier-(single-quoted)-state + states::DoctypeIdentifierSingleQuoted(kind) => loop { match get_char!(self, input) { + '\'' => go!(self: to AfterDoctypeIdentifier kind), + '\0' => go!(self: error; push_doctype_id kind '\u{fffd}'), + '>' => go!(self: error; force_quirks; emit_doctype; to Data), + c => go!(self: push_doctype_id kind c), + }}, + + //§ after-doctype-public-identifier-state + states::AfterDoctypeIdentifier(Public) => loop { match get_char!(self, input) { + '\t' | '\n' | '\x0C' | ' ' + => go!(self: to BetweenDoctypePublicAndSystemIdentifiers), + '>' => go!(self: emit_doctype; to Data), + '"' => go!(self: error; clear_doctype_id System; to DoctypeIdentifierDoubleQuoted System), + '\'' => go!(self: error; clear_doctype_id System; to DoctypeIdentifierSingleQuoted System), + _ => go!(self: error; force_quirks; to BogusDoctype), + }}, + + //§ after-doctype-system-identifier-state + states::AfterDoctypeIdentifier(System) => loop { match get_char!(self, input) { + '\t' | '\n' | '\x0C' | ' ' => (), + '>' => go!(self: emit_doctype; to Data), + _ => go!(self: error; to BogusDoctype), + }}, + + //§ between-doctype-public-and-system-identifiers-state + states::BetweenDoctypePublicAndSystemIdentifiers => loop { match get_char!(self, input) { + '\t' | '\n' | '\x0C' | ' ' => (), + '>' => go!(self: emit_doctype; to Data), + '"' => go!(self: clear_doctype_id System; to DoctypeIdentifierDoubleQuoted System), + '\'' => go!(self: clear_doctype_id System; to DoctypeIdentifierSingleQuoted System), + _ => go!(self: error; force_quirks; to BogusDoctype), + }}, + + //§ bogus-doctype-state + states::BogusDoctype => loop { match get_char!(self, input) { + '>' => go!(self: emit_doctype; to Data), + _ => (), + }}, + + //§ bogus-comment-state + states::BogusComment => loop { match get_char!(self, input) { + '>' => go!(self: emit_comment; to Data), + '\0' => go!(self: push_comment '\u{fffd}'), + c => go!(self: push_comment c), + }}, + + //§ markup-declaration-open-state + states::MarkupDeclarationOpen => loop { + if eat_exact!(self, input, "--") { + go!(self: clear_comment; to CommentStart); + } else if eat!(self, input, "doctype") { + go!(self: to Doctype); + } else { + if self.sink.adjusted_current_node_present_but_not_in_html_namespace() { + if eat_exact!(self, input, "[CDATA[") { + go!(self: clear_temp; to CdataSection); + } + } + go!(self: error; to BogusComment); + } + }, + + //§ cdata-section-state + states::CdataSection => loop { match get_char!(self, input) { + ']' => go!(self: to CdataSectionBracket), + '\0' => go!(self: emit_temp; emit '\0'), + c => go!(self: push_temp c), + }}, + + //§ cdata-section-bracket + states::CdataSectionBracket => match get_char!(self, input) { + ']' => go!(self: to CdataSectionEnd), + _ => go!(self: push_temp ']'; reconsume CdataSection), + }, + + //§ cdata-section-end + states::CdataSectionEnd => loop { match get_char!(self, input) { + ']' => go!(self: push_temp ']'), + '>' => go!(self: emit_temp; to Data), + _ => go!(self: push_temp ']'; push_temp ']'; reconsume CdataSection), + }}, + + //§ END + } + } + + fn step_char_ref_tokenizer(&mut self, input: &mut BufferQueue) -> ProcessResult { + // FIXME HACK: Take and replace the tokenizer so we don't + // double-mut-borrow self. This is why it's boxed. 
+ let mut tok = self.char_ref_tokenizer.take().unwrap(); + let outcome = tok.step(self, input); + + let progress = match outcome { + char_ref::Done => { + self.process_char_ref(tok.get_result()); + return ProcessResult::Continue; + } + + char_ref::Stuck => ProcessResult::Suspend, + char_ref::Progress => ProcessResult::Continue, + }; + + self.char_ref_tokenizer = Some(tok); + progress + } + + fn process_char_ref(&mut self, char_ref: CharRef) { + let CharRef { mut chars, mut num_chars } = char_ref; + + if num_chars == 0 { + chars[0] = '&'; + num_chars = 1; + } + + for i in 0 .. num_chars { + let c = chars[i as usize]; + match self.state { + states::Data | states::RawData(states::Rcdata) + => go!(self: emit c), + + states::AttributeValue(_) + => go!(self: push_value c), + + _ => panic!("state {:?} should not be reachable in process_char_ref", self.state), + } + } + } + + /// Indicate that we have reached the end of the input. + pub fn end(&mut self) { + // Handle EOF in the char ref sub-tokenizer, if there is one. + // Do this first because it might un-consume stuff. + let mut input = BufferQueue::new(); + match self.char_ref_tokenizer.take() { + None => (), + Some(mut tok) => { + tok.end_of_file(self, &mut input); + self.process_char_ref(tok.get_result()); + } + } + + // Process all remaining buffered input. + // If we're waiting for lookahead, we're not gonna get it. + self.at_eof = true; + assert!(matches!(self.run(&mut input), TokenizerResult::Done)); + assert!(input.is_empty()); + + loop { + match self.eof_step() { + ProcessResult::Continue => (), + ProcessResult::Suspend => break, + ProcessResult::Script(_) => unreachable!(), + } + } + + self.sink.end(); + + if self.opts.profile { + self.dump_profile(); + } + } + + fn dump_profile(&self) { + let mut results: Vec<(states::State, u64)> + = self.state_profile.iter().map(|(s, t)| (*s, *t)).collect(); + results.sort_by(|&(_, x), &(_, y)| y.cmp(&x)); + + let total: u64 = results.iter().map(|&(_, t)| t).fold(0, ::std::ops::Add::add); + println!("\nTokenizer profile, in nanoseconds"); + println!("\n{:12} total in token sink", self.time_in_sink); + println!("\n{:12} total in tokenizer", total); + + for (k, v) in results.into_iter() { + let pct = 100.0 * (v as f64) / (total as f64); + println!("{:12} {:4.1}% {:?}", v, pct, k); + } + } + + fn eof_step(&mut self) -> ProcessResult { + debug!("processing EOF in state {:?}", self.state); + match self.state { + states::Data | states::RawData(Rcdata) | states::RawData(Rawtext) + | states::RawData(ScriptData) | states::Plaintext + => go!(self: eof), + + states::TagName | states::RawData(ScriptDataEscaped(_)) + | states::BeforeAttributeName | states::AttributeName + | states::AfterAttributeName | states::BeforeAttributeValue + | states::AttributeValue(_) | states::AfterAttributeValueQuoted + | states::SelfClosingStartTag | states::ScriptDataEscapedDash(_) + | states::ScriptDataEscapedDashDash(_) + => go!(self: error_eof; to Data), + + states::TagOpen + => go!(self: error_eof; emit '<'; to Data), + + states::EndTagOpen + => go!(self: error_eof; emit '<'; emit '/'; to Data), + + states::RawLessThanSign(ScriptDataEscaped(DoubleEscaped)) + => go!(self: to RawData ScriptDataEscaped DoubleEscaped), + + states::RawLessThanSign(kind) + => go!(self: emit '<'; to RawData kind), + + states::RawEndTagOpen(kind) + => go!(self: emit '<'; emit '/'; to RawData kind), + + states::RawEndTagName(kind) + => go!(self: emit '<'; emit '/'; emit_temp; to RawData kind), + + states::ScriptDataEscapeStart(kind) + => go!(self: to 
RawData ScriptDataEscaped kind), + + states::ScriptDataEscapeStartDash + => go!(self: to RawData ScriptData), + + states::ScriptDataDoubleEscapeEnd + => go!(self: to RawData ScriptDataEscaped DoubleEscaped), + + states::CommentStart | states::CommentStartDash + | states::Comment | states::CommentEndDash + | states::CommentEnd | states::CommentEndBang + => go!(self: error_eof; emit_comment; to Data), + + states::Doctype | states::BeforeDoctypeName + => go!(self: error_eof; create_doctype; force_quirks; emit_doctype; to Data), + + states::DoctypeName | states::AfterDoctypeName | states::AfterDoctypeKeyword(_) + | states::BeforeDoctypeIdentifier(_) | states::DoctypeIdentifierDoubleQuoted(_) + | states::DoctypeIdentifierSingleQuoted(_) | states::AfterDoctypeIdentifier(_) + | states::BetweenDoctypePublicAndSystemIdentifiers + => go!(self: error_eof; force_quirks; emit_doctype; to Data), + + states::BogusDoctype + => go!(self: emit_doctype; to Data), + + states::BogusComment + => go!(self: emit_comment; to Data), + + states::MarkupDeclarationOpen + => go!(self: error; to BogusComment), + + states::CdataSection + => go!(self: emit_temp; error_eof; to Data), + + states::CdataSectionBracket + => go!(self: push_temp ']'; to CdataSection), + + states::CdataSectionEnd + => go!(self: push_temp ']'; push_temp ']'; to CdataSection), + } + } +} + +#[cfg(test)] +#[allow(non_snake_case)] +mod test { + use super::option_push; // private items + use tendril::{StrTendril, SliceExt}; + + use super::{TokenSink, Tokenizer, TokenizerOpts, TokenSinkResult}; + + use super::interface::{Token, TagToken}; + use super::interface::{CharacterTokens, NullCharacterToken, EOFToken, ParseError}; + use super::interface::{TagKind, StartTag, EndTag, Tag}; + + use markup5ever::buffer_queue::{BufferQueue}; + use std::mem::replace; + + use {LocalName}; + + // LinesMatch implements the TokenSink trait. It is used for testing to see + // if current_line is being updated when process_token is called. The lines + // vector is a collection of the line numbers that each token is on. + struct LinesMatch { + tokens: Vec, + current_str: StrTendril, + lines: Vec<(Token, u64)>, + } + + impl LinesMatch { + fn new() -> LinesMatch { + LinesMatch { + tokens: vec!(), + current_str: StrTendril::new(), + lines: vec!(), + } + } + + fn push(&mut self, token: Token, line_number: u64) { + self.finish_str(); + self.lines.push((token, line_number)); + } + + fn finish_str(&mut self) { + if self.current_str.len() > 0 { + let s = replace(&mut self.current_str, StrTendril::new()); + self.tokens.push(CharacterTokens(s)); + } + } + + } + + impl TokenSink for LinesMatch { + + type Handle = (); + + fn process_token(&mut self, token: Token, line_number: u64) -> TokenSinkResult { + + match token { + CharacterTokens(b) => { + self.current_str.push_slice(&b); + } + + NullCharacterToken => { + self.current_str.push_char('\0'); + } + + ParseError(_) => { + panic!("unexpected parse error"); + } + + TagToken(mut t) => { + // The spec seems to indicate that one can emit + // erroneous end tags with attrs, but the test + // cases don't contain them. 
+                match t.kind {
+                    EndTag => {
+                        t.self_closing = false;
+                        t.attrs = vec!();
+                    }
+                    _ => t.attrs.sort_by(|a1, a2| a1.name.cmp(&a2.name)),
+                }
+                self.push(TagToken(t), line_number);
+            }
+
+            EOFToken => (),
+
+            _ => self.push(token, line_number),
+        }
+        TokenSinkResult::Continue
+    }
+}
+
+    // Take in tokens, process them, and return vector with line
+    // numbers that each token is on
+    fn tokenize(input: Vec<StrTendril>, opts: TokenizerOpts) -> Vec<(Token, u64)> {
+        let sink = LinesMatch::new();
+        let mut tok = Tokenizer::new(sink, opts);
+        let mut buffer = BufferQueue::new();
+        for chunk in input.into_iter() {
+            buffer.push_back(chunk);
+            let _ = tok.feed(&mut buffer);
+        }
+        tok.end();
+        tok.sink.lines
+    }
+
+    // Create a tag token
+    fn create_tag(token: StrTendril, tagkind: TagKind) -> Token {
+        let name = LocalName::from(&*token);
+        let token = TagToken(Tag { kind: tagkind,
+                                   name: name,
+                                   self_closing: false,
+                                   attrs: vec!(),
+                               });
+        token
+    }
+
+    #[test]
+    fn push_to_None_gives_singleton() {
+        let mut s: Option<StrTendril> = None;
+        option_push(&mut s, 'x');
+        assert_eq!(s, Some("x".to_tendril()));
+    }
+
+    #[test]
+    fn push_to_empty_appends() {
+        let mut s: Option<StrTendril> = Some(StrTendril::new());
+        option_push(&mut s, 'x');
+        assert_eq!(s, Some("x".to_tendril()));
+    }
+
+    #[test]
+    fn push_to_nonempty_appends() {
+        let mut s: Option<StrTendril> = Some(StrTendril::from_slice("y"));
+        option_push(&mut s, 'x');
+        assert_eq!(s, Some("yx".to_tendril()));
+    }
+
+    #[test]
+    fn check_lines() {
+        let opts = TokenizerOpts {
+            exact_errors: false,
+            discard_bom: true,
+            profile: false,
+            initial_state: None,
+            last_start_tag_name: None,
+        };
+        let vector = vec![StrTendril::from("<a>\n"), StrTendril::from("<b>\n"),
+                          StrTendril::from("</b>\n"), StrTendril::from("</a>\n")];
+        let expected = vec![(create_tag(StrTendril::from("a"), StartTag), 1),
+                            (create_tag(StrTendril::from("b"), StartTag), 2),
+                            (create_tag(StrTendril::from("b"), EndTag), 3),
+                            (create_tag(StrTendril::from("a"), EndTag), 4)];
+        let results = tokenize(vector, opts);
+        assert_eq!(results, expected);
+    }
+
+    #[test]
+    fn check_lines_with_new_line() {
+        let opts = TokenizerOpts {
+            exact_errors: false,
+            discard_bom: true,
+            profile: false,
+            initial_state: None,
+            last_start_tag_name: None,
+        };
+        let vector = vec![StrTendril::from("<a>\r\n"), StrTendril::from("<b>\r\n"),
+                          StrTendril::from("</b>\r\n"), StrTendril::from("</a>\r\n")];
+        let expected = vec![(create_tag(StrTendril::from("a"), StartTag), 1),
+                            (create_tag(StrTendril::from("b"), StartTag), 2),
+                            (create_tag(StrTendril::from("b"), EndTag), 3),
+                            (create_tag(StrTendril::from("a"), EndTag), 4)];
+        let results = tokenize(vector, opts);
+        assert_eq!(results, expected);
+    }
+}
diff --git a/src/vendor/html5ever/src/tokenizer/states.rs b/src/vendor/html5ever/src/tokenizer/states.rs
new file mode 100644
index 0000000000..142b48c7ed
--- /dev/null
+++ b/src/vendor/html5ever/src/tokenizer/states.rs
@@ -0,0 +1,93 @@
+// Copyright 2014-2017 The html5ever Project Developers. See the
+// COPYRIGHT file at the top-level directory of this distribution.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Tokenizer states.
+//!
+//! This is public for use by the tokenizer tests. Other library
+//! users should not have to care about this.
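Editorial note: the two line-counting tests above cover \n and \r\n input. A hedged sketch of a third check in the same style, not part of the vendored file; it relies on the same private helpers (tokenize, create_tag, the opts literal) and so would only compile inside that test module. It exercises the other half of get_preprocessed_char, where a bare \r is rewritten to \n and still advances current_line.

#[test]
fn check_lines_with_bare_cr() {
    let opts = TokenizerOpts {
        exact_errors: false,
        discard_bom: true,
        profile: false,
        initial_state: None,
        last_start_tag_name: None,
    };
    // Each chunk ends in a lone carriage return instead of \n or \r\n.
    let vector = vec![StrTendril::from("<a>\r"), StrTendril::from("<b>\r"),
                      StrTendril::from("</b>\r"), StrTendril::from("</a>\r")];
    // The tags should still be reported on lines 1 through 4.
    let expected = vec![(create_tag(StrTendril::from("a"), StartTag), 1),
                        (create_tag(StrTendril::from("b"), StartTag), 2),
                        (create_tag(StrTendril::from("b"), EndTag), 3),
                        (create_tag(StrTendril::from("a"), EndTag), 4)];
    assert_eq!(tokenize(vector, opts), expected);
}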
+ +pub use self::ScriptEscapeKind::*; +pub use self::DoctypeIdKind::*; +pub use self::RawKind::*; +pub use self::AttrValueKind::*; +pub use self::State::*; + +#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone, Hash, Debug)] +pub enum ScriptEscapeKind { + Escaped, + DoubleEscaped, +} + +#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone, Hash, Debug)] +pub enum DoctypeIdKind { + Public, + System, +} + +#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone, Hash, Debug)] +pub enum RawKind { + Rcdata, + Rawtext, + ScriptData, + ScriptDataEscaped(ScriptEscapeKind), +} + +#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone, Hash, Debug)] +pub enum AttrValueKind { + Unquoted, + SingleQuoted, + DoubleQuoted, +} + +#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone, Hash, Debug)] +pub enum State { + Data, + Plaintext, + TagOpen, + EndTagOpen, + TagName, + RawData(RawKind), + RawLessThanSign(RawKind), + RawEndTagOpen(RawKind), + RawEndTagName(RawKind), + ScriptDataEscapeStart(ScriptEscapeKind), + ScriptDataEscapeStartDash, + ScriptDataEscapedDash(ScriptEscapeKind), + ScriptDataEscapedDashDash(ScriptEscapeKind), + ScriptDataDoubleEscapeEnd, + BeforeAttributeName, + AttributeName, + AfterAttributeName, + BeforeAttributeValue, + AttributeValue(AttrValueKind), + AfterAttributeValueQuoted, + SelfClosingStartTag, + BogusComment, + MarkupDeclarationOpen, + CommentStart, + CommentStartDash, + Comment, + CommentEndDash, + CommentEnd, + CommentEndBang, + Doctype, + BeforeDoctypeName, + DoctypeName, + AfterDoctypeName, + AfterDoctypeKeyword(DoctypeIdKind), + BeforeDoctypeIdentifier(DoctypeIdKind), + DoctypeIdentifierDoubleQuoted(DoctypeIdKind), + DoctypeIdentifierSingleQuoted(DoctypeIdKind), + AfterDoctypeIdentifier(DoctypeIdKind), + BetweenDoctypePublicAndSystemIdentifiers, + BogusDoctype, + CdataSection, + CdataSectionBracket, + CdataSectionEnd, +} diff --git a/src/vendor/html5ever/src/tree_builder/actions.rs b/src/vendor/html5ever/src/tree_builder/actions.rs new file mode 100644 index 0000000000..69ad69147d --- /dev/null +++ b/src/vendor/html5ever/src/tree_builder/actions.rs @@ -0,0 +1,1164 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Helpers for implementing the tree builder rules. +//! +//! Many of these are named within the spec, e.g. "reset the insertion +//! mode appropriately". 
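+//!
+//! They are grouped into the `TreeBuilderActions` trait below and implemented
+//! for `TreeBuilder`, covering operations such as the adoption agency
+//! algorithm and "reconstruct the active formatting elements".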
+ +use {LocalName, Namespace, QualName, ExpandedName}; +use interface::{Attribute, TreeSink, QuirksMode, NodeOrText, AppendNode, AppendText, create_element}; +use tendril::StrTendril; +use tokenizer::{Tag, StartTag, EndTag}; +use tokenizer::states::{RawData, RawKind}; +use tree_builder::types::*; +use tree_builder::tag_sets::*; +use tree_builder::rules::TreeBuilderStep; +use util::str::to_escaped_string; + +use std::ascii::AsciiExt; +use std::{slice, fmt}; +use std::mem::replace; +use std::iter::{Rev, Enumerate}; +use std::borrow::Cow::Borrowed; + +pub use self::PushFlag::*; + +pub struct ActiveFormattingIter<'a, Handle: 'a> { + iter: Rev>>>, +} + +impl<'a, Handle> Iterator for ActiveFormattingIter<'a, Handle> { + type Item = (usize, &'a Handle, &'a Tag); + fn next(&mut self) -> Option<(usize, &'a Handle, &'a Tag)> { + match self.iter.next() { + None | Some((_, &Marker)) => None, + Some((i, &Element(ref h, ref t))) => Some((i, h, t)), + } + } +} + +pub enum PushFlag { + Push, + NoPush, +} + +enum Bookmark { + Replace(Handle), + InsertAfter(Handle), +} + +macro_rules! qualname { + ("", $local:tt) => { + QualName { + prefix: None, + ns: ns!(), + local: local_name!($local), + } + }; + ($prefix: tt $ns:tt $local:tt) => { + QualName { + prefix: Some(namespace_prefix!($prefix)), + ns: ns!($ns), + local: local_name!($local), + } + } +} + +// These go in a trait so that we can control visibility. +pub trait TreeBuilderActions { + fn unexpected(&mut self, thing: &T) -> ProcessResult; + fn assert_named(&mut self, node: &Handle, name: LocalName); + fn clear_active_formatting_to_marker(&mut self); + fn create_formatting_element_for(&mut self, tag: Tag) -> Handle; + fn append_text(&mut self, text: StrTendril) -> ProcessResult; + fn append_comment(&mut self, text: StrTendril) -> ProcessResult; + fn append_comment_to_doc(&mut self, text: StrTendril) -> ProcessResult; + fn append_comment_to_html(&mut self, text: StrTendril) -> ProcessResult; + fn insert_appropriately(&mut self, child: NodeOrText, override_target: Option); + fn insert_phantom(&mut self, name: LocalName) -> Handle; + fn insert_and_pop_element_for(&mut self, tag: Tag) -> Handle; + fn insert_element_for(&mut self, tag: Tag) -> Handle; + fn insert_element(&mut self, push: PushFlag, ns: Namespace, name: LocalName, attrs: Vec) -> Handle; + fn create_root(&mut self, attrs: Vec); + fn close_the_cell(&mut self); + fn reset_insertion_mode(&mut self) -> InsertionMode; + fn process_chars_in_table(&mut self, token: Token) -> ProcessResult; + fn foster_parent_in_body(&mut self, token: Token) -> ProcessResult; + fn is_type_hidden(&self, tag: &Tag) -> bool; + fn close_p_element_in_button_scope(&mut self); + fn close_p_element(&mut self); + fn expect_to_close(&mut self, name: LocalName); + fn pop_until_named(&mut self, name: LocalName) -> usize; + fn pop_until(&mut self, pred: TagSet) -> usize where TagSet: Fn(ExpandedName) -> bool; + fn pop_until_current(&mut self, pred: TagSet) where TagSet: Fn(ExpandedName) -> bool; + fn generate_implied_end_except(&mut self, except: LocalName); + fn generate_implied_end(&mut self, set: TagSet) where TagSet: Fn(ExpandedName) -> bool; + fn in_scope_named(&self, scope: TagSet, name: LocalName) -> bool + where TagSet: Fn(ExpandedName) -> bool; + + fn current_node_named(&self, name: LocalName) -> bool; + fn html_elem_named(&self, elem: &Handle, name: LocalName) -> bool; + fn in_html_elem_named(&self, name: LocalName) -> bool; + fn elem_in(&self, elem: &Handle, set: TagSet) -> bool + where TagSet: Fn(ExpandedName) -> bool; + 
+ fn in_scope(&self, scope: TagSet, pred: Pred) -> bool + where TagSet: Fn(ExpandedName) -> bool, Pred: Fn(Handle) -> bool; + + fn check_body_end(&mut self); + fn body_elem(&self) -> Option<&Handle>; + fn html_elem(&self) -> &Handle; + fn reconstruct_formatting(&mut self); + fn remove_from_stack(&mut self, elem: &Handle); + fn pop(&mut self) -> Handle; + fn push(&mut self, elem: &Handle); + fn adoption_agency(&mut self, subject: LocalName); + fn current_node_in(&self, set: TagSet) -> bool where TagSet: Fn(ExpandedName) -> bool; + fn current_node(&self) -> &Handle; + fn adjusted_current_node(&self) -> &Handle; + fn parse_raw_data(&mut self, tag: Tag, k: RawKind) -> ProcessResult; + fn to_raw_text_mode(&mut self, k: RawKind) -> ProcessResult; + fn stop_parsing(&mut self) -> ProcessResult; + fn set_quirks_mode(&mut self, mode: QuirksMode); + fn active_formatting_end_to_marker<'a>(&'a self) -> ActiveFormattingIter<'a, Handle>; + fn is_marker_or_open(&self, entry: &FormatEntry) -> bool; + fn position_in_active_formatting(&self, element: &Handle) -> Option; + fn process_end_tag_in_body(&mut self, tag: Tag); + fn handle_misnested_a_tags(&mut self, tag: &Tag); + fn is_foreign(&mut self, token: &Token) -> bool; + fn enter_foreign(&mut self, tag: Tag, ns: Namespace) -> ProcessResult; + fn adjust_attributes(&mut self, tag: &mut Tag, mut map: F) + where F: FnMut(LocalName) -> Option; + fn adjust_svg_tag_name(&mut self, tag: &mut Tag); + fn adjust_svg_attributes(&mut self, tag: &mut Tag); + fn adjust_mathml_attributes(&mut self, tag: &mut Tag); + fn adjust_foreign_attributes(&mut self, tag: &mut Tag); + fn foreign_start_tag(&mut self, tag: Tag) -> ProcessResult; + fn unexpected_start_tag_in_foreign_content(&mut self, tag: Tag) -> ProcessResult; +} + +pub fn html_elem(open_elems: &[Handle]) -> &Handle { + &open_elems[0] +} + +#[doc(hidden)] +impl TreeBuilderActions + for super::TreeBuilder + where Handle: Clone, + Sink: TreeSink, +{ + fn unexpected(&mut self, _thing: &T) -> ProcessResult { + self.sink.parse_error(format_if!( + self.opts.exact_errors, + "Unexpected token", + "Unexpected token {} in insertion mode {:?}", to_escaped_string(_thing), self.mode)); + Done + } + + fn assert_named(&mut self, node: &Handle, name: LocalName) { + assert!(self.html_elem_named(&node, name)); + } + + /// Iterate over the active formatting elements (with index in the list) from the end + /// to the last marker, or the beginning if there are no markers. + fn active_formatting_end_to_marker<'a>(&'a self) -> ActiveFormattingIter<'a, Handle> { + ActiveFormattingIter { + iter: self.active_formatting.iter().enumerate().rev(), + } + } + + fn position_in_active_formatting(&self, element: &Handle) -> Option { + self.active_formatting + .iter() + .position(|n| { + match n { + &Marker => false, + &Element(ref handle, _) => self.sink.same_node(handle, element) + } + }) + } + + fn set_quirks_mode(&mut self, mode: QuirksMode) { + self.quirks_mode = mode; + self.sink.set_quirks_mode(mode); + } + + fn stop_parsing(&mut self) -> ProcessResult { + warn!("stop_parsing not implemented, full speed ahead!"); + Done + } + + //§ parsing-elements-that-contain-only-text + // Switch to `Text` insertion mode, save the old mode, and + // switch the tokenizer to a raw-data state. + // The latter only takes effect after the current / next + // `process_token` of a start tag returns! 
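+    // The switch is requested by returning `ToRawData(k)`; the tokenizer only
+    // enters the corresponding `RawData(k)` state once that result has been
+    // handed back, which is why the effect is delayed as noted above.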
+ fn to_raw_text_mode(&mut self, k: RawKind) -> ProcessResult { + self.orig_mode = Some(self.mode); + self.mode = Text; + ToRawData(k) + } + + // The generic raw text / RCDATA parsing algorithm. + fn parse_raw_data(&mut self, tag: Tag, k: RawKind) -> ProcessResult { + self.insert_element_for(tag); + self.to_raw_text_mode(k) + } + //§ END + + fn current_node(&self) -> &Handle { + self.open_elems.last().expect("no current element") + } + + fn adjusted_current_node(&self) -> &Handle { + if self.open_elems.len() == 1 { + if let Some(ctx) = self.context_elem.as_ref() { + return ctx; + } + } + self.current_node() + } + + fn current_node_in(&self, set: TagSet) -> bool + where TagSet: Fn(ExpandedName) -> bool + { + set(self.sink.elem_name(self.current_node())) + } + + // Insert at the "appropriate place for inserting a node". + fn insert_appropriately(&mut self, child: NodeOrText, override_target: Option) { + let insertion_point = self.appropriate_place_for_insertion(override_target); + self.insert_at(insertion_point, child); + } + + fn adoption_agency(&mut self, subject: LocalName) { + // 1. + if self.current_node_named(subject.clone()) { + if self.position_in_active_formatting(self.current_node()).is_none() { + self.pop(); + return; + } + } + + // 2. 3. 4. + for _ in 0..8 { + // 5. + let (fmt_elem_index, fmt_elem, fmt_elem_tag) = unwrap_or_return!( + // We clone the Handle and Tag so they don't cause an immutable borrow of self. + self.active_formatting_end_to_marker() + .filter(|&(_, _, tag)| tag.name == subject) + .next() + .map(|(i, h, t)| (i, h.clone(), t.clone())), + + { + self.process_end_tag_in_body(Tag { + kind: EndTag, + name: subject, + self_closing: false, + attrs: vec!(), + }); + } + ); + + let fmt_elem_stack_index = unwrap_or_return!( + self.open_elems.iter() + .rposition(|n| self.sink.same_node(n, &fmt_elem)), + + { + self.sink.parse_error(Borrowed("Formatting element not open")); + self.active_formatting.remove(fmt_elem_index); + } + ); + + // 7. + if !self.in_scope(default_scope, |n| self.sink.same_node(&n, &fmt_elem)) { + self.sink.parse_error(Borrowed("Formatting element not in scope")); + return; + } + + // 8. + if !self.sink.same_node(self.current_node(), &fmt_elem) { + self.sink.parse_error(Borrowed("Formatting element not current node")); + } + + // 9. + let (furthest_block_index, furthest_block) = unwrap_or_return!( + self.open_elems.iter() + .enumerate() + .skip(fmt_elem_stack_index) + .filter(|&(_, open_element)| self.elem_in(open_element, special_tag)) + .next() + .map(|(i, h)| (i, h.clone())), + + // 10. + { + self.open_elems.truncate(fmt_elem_stack_index); + self.active_formatting.remove(fmt_elem_index); + } + ); + + // 11. + let common_ancestor = self.open_elems[fmt_elem_stack_index - 1].clone(); + + // 12. + let mut bookmark = Bookmark::Replace(fmt_elem.clone()); + + // 13. + let mut node; + let mut node_index = furthest_block_index; + let mut last_node = furthest_block.clone(); + + // 13.1. + let mut inner_counter = 0; + loop { + // 13.2. + inner_counter += 1; + + // 13.3. + node_index -= 1; + node = self.open_elems[node_index].clone(); + + // 13.4. + if self.sink.same_node(&node, &fmt_elem) { + break; + } + + // 13.5. + if inner_counter > 3 { + self.position_in_active_formatting(&node) + .map(|position| self.active_formatting.remove(position)); + self.open_elems.remove(node_index); + continue; + } + + let node_formatting_index = unwrap_or_else!( + self.position_in_active_formatting(&node), + + // 13.6. 
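+                // The node is not in the list of active formatting elements,
+                // so just drop it from the stack of open elements.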
+ { + self.open_elems.remove(node_index); + continue; + } + ); + + // 13.7. + let tag = match self.active_formatting[node_formatting_index] { + Element(ref h, ref t) => { + assert!(self.sink.same_node(h, &node)); + t.clone() + } + Marker => panic!("Found marker during adoption agency"), + }; + // FIXME: Is there a way to avoid cloning the attributes twice here (once on their + // own, once as part of t.clone() above)? + let new_element = create_element( + &mut self.sink, QualName::new(None, ns!(html), tag.name.clone()), + tag.attrs.clone()); + self.open_elems[node_index] = new_element.clone(); + self.active_formatting[node_formatting_index] = Element(new_element.clone(), tag); + node = new_element; + + // 13.8. + if self.sink.same_node(&last_node, &furthest_block) { + bookmark = Bookmark::InsertAfter(node.clone()); + } + + // 13.9. + self.sink.remove_from_parent(&last_node); + self.sink.append(&node, AppendNode(last_node.clone())); + + // 13.10. + last_node = node.clone(); + + // 13.11. + } + + // 14. + self.sink.remove_from_parent(&last_node); + self.insert_appropriately(AppendNode(last_node.clone()), Some(common_ancestor)); + + // 15. + // FIXME: Is there a way to avoid cloning the attributes twice here (once on their own, + // once as part of t.clone() above)? + let new_element = create_element( + &mut self.sink, QualName::new(None, ns!(html), fmt_elem_tag.name.clone()), + fmt_elem_tag.attrs.clone()); + let new_entry = Element(new_element.clone(), fmt_elem_tag); + + // 16. + self.sink.reparent_children(&furthest_block, &new_element); + + // 17. + self.sink.append(&furthest_block, AppendNode(new_element.clone())); + + // 18. + // FIXME: We could probably get rid of the position_in_active_formatting() calls here + // if we had a more clever Bookmark representation. + match bookmark { + Bookmark::Replace(to_replace) => { + let index = self.position_in_active_formatting(&to_replace) + .expect("bookmark not found in active formatting elements"); + self.active_formatting[index] = new_entry; + } + Bookmark::InsertAfter(previous) => { + let index = self.position_in_active_formatting(&previous) + .expect("bookmark not found in active formatting elements") + 1; + self.active_formatting.insert(index, new_entry); + let old_index = self.position_in_active_formatting(&fmt_elem) + .expect("formatting element not found in active formatting elements"); + self.active_formatting.remove(old_index); + } + } + + // 19. + self.remove_from_stack(&fmt_elem); + let new_furthest_block_index = self.open_elems.iter() + .position(|n| self.sink.same_node(n, &furthest_block)) + .expect("furthest block missing from open element stack"); + self.open_elems.insert(new_furthest_block_index + 1, new_element); + + // 20. + } + } + + fn push(&mut self, elem: &Handle) { + self.open_elems.push(elem.clone()); + } + + fn pop(&mut self) -> Handle { + let elem = self.open_elems.pop().expect("no current element"); + self.sink.pop(&elem); + elem + } + + fn remove_from_stack(&mut self, elem: &Handle) { + let sink = &mut self.sink; + let position = self.open_elems + .iter() + .rposition(|x| sink.same_node(elem, &x)); + if let Some(position) = position { + self.open_elems.remove(position); + sink.pop(elem); + } + } + + fn is_marker_or_open(&self, entry: &FormatEntry) -> bool { + match *entry { + Marker => true, + Element(ref node, _) => { + self.open_elems.iter() + .rev() + .any(|n| self.sink.same_node(&n, &node)) + } + } + } + + /// Reconstruct the active formatting elements. 
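+    ///
+    /// Walks backwards from the end of the list until it finds a marker or an
+    /// entry whose element is already on the stack of open elements, then
+    /// re-creates and re-inserts an element for every later entry, rewriting
+    /// each entry to point at its new element.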
+ fn reconstruct_formatting(&mut self) { + { + let last = unwrap_or_return!(self.active_formatting.last(), ()); + if self.is_marker_or_open(last) { + return + } + } + + let mut entry_index = self.active_formatting.len() - 1; + loop { + if entry_index == 0 { + break + } + entry_index -= 1; + if self.is_marker_or_open(&self.active_formatting[entry_index]) { + entry_index += 1; + break + } + } + + loop { + let tag = match self.active_formatting[entry_index] { + Element(_, ref t) => t.clone(), + Marker => panic!("Found marker during formatting element reconstruction"), + }; + + // FIXME: Is there a way to avoid cloning the attributes twice here (once on their own, + // once as part of t.clone() above)? + let new_element = self.insert_element(Push, ns!(html), tag.name.clone(), + tag.attrs.clone()); + self.active_formatting[entry_index] = Element(new_element, tag); + if entry_index == self.active_formatting.len() - 1 { + break + } + entry_index += 1; + } + } + + /// Get the first element on the stack, which will be the element. + fn html_elem(&self) -> &Handle { + &self.open_elems[0] + } + + /// Get the second element on the stack, if it's a HTML body element. + fn body_elem(&self) -> Option<&Handle> { + if self.open_elems.len() <= 1 { + return None; + } + + let node = &self.open_elems[1]; + if self.html_elem_named(node, local_name!("body")) { + Some(node) + } else { + None + } + } + + /// Signal an error depending on the state of the stack of open elements at + /// the end of the body. + fn check_body_end(&mut self) { + declare_tag_set!(body_end_ok = + "dd" "dt" "li" "optgroup" "option" "p" "rp" "rt" "tbody" "td" "tfoot" "th" + "thead" "tr" "body" "html"); + + for elem in self.open_elems.iter() { + let error; + { + let name = self.sink.elem_name(elem); + if body_end_ok(name) { + continue + } + error = format_if!(self.opts.exact_errors, + "Unexpected open tag at end of body", + "Unexpected open tag {:?} at end of body", name); + } + self.sink.parse_error(error); + // FIXME: Do we keep checking after finding one bad tag? + // The spec suggests not. 
+ return; + } + } + + fn in_scope(&self, scope: TagSet, pred: Pred) -> bool + where TagSet: Fn(ExpandedName) -> bool, Pred: Fn(Handle) -> bool + { + for node in self.open_elems.iter().rev() { + if pred(node.clone()) { + return true; + } + if scope(self.sink.elem_name(node)) { + return false; + } + } + + // supposed to be impossible, because is always in scope + + false + } + + fn elem_in(&self, elem: &Handle, set: TagSet) -> bool + where TagSet: Fn(ExpandedName) -> bool + { + set(self.sink.elem_name(elem)) + } + + fn html_elem_named(&self, elem: &Handle, name: LocalName) -> bool { + let expanded = self.sink.elem_name(elem); + *expanded.ns == ns!(html) && *expanded.local == name + } + + fn in_html_elem_named(&self, name: LocalName) -> bool { + self.open_elems.iter().any(|elem| self.html_elem_named(elem, name.clone())) + } + + fn current_node_named(&self, name: LocalName) -> bool { + self.html_elem_named(self.current_node(), name) + } + + fn in_scope_named(&self, scope: TagSet, name: LocalName) -> bool + where TagSet: Fn(ExpandedName) -> bool + { + self.in_scope(scope, |elem| self.html_elem_named(&elem, name.clone())) + } + + //§ closing-elements-that-have-implied-end-tags + fn generate_implied_end(&mut self, set: TagSet) + where TagSet: Fn(ExpandedName) -> bool + { + loop { + { + let elem = unwrap_or_return!(self.open_elems.last(), ()); + let nsname = self.sink.elem_name(elem); + if !set(nsname) { return; } + } + self.pop(); + } + } + + fn generate_implied_end_except(&mut self, except: LocalName) { + self.generate_implied_end(|p| { + if *p.ns == ns!(html) && *p.local == except { + false + } else { + cursory_implied_end(p) + } + }); + } + //§ END + + // Pop elements until the current element is in the set. + fn pop_until_current(&mut self, pred: TagSet) + where TagSet: Fn(ExpandedName) -> bool + { + loop { + if self.current_node_in(|x| pred(x)) { + break; + } + self.open_elems.pop(); + } + } + + // Pop elements until an element from the set has been popped. Returns the + // number of elements popped. + fn pop_until
<P>
(&mut self, pred: P) -> usize + where P: Fn(ExpandedName) -> bool + { + let mut n = 0; + loop { + n += 1; + match self.open_elems.pop() { + None => break, + Some(elem) => if pred(self.sink.elem_name(&elem)) { break; }, + } + } + n + } + + fn pop_until_named(&mut self, name: LocalName) -> usize { + self.pop_until(|p| *p.ns == ns!(html) && *p.local == name) + } + + // Pop elements until one with the specified name has been popped. + // Signal an error if it was not the first one. + fn expect_to_close(&mut self, name: LocalName) { + if self.pop_until_named(name.clone()) != 1 { + self.sink.parse_error(format_if!(self.opts.exact_errors, + "Unexpected open element", + "Unexpected open element while closing {:?}", name)); + } + } + + fn close_p_element(&mut self) { + declare_tag_set!(implied = [cursory_implied_end] - "p"); + self.generate_implied_end(implied); + self.expect_to_close(local_name!("p")); + } + + fn close_p_element_in_button_scope(&mut self) { + if self.in_scope_named(button_scope, local_name!("p")) { + self.close_p_element(); + } + } + + // Check tags for type=hidden + fn is_type_hidden(&self, tag: &Tag) -> bool { + match tag.attrs.iter().find(|&at| at.name.expanded() == expanded_name!("", "type")) { + None => false, + Some(at) => (&*at.value).eq_ignore_ascii_case("hidden"), + } + } + + fn foster_parent_in_body(&mut self, token: Token) -> ProcessResult { + warn!("foster parenting not implemented"); + self.foster_parenting = true; + let res = self.step(InBody, token); + // FIXME: what if res is Reprocess? + self.foster_parenting = false; + res + } + + fn process_chars_in_table(&mut self, token: Token) -> ProcessResult { + declare_tag_set!(table_outer = "table" "tbody" "tfoot" "thead" "tr"); + if self.current_node_in(table_outer) { + assert!(self.pending_table_text.is_empty()); + self.orig_mode = Some(self.mode); + Reprocess(InTableText, token) + } else { + self.sink.parse_error(format_if!(self.opts.exact_errors, + "Unexpected characters in table", + "Unexpected characters {} in table", to_escaped_string(&token))); + self.foster_parent_in_body(token) + } + } + + // https://html.spec.whatwg.org/multipage/syntax.html#reset-the-insertion-mode-appropriately + fn reset_insertion_mode(&mut self) -> InsertionMode { + for (i, mut node) in self.open_elems.iter().enumerate().rev() { + let last = i == 0usize; + if let (true, Some(ctx)) = (last, self.context_elem.as_ref()) { + node = ctx; + } + let name = match self.sink.elem_name(node) { + ExpandedName { ns: &ns!(html), local } => local, + _ => continue, + }; + match *name { + local_name!("select") => { + for ancestor in self.open_elems[0..i].iter().rev() { + if self.html_elem_named(ancestor, local_name!("template")) { + return InSelect; + } else if self.html_elem_named(ancestor, local_name!("table")) { + return InSelectInTable; + } + } + return InSelect; + }, + local_name!("td") | local_name!("th") => if !last { return InCell; }, + local_name!("tr") => return InRow, + local_name!("tbody") | local_name!("thead") | local_name!("tfoot") => return InTableBody, + local_name!("caption") => return InCaption, + local_name!("colgroup") => return InColumnGroup, + local_name!("table") => return InTable, + local_name!("template") => return *self.template_modes.last().unwrap(), + local_name!("head") => if !last { return InHead }, + local_name!("body") => return InBody, + local_name!("frameset") => return InFrameset, + local_name!("html") => match self.head_elem { + None => return BeforeHead, + Some(_) => return AfterHead, + }, + + _ => (), + } + } + InBody 
+ } + + fn close_the_cell(&mut self) { + self.generate_implied_end(cursory_implied_end); + if self.pop_until(td_th) != 1 { + self.sink.parse_error(Borrowed("expected to close or with cell")); + } + self.clear_active_formatting_to_marker(); + } + + fn append_text(&mut self, text: StrTendril) -> ProcessResult { + self.insert_appropriately(AppendText(text), None); + Done + } + + fn append_comment(&mut self, text: StrTendril) -> ProcessResult { + let comment = self.sink.create_comment(text); + self.insert_appropriately(AppendNode(comment), None); + Done + } + + fn append_comment_to_doc(&mut self, text: StrTendril) -> ProcessResult { + let comment = self.sink.create_comment(text); + self.sink.append(&self.doc_handle, AppendNode(comment)); + Done + } + + fn append_comment_to_html(&mut self, text: StrTendril) -> ProcessResult { + let target = html_elem(&self.open_elems); + let comment = self.sink.create_comment(text); + self.sink.append(target, AppendNode(comment)); + Done + } + + //§ creating-and-inserting-nodes + fn create_root(&mut self, attrs: Vec) { + let elem = create_element( + &mut self.sink, QualName::new(None, ns!(html), local_name!("html")), + attrs); + self.push(&elem); + self.sink.append(&self.doc_handle, AppendNode(elem)); + // FIXME: application cache selection algorithm + } + + // https://html.spec.whatwg.org/multipage/#create-an-element-for-the-token + fn insert_element(&mut self, push: PushFlag, ns: Namespace, name: LocalName, attrs: Vec) + -> Handle { + declare_tag_set!(form_associatable = + "button" "fieldset" "input" "object" + "output" "select" "textarea" "img"); + + declare_tag_set!(listed = [form_associatable] - "img"); + + // Step 7. + let qname = QualName::new(None, ns, name); + let elem = create_element(&mut self.sink, qname.clone(), attrs.clone()); + + let insertion_point = self.appropriate_place_for_insertion(None); + let tree_node = match insertion_point { + LastChild(ref p) | + BeforeSibling(ref p) => p.clone() + }; + + // Step 12. + if form_associatable(qname.expanded()) && + self.form_elem.is_some() && + !self.in_html_elem_named(local_name!("template")) && + !(listed(qname.expanded()) && + attrs.iter().any(|a| a.name.expanded() == expanded_name!("", "form"))) { + + let form = self.form_elem.as_ref().unwrap().clone(); + if self.sink.same_tree(&tree_node, &form) { + self.sink.associate_with_form(&elem, &form) + } + } + + self.insert_at(insertion_point, AppendNode(elem.clone())); + + match push { + Push => self.push(&elem), + NoPush => (), + } + // FIXME: Remove from the stack if we can't append? + elem + } + + fn insert_element_for(&mut self, tag: Tag) -> Handle { + self.insert_element(Push, ns!(html), tag.name, tag.attrs) + } + + fn insert_and_pop_element_for(&mut self, tag: Tag) -> Handle { + self.insert_element(NoPush, ns!(html), tag.name, tag.attrs) + } + + fn insert_phantom(&mut self, name: LocalName) -> Handle { + self.insert_element(Push, ns!(html), name, vec!()) + } + //§ END + + fn create_formatting_element_for(&mut self, tag: Tag) -> Handle { + // FIXME: This really wants unit tests. 
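+        // "Noah's Ark clause": if three entries after the last marker already
+        // match this tag (same name and attributes, ignoring order), remove
+        // the earliest of them before pushing the new element.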
+ let mut first_match = None; + let mut matches = 0usize; + for (i, _, old_tag) in self.active_formatting_end_to_marker() { + if tag.equiv_modulo_attr_order(old_tag) { + first_match = Some(i); + matches += 1; + } + } + + if matches >= 3 { + self.active_formatting.remove(first_match.expect("matches with no index")); + } + + let elem = self.insert_element(Push, ns!(html), tag.name.clone(), tag.attrs.clone()); + self.active_formatting.push(Element(elem.clone(), tag)); + elem + } + + fn clear_active_formatting_to_marker(&mut self) { + loop { + match self.active_formatting.pop() { + None | Some(Marker) => break, + _ => (), + } + } + } + + fn process_end_tag_in_body(&mut self, tag: Tag) { + // Look back for a matching open element. + let mut match_idx = None; + for (i, elem) in self.open_elems.iter().enumerate().rev() { + if self.html_elem_named(elem, tag.name.clone()) { + match_idx = Some(i); + break; + } + + if self.elem_in(elem, special_tag) { + self.sink.parse_error(Borrowed("Found special tag while closing generic tag")); + return; + } + } + + // Can't use unwrap_or_return!() due to rust-lang/rust#16617. + let match_idx = match match_idx { + None => { + // I believe this is impossible, because the root + // element is in special_tag. + self.unexpected(&tag); + return; + } + Some(x) => x, + }; + + self.generate_implied_end_except(tag.name.clone()); + + if match_idx != self.open_elems.len() - 1 { + // mis-nested tags + self.unexpected(&tag); + } + self.open_elems.truncate(match_idx); + } + + fn handle_misnested_a_tags(&mut self, tag: &Tag) { + let node = unwrap_or_return!( + self.active_formatting_end_to_marker() + .filter(|&(_, n, _)| self.html_elem_named(n, local_name!("a"))) + .next() + .map(|(_, n, _)| n.clone()), + + () + ); + + self.unexpected(tag); + self.adoption_agency(local_name!("a")); + self.position_in_active_formatting(&node) + .map(|index| self.active_formatting.remove(index)); + self.remove_from_stack(&node); + } + + //§ tree-construction + fn is_foreign(&mut self, token: &Token) -> bool { + if let EOFToken = *token { + return false; + } + + if self.open_elems.len() == 0 { + return false; + } + + let name = self.sink.elem_name(self.adjusted_current_node()); + if let ns!(html) = *name.ns { + return false; + } + + if mathml_text_integration_point(name) { + match *token { + CharacterTokens(..) | NullCharacterToken => return false, + TagToken(Tag { kind: StartTag, ref name, .. }) + if !matches!(*name, local_name!("mglyph") | local_name!("malignmark")) => return false, + _ => (), + } + } + + if svg_html_integration_point(name) { + match *token { + CharacterTokens(..) | NullCharacterToken => return false, + TagToken(Tag { kind: StartTag, .. }) => return false, + _ => (), + } + } + + if let expanded_name!(mathml "annotation-xml") = name { + match *token { + TagToken(Tag { kind: StartTag, name: local_name!("svg"), .. }) => return false, + CharacterTokens(..) | NullCharacterToken | + TagToken(Tag { kind: StartTag, .. 
}) => { + return !self.sink.is_mathml_annotation_xml_integration_point( + self.adjusted_current_node()) + } + _ => {} + }; + } + + true + } + //§ END + + fn enter_foreign(&mut self, mut tag: Tag, ns: Namespace) -> ProcessResult { + match ns { + ns!(mathml) => self.adjust_mathml_attributes(&mut tag), + ns!(svg) => self.adjust_svg_attributes(&mut tag), + _ => (), + } + self.adjust_foreign_attributes(&mut tag); + + if tag.self_closing { + self.insert_element(NoPush, ns, tag.name, tag.attrs); + DoneAckSelfClosing + } else { + self.insert_element(Push, ns, tag.name, tag.attrs); + Done + } + } + + fn adjust_svg_tag_name(&mut self, tag: &mut Tag) { + let Tag { ref mut name, .. } = *tag; + match *name { + local_name!("altglyph") => *name = local_name!("altGlyph"), + local_name!("altglyphdef") => *name = local_name!("altGlyphDef"), + local_name!("altglyphitem") => *name = local_name!("altGlyphItem"), + local_name!("animatecolor") => *name = local_name!("animateColor"), + local_name!("animatemotion") => *name = local_name!("animateMotion"), + local_name!("animatetransform") => *name = local_name!("animateTransform"), + local_name!("clippath") => *name = local_name!("clipPath"), + local_name!("feblend") => *name = local_name!("feBlend"), + local_name!("fecolormatrix") => *name = local_name!("feColorMatrix"), + local_name!("fecomponenttransfer") => *name = local_name!("feComponentTransfer"), + local_name!("fecomposite") => *name = local_name!("feComposite"), + local_name!("feconvolvematrix") => *name = local_name!("feConvolveMatrix"), + local_name!("fediffuselighting") => *name = local_name!("feDiffuseLighting"), + local_name!("fedisplacementmap") => *name = local_name!("feDisplacementMap"), + local_name!("fedistantlight") => *name = local_name!("feDistantLight"), + local_name!("fedropshadow") => *name = local_name!("feDropShadow"), + local_name!("feflood") => *name = local_name!("feFlood"), + local_name!("fefunca") => *name = local_name!("feFuncA"), + local_name!("fefuncb") => *name = local_name!("feFuncB"), + local_name!("fefuncg") => *name = local_name!("feFuncG"), + local_name!("fefuncr") => *name = local_name!("feFuncR"), + local_name!("fegaussianblur") => *name = local_name!("feGaussianBlur"), + local_name!("feimage") => *name = local_name!("feImage"), + local_name!("femerge") => *name = local_name!("feMerge"), + local_name!("femergenode") => *name = local_name!("feMergeNode"), + local_name!("femorphology") => *name = local_name!("feMorphology"), + local_name!("feoffset") => *name = local_name!("feOffset"), + local_name!("fepointlight") => *name = local_name!("fePointLight"), + local_name!("fespecularlighting") => *name = local_name!("feSpecularLighting"), + local_name!("fespotlight") => *name = local_name!("feSpotLight"), + local_name!("fetile") => *name = local_name!("feTile"), + local_name!("feturbulence") => *name = local_name!("feTurbulence"), + local_name!("foreignobject") => *name = local_name!("foreignObject"), + local_name!("glyphref") => *name = local_name!("glyphRef"), + local_name!("lineargradient") => *name = local_name!("linearGradient"), + local_name!("radialgradient") => *name = local_name!("radialGradient"), + local_name!("textpath") => *name = local_name!("textPath"), + _ => (), + } + } + + fn adjust_attributes(&mut self, tag: &mut Tag, mut map: F) + where F: FnMut(LocalName) -> Option, + { + for &mut Attribute { ref mut name, .. 
} in &mut tag.attrs { + if let Some(replacement) = map(name.local.clone()) { + *name = replacement; + } + } + } + + fn adjust_svg_attributes(&mut self, tag: &mut Tag) { + self.adjust_attributes(tag, |k| match k { + local_name!("attributename") => Some(qualname!("", "attributeName")), + local_name!("attributetype") => Some(qualname!("", "attributeType")), + local_name!("basefrequency") => Some(qualname!("", "baseFrequency")), + local_name!("baseprofile") => Some(qualname!("", "baseProfile")), + local_name!("calcmode") => Some(qualname!("", "calcMode")), + local_name!("clippathunits") => Some(qualname!("", "clipPathUnits")), + local_name!("diffuseconstant") => Some(qualname!("", "diffuseConstant")), + local_name!("edgemode") => Some(qualname!("", "edgeMode")), + local_name!("filterunits") => Some(qualname!("", "filterUnits")), + local_name!("glyphref") => Some(qualname!("", "glyphRef")), + local_name!("gradienttransform") => Some(qualname!("", "gradientTransform")), + local_name!("gradientunits") => Some(qualname!("", "gradientUnits")), + local_name!("kernelmatrix") => Some(qualname!("", "kernelMatrix")), + local_name!("kernelunitlength") => Some(qualname!("", "kernelUnitLength")), + local_name!("keypoints") => Some(qualname!("", "keyPoints")), + local_name!("keysplines") => Some(qualname!("", "keySplines")), + local_name!("keytimes") => Some(qualname!("", "keyTimes")), + local_name!("lengthadjust") => Some(qualname!("", "lengthAdjust")), + local_name!("limitingconeangle") => Some(qualname!("", "limitingConeAngle")), + local_name!("markerheight") => Some(qualname!("", "markerHeight")), + local_name!("markerunits") => Some(qualname!("", "markerUnits")), + local_name!("markerwidth") => Some(qualname!("", "markerWidth")), + local_name!("maskcontentunits") => Some(qualname!("", "maskContentUnits")), + local_name!("maskunits") => Some(qualname!("", "maskUnits")), + local_name!("numoctaves") => Some(qualname!("", "numOctaves")), + local_name!("pathlength") => Some(qualname!("", "pathLength")), + local_name!("patterncontentunits") => Some(qualname!("", "patternContentUnits")), + local_name!("patterntransform") => Some(qualname!("", "patternTransform")), + local_name!("patternunits") => Some(qualname!("", "patternUnits")), + local_name!("pointsatx") => Some(qualname!("", "pointsAtX")), + local_name!("pointsaty") => Some(qualname!("", "pointsAtY")), + local_name!("pointsatz") => Some(qualname!("", "pointsAtZ")), + local_name!("preservealpha") => Some(qualname!("", "preserveAlpha")), + local_name!("preserveaspectratio") => Some(qualname!("", "preserveAspectRatio")), + local_name!("primitiveunits") => Some(qualname!("", "primitiveUnits")), + local_name!("refx") => Some(qualname!("", "refX")), + local_name!("refy") => Some(qualname!("", "refY")), + local_name!("repeatcount") => Some(qualname!("", "repeatCount")), + local_name!("repeatdur") => Some(qualname!("", "repeatDur")), + local_name!("requiredextensions") => Some(qualname!("", "requiredExtensions")), + local_name!("requiredfeatures") => Some(qualname!("", "requiredFeatures")), + local_name!("specularconstant") => Some(qualname!("", "specularConstant")), + local_name!("specularexponent") => Some(qualname!("", "specularExponent")), + local_name!("spreadmethod") => Some(qualname!("", "spreadMethod")), + local_name!("startoffset") => Some(qualname!("", "startOffset")), + local_name!("stddeviation") => Some(qualname!("", "stdDeviation")), + local_name!("stitchtiles") => Some(qualname!("", "stitchTiles")), + local_name!("surfacescale") => 
Some(qualname!("", "surfaceScale")), + local_name!("systemlanguage") => Some(qualname!("", "systemLanguage")), + local_name!("tablevalues") => Some(qualname!("", "tableValues")), + local_name!("targetx") => Some(qualname!("", "targetX")), + local_name!("targety") => Some(qualname!("", "targetY")), + local_name!("textlength") => Some(qualname!("", "textLength")), + local_name!("viewbox") => Some(qualname!("", "viewBox")), + local_name!("viewtarget") => Some(qualname!("", "viewTarget")), + local_name!("xchannelselector") => Some(qualname!("", "xChannelSelector")), + local_name!("ychannelselector") => Some(qualname!("", "yChannelSelector")), + local_name!("zoomandpan") => Some(qualname!("", "zoomAndPan")), + _ => None, + }); + } + + fn adjust_mathml_attributes(&mut self, tag: &mut Tag) { + self.adjust_attributes(tag, |k| match k { + local_name!("definitionurl") => Some(qualname!("", "definitionURL")), + _ => None, + }); + } + + fn adjust_foreign_attributes(&mut self, tag: &mut Tag) { + self.adjust_attributes(tag, |k| match k { + local_name!("xlink:actuate") => Some(qualname!("xlink" xlink "actuate")), + local_name!("xlink:arcrole") => Some(qualname!("xlink" xlink "arcrole")), + local_name!("xlink:href") => Some(qualname!("xlink" xlink "href")), + local_name!("xlink:role") => Some(qualname!("xlink" xlink "role")), + local_name!("xlink:show") => Some(qualname!("xlink" xlink "show")), + local_name!("xlink:title") => Some(qualname!("xlink" xlink "title")), + local_name!("xlink:type") => Some(qualname!("xlink" xlink "type")), + local_name!("xml:base") => Some(qualname!("xml" xml "base")), + local_name!("xml:lang") => Some(qualname!("xml" xml "lang")), + local_name!("xml:space") => Some(qualname!("xml" xml "space")), + local_name!("xmlns") => Some(qualname!("" xmlns "xmlns")), + local_name!("xmlns:xlink") => Some(qualname!("xmlns" xmlns "xlink")), + _ => None, + }); + } + + fn foreign_start_tag(&mut self, mut tag: Tag) -> ProcessResult { + let current_ns = self.sink.elem_name(self.adjusted_current_node()).ns.clone(); + match current_ns { + ns!(mathml) => self.adjust_mathml_attributes(&mut tag), + ns!(svg) => { + self.adjust_svg_tag_name(&mut tag); + self.adjust_svg_attributes(&mut tag); + } + _ => (), + } + self.adjust_foreign_attributes(&mut tag); + if tag.self_closing { + // FIXME(#118): in SVG + + tag @ => { + let mut first = true; + let mut stack_idx = self.open_elems.len() - 1; + loop { + if stack_idx == 0 { + return Done; + } + + let html; + let eq; + { + let node_name = self.sink.elem_name(&self.open_elems[stack_idx]); + html = *node_name.ns == ns!(html); + eq = node_name.local.eq_ignore_ascii_case(&tag.name); + } + if !first && html { + let mode = self.mode; + return self.step(mode, TagToken(tag)); + } + + if eq { + self.open_elems.truncate(stack_idx); + return Done; + } + + if first { + self.unexpected(&tag); + first = false; + } + stack_idx -= 1; + } + } + + // FIXME: This should be unreachable, but match_token requires a + // catch-all case. + _ => panic!("impossible case in foreign content"), + }) + } +} diff --git a/src/vendor/html5ever/src/tree_builder/tag_sets.rs b/src/vendor/html5ever/src/tree_builder/tag_sets.rs new file mode 100644 index 0000000000..f7ce8dc0fd --- /dev/null +++ b/src/vendor/html5ever/src/tree_builder/tag_sets.rs @@ -0,0 +1,102 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +//! Various sets of HTML tag names, and macros for declaring them. + +use ExpandedName; + +macro_rules! declare_tag_set_impl ( ($param:ident, $b:ident, $supr:ident, $($tag:tt)+) => ( + match $param { + $( expanded_name!(html $tag) => $b, )+ + p => $supr(p), + } +)); + +macro_rules! declare_tag_set_body ( + ($param:ident = [$supr:ident] - $($tag:tt)+) + => ( declare_tag_set_impl!($param, false, $supr, $($tag)+) ); + + ($param:ident = [$supr:ident] + $($tag:tt)+) + => ( declare_tag_set_impl!($param, true, $supr, $($tag)+) ); + + ($param:ident = $($tag:tt)+) + => ( declare_tag_set_impl!($param, true, empty_set, $($tag)+) ); +); + +macro_rules! declare_tag_set ( + (pub $name:ident = $($toks:tt)+) => ( + pub fn $name(p: ::ExpandedName) -> bool { + declare_tag_set_body!(p = $($toks)+) + } + ); + + ($name:ident = $($toks:tt)+) => ( + fn $name(p: ::ExpandedName) -> bool { + declare_tag_set_body!(p = $($toks)+) + } + ); +); + +#[inline(always)] pub fn empty_set(_: ExpandedName) -> bool { false } +#[inline(always)] pub fn full_set(_: ExpandedName) -> bool { true } + +declare_tag_set!(pub html_default_scope = + "applet" "caption" "html" "table" "td" "th" "marquee" "object" "template"); + +#[inline(always)] pub fn default_scope(name: ExpandedName) -> bool { + html_default_scope(name) || + mathml_text_integration_point(name) || + svg_html_integration_point(name) +} + +declare_tag_set!(pub list_item_scope = [default_scope] + "ol" "ul"); +declare_tag_set!(pub button_scope = [default_scope] + "button"); +declare_tag_set!(pub table_scope = "html" "table" "template"); +declare_tag_set!(pub select_scope = [full_set] - "optgroup" "option"); + +declare_tag_set!(pub table_body_context = "tbody" "tfoot" "thead" "template" "html"); +declare_tag_set!(pub table_row_context = "tr" "template" "html"); +declare_tag_set!(pub td_th = "td" "th"); + +declare_tag_set!(pub cursory_implied_end = + "dd" "dt" "li" "option" "optgroup" "p" "rb" "rp" "rt" "rtc"); + +declare_tag_set!(pub thorough_implied_end = [cursory_implied_end] + + "caption" "colgroup" "tbody" "td" "tfoot" "th" "thead" "tr"); + +declare_tag_set!(pub heading_tag = "h1" "h2" "h3" "h4" "h5" "h6"); + +declare_tag_set!(pub special_tag = + "address" "applet" "area" "article" "aside" "base" "basefont" "bgsound" "blockquote" "body" + "br" "button" "caption" "center" "col" "colgroup" "dd" "details" "dir" "div" "dl" "dt" "embed" + "fieldset" "figcaption" "figure" "footer" "form" "frame" "frameset" "h1" "h2" "h3" "h4" "h5" + "h6" "head" "header" "hgroup" "hr" "html" "iframe" "img" "input" "isindex" "li" "link" + "listing" "main" "marquee" "menu" "meta" "nav" "noembed" "noframes" "noscript" + "object" "ol" "p" "param" "plaintext" "pre" "script" "section" "select" "source" "style" + "summary" "table" "tbody" "td" "template" "textarea" "tfoot" "th" "thead" "title" "tr" "track" + "ul" "wbr" "xmp"); +//§ END + +pub fn mathml_text_integration_point(p: ExpandedName) -> bool { + matches!(p, + expanded_name!(mathml "mi") | + expanded_name!(mathml "mo") | + expanded_name!(mathml "mn") | + expanded_name!(mathml "ms") | + expanded_name!(mathml "mtext")) +} + +/// https://html.spec.whatwg.org/multipage/#html-integration-point +pub fn svg_html_integration_point(p: ExpandedName) -> bool { + // annotation-xml are handle in another place + matches!(p, + expanded_name!(svg "foreignObject") | + expanded_name!(svg "desc") | + expanded_name!(svg "title")) +} diff --git 
a/src/vendor/html5ever/src/tree_builder/types.rs b/src/vendor/html5ever/src/tree_builder/types.rs new file mode 100644 index 0000000000..c88afcb731 --- /dev/null +++ b/src/vendor/html5ever/src/tree_builder/types.rs @@ -0,0 +1,90 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Types used within the tree builder code. Not exported to users. + +use tokenizer::Tag; +use tokenizer::states::RawKind; + +use tendril::StrTendril; + +pub use self::InsertionMode::*; +pub use self::SplitStatus::*; +pub use self::Token::*; +pub use self::ProcessResult::*; +pub use self::FormatEntry::*; +pub use self::InsertionPoint::*; + +#[derive(PartialEq, Eq, Copy, Clone, Debug)] +pub enum InsertionMode { + Initial, + BeforeHtml, + BeforeHead, + InHead, + InHeadNoscript, + AfterHead, + InBody, + Text, + InTable, + InTableText, + InCaption, + InColumnGroup, + InTableBody, + InRow, + InCell, + InSelect, + InSelectInTable, + InTemplate, + AfterBody, + InFrameset, + AfterFrameset, + AfterAfterBody, + AfterAfterFrameset, +} + +#[derive(PartialEq, Eq, Copy, Clone, Debug)] +pub enum SplitStatus { + NotSplit, + Whitespace, + NotWhitespace, +} + +/// A subset/refinement of `tokenizer::Token`. Everything else is handled +/// specially at the beginning of `process_token`. +#[derive(PartialEq, Eq, Clone, Debug)] +pub enum Token { + TagToken(Tag), + CommentToken(StrTendril), + CharacterTokens(SplitStatus, StrTendril), + NullCharacterToken, + EOFToken, +} + +pub enum ProcessResult { + Done, + DoneAckSelfClosing, + SplitWhitespace(StrTendril), + Reprocess(InsertionMode, Token), + ReprocessForeign(Token), + Script(Handle), + ToPlaintext, + ToRawData(RawKind), +} + +pub enum FormatEntry { + Element(Handle, Tag), + Marker, +} + +pub enum InsertionPoint { + /// Insert as last child in this parent. + LastChild(Handle), + /// Insert before this following sibling. + BeforeSibling(Handle) +} diff --git a/src/vendor/html5ever/src/util/str.rs b/src/vendor/html5ever/src/util/str.rs new file mode 100644 index 0000000000..140779cf1a --- /dev/null +++ b/src/vendor/html5ever/src/util/str.rs @@ -0,0 +1,58 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::fmt; + +pub fn to_escaped_string(x: &T) -> String { + use std::fmt::Write; + + // FIXME: don't allocate twice + let mut buf = String::new(); + let _ = buf.write_fmt(format_args!("{:?}", x)); + buf.shrink_to_fit(); + buf.chars().flat_map(|c| c.escape_default()).collect() +} + +/// If `c` is an ASCII letter, return the corresponding lowercase +/// letter, otherwise None. +pub fn lower_ascii_letter(c: char) -> Option { + match c { + 'a' ... 'z' => Some(c), + 'A' ... 'Z' => Some((c as u8 - b'A' + b'a') as char), + _ => None + } +} + +/// Is the character an ASCII alphanumeric character? +pub fn is_ascii_alnum(c: char) -> bool { + matches!(c, '0'...'9' | 'a'...'z' | 'A'...'Z') +} + +/// ASCII whitespace characters, as defined by +/// tree construction modes that treat them specially. 
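+/// These are TAB, LF, FF, CR and SPACE.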
+pub fn is_ascii_whitespace(c: char) -> bool { + matches!(c, '\t' | '\r' | '\n' | '\x0C' | ' ') +} + +#[cfg(test)] +#[allow(non_snake_case)] +mod test { + use super::{is_ascii_alnum, lower_ascii_letter}; + + test_eq!(lower_letter_a_is_a, lower_ascii_letter('a'), Some('a')); + test_eq!(lower_letter_A_is_a, lower_ascii_letter('A'), Some('a')); + test_eq!(lower_letter_symbol_is_None, lower_ascii_letter('!'), None); + test_eq!(lower_letter_nonascii_is_None, lower_ascii_letter('\u{a66e}'), None); + + test_eq!(is_alnum_a, is_ascii_alnum('a'), true); + test_eq!(is_alnum_A, is_ascii_alnum('A'), true); + test_eq!(is_alnum_1, is_ascii_alnum('1'), true); + test_eq!(is_not_alnum_symbol, is_ascii_alnum('!'), false); + test_eq!(is_not_alnum_nonascii, is_ascii_alnum('\u{a66e}'), false); +} diff --git a/src/vendor/html5ever/tests/foreach_html5lib_test/mod.rs b/src/vendor/html5ever/tests/foreach_html5lib_test/mod.rs new file mode 100644 index 0000000000..13202d10ff --- /dev/null +++ b/src/vendor/html5ever/tests/foreach_html5lib_test/mod.rs @@ -0,0 +1,41 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::fs; +use std::ffi::OsStr; +use std::path::Path; +use std::ops::FnMut; + +pub fn foreach_html5lib_test( + src_dir: &Path, + subdir: &'static str, + ext: &'static OsStr, + mut mk: Mk) + where Mk: FnMut(&Path, fs::File) +{ + let mut test_dir_path = src_dir.to_path_buf(); + test_dir_path.push("html5lib-tests"); + test_dir_path.push(subdir); + + let maybe_test_files = fs::read_dir(&test_dir_path); + match maybe_test_files { + Ok(test_files) => { + for entry in test_files { + let path = entry.unwrap().path(); + if path.extension() == Some(ext) { + let file = fs::File::open(&path).unwrap(); + mk(&path, file); + } + } + }, + Err(_) => { + panic!("Before launching the tests, please run this command:\n\n\tgit submodule update --init\n\nto retrieve an html5lib-tests snapshot."); + } + } +} diff --git a/src/vendor/html5ever/tests/serializer.rs b/src/vendor/html5ever/tests/serializer.rs new file mode 100644 index 0000000000..338042d884 --- /dev/null +++ b/src/vendor/html5ever/tests/serializer.rs @@ -0,0 +1,107 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#[macro_use] extern crate html5ever; + +use std::default::Default; + +use html5ever::{parse_fragment, parse_document, serialize, QualName}; +use html5ever::driver::ParseOpts; +use html5ever::rcdom::RcDom; +use html5ever::tendril::{StrTendril, SliceExt, TendrilSink}; + +fn parse_and_serialize(input: StrTendril) -> StrTendril { + let dom = parse_fragment( + RcDom::default(), ParseOpts::default(), + QualName::new(None, ns!(html), local_name!("body")), vec![], + ).one(input); + let inner = &dom.document.children.borrow()[0]; + + let mut result = vec![]; + serialize(&mut result, inner, Default::default()).unwrap(); + StrTendril::try_from_byte_slice(&result).unwrap() +} + +macro_rules! 
test { + ($name:ident, $input:expr, $output:expr) => { + #[test] + fn $name() { + assert_eq!($output, &*parse_and_serialize($input.to_tendril())); + } + }; + + // Shorthand for $output = $input + ($name:ident, $input:expr) => { + test!($name, $input, $input); + }; +} + +test!(empty, r#""#); +test!(smoke_test, r#"

Hello, World!

"#); + +test!(misnest, r#"

Hello!

, World!"#, + r#"

Hello!

, World!"#); + +test!(attr_literal, r#""#); +test!(attr_escape_amp, r#""#); +test!(attr_escape_amp_2, r#""#, r#""#); +test!(attr_escape_nbsp, "", r#""#); +test!(attr_escape_quot, r#""#, r#""#); +test!(attr_escape_several, r#""#, + r#""#); + +test!(text_literal, r#"

"'"

"#); +test!(text_escape_amp, r#"

&

"#); +test!(text_escape_amp_2, r#"

&

"#, r#"

&

"#); +test!(text_escape_nbsp, "

x\u{a0}y

", r#"

x y

"#); +test!(text_escape_lt, r#"

<

"#); +test!(text_escape_gt, r#"

>

"#); +test!(text_escape_gt2, r#"

>

"#, r#"

>

"#); + +test!(script_literal, r#""#); +test!(style_literal, r#""#); +test!(xmp_literal, r#"(x & 1) < 2; y > "foo" + 'bar'"#); +test!(iframe_literal, r#""#); +test!(noembed_literal, r#"(x & 1) < 2; y > "foo" + 'bar'"#); +test!(noframes_literal, r#"(x & 1) < 2; y > "foo" + 'bar'"#); + +test!(pre_lf_0, "
foo bar
"); +test!(pre_lf_1, "
\nfoo bar
", "
foo bar
"); +test!(pre_lf_2, "
\n\nfoo bar
"); + +test!(textarea_lf_0, ""); +test!(textarea_lf_1, "", ""); +test!(textarea_lf_2, ""); + +test!(listing_lf_0, "foo bar"); +test!(listing_lf_1, "\nfoo bar", "foo bar"); +test!(listing_lf_2, "\n\nfoo bar"); + +test!(comment_1, r#"

hi

"#); +test!(comment_2, r#"

hi

"#); +test!(comment_3, r#"

hi

"#); +test!(comment_4, r#"

hi

"#); + +// FIXME: test serialization of qualified tag/attribute names that can't be +// parsed from HTML + +test!(attr_ns_1, r#""#); +test!(attr_ns_2, r#""#); +test!(attr_ns_3, r#""#); +test!(attr_ns_4, r#""#); + +#[test] +fn doctype() { + let dom = parse_document( + RcDom::default(), ParseOpts::default()).one(""); + dom.document.children.borrow_mut().truncate(1); // Remove + let mut result = vec![]; + serialize(&mut result, &dom.document, Default::default()).unwrap(); + assert_eq!(String::from_utf8(result).unwrap(), ""); +} diff --git a/src/vendor/html5ever/tests/tokenizer.rs b/src/vendor/html5ever/tests/tokenizer.rs new file mode 100644 index 0000000000..cfe99dd12d --- /dev/null +++ b/src/vendor/html5ever/tests/tokenizer.rs @@ -0,0 +1,426 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +extern crate rustc_serialize; +extern crate test; +#[macro_use] extern crate html5ever; + +mod foreach_html5lib_test; +use foreach_html5lib_test::foreach_html5lib_test; + +use std::{char, env}; +use std::ffi::OsStr; +use std::mem::replace; +use std::default::Default; +use std::path::Path; +use test::{TestDesc, TestDescAndFn, DynTestName, DynTestFn}; +use test::ShouldPanic::No; +use rustc_serialize::json::Json; +use std::collections::BTreeMap; +use std::borrow::Cow::Borrowed; + +use html5ever::{LocalName, QualName}; +use html5ever::tokenizer::{Doctype, StartTag, EndTag, Tag}; +use html5ever::tokenizer::{Token, DoctypeToken, TagToken, CommentToken}; +use html5ever::tokenizer::{CharacterTokens, NullCharacterToken, EOFToken, ParseError}; +use html5ever::tokenizer::{TokenSink, Tokenizer, TokenizerOpts, TokenSinkResult}; +use html5ever::tokenizer::{BufferQueue}; +use html5ever::tokenizer::states::{Plaintext, RawData, Rcdata, Rawtext}; +use html5ever::tendril::*; +use html5ever::{Attribute}; + + +// Return all ways of splitting the string into at most n +// possibly-empty pieces. +fn splits(s: &str, n: usize) -> Vec> { + if n == 1 { + return vec!(vec!(s.to_tendril())); + } + + let mut points: Vec = s.char_indices().map(|(n,_)| n).collect(); + points.push(s.len()); + + // do this with iterators? 
+ let mut out = vec!(); + for p in points.into_iter() { + let y = &s[p..]; + for mut x in splits(&s[..p], n-1).into_iter() { + x.push(y.to_tendril()); + out.push(x); + } + } + + out.extend(splits(s, n-1).into_iter()); + out +} + +struct TokenLogger { + tokens: Vec, + current_str: StrTendril, + exact_errors: bool, +} + +impl TokenLogger { + fn new(exact_errors: bool) -> TokenLogger { + TokenLogger { + tokens: vec!(), + current_str: StrTendril::new(), + exact_errors: exact_errors, + } + } + + // Push anything other than character tokens + fn push(&mut self, token: Token) { + self.finish_str(); + self.tokens.push(token); + } + + fn finish_str(&mut self) { + if self.current_str.len() > 0 { + let s = replace(&mut self.current_str, StrTendril::new()); + self.tokens.push(CharacterTokens(s)); + } + } + + fn get_tokens(mut self) -> Vec { + self.finish_str(); + self.tokens + } +} + +impl TokenSink for TokenLogger { + type Handle = (); + + fn process_token(&mut self, token: Token, _line_number: u64) -> TokenSinkResult<()> { + match token { + CharacterTokens(b) => { + self.current_str.push_slice(&b); + } + + NullCharacterToken => { + self.current_str.push_char('\0'); + } + + ParseError(_) => if self.exact_errors { + self.push(ParseError(Borrowed(""))); + }, + + TagToken(mut t) => { + // The spec seems to indicate that one can emit + // erroneous end tags with attrs, but the test + // cases don't contain them. + match t.kind { + EndTag => { + t.self_closing = false; + t.attrs = vec!(); + } + _ => t.attrs.sort_by(|a1, a2| a1.name.cmp(&a2.name)), + } + self.push(TagToken(t)); + } + + EOFToken => (), + + _ => self.push(token), + } + TokenSinkResult::Continue + } +} + +fn tokenize(input: Vec, opts: TokenizerOpts) -> Vec { + let sink = TokenLogger::new(opts.exact_errors); + let mut tok = Tokenizer::new(sink, opts); + let mut buffer = BufferQueue::new(); + for chunk in input.into_iter() { + buffer.push_back(chunk); + let _ = tok.feed(&mut buffer); + } + let _ = tok.feed(&mut buffer); + tok.end(); + tok.sink.get_tokens() +} + +trait JsonExt: Sized { + fn get_str(&self) -> String; + fn get_tendril(&self) -> StrTendril; + fn get_nullable_tendril(&self) -> Option; + fn get_bool(&self) -> bool; + fn get_obj<'t>(&'t self) -> &'t BTreeMap; + fn get_list<'t>(&'t self) -> &'t Vec; + fn find<'t>(&'t self, key: &str) -> &'t Self; +} + +impl JsonExt for Json { + fn get_str(&self) -> String { + match *self { + Json::String(ref s) => s.to_string(), + _ => panic!("Json::get_str: not a String"), + } + } + + fn get_tendril(&self) -> StrTendril { + match *self { + Json::String(ref s) => s.to_tendril(), + _ => panic!("Json::get_tendril: not a String"), + } + } + + fn get_nullable_tendril(&self) -> Option { + match *self { + Json::Null => None, + Json::String(ref s) => Some(s.to_tendril()), + _ => panic!("Json::get_nullable_tendril: not a String"), + } + } + + fn get_bool(&self) -> bool { + match *self { + Json::Boolean(b) => b, + _ => panic!("Json::get_bool: not a Boolean"), + } + } + + fn get_obj<'t>(&'t self) -> &'t BTreeMap { + match *self { + Json::Object(ref m) => &*m, + _ => panic!("Json::get_obj: not an Object"), + } + } + + fn get_list<'t>(&'t self) -> &'t Vec { + match *self { + Json::Array(ref m) => m, + _ => panic!("Json::get_list: not an Array"), + } + } + + fn find<'t>(&'t self, key: &str) -> &'t Json { + self.get_obj().get(&key.to_string()).unwrap() + } +} + +// Parse a JSON object (other than "ParseError") to a token. 
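+// The html5lib-tests format encodes each token as a JSON array whose first
+// element names the kind: ["DOCTYPE", name, public_id, system_id, correctness],
+// ["StartTag", name, {attrs}] with an optional self-closing flag,
+// ["EndTag", name], ["Comment", data] or ["Character", data].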
+fn json_to_token(js: &Json) -> Token { + let parts = js.get_list(); + // Collect refs here so we don't have to use "ref" in all the patterns below. + let args: Vec<&Json> = parts[1..].iter().collect(); + match &*parts[0].get_str() { + "DOCTYPE" => DoctypeToken(Doctype { + name: args[0].get_nullable_tendril(), + public_id: args[1].get_nullable_tendril(), + system_id: args[2].get_nullable_tendril(), + force_quirks: !args[3].get_bool(), + }), + + "StartTag" => TagToken(Tag { + kind: StartTag, + name: LocalName::from(&*args[0].get_str()), + attrs: args[1].get_obj().iter().map(|(k,v)| { + Attribute { + name: QualName::new(None, ns!(), LocalName::from(&**k)), + value: v.get_tendril() + } + }).collect(), + self_closing: match args.get(2) { + Some(b) => b.get_bool(), + None => false, + } + }), + + "EndTag" => TagToken(Tag { + kind: EndTag, + name: LocalName::from(&*args[0].get_str()), + attrs: vec!(), + self_closing: false + }), + + "Comment" => CommentToken(args[0].get_tendril()), + + "Character" => CharacterTokens(args[0].get_tendril()), + + // We don't need to produce NullCharacterToken because + // the TokenLogger will convert them to CharacterTokens. + + _ => panic!("don't understand token {:?}", parts), + } +} + +// Parse the "output" field of the test case into a vector of tokens. +fn json_to_tokens(js: &Json, exact_errors: bool) -> Vec { + // Use a TokenLogger so that we combine character tokens separated + // by an ignored error. + let mut sink = TokenLogger::new(exact_errors); + for tok in js.get_list().iter() { + assert_eq!(match *tok { + Json::String(ref s) + if &s[..] == "ParseError" => sink.process_token(ParseError(Borrowed("")), 0), + _ => sink.process_token(json_to_token(tok), 0), + }, TokenSinkResult::Continue); + } + sink.get_tokens() +} + +// Undo the escaping in "doubleEscaped" tests. +fn unescape(s: &str) -> Option { + let mut out = String::with_capacity(s.len()); + let mut it = s.chars().peekable(); + loop { + match it.next() { + None => return Some(out), + Some('\\') => { + if it.peek() != Some(&'u') { + panic!("can't understand escape"); + } + drop(it.next()); + let hex: String = it.by_ref().take(4).collect(); + match u32::from_str_radix(&hex, 16).ok() + .and_then(char::from_u32) { + // Some of the tests use lone surrogates, but we have no + // way to represent them in the UTF-8 input to our parser. + // Since these can only come from script, we will catch + // them there. + None => return None, + Some(c) => out.push(c), + } + } + Some(c) => out.push(c), + } + } +} + +fn unescape_json(js: &Json) -> Json { + match *js { + // unwrap is OK here because the spec'd *output* of the tokenizer never + // contains a lone surrogate. + Json::String(ref s) => Json::String(unescape(&s).unwrap()), + Json::Array(ref xs) => Json::Array(xs.iter().map(unescape_json).collect()), + Json::Object(ref obj) => { + let mut new_obj = BTreeMap::new(); + for (k,v) in obj.iter() { + new_obj.insert(k.clone(), unescape_json(v)); + } + Json::Object(new_obj) + } + _ => js.clone(), + } +} + +fn mk_test(desc: String, input: String, expect: Json, opts: TokenizerOpts) + -> TestDescAndFn { + TestDescAndFn { + desc: TestDesc { + name: DynTestName(desc), + ignore: false, + should_panic: No, + }, + testfn: DynTestFn(Box::new(move || { + // Split up the input at different points to test incremental tokenization. + let insplits = splits(&input, 3); + for input in insplits.into_iter() { + // Clone 'input' so we have it for the failure message. + // Also clone opts. 
If we don't, we get the wrong + // result but the compiler doesn't catch it! + // Possibly mozilla/rust#12223. + let output = tokenize(input.clone(), opts.clone()); + let expect_toks = json_to_tokens(&expect, opts.exact_errors); + if output != expect_toks { + panic!("\ninput: {:?}\ngot: {:?}\nexpected: {:?}", + input, output, expect); + } + } + })), + } +} + +fn mk_tests(tests: &mut Vec, filename: &str, js: &Json) { + let obj = js.get_obj(); + let mut input = js.find("input").unwrap().get_str(); + let mut expect = js.find("output").unwrap().clone(); + let desc = format!("tok: {}: {}", + filename, js.find("description").unwrap().get_str()); + + // "Double-escaped" tests require additional processing of + // the input and output. + if obj.get(&"doubleEscaped".to_string()).map_or(false, |j| j.get_bool()) { + match unescape(&input) { + None => return, + Some(i) => input = i, + } + expect = unescape_json(&expect); + } + + // Some tests have a last start tag name. + let start_tag = obj.get(&"lastStartTag".to_string()).map(|s| s.get_str()); + + // Some tests want to start in a state other than Data. + let state_overrides = match obj.get(&"initialStates".to_string()) { + Some(&Json::Array(ref xs)) => xs.iter().map(|s| + Some(match &s.get_str()[..] { + "PLAINTEXT state" => Plaintext, + "RAWTEXT state" => RawData(Rawtext), + "RCDATA state" => RawData(Rcdata), + s => panic!("don't know state {}", s), + })).collect(), + None => vec!(None), + _ => panic!("don't understand initialStates value"), + }; + + // Build the tests. + for state in state_overrides.into_iter() { + for &exact_errors in [false, true].iter() { + let mut newdesc = desc.clone(); + match state { + Some(s) => newdesc = format!("{} (in state {:?})", newdesc, s), + None => (), + }; + if exact_errors { + newdesc = format!("{} (exact errors)", newdesc); + } + + tests.push(mk_test(newdesc, input.clone(), expect.clone(), TokenizerOpts { + exact_errors: exact_errors, + initial_state: state, + last_start_tag_name: start_tag.clone(), + + // Not discarding a BOM is what the test suite expects; see + // https://github.com/html5lib/html5lib-tests/issues/2 + discard_bom: false, + + .. Default::default() + })); + } + } +} + +fn tests(src_dir: &Path) -> Vec { + let mut tests = vec!(); + + foreach_html5lib_test(src_dir, "tokenizer", + OsStr::new("test"), |path, mut file| { + let js = Json::from_reader(&mut file).ok().expect("json parse error"); + + match js.get_obj().get(&"tests".to_string()) { + Some(&Json::Array(ref lst)) => { + for test in lst.iter() { + mk_tests(&mut tests, path.file_name().unwrap().to_str().unwrap(), test); + } + } + + // xmlViolation.test doesn't follow this format. + _ => (), + } + }); + + tests +} + +fn main() { + let args: Vec<_> = env::args().collect(); + test::test_main(&args, tests(Path::new(env!("CARGO_MANIFEST_DIR")))); +} diff --git a/src/vendor/html5ever/tests/tree_builder.rs b/src/vendor/html5ever/tests/tree_builder.rs new file mode 100644 index 0000000000..7539b82029 --- /dev/null +++ b/src/vendor/html5ever/tests/tree_builder.rs @@ -0,0 +1,282 @@ +// Copyright 2014-2017 The html5ever Project Developers. See the +// COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +extern crate test; +#[macro_use] extern crate html5ever; + +mod foreach_html5lib_test; +use foreach_html5lib_test::foreach_html5lib_test; + +use std::{fs, io, env}; +use std::io::BufRead; +use std::ffi::OsStr; +use std::iter::repeat; +use std::mem::replace; +use std::default::Default; +use std::path::Path; +use std::collections::{HashSet, HashMap}; +use test::{TestDesc, TestDescAndFn, DynTestName, TestFn}; +use test::ShouldPanic::No; + +use html5ever::{LocalName, QualName}; +use html5ever::{ParseOpts, parse_document, parse_fragment}; +use html5ever::rcdom::{NodeData, Handle, RcDom}; +use html5ever::tendril::{StrTendril, TendrilSink}; + + +fn parse_tests>(mut lines: It) -> Vec> { + let mut tests = vec!(); + let mut test = HashMap::new(); + let mut key: Option = None; + let mut val = String::new(); + + macro_rules! finish_val ( () => ( + match key.take() { + None => (), + Some(key) => { + assert!(test.insert(key, replace(&mut val, String::new())).is_none()); + } + } + )); + + macro_rules! finish_test ( () => ( + if !test.is_empty() { + tests.push(replace(&mut test, HashMap::new())); + } + )); + + loop { + match lines.next() { + None => break, + Some(line) => { + if line.starts_with("#") { + finish_val!(); + if line == "#data" { + finish_test!(); + } + key = Some(line[1..].to_string()); + } else { + val.push_str(&line); + val.push('\n'); + } + } + } + } + + finish_val!(); + finish_test!(); + tests +} + +fn serialize(buf: &mut String, indent: usize, handle: Handle) { + buf.push_str("|"); + buf.push_str(&repeat(" ").take(indent).collect::()); + + let node = handle; + match node.data { + NodeData::Document => panic!("should not reach Document"), + + NodeData::Doctype { ref name, ref public_id, ref system_id } => { + buf.push_str("\n"); + } + + NodeData::Text { ref contents } => { + buf.push_str("\""); + buf.push_str(&contents.borrow()); + buf.push_str("\"\n"); + } + + NodeData::Comment { ref contents } => { + buf.push_str("\n"); + } + + NodeData::Element { ref name, ref attrs, .. } => { + buf.push_str("<"); + match name.ns { + ns!(svg) => buf.push_str("svg "), + ns!(mathml) => buf.push_str("math "), + _ => (), + } + buf.push_str(&*name.local); + buf.push_str(">\n"); + + let mut attrs = attrs.borrow().clone(); + attrs.sort_by(|x, y| x.name.local.cmp(&y.name.local)); + // FIXME: sort by UTF-16 code unit + + for attr in attrs.into_iter() { + buf.push_str("|"); + buf.push_str(&repeat(" ").take(indent+2).collect::()); + match attr.name.ns { + ns!(xlink) => buf.push_str("xlink "), + ns!(xml) => buf.push_str("xml "), + ns!(xmlns) => buf.push_str("xmlns "), + _ => (), + } + buf.push_str(&format!("{}=\"{}\"\n", + attr.name.local, attr.value)); + } + } + + NodeData::ProcessingInstruction { .. } => unreachable!() + } + + for child in node.children.borrow().iter() { + serialize(buf, indent+2, child.clone()); + } + + if let NodeData::Element { template_contents: Some(ref content), .. 
} = node.data { + buf.push_str("|"); + buf.push_str(&repeat(" ").take(indent+2).collect::()); + buf.push_str("content\n"); + for child in content.children.borrow().iter() { + serialize(buf, indent+4, child.clone()); + } + } +} + +fn make_test( + tests: &mut Vec, + ignores: &HashSet, + filename: &str, + idx: usize, + fields: HashMap) { + + let scripting_flags = &[false, true]; + let scripting_flags = if fields.contains_key("script-off") { + &scripting_flags[0..1] + } else if fields.contains_key("script-on") { + &scripting_flags[1..2] + } else { + &scripting_flags[0..2] + }; + let name = format!("tb: {}-{}", filename, idx); + for scripting_enabled in scripting_flags { + let test = make_test_desc_with_scripting_flag( + ignores, &name, &fields, *scripting_enabled); + tests.push(test); + } +} + +fn make_test_desc_with_scripting_flag( + ignores: &HashSet, + name: &str, + fields: &HashMap, + scripting_enabled: bool) + -> TestDescAndFn { + let get_field = |key| { + let field = fields.get(key).expect("missing field"); + field.trim_right_matches('\n').to_string() + }; + + let mut data = fields.get("data").expect("missing data").to_string(); + data.pop(); + let expected = get_field("document"); + let context = fields.get("document-fragment") + .map(|field| context_name(field.trim_right_matches('\n'))); + let ignore = ignores.contains(name); + let mut name = name.to_owned(); + if scripting_enabled { + name.push_str(" (scripting enabled)"); + } else { + name.push_str(" (scripting disabled)"); + }; + let mut opts: ParseOpts = Default::default(); + opts.tree_builder.scripting_enabled = scripting_enabled; + + TestDescAndFn { + desc: TestDesc { + name: DynTestName(name), + ignore: ignore, + should_panic: No, + }, + testfn: TestFn::dyn_test_fn(move || { + // Do this here because Tendril isn't Send. 
+ let data = StrTendril::from_slice(&data); + let mut result = String::new(); + match context { + None => { + let dom = parse_document(RcDom::default(), opts).one(data.clone()); + for child in dom.document.children.borrow().iter() { + serialize(&mut result, 1, child.clone()); + } + }, + Some(ref context) => { + let dom = parse_fragment(RcDom::default(), opts, context.clone(), vec![]) + .one(data.clone()); + // fragment case: serialize children of the html element + // rather than children of the document + let doc = dom.document; + let root = &doc.children.borrow()[0]; + for child in root.children.borrow().iter() { + serialize(&mut result, 1, child.clone()); + } + }, + }; + let len = result.len(); + result.truncate(len - 1); // drop the trailing newline + + if result != expected { + panic!("\ninput: {}\ngot:\n{}\nexpected:\n{}\n", + data, result, expected); + } + }), + } +} + +fn context_name(context: &str) -> QualName { + if context.starts_with("svg ") { + QualName::new(None, ns!(svg), LocalName::from(&context[4..])) + } else if context.starts_with("math ") { + QualName::new(None, ns!(mathml), LocalName::from(&context[5..])) + } else { + QualName::new(None, ns!(html), LocalName::from(context)) + } +} + +fn tests(src_dir: &Path, ignores: &HashSet) -> Vec { + let mut tests = vec!(); + + foreach_html5lib_test(src_dir, "tree-construction", + OsStr::new("dat"), |path, file| { + let buf = io::BufReader::new(file); + let lines = buf.lines() + .map(|res| res.ok().expect("couldn't read")); + let data = parse_tests(lines); + + for (i, test) in data.into_iter().enumerate() { + make_test(&mut tests, ignores, path.file_name().unwrap().to_str().unwrap(), + i, test); + } + }); + + tests +} + +fn main() { + let args: Vec<_> = env::args().collect(); + let src_dir = Path::new(env!("CARGO_MANIFEST_DIR")); + let mut ignores = HashSet::new(); + { + let f = fs::File::open(&src_dir.join("data/test/ignore")).unwrap(); + let r = io::BufReader::new(f); + for ln in r.lines() { + ignores.insert(ln.unwrap().trim_right().to_string()); + } + } + + test::test_main(&args, tests(src_dir, &ignores)); +} diff --git a/src/vendor/itoa/.cargo-checksum.json b/src/vendor/itoa/.cargo-checksum.json index dc9ce97cab..8d60b0c24c 100644 --- a/src/vendor/itoa/.cargo-checksum.json +++ b/src/vendor/itoa/.cargo-checksum.json @@ -1 +1 @@ -{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"a2b867b2e28af9bde20a669a6ff0f366ecc5150b89314cd7ec97ed95bb427547","Cargo.toml":"82b9e862ca8c12656987883e7339d992b770b2a8b23a9cd9ceb5ae0083252687","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"e18259ab3aa7f39a194795bdad8039b3c5fd544f6dd922526c9326c44842b76d","README.md":"f2b713cdc7ade373c4a733c09693cecd288201ec76bde725de65b4ff74530284","benches/bench.rs":"3e7075b70a899ab1e926403856afeb04b34a254b234bbca834f6136a703008a3","performance.png":"a6e70647a44084e65cedaaff3633b0624b37e0f0a84457362c1e078fb56c877d","src/lib.rs":"16169ef9fc6c6a6521daff8fefdfc1b54f4ce145763b9733308d6631dad4d14e","tests/test.rs":"9c7629f758e2833757c15617cd8c1ec2a2fb8437865d05b5d20abb07279d35ea"},"package":"eb2f404fbc66fd9aac13e998248505e7ecb2ad8e44ab6388684c5fb11c6c251c"} \ No newline at end of file 
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"66702e366bf1950c550cf6d367d514bba6afff5fb0600ab387ea554dfe0473bd","Cargo.toml":"f15208d08be5a846e9c39390cec838d3023be8b77b49ccdc1259bd615cb10ea7","Cargo.toml.orig":"4fedd28a0921ef6a816c71ccf45a44679e8589504e3451ead68f8f3641cc11e6","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"e18259ab3aa7f39a194795bdad8039b3c5fd544f6dd922526c9326c44842b76d","README.md":"610fb3fbc82edf79762a85d5ec0b324db34c92b0e44dd6003f06a95aad23339d","benches/bench.rs":"23d604664f083f95220a082a926dc6ca782c1f913d0a70073b4a064aef94750d","src/lib.rs":"b8b5a562a03ed4667ce7de0e0fcc3caa36853bae4f413a690e45654992d023bf","src/udiv128.rs":"92734d7475f1a250a739daf4b69d5e6f7efcc5c9710aebe186d5406c8f2720e2","tests/test.rs":"b64b62c964d6985d1a482abd26ab75609d0673336e483f7755a79ca83e891021"},"package":"8324a32baf01e2ae060e9de58ed0bc2320c9a2833491ee36cd3b4c414de4db8c"} \ No newline at end of file diff --git a/src/vendor/itoa/.travis.yml b/src/vendor/itoa/.travis.yml index 0e4e98974f..6a5a6dba7d 100644 --- a/src/vendor/itoa/.travis.yml +++ b/src/vendor/itoa/.travis.yml @@ -1,6 +1,17 @@ sudo: false - language: rust -rust: - - nightly +matrix: + include: + - rust: stable + - rust: beta + - rust: nightly + env: + - FEATURES="i128" + - BUILD_BENCH="true" + +script: + - cargo build --verbose --features "$FEATURES" + - cargo test --verbose --features "$FEATURES" + - if [ "$BUILD_BENCH" == "true" ]; then cargo bench --verbose --no-run --features "$FEATURES"; fi + diff --git a/src/vendor/itoa/Cargo.toml b/src/vendor/itoa/Cargo.toml index 7312236df3..00b16c4591 100644 --- a/src/vendor/itoa/Cargo.toml +++ b/src/vendor/itoa/Cargo.toml @@ -1,9 +1,26 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + [package] name = "itoa" -version = "0.3.1" +version = "0.3.4" authors = ["David Tolnay "] -license = "MIT/Apache-2.0" +exclude = ["performance.png"] description = "Fast functions for printing integer primitives to an io::Write" -repository = "https://github.com/dtolnay/itoa" documentation = "https://github.com/dtolnay/itoa" +readme = "README.md" categories = ["value-formatting"] +license = "MIT/Apache-2.0" +repository = "https://github.com/dtolnay/itoa" + +[features] +i128 = [] diff --git a/src/vendor/itoa/Cargo.toml.orig b/src/vendor/itoa/Cargo.toml.orig new file mode 100644 index 0000000000..62b8fe69a7 --- /dev/null +++ b/src/vendor/itoa/Cargo.toml.orig @@ -0,0 +1,14 @@ +[package] +name = "itoa" +version = "0.3.4" # remember to update html_root_url +authors = ["David Tolnay "] +license = "MIT/Apache-2.0" +description = "Fast functions for printing integer primitives to an io::Write" +repository = "https://github.com/dtolnay/itoa" +documentation = "https://github.com/dtolnay/itoa" +categories = ["value-formatting"] +readme = "README.md" +exclude = ["performance.png"] + +[features] +i128 = [] diff --git a/src/vendor/itoa/README.md b/src/vendor/itoa/README.md index 230fa5bd36..524957bfca 100644 --- a/src/vendor/itoa/README.md +++ b/src/vendor/itoa/README.md @@ -14,7 +14,7 @@ but avoids the performance penalty of going through See also [`dtoa`](https://github.com/dtolnay/dtoa) for printing floating point primitives. -## Performance +## Performance (lower is better) ![performance](https://raw.githubusercontent.com/dtolnay/itoa/master/performance.png) @@ -41,8 +41,9 @@ fn write(writer: W, value: V) -> io::Result { + ( + $( + $(#[$attr:meta])* + $name:ident($value:expr) + ),* + ) => { mod bench_itoa { use test::{Bencher, black_box}; $( + $(#[$attr])* #[bench] fn $name(b: &mut Bencher) { use itoa; - let mut buf = Vec::with_capacity(20); + let mut buf = Vec::with_capacity(40); b.iter(|| { buf.clear(); @@ -26,11 +36,12 @@ macro_rules! benches { mod bench_fmt { use test::{Bencher, black_box}; $( + $(#[$attr])* #[bench] fn $name(b: &mut Bencher) { use std::io::Write; - let mut buf = Vec::with_capacity(20); + let mut buf = Vec::with_capacity(40); b.iter(|| { buf.clear(); @@ -42,11 +53,16 @@ macro_rules! 
benches {
     }
 }
 
-benches!(
-    bench_0u64(0u64),
-    bench_HALFu64(<u32>::max_value() as u64),
-    bench_MAXu64(<u64>::max_value()),
+benches!{
+    bench_u64_0(0u64),
+    bench_u64_half(<u32>::max_value() as u64),
+    bench_u64_max(<u64>::max_value()),
 
-    bench_0i16(0i16),
-    bench_MINi16(<i16>::min_value()),
-);
+    bench_i16_0(0i16),
+    bench_i16_min(<i16>::min_value()),
+
+    #[cfg(feature = "i128")]
+    bench_u128_0(0u128),
+    #[cfg(feature = "i128")]
+    bench_u128_max(<u128>::max_value())
+}
diff --git a/src/vendor/itoa/performance.png b/src/vendor/itoa/performance.png
deleted file mode 100644
index 1e23b7123d6aa8bf373789ae5340c167bfe278b0..0000000000000000000000000000000000000000
Binary files a/src/vendor/itoa/performance.png and /dev/null differ
z8z!wzk@o1;?bVaCA#hP-yI24@!=s%f+A-}rbqc~X+}^=KL@OXWd$2yo{wQ-I#4iX; zW;B-qE3Ji=q+cpSr1%83YSX3`Ue#pn+H-LqaN2%=Vz2Sn_xdVMHG>}8|nTe=dFsV#={CHz%+Y+L35Kx9TW?__~B0E3# zY7Q&6lQDk1Ba`$2B6sLvTIV?=C{W`VT$OeBWeuzIBof!-Kw?$+&zAw8(4S9@H%~Vv z5d}~4%An$^=+8z3^Hf<`S#a<-vCiB3kD5Mxd7vv4?4`Dq%J5G5Phgq3B17~U8MW(L zpkCjMYmW+X8}st&ZxNfvy(^bg=q?(*#KgqyStXXHLBhQ_tU2BL*}0@7V-X1{w%)d{ z*3FVn~5r|pXPiIk95_J7zb4)fAfA8otf zy$s7Jn?xcLrLV)3j6S`G<*7^6m6cEDJB*<=40L7m&M2W<{-?4oRYN(g7Liq~+lnHh zx=fqBY}s+)c2%a?a$cOQW*xK7D#M5WQ(|92);fns&Vl=SC2d0jWfQX(Zj2>6fWuKM zE##{S2iPtqoh9CtSX$QtH$OxVVx1ywVitNf)NHb-W@>WUYKAwoYTY^`Atw5u>vVS} zJN1o;;&FXa)TU2kmB*ntU6&tCvKctnM(``|+NZGo%$y0{-Aus$$;y*c;Ysjv6N zFAePVqUh$aSM6N~BR~(H;-2u#qp}~-9?!=mh^yREzLO(}p1dtQz8L{q=;-)i*-2k> z43;yP*3q(3=TYqL%*4?1xo zfoY1bU%sSibZGqixs(~LWR|@_LRQDdoZ#vo>Mk!I$&Lp4J9_{cKH|3C+>5;3#jiyf zP)HM`#!wC5`-s7VXL73vM2qa~V)4aaePL<$UcIWm^`t9;!DiU0z_nUCVBCtU5-yQF zLwvzhXAbVAE+uO1H8eFdN?L7^kyP1%C^U(_N=wUvVKn#rFO)Ny^O-agroLI&NwDcS zNO{R5n-r-VOIB6X)E0M87+J{tT{wk-Lz7>l7KGtK0(?Iq^9(VA2rbGa2g7uNMJ;`28RcLc#J0k%*?M~RFR%L;%$YNG z;nqYQ?KTqaHjp9apjt%;g?XzVh-X2srNKhwOv@W!QQk+lYzy6}w(WM5?i*>|Oac(I+h$JNpJLna7w3X>6 zk@Qoe7>5v6rD4S%T68RH)0G3DYmR`UPxnEZsoOQi})U>Ai*F#Bh2|3lAgm`&~p^* zq?mMeP9jQ?^-j}W&Y&>TbgMZG?-|9H$kcnlr5V8$-PyX+gCR%F2T`sSzonLyq)8g@ zcq5e1K~iuqf6bJb5wpyQl^-+V~=sZ*6tpEVbaka|m$bS)D%yP12@utg0DilXG z+GY(3XVl;@uQQSv1XFZHnwvg`Q~jYzaPkw?1`Sj!l9XBOE17 zKGQ+9jrbOlvDoSwb(!`hT)BFCoBo9isB~KL(4}dw_vncemtgnd?p6QcL;SaI-!}Hk z1QJ1{HoK{*N%VV%0|O(#F5@jSP1zD>%KfOj^7~hU3U7tKuC`_QNJjq1>j@>Qe-^X$Y=t!dSSFwkHPlf!6o;7d=^IW#LHL$zY^c%=*}J$|$+(@9h5b zLnSX3=F4h^3QI2D)f zMr0c9GfMvcn0Io89!811W|S@Dxm{AWqZwOQzCP_`>kmw#BHwk-&ujhXPmYb{^&r7M z{I^j4o}%TLf3(fu@LFk0S4Z0?+N((S>&nTl#`?w}12G0I{`_aQpa@F{n=vO2c_+}X zr<^)}enDV&Ma7y+T7w4Z1TLo3BCB|Ke5!5v=Ra6OjaBx6)|#kwdtK_?mm5NxQaqkt zIHVvk$vnU>QQir5*KXo=d(rsa zP%cXu6>ND^gW$Hcu)FG~s_^GOeQ|}<*$8hSkgv0eiR;xz2Euw4QU4yIkd>&_5li{ zrm+a4AjLmane0$|?lm4*_(}{MWuCS`^zu~d%IXC9?{ z%{^1d3`mpj+xPwFU!>M*FiX^kCoyu37h#`ODMyYjN&MRM_xFbHYYg5sLPe$KvVW>H zUc`6TqFFuNjA#A*nUWCQ|Ncf#1l2!(iO%}}{N4Y{XW(&sv_xw@RXOC*+Ki5oTweQ( zmRD(TE|>j2ITiFle$HV!X54_%oDSRjxrsf-zoa;pFf!TNcnHf zlZ*wa{qv9h&)@MF{(ti{>iP1+7aboh z|Mx$t=q_W?hp@<+fN08iE!O3FGVE#LxWwGaM_{#W`ft)kwzJ6A_RO{aa$2FqxSJ`R z1Q-MOV`m|UU)IYAn z-=Cn>CPX*+)QE5gM_KY;Tm3*PA@GnM^76%Y)4u~B0K?TKH+UG6aaWJCXf?V!Vm4zv zJ>Tl_p6W5hb{V@mw79Zc59x!G)|G1w!FrXlfe#A{uezLGDTtd*p570?XubqhH$UV{ zBa;`K9ewv|EB5xBWlXu;5`gu@)Jm(7M$ay!y>-=mTU#6V?VG3QJ~2<28aJN?-NDs@ z$rfBwHkh7d;!rdxA3Uv&9Xxmnk^ZY6`z98sgseZaW#=oyo~!0LT4%pci6X;Ku-MCn z)N+Afbeflx@?p!S4NB*i_K^FfT5Qtm_4%?h(IaPrH2&M%Jki2` z9K9Jm?>LLr%8xJaeA{RIL)YH94^4W+o%(i+x_NcV=V$r>d=#yBlbxxA6a=VjwhLIsKSzNUD^F}XU>hL=sNDh)L<3A?9Z;j{8c z=L4T589TDF109SN)=X`bLxs8bzyZhQ&qpUjne97pz=X$aFt?(Oq2c^hP{jX!{=Big zGCE}41H*|G59>~13bBCpsfyLelbd(QxMrYiuga)R0Iv(IHbz$#9QNim!SQ&rDSt4l>sINO?F$$qhOIm0L!c|VLp#1(|nZ4gT zp_5oDfX7S}FFXI_^xx+`N2eFoMPJ$on0Z*x6*eb4qKgaKg}-L3>J+9wwiH6qb7efU zP)h2GPcXBK*24t57Ltt83HPUpZIYI$EqeO#fAwDlw;U~p2F*F{;;4%1EcxK_m5@Y% zr)kU59G@rKE!wyB@3pv^PGy;cZ0F&XZQFigM(K$amsxUDOpF8iNt(|GY?WDaM(6LJ zc6FcU-MC-wDYYgmEZ?Lq%t-M%svW0oe{9s~~+ zt#%FdZ!w;*L-l#^rA)jCY_3m&+}}T4(Obr1%Dod>W^kyBH*UQ2UtQ#YWetVzg#R7J zT4Zb#pAMf2b!?4!?tlIYZ?p1{z}@-l>Oj#i{{6e?G|LqnL2~(zj%7nOthCKgNFTbr zgKXKL4J%*rsQ7npLlfqfAL7ZbE(_Y}(t}nXentSbJn?}{&dd>>G|;&7j<&#^`upb~ zxZO1cp(ALHGWv#>?c2ZqA*w+&$UJcdx+^F|!-uN;VcsOlT(Q}xCrt2b$oytTB)z!L s;lC_i*uGQ{E#j|^Fb@Bp|J}FWb_d@1HxJzRC${U)GSW@ZS+V2)0{QrXZ2$lO diff --git a/src/vendor/itoa/src/lib.rs b/src/vendor/itoa/src/lib.rs index 40e952bf8c..722847f5d3 100644 --- a/src/vendor/itoa/src/lib.rs +++ b/src/vendor/itoa/src/lib.rs @@ -6,6 +6,15 
@@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#![doc(html_root_url = "https://docs.rs/itoa/0.3.4")] + +#![cfg_attr(feature = "i128", feature(i128_type, i128))] + +#![cfg_attr(feature = "cargo-clippy", allow(cast_lossless, unreadable_literal))] + +#[cfg(feature = "i128")] +mod udiv128; + use std::{io, mem, ptr, slice}; #[inline] @@ -17,6 +26,10 @@ pub trait Integer { fn write(self, W) -> io::Result; } +trait IntegerPrivate { + fn write_to(self, buf: &mut [u8; MAX_LEN]) -> &[u8]; +} + const DEC_DIGITS_LUT: &'static[u8] = b"0001020304050607080910111213141516171819\ 2021222324252627282930313233343536373839\ @@ -24,71 +37,83 @@ const DEC_DIGITS_LUT: &'static[u8] = 6061626364656667686970717273747576777879\ 8081828384858687888990919293949596979899"; +const MAX_LEN: usize = 40; // i128::MIN (including minus sign) + // Adaptation of the original implementation at // https://github.com/rust-lang/rust/blob/b8214dc6c6fc20d0a660fb5700dca9ebf51ebe89/src/libcore/fmt/num.rs#L188-L266 macro_rules! impl_Integer { - ($($t:ident),* as $conv_fn:ident) => ($( - impl Integer for $t { - #[allow(unused_comparisons)] - fn write(self, mut wr: W) -> io::Result { - let is_nonnegative = self >= 0; - let mut n = if is_nonnegative { - self as $conv_fn - } else { - try!(wr.write_all(b"-")); - // convert the negative num to positive by summing 1 to it's 2 complement - (!(self as $conv_fn)).wrapping_add(1) - }; - let mut buf: [u8; 20] = unsafe { mem::uninitialized() }; - let mut curr = buf.len() as isize; - let buf_ptr = buf.as_mut_ptr(); - let lut_ptr = DEC_DIGITS_LUT.as_ptr(); - - unsafe { - // eagerly decode 4 characters at a time - if <$t>::max_value() as u64 >= 10000 { - while n >= 10000 { - let rem = (n % 10000) as isize; - n /= 10000; - - let d1 = (rem / 100) << 1; - let d2 = (rem % 100) << 1; - curr -= 4; - ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); - ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2); + ($($t:ident),* as $conv_fn:ident) => {$( + impl Integer for $t { + fn write(self, mut wr: W) -> io::Result { + let mut buf = unsafe { mem::uninitialized() }; + let bytes = self.write_to(&mut buf); + try!(wr.write_all(bytes)); + Ok(bytes.len()) + } + } + + impl IntegerPrivate for $t { + #[allow(unused_comparisons)] + fn write_to(self, buf: &mut [u8; MAX_LEN]) -> &[u8] { + let is_nonnegative = self >= 0; + let mut n = if is_nonnegative { + self as $conv_fn + } else { + // convert the negative num to positive by summing 1 to it's 2 complement + (!(self as $conv_fn)).wrapping_add(1) + }; + let mut curr = buf.len() as isize; + let buf_ptr = buf.as_mut_ptr(); + let lut_ptr = DEC_DIGITS_LUT.as_ptr(); + + unsafe { + // need at least 16 bits for the 4-characters-at-a-time to work. 
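For orientation, a minimal standalone usage sketch of the `itoa::write` API that this hunk reworks, mirroring the crate's own tests further down in this patch; it assumes the vendored itoa 0.3 crate and is not part of the upstream sources:

// Standalone sketch, not part of this patch; assumes the vendored itoa 0.3 API.
extern crate itoa;

fn main() {
    // Write into a growable buffer; `write` returns the number of bytes emitted.
    let mut out = Vec::new();
    let len = itoa::write(&mut out, 128u64).unwrap();
    assert_eq!(&out[..len], b"128");

    // Or into a fixed stack buffer, as the test macro in this patch does.
    // 40 bytes (MAX_LEN above) is enough for i128::MIN including its minus sign.
    let mut buf = [b'\0'; 40];
    let len = itoa::write(&mut buf[..], -12345i32).unwrap();
    assert_eq!(&buf[..len], b"-12345");
}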
+ if mem::size_of::<$t>() >= 2 { + // eagerly decode 4 characters at a time + while n >= 10000 { + let rem = (n % 10000) as isize; + n /= 10000; + + let d1 = (rem / 100) << 1; + let d2 = (rem % 100) << 1; + curr -= 4; + ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); + ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2); + } } - } - // if we reach here numbers are <= 9999, so at most 4 chars long - let mut n = n as isize; // possibly reduce 64bit math + // if we reach here numbers are <= 9999, so at most 4 chars long + let mut n = n as isize; // possibly reduce 64bit math - // decode 2 more chars, if > 2 chars - if n >= 100 { - let d1 = (n % 100) << 1; - n /= 100; - curr -= 2; - ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); - } + // decode 2 more chars, if > 2 chars + if n >= 100 { + let d1 = (n % 100) << 1; + n /= 100; + curr -= 2; + ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); + } - // decode last 1 or 2 chars - if n < 10 { - curr -= 1; - *buf_ptr.offset(curr) = (n as u8) + 48; - } else { - let d1 = n << 1; - curr -= 2; - ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); + // decode last 1 or 2 chars + if n < 10 { + curr -= 1; + *buf_ptr.offset(curr) = (n as u8) + b'0'; + } else { + let d1 = n << 1; + curr -= 2; + ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); + } + + if !is_nonnegative { + curr -= 1; + *buf_ptr.offset(curr) = b'-'; + } } - } - let mut len = buf.len() - curr as usize; - try!(wr.write_all(unsafe { slice::from_raw_parts(buf_ptr.offset(curr), len) })); - if !is_nonnegative { - len += 1; + let len = buf.len() - curr as usize; + unsafe { slice::from_raw_parts(buf_ptr.offset(curr), len) } } - Ok(len) } - })*); + )*}; } impl_Integer!(i8, u8, i16, u16, i32, u32 as u32); @@ -99,3 +124,73 @@ impl_Integer!(isize, usize as u16); impl_Integer!(isize, usize as u32); #[cfg(target_pointer_width = "64")] impl_Integer!(isize, usize as u64); + +#[cfg(all(feature = "i128"))] +macro_rules! impl_Integer128 { + ($($t:ident),*) => {$( + impl Integer for $t { + fn write(self, mut wr: W) -> io::Result { + let mut buf = unsafe { mem::uninitialized() }; + let bytes = self.write_to(&mut buf); + try!(wr.write_all(bytes)); + Ok(bytes.len()) + } + } + + impl IntegerPrivate for $t { + #[allow(unused_comparisons)] + fn write_to(self, buf: &mut [u8; MAX_LEN]) -> &[u8] { + let is_nonnegative = self >= 0; + let n = if is_nonnegative { + self as u128 + } else { + // convert the negative num to positive by summing 1 to it's 2 complement + (!(self as u128)).wrapping_add(1) + }; + let mut curr = buf.len() as isize; + let buf_ptr = buf.as_mut_ptr(); + + unsafe { + // Divide by 10^19 which is the highest power less than 2^64. + let (n, rem) = udiv128::udivmod_1e19(n); + curr -= rem.write_to(buf).len() as isize; + + if n != 0 { + // Memset the base10 leading zeros of rem. + let target = buf.len() as isize - 19; + ptr::write_bytes(buf_ptr.offset(target), b'0', (curr - target) as usize); + curr = target; + + // Divide by 10^19 again. + let (n, rem) = udiv128::udivmod_1e19(n); + let buf2 = buf_ptr.offset(curr - buf.len() as isize) as *mut _; + curr -= rem.write_to(&mut *buf2).len() as isize; + + if n != 0 { + // Memset the leading zeros. + let target = buf.len() as isize - 38; + ptr::write_bytes(buf_ptr.offset(target), b'0', (curr - target) as usize); + curr = target; + + // There is at most one digit left + // because u128::max / 10^19 / 10^19 is 3. 
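As a standalone sanity check of that claim (not part of the patch; it assumes a compiler with built-in u128 support rather than the `i128` feature gate used here), dividing by 10^19 twice does leave a single decimal digit:

fn main() {
    const D: u128 = 10_000_000_000_000_000_000; // 10^19
    let n = u128::max_value(); // 340282366920938463463374607431768211455

    // This is exactly what udivmod_1e19 (added later in this patch) computes,
    // written here with plain u128 arithmetic.
    let (q1, r1) = (n / D, (n % D) as u64);
    let (q2, r2) = (q1 / D, (q1 % D) as u64);

    assert_eq!(r1, 3_374_607_431_768_211_455);
    assert_eq!(r2, 4_028_236_692_093_846_346);
    assert_eq!(q2, 3); // u128::MAX / 10^19 / 10^19 == 3
}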
+ curr -= 1; + *buf_ptr.offset(curr) = (n as u8) + b'0'; + } + } + + if !is_nonnegative { + curr -= 1; + *buf_ptr.offset(curr) = b'-'; + } + + let len = buf.len() - curr as usize; + slice::from_raw_parts(buf_ptr.offset(curr), len) + } + } + } + )*}; +} + +#[cfg(all(feature = "i128"))] +impl_Integer128!(i128, u128); diff --git a/src/vendor/itoa/src/udiv128.rs b/src/vendor/itoa/src/udiv128.rs new file mode 100644 index 0000000000..24233c68c7 --- /dev/null +++ b/src/vendor/itoa/src/udiv128.rs @@ -0,0 +1,62 @@ +// Copyright 2009-2016 compiler-builtins Developers +// +// The compiler-builtins crate is dual licensed under both the University of +// Illinois "BSD-Like" license and the MIT license. As a user of this code you may +// choose to use it under either license. As a contributor, you agree to allow +// your code to be used under both. +// +// Full text of the relevant licenses is found here: +// https://github.com/rust-lang-nursery/compiler-builtins/blob/master/LICENSE.TXT +// +// +// +// The following code is based on Rust’s [compiler-builtins crate] +// (https://github.com/rust-lang-nursery/compiler-builtins) which +// provides runtime functions for the Rust programs. The Rust +// compiler will automatically link your programs against this crate. +// +// We copied the implementation of '__udivmodti4()' which is an intrinsic +// implementing division with remainder for architectures without 128-bit integer support. +// We have done this two reasons, to work around [bad optimization by LLVM] +// (https://github.com/rust-lang/rust/issues/44545) and to allow function +// inlining which doesn’t happen with the intrinsic. + +pub fn udivmod_1e19(n: u128) -> (u128, u64) { + let d = 10_000_000_000_000_000_000_u64; // 10^19 + + let high = (n >> 64) as u64; + if high == 0 { + let low = n as u64; + return ((low / d) as u128, low % d); + } + + let sr = 65 - high.leading_zeros(); + + // 2 <= sr <= 65 + let mut q: u128 = n << (128 - sr); + let mut r: u128 = n >> sr; + let mut carry: u64 = 0; + + // Don't use a range because they may generate references to memcpy in unoptimized code + // + // Loop invariants: r < d; carry is 0 or 1 + let mut i = 0; + while i < sr { + i += 1; + + // r:q = ((r:q) << 1) | carry + r = (r << 1) | (q >> 127); + q = (q << 1) | carry as u128; + + // carry = 0 + // if r >= d { + // r -= d; + // carry = 1; + // } + let s = (d as u128).wrapping_sub(r).wrapping_sub(1) as i128 >> 127; + carry = (s & 1) as u64; + r -= (d as u128) & s as u128; + } + + ((q << 1) | carry as u128, r as u64) +} diff --git a/src/vendor/itoa/tests/test.rs b/src/vendor/itoa/tests/test.rs index 7615c19b8b..a3602932a2 100644 --- a/src/vendor/itoa/tests/test.rs +++ b/src/vendor/itoa/tests/test.rs @@ -1,13 +1,23 @@ +#![cfg_attr(feature = "i128", feature(i128_type, i128))] + +#![cfg_attr(feature = "cargo-clippy", allow(cast_lossless, string_lit_as_bytes))] + #![allow(non_snake_case)] extern crate itoa; macro_rules! test { - ($($name:ident($value:expr, $expected:expr),)*) => { + ( + $( + $(#[$attr:meta])* + $name:ident($value:expr, $expected:expr) + ),* + ) => { $( + $(#[$attr])* #[test] fn $name() { - let mut buf = [b'\0'; 20]; + let mut buf = [b'\0'; 40]; let len = itoa::write(&mut buf[..], $value).unwrap(); assert_eq!(&buf[0..len], $expected.as_bytes()); } @@ -15,11 +25,19 @@ macro_rules! 
test { } } -test!( - test_0u64(0u64, "0"), - test_HALFu64(::max_value() as u64, "4294967295"), - test_MAXu64(::max_value(), "18446744073709551615"), +test!{ + test_u64_0(0u64, "0"), + test_u64_half(::max_value() as u64, "4294967295"), + test_u64_max(::max_value(), "18446744073709551615"), + test_i64_min(::min_value(), "-9223372036854775808"), - test_0i16(0i16, "0"), - test_MINi16(::min_value(), "-32768"), -); + test_i16_0(0i16, "0"), + test_i16_min(::min_value(), "-32768"), + + #[cfg(feature = "i128")] + test_u128_0(0u128, "0"), + #[cfg(feature = "i128")] + test_u128_max(::max_value(), "340282366920938463463374607431768211455"), + #[cfg(feature = "i128")] + test_i128_min(::min_value(), "-170141183460469231731687303715884105728") +} diff --git a/src/vendor/kuchiki/.cargo-checksum.json b/src/vendor/kuchiki/.cargo-checksum.json new file mode 100644 index 0000000000..8aff5fda2e --- /dev/null +++ b/src/vendor/kuchiki/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"a264f8d660cfb857f234139e79b8da7797608656c7e88f9786ecf6e312a745aa",".travis.yml":"96358cfe855b70bc211725a1144973b737196fd39783b6b46f7ac91b1e0be8ac","Cargo.toml":"b87501d59b32727be8ec1c4ab214854d79529e2fe1e2ebf86f985234802e4ef7","Cargo.toml.orig":"78067e148b21602d9360aee8d4dfa592391d631e002ecdc98bebabb2253768c9","README.md":"2600915662f99776522619e7bfa26bcad2f1300cbfd0cce479d7d523590471ec","docs/.nojekyll":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","docs/404.html":"4dec55355aa947d58b06182ba3e92abf2a3b9412fbc2dfa4b64616d6c3b63db8","docs/index.html":"4dec55355aa947d58b06182ba3e92abf2a3b9412fbc2dfa4b64616d6c3b63db8","examples/find_matches.rs":"aae6b5fa119f7e515597ac3a3b2a0ddb5970433691e3540839052bd4ff21d7c9","examples/stack-overflow.rs":"1b22905e878e9461caed92561e7566390cf85ad31982bfe5b845b1bf4d95e855","src/attributes.rs":"1f3b84ef1be9c5177c033a38be2f3f14cc0fef7bb625cc2fbd9cb87d25588982","src/iter.rs":"6e4a1d956d76f7dab06781b3844623405ca86518fc518b7ce8d883ae79bf4d47","src/lib.rs":"1445e6061414049070ffb7975bd878c9998791b99e7349bdae08a30207405d4a","src/move_cell.rs":"b2178a5fe2f3cbd4423d1065dafc8d362ce85b3f1ef9361dadb9591c8954b2cd","src/node_data_ref.rs":"9891761474f305134a0ba84e062505f0c6748b6cb804ce63574a517d2615a1d5","src/parser.rs":"622d620309badac634972b16f01740fd9eb2dfe01178782bbb38b4d3b27a9d4f","src/select.rs":"0595a90d030e30e0b3a4646dccdf5d23d065295ab4c06343594dfb31dd961fda","src/serializer.rs":"5e14efe52e03583b206a8dc93d232f296b18e623cead6805d8cbe28032501c59","src/tests.rs":"855f2d8666f68b445972e5e394a8a00a379eaf5c5a983930e89c0ff743c3015b","src/tree.rs":"83770c14a1fc02097bc63d6ab2e1f8787d9994075d4918bf9a6407f03dda2c40","test_data/foo.html":"c0e67d8138acefda54d4594e426d7a804c5309d0c6da49a6902791aa279307c7"},"package":"ef2ea4f2f7883cd7c6772b06c14abca01a2cc1f75c426cebffcf6b3b925ef9fc"} \ No newline at end of file diff --git a/src/vendor/kuchiki/.cargo-ok b/src/vendor/kuchiki/.cargo-ok new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/vendor/kuchiki/.gitignore b/src/vendor/kuchiki/.gitignore new file mode 100644 index 0000000000..884cb47a2d --- /dev/null +++ b/src/vendor/kuchiki/.gitignore @@ -0,0 +1,3 @@ +target +Cargo.lock +.cargo/config diff --git a/src/vendor/kuchiki/.travis.yml b/src/vendor/kuchiki/.travis.yml new file mode 100644 index 0000000000..017d7c3786 --- /dev/null +++ b/src/vendor/kuchiki/.travis.yml @@ -0,0 +1,6 @@ +sudo: false +language: rust +rust: + - nightly + - beta + - stable diff --git 
a/src/vendor/kuchiki/Cargo.toml b/src/vendor/kuchiki/Cargo.toml new file mode 100644 index 0000000000..2f88fffe58 --- /dev/null +++ b/src/vendor/kuchiki/Cargo.toml @@ -0,0 +1,36 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "kuchiki" +version = "0.5.1" +authors = ["Simon Sapin "] +description = "(朽木) HTML/XML tree manipulation library" +license = "MIT" +repository = "https://github.com/SimonSapin/kuchiki" + +[lib] +name = "kuchiki" +doctest = false +[dependencies.selectors] +version = "0.18" + +[dependencies.matches] +version = "0.1.4" + +[dependencies.html5ever] +version = "0.18" + +[dependencies.cssparser] +version = "0.13" +[dev-dependencies.tempdir] +version = "0.3" diff --git a/src/vendor/kuchiki/Cargo.toml.orig b/src/vendor/kuchiki/Cargo.toml.orig new file mode 100644 index 0000000000..0175573124 --- /dev/null +++ b/src/vendor/kuchiki/Cargo.toml.orig @@ -0,0 +1,20 @@ +[package] +name = "kuchiki" +version = "0.5.1" +authors = ["Simon Sapin "] +license = "MIT" +description = "(朽木) HTML/XML tree manipulation library" +repository = "https://github.com/SimonSapin/kuchiki" + +[lib] +name = "kuchiki" +doctest = false + +[dependencies] +cssparser = "0.13" +matches = "0.1.4" +html5ever = "0.18" +selectors = "0.18" + +[dev-dependencies] +tempdir = "0.3" diff --git a/src/vendor/kuchiki/README.md b/src/vendor/kuchiki/README.md new file mode 100644 index 0000000000..30d4f54d41 --- /dev/null +++ b/src/vendor/kuchiki/README.md @@ -0,0 +1,10 @@ +Kuchiki (朽木) +============== + +HTML/XML¹ tree manipulation library for Rust. + +[Documentation](https://docs.rs/kuchiki/) + +See [users.rust-lang.org discussion](http://users.rust-lang.org/t/kuchiki-a-vaporware-html-xml-tree-manipulation-library/435). + +¹ There is no support for XML syntax yet. The plan is to integrate with an existing parser. diff --git a/src/vendor/kuchiki/docs/.nojekyll b/src/vendor/kuchiki/docs/.nojekyll new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/vendor/kuchiki/docs/404.html b/src/vendor/kuchiki/docs/404.html new file mode 100644 index 0000000000..9fef97867e --- /dev/null +++ b/src/vendor/kuchiki/docs/404.html @@ -0,0 +1,3 @@ + + +Moved to docs.rs diff --git a/src/vendor/kuchiki/docs/index.html b/src/vendor/kuchiki/docs/index.html new file mode 100644 index 0000000000..9fef97867e --- /dev/null +++ b/src/vendor/kuchiki/docs/index.html @@ -0,0 +1,3 @@ + + +Moved to docs.rs diff --git a/src/vendor/kuchiki/examples/find_matches.rs b/src/vendor/kuchiki/examples/find_matches.rs new file mode 100644 index 0000000000..848e08e7d5 --- /dev/null +++ b/src/vendor/kuchiki/examples/find_matches.rs @@ -0,0 +1,48 @@ +extern crate kuchiki; + +use kuchiki::traits::*; + +fn main() { + let html = r" + + + + +

+            <h1>Example</h1>
+            <p class='foo'>Hello, world!</p>
+            <p class='foo'>I love HTML</p>

+ + + "; + let css_selector = ".foo"; + + let document = kuchiki::parse_html().one(html); + + for css_match in document.select(css_selector).unwrap() { + // css_match is a NodeDataRef, but most of the interesting methods are + // on NodeRef. Let's get the underlying NodeRef. + let as_node = css_match.as_node(); + + // In this example, as_node represents an HTML node like + // + //

<p class='foo'>Hello world!</p>

" + // + // Which is distinct from just 'Hello world!'. To get rid of that
<p>
+ // tag, we're going to get each element's first child, which will be + // a "text" node. + // + // There are other kinds of nodes, of course. The possibilities are all + // listed in the `NodeData` enum in this crate. + let text_node = as_node.first_child().unwrap(); + + // Let's get the actual text in this text node. A text node wraps around + // a RefCell, so we need to call borrow() to get a &str out. + let text = text_node.as_text().unwrap().borrow(); + + // Prints: + // + // "Hello, world!" + // "I love HTML" + println!("{:?}", text); + } +} diff --git a/src/vendor/kuchiki/examples/stack-overflow.rs b/src/vendor/kuchiki/examples/stack-overflow.rs new file mode 100644 index 0000000000..a38e35cd32 --- /dev/null +++ b/src/vendor/kuchiki/examples/stack-overflow.rs @@ -0,0 +1,23 @@ +extern crate kuchiki; + +fn main() { + let mut depth = 2; + // 20 M nodes is a few GB of memory. + while depth <= 20_000_000 { + + let mut node = kuchiki::NodeRef::new_text(""); + for _ in 0..depth { + let parent = kuchiki::NodeRef::new_text(""); + parent.append(node); + node = parent; + } + + println!("Trying to drop {} nodes...", depth); + // Without an explicit `impl Drop for Node`, + // depth = 20_000 causes "thread '

' has overflowed its stack" + // on my machine (Linux x86_64). + ::std::mem::drop(node); + + depth *= 10; + } +} diff --git a/src/vendor/kuchiki/src/attributes.rs b/src/vendor/kuchiki/src/attributes.rs new file mode 100644 index 0000000000..5a2481214b --- /dev/null +++ b/src/vendor/kuchiki/src/attributes.rs @@ -0,0 +1,47 @@ +use html5ever::{QualName, LocalName}; +use std::collections::hash_map::{self, HashMap}; + +/// Convenience wrapper around a hashmap that adds method for attributes in the null namespace. +#[derive(Debug, PartialEq, Clone)] +pub struct Attributes { + /// A map of attributes whose name can have namespaces. + pub map: HashMap, +} + +macro_rules! namespaceless { + ($local_name: expr) => { + QualName::new(None, ns!(), $local_name.into()) + } +} + +impl Attributes { + /// Like HashMap::contains + pub fn contains>(&self, local_name: A) -> bool { + self.map.contains_key(&namespaceless!(local_name)) + } + + /// Like HashMap::get + pub fn get>(&self, local_name: A) -> Option<&str> { + self.map.get(&namespaceless!(local_name)).map(AsRef::as_ref) + } + + /// Like HashMap::get_mut + pub fn get_mut>(&mut self, local_name: A) -> Option<&mut String> { + self.map.get_mut(&namespaceless!(local_name)) + } + + /// Like HashMap::entry + pub fn entry>(&mut self, local_name: A) -> hash_map::Entry { + self.map.entry(namespaceless!(local_name)) + } + + /// Like HashMap::insert + pub fn insert>(&mut self, local_name: A, value: String) -> Option { + self.map.insert(namespaceless!(local_name), value) + } + + /// Like HashMap::remove + pub fn remove>(&mut self, local_name: A) -> Option { + self.map.remove(&namespaceless!(local_name)) + } +} diff --git a/src/vendor/kuchiki/src/iter.rs b/src/vendor/kuchiki/src/iter.rs new file mode 100644 index 0000000000..3aac1688b2 --- /dev/null +++ b/src/vendor/kuchiki/src/iter.rs @@ -0,0 +1,422 @@ +//! Node iterators + +use std::borrow::Borrow; +use std::cell::RefCell; +use std::iter::Rev; + +use tree::{NodeRef, ElementData}; +use select::Selectors; +use node_data_ref::NodeDataRef; + +impl NodeRef { + /// Return an iterator of references to this node and its ancestors. + #[inline] + pub fn inclusive_ancestors(&self) -> Ancestors { + Ancestors(Some(self.clone())) + } + + /// Return an iterator of references to this node’s ancestors. + #[inline] + pub fn ancestors(&self) -> Ancestors { + Ancestors(self.parent()) + } + + /// Return an iterator of references to this node and the siblings before it. + #[inline] + pub fn inclusive_preceding_siblings(&self) -> Rev { + match self.parent() { + Some(parent) => { + let first_sibling = parent.first_child().unwrap(); + debug_assert!(self.previous_sibling().is_some() || *self == first_sibling); + Siblings(Some(State { next: first_sibling, next_back: self.clone() })) + } + None => { + debug_assert!(self.previous_sibling().is_none()); + Siblings(Some(State { next: self.clone(), next_back: self.clone() })) + } + }.rev() + } + + /// Return an iterator of references to this node’s siblings before it. + #[inline] + pub fn preceding_siblings(&self) -> Rev { + match (self.parent(), self.previous_sibling()) { + (Some(parent), Some(previous_sibling)) => { + let first_sibling = parent.first_child().unwrap(); + Siblings(Some(State { next: first_sibling, next_back: previous_sibling })) + } + _ => Siblings(None) + }.rev() + } + + /// Return an iterator of references to this node and the siblings after it. 
+ #[inline] + pub fn inclusive_following_siblings(&self) -> Siblings { + match self.parent() { + Some(parent) => { + let last_sibling = parent.last_child().unwrap(); + debug_assert!(self.next_sibling().is_some() || *self == last_sibling); + Siblings(Some(State { next: self.clone(), next_back: last_sibling })) + } + None => { + debug_assert!(self.next_sibling().is_none()); + Siblings(Some(State { next: self.clone(), next_back: self.clone() })) + } + } + } + + /// Return an iterator of references to this node’s siblings after it. + #[inline] + pub fn following_siblings(&self) -> Siblings { + match (self.parent(), self.next_sibling()) { + (Some(parent), Some(next_sibling)) => { + let last_sibling = parent.last_child().unwrap(); + Siblings(Some(State { next: next_sibling, next_back: last_sibling })) + } + _ => Siblings(None) + } + } + + /// Return an iterator of references to this node’s children. + #[inline] + pub fn children(&self) -> Siblings { + match (self.first_child(), self.last_child()) { + (Some(first_child), Some(last_child)) => { + Siblings(Some(State { next: first_child, next_back: last_child })) + } + (None, None) => Siblings(None), + _ => unreachable!() + } + } + + /// Return an iterator of references to this node and its descendants, in tree order. + /// + /// Parent nodes appear before the descendants. + /// + /// Note: this is the `NodeEdge::Start` items from `traverse()`. + #[inline] + pub fn inclusive_descendants(&self) -> Descendants { + Descendants(self.traverse_inclusive()) + } + + /// Return an iterator of references to this node’s descendants, in tree order. + /// + /// Parent nodes appear before the descendants. + /// + /// Note: this is the `NodeEdge::Start` items from `traverse()`. + #[inline] + pub fn descendants(&self) -> Descendants { + Descendants(self.traverse()) + } + + /// Return an iterator of the start and end edges of this node and its descendants, + /// in tree order. + #[inline] + pub fn traverse_inclusive(&self) -> Traverse { + Traverse(Some(State { + next: NodeEdge::Start(self.clone()), + next_back: NodeEdge::End(self.clone()), + })) + } + + /// Return an iterator of the start and end edges of this node’s descendants, + /// in tree order. + #[inline] + pub fn traverse(&self) -> Traverse { + match (self.first_child(), self.last_child()) { + (Some(first_child), Some(last_child)) => { + Traverse(Some(State { + next: NodeEdge::Start(first_child), + next_back: NodeEdge::End(last_child) + })) + } + (None, None) => Traverse(None), + _ => unreachable!() + } + } + + /// Return an iterator of the inclusive descendants element that match the given selector list. + #[inline] + pub fn select(&self, selectors: &str) -> Result>, ()> { + self.inclusive_descendants().select(selectors) + } +} + + +#[derive(Debug, Clone)] +struct State { + next: T, + next_back: T, +} + + +/// A double-ended iterator of sibling nodes. +#[derive(Debug, Clone)] +pub struct Siblings(Option>); + +macro_rules! 
siblings_next { + ($next: ident, $next_back: ident, $next_sibling: ident) => { + fn $next(&mut self) -> Option { + #![allow(non_shorthand_field_patterns)] + self.0.take().map(|State { $next: next, $next_back: next_back }| { + if let Some(sibling) = next.$next_sibling() { + if next != next_back { + self.0 = Some(State { $next: sibling, $next_back: next_back }) + } + } + next + }) + } + } +} + +impl Iterator for Siblings { + type Item = NodeRef; + siblings_next!(next, next_back, next_sibling); +} + +impl DoubleEndedIterator for Siblings { + siblings_next!(next_back, next, previous_sibling); +} + + +/// An iterator on ancestor nodes. +#[derive(Debug, Clone)] +pub struct Ancestors(Option); + +impl Iterator for Ancestors { + type Item = NodeRef; + + #[inline] + fn next(&mut self) -> Option { + self.0.take().map(|node| { + self.0 = node.parent(); + node + }) + } +} + + +/// An iterator of references to a given node and its descendants, in tree order. +#[derive(Debug, Clone)] +pub struct Descendants(Traverse); + +macro_rules! descendants_next { + ($next: ident) => { + #[inline] + fn $next(&mut self) -> Option { + loop { + match (self.0).$next() { + Some(NodeEdge::Start(node)) => return Some(node), + Some(NodeEdge::End(_)) => {} + None => return None + } + } + } + } +} + +impl Iterator for Descendants { + type Item = NodeRef; + descendants_next!(next); +} + +impl DoubleEndedIterator for Descendants { + descendants_next!(next_back); +} + +/// Marks either the start or the end of a node. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum NodeEdge { + /// Indicates that start of a node that has children. + /// Yielded by `Traverse::next` before the node’s descendants. + /// In HTML or XML, this corresponds to an opening tag like `
<div>` Start(T), + + /// Indicates the end of a node that has children. + /// Yielded by `Traverse::next` after the node’s descendants. + /// In HTML or XML, this corresponds to a closing tag like `</div>
` + End(T), +} + + +/// An iterator of the start and end edges of the nodes in a given subtree. +#[derive(Debug, Clone)] +pub struct Traverse(Option>>); + +macro_rules! traverse_next { + ($next: ident, $next_back: ident, $first_child: ident, $next_sibling: ident, $Start: ident, $End: ident) => { + fn $next(&mut self) -> Option> { + #![allow(non_shorthand_field_patterns)] + self.0.take().map(|State { $next: next, $next_back: next_back }| { + if next != next_back { + self.0 = match next { + NodeEdge::$Start(ref node) => { + match node.$first_child() { + Some(child) => { + Some(State { $next: NodeEdge::$Start(child), $next_back: next_back }) + } + None => Some(State { $next: NodeEdge::$End(node.clone()), $next_back: next_back }) + } + } + NodeEdge::$End(ref node) => { + match node.$next_sibling() { + Some(sibling) => { + Some(State { $next: NodeEdge::$Start(sibling), $next_back: next_back }) + } + None => node.parent().map(|parent| { + State { $next: NodeEdge::$End(parent), $next_back: next_back } + }) + } + } + }; + } + next + }) + } + } +} + +impl Iterator for Traverse { + type Item = NodeEdge; + traverse_next!(next, next_back, first_child, next_sibling, Start, End); +} + +impl DoubleEndedIterator for Traverse { + traverse_next!(next_back, next, last_child, previous_sibling, End, Start); +} + + +macro_rules! filter_map_like_iterator { + (#[$doc: meta] $name: ident: $f: expr, $from: ty => $to: ty) => { + #[$doc] + #[derive(Debug, Clone)] + pub struct $name(pub I); + + impl Iterator for $name where I: Iterator { + type Item = $to; + + #[inline] + fn next(&mut self) -> Option<$to> { + for x in self.0.by_ref() { + if let Some(y) = ($f)(x) { + return Some(y) + } + } + None + } + } + + impl DoubleEndedIterator for $name where I: DoubleEndedIterator { + #[inline] + fn next_back(&mut self) -> Option<$to> { + for x in self.0.by_ref().rev() { + if let Some(y) = ($f)(x) { + return Some(y) + } + } + None + } + } + } +} + +filter_map_like_iterator! { + /// A node iterator adaptor that yields element nodes. + Elements: NodeRef::into_element_ref, NodeRef => NodeDataRef +} + +filter_map_like_iterator! { + /// A node iterator adaptor that yields comment nodes. + Comments: NodeRef::into_comment_ref, NodeRef => NodeDataRef> +} + +filter_map_like_iterator! { + /// A node iterator adaptor that yields text nodes. + TextNodes: NodeRef::into_text_ref, NodeRef => NodeDataRef> +} + + +/// An element iterator adaptor that yields elements maching given selectors. +pub struct Select +where I: Iterator>, + S: Borrow { + /// The underlying iterator. + pub iter: I, + + /// The selectors to be matched. + pub selectors: S, +} + +impl Iterator for Select +where I: Iterator>, + S: Borrow { + type Item = NodeDataRef; + + #[inline] + fn next(&mut self) -> Option> { + for element in self.iter.by_ref() { + if self.selectors.borrow().matches(&element) { + return Some(element) + } + } + None + } +} + +impl DoubleEndedIterator for Select +where I: DoubleEndedIterator>, + S: Borrow { + #[inline] + fn next_back(&mut self) -> Option> { + for element in self.iter.by_ref().rev() { + if self.selectors.borrow().matches(&element) { + return Some(element) + } + } + None + } +} + + +/// Convenience methods for node iterators. +pub trait NodeIterator: Sized + Iterator { + /// Filter this element iterator to elements. + #[inline] + fn elements(self) -> Elements { + Elements(self) + } + + /// Filter this node iterator to text nodes. 
+ #[inline] + fn text_nodes(self) -> TextNodes { + TextNodes(self) + } + + /// Filter this node iterator to comment nodes. + #[inline] + fn comments(self) -> Comments { + Comments(self) + } + + /// Filter this node iterator to elements maching the given selectors. + #[inline] + fn select(self, selectors: &str) -> Result>, ()> { + self.elements().select(selectors) + } +} + +/// Convenience methods for element iterators. +pub trait ElementIterator: Sized + Iterator> { + /// Filter this element iterator to elements maching the given selectors. + #[inline] + fn select(self, selectors: &str) -> Result, ()> { + Selectors::compile(selectors).map(|s| Select { + iter: self, + selectors: s, + }) + } +} + +impl NodeIterator for I where I: Iterator {} +impl ElementIterator for I where I: Iterator> {} diff --git a/src/vendor/kuchiki/src/lib.rs b/src/vendor/kuchiki/src/lib.rs new file mode 100644 index 0000000000..ddf86ea77a --- /dev/null +++ b/src/vendor/kuchiki/src/lib.rs @@ -0,0 +1,41 @@ +/*! + +Kuchiki (朽木), a HTML/XML tree manipulation library for Rust. + +*/ + +#![deny(missing_docs)] + +extern crate cssparser; +#[macro_use] extern crate html5ever; +#[macro_use] extern crate matches; +extern crate selectors; +#[cfg(test)] extern crate tempdir; + +mod attributes; +pub mod iter; +mod move_cell; +mod node_data_ref; +mod parser; +mod select; +mod serializer; +#[cfg(test)] mod tests; +mod tree; + +pub use attributes::Attributes; +pub use node_data_ref::NodeDataRef; +pub use parser::{parse_html, parse_html_with_options, ParseOpts}; +pub use select::{Selectors, Selector, Specificity}; +pub use tree::{NodeRef, Node, NodeData, ElementData, Doctype, DocumentData}; + +/// This module re-exports a number of traits that are useful when using Kuchiki. +/// It can be used with: +/// +/// ```rust +/// use kuchiki::traits::*; +/// ``` +pub mod traits { + pub use html5ever::tendril::TendrilSink; + pub use iter::{NodeIterator, ElementIterator}; +} + diff --git a/src/vendor/kuchiki/src/move_cell.rs b/src/vendor/kuchiki/src/move_cell.rs new file mode 100644 index 0000000000..10b61b77ce --- /dev/null +++ b/src/vendor/kuchiki/src/move_cell.rs @@ -0,0 +1,127 @@ +use std::cell::UnsafeCell; +use std::mem; +use std::rc::{Rc, Weak}; + +/// Like `Cell`, but doesn’t require `T: Copy`. +/// Specialization of https://github.com/SimonSapin/rust-movecell +pub struct MoveCell(UnsafeCell); + +impl MoveCell { + #[inline] + pub fn new(x: T) -> Self { + MoveCell(UnsafeCell::new(x)) + } + + // Note: this is unsound: + // + // #[inline] + // pub fn set(&self, x: T) { + // unsafe { + // *self.0.get() = x; + // } + // } + // + // Example: + // + // struct Evil(Box, Rc>>); + // impl Drop for Evil { + // fn drop(&mut self) { + // mem::drop(self.1.take()); // Mess with the "other" node, which might be `self`. + // self.0.clone(); // use after free! + // } + // } + // let a = Rc::new(MoveCell::new(None)); + // a.replace(Some(Evil(Box::new(5), a.clone()))); // Make a reference cycle. 
+ // a.set(None); // Trigger Evil::drop while in the cell + + #[inline] + pub fn replace(&self, x: T) -> T { + unsafe { + mem::replace(&mut *self.0.get(), x) + } + } +} + +impl MoveCell> { + #[inline] + pub fn is_none(&self) -> bool { + unsafe { + (*self.0.get()).is_none() + } + } + + #[inline] + pub fn take(&self) -> Option { + unsafe { + (*self.0.get()).take() + } + } +} + +impl MoveCell>> { + #[inline] + pub fn upgrade(&self) -> Option> { + unsafe { + match *self.0.get() { + Some(ref weak) => weak.upgrade(), + None => None, + } + } + } +} + +impl MoveCell>> { + /// Return `Some` if this `Rc` is the only strong reference count, + /// even if there are weak references. + #[inline] + pub fn take_if_unique_strong(&self) -> Option> { + unsafe { + match *self.0.get() { + None => None, + Some(ref rc) if Rc::strong_count(rc) > 1 => None, + // Not borrowing the `Rc` here + // as we would be invalidating that borrow while it is outstanding: + Some(_) => self.take(), + } + } + } +} + +impl MoveCell where T: WellBehavedClone { + #[inline] + pub fn clone_inner(&self) -> T { + unsafe { + (*self.0.get()).clone() + } + } +} + +/** + A Clone impl that will not access the cell again through reference cycles, + which would introduce mutable aliasing. + + Incorrect example: + + ```rust + struct Evil(Box, Rc>>); + impl Clone for Evil { + fn clone(&self) -> Self { + mem::drop(self.1.take()); // Mess with the "other" node, which might be `self`. + Evil( + self.0.clone(), // use after free! + Rc::new(MoveCell::new(None)) + ) + } + } + unsafe impl WellBehavedClone for Evil {} // Wrong. + + let a = Rc::new(MoveCell::new(None)); + a.set(Some(Evil(Box::new(5), a.clone()))); // Make a reference cycle. + a.clone_inner(); + ``` + +*/ +pub unsafe trait WellBehavedClone: Clone {} +unsafe impl WellBehavedClone for Rc {} +unsafe impl WellBehavedClone for Weak {} +unsafe impl WellBehavedClone for Option where T: WellBehavedClone {} diff --git a/src/vendor/kuchiki/src/node_data_ref.rs b/src/vendor/kuchiki/src/node_data_ref.rs new file mode 100644 index 0000000000..29913b82a6 --- /dev/null +++ b/src/vendor/kuchiki/src/node_data_ref.rs @@ -0,0 +1,89 @@ +use std::cell::RefCell; +use std::fmt; +use std::ops::Deref; +use tree::{Node, NodeRef, ElementData, Doctype, DocumentData}; + +impl NodeRef { + /// If this node is an element, return a strong reference to element-specific data. + #[inline] + pub fn into_element_ref(self) -> Option> { + NodeDataRef::new_opt(self, Node::as_element) + } + + /// If this node is a text node, return a strong reference to its contents. + #[inline] + pub fn into_text_ref(self) -> Option>> { + NodeDataRef::new_opt(self, Node::as_text) + } + + /// If this node is a comment, return a strong reference to its contents. + #[inline] + pub fn into_comment_ref(self) -> Option>> { + NodeDataRef::new_opt(self, Node::as_comment) + } + + /// If this node is a doctype, return a strong reference to doctype-specific data. + #[inline] + pub fn into_doctype_ref(self) -> Option> { + NodeDataRef::new_opt(self, Node::as_doctype) + } + + /// If this node is a document, return a strong reference to document-specific data. + #[inline] + pub fn into_document_ref(self) -> Option> { + NodeDataRef::new_opt(self, Node::as_document) + } +} + + +/// Holds a strong reference to a node, but dereferences to some component inside of it. +pub struct NodeDataRef { + _keep_alive: NodeRef, + _reference: *const T +} + +impl NodeDataRef { + /// Create a `NodeDataRef` for a component in a given node. 
+ #[inline] + pub fn new(rc: NodeRef, f: F) -> NodeDataRef where F: FnOnce(&Node) -> &T { + NodeDataRef { + _reference: f(&*rc), + _keep_alive: rc, + } + } + + /// Create a `NodeDataRef` for and a component that may or may not be in a given node. + #[inline] + pub fn new_opt(rc: NodeRef, f: F) -> Option> + where F: FnOnce(&Node) -> Option<&T> { + f(&*rc).map(|r| r as *const T).map(move |r| NodeDataRef { + _reference: r, + _keep_alive: rc, + }) + } + + /// Access the corresponding node. + #[inline] + pub fn as_node(&self) -> &NodeRef { + &self._keep_alive + } +} + +impl Deref for NodeDataRef { + type Target = T; + #[inline] fn deref(&self) -> &T { unsafe { &*self._reference } } +} + +impl fmt::Debug for NodeDataRef { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fmt::Debug::fmt(&**self, f) + } +} + +impl NodeDataRef { + /// Return the concatenation of all text nodes in this subtree. + pub fn text_contents(&self) -> String { + self.as_node().text_contents() + } +} diff --git a/src/vendor/kuchiki/src/parser.rs b/src/vendor/kuchiki/src/parser.rs new file mode 100644 index 0000000000..1e97febb85 --- /dev/null +++ b/src/vendor/kuchiki/src/parser.rs @@ -0,0 +1,170 @@ +use std::borrow::Cow; +use html5ever::{self, Attribute, QualName, ExpandedName}; +use html5ever::tendril::StrTendril; +use html5ever::tree_builder::{TreeSink, NodeOrText, QuirksMode, ElementFlags}; + +use tree::NodeRef; + +/// Options for the HTML parser. +#[derive(Default)] +pub struct ParseOpts { + /// Options for the HTML tokenizer. + pub tokenizer: html5ever::tokenizer::TokenizerOpts, + + /// Options for the HTML tree builder. + pub tree_builder: html5ever::tree_builder::TreeBuilderOpts, + + /// A callback for HTML parse errors (which are never fatal). + pub on_parse_error: Option)>>, +} + +/// Parse an HTML document with html5ever and the default configuration. +pub fn parse_html() -> html5ever::Parser { + parse_html_with_options(ParseOpts::default()) +} + +/// Parse an HTML document with html5ever with custom configuration. 
+pub fn parse_html_with_options(opts: ParseOpts) -> html5ever::Parser { + let sink = Sink { + document_node: NodeRef::new_document(), + on_parse_error: opts.on_parse_error, + }; + let html5opts = html5ever::ParseOpts { + tokenizer: opts.tokenizer, + tree_builder: opts.tree_builder, + }; + html5ever::parse_document(sink, html5opts) +} + +pub struct Sink { + document_node: NodeRef, + on_parse_error: Option)>>, +} + +impl TreeSink for Sink { + type Output = NodeRef; + + fn finish(self) -> NodeRef { self.document_node } + + type Handle = NodeRef; + + #[inline] + fn parse_error(&mut self, message: Cow<'static, str>) { + if let Some(ref mut handler) = self.on_parse_error { + handler(message) + } + } + + #[inline] + fn get_document(&mut self) -> NodeRef { + self.document_node.clone() + } + + #[inline] + fn set_quirks_mode(&mut self, mode: QuirksMode) { + self.document_node.as_document().unwrap()._quirks_mode.set(mode) + } + + #[inline] + fn same_node(&self, x: &NodeRef, y: &NodeRef) -> bool { + x == y + } + + #[inline] + fn elem_name<'a>(&self, target: &'a NodeRef) -> ExpandedName<'a> { + target.as_element().unwrap().name.expanded() + } + + #[inline] + fn create_element(&mut self, name: QualName, attrs: Vec, _flags: ElementFlags) + -> NodeRef { + let attrs = attrs.into_iter().map(|Attribute { name, value }| (name, value.into())); + NodeRef::new_element(name, attrs) + } + + #[inline] + fn create_comment(&mut self, text: StrTendril) -> NodeRef { + NodeRef::new_comment(text) + } + + #[inline] + fn create_pi(&mut self, target: StrTendril, data: StrTendril) -> NodeRef { + NodeRef::new_processing_instruction(target, data) + } + + #[inline] + fn has_parent_node(&self, node: &NodeRef) -> bool { + node.parent().is_some() + } + + #[inline] + fn append(&mut self, parent: &NodeRef, child: NodeOrText) { + match child { + NodeOrText::AppendNode(node) => parent.append(node), + NodeOrText::AppendText(text) => { + if let Some(last_child) = parent.last_child() { + if let Some(existing) = last_child.as_text() { + existing.borrow_mut().push_str(&text); + return + } + } + parent.append(NodeRef::new_text(text)) + } + } + } + + #[inline] + fn append_before_sibling(&mut self, sibling: &NodeRef, child: NodeOrText) { + match child { + NodeOrText::AppendNode(node) => sibling.insert_before(node), + NodeOrText::AppendText(text) => { + if let Some(previous_sibling) = sibling.previous_sibling() { + if let Some(existing) = previous_sibling.as_text() { + existing.borrow_mut().push_str(&text); + return + } + } + sibling.insert_before(NodeRef::new_text(text)) + } + } + } + + #[inline] + fn append_doctype_to_document(&mut self, name: StrTendril, public_id: StrTendril, + system_id: StrTendril) { + self.document_node.append(NodeRef::new_doctype(name, public_id, system_id)) + } + + #[inline] + fn add_attrs_if_missing(&mut self, target: &NodeRef, attrs: Vec) { + let element = target.as_element().unwrap(); + let mut attributes = element.attributes.borrow_mut(); + for Attribute { name, value } in attrs { + attributes.map.entry(name).or_insert_with(|| value.into()); + } + } + + #[inline] + fn remove_from_parent(&mut self, target: &NodeRef) { + target.detach() + } + + #[inline] + fn reparent_children(&mut self, node: &NodeRef, new_parent: &NodeRef) { + // FIXME: Can this be done more effciently in rctree, + // by moving the whole linked list of children at once? 
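Stepping back from the Sink internals for a moment, here is a minimal end-to-end sketch of how the parser, iterator, and selector layers of this vendored kuchiki 0.5 crate fit together; it is an illustrative sketch, not part of the upstream sources:

// Standalone sketch, not part of this patch; assumes the vendored kuchiki 0.5 API.
extern crate kuchiki;

use kuchiki::traits::*;

fn main() {
    // parse_html() builds the Sink defined above and returns an html5ever Parser.
    let document = kuchiki::parse_html().one("<p class='foo'>Hello</p><p>world</p>");

    // select() compiles a CSS selector list and yields matching elements.
    let hits: Vec<String> = document
        .select(".foo")
        .unwrap()
        .map(|m| m.text_contents())
        .collect();
    assert_eq!(hits, vec!["Hello".to_string()]);

    // The iterator adaptors from iter.rs compose the same way.
    let all_text: String = document
        .descendants()
        .text_nodes()
        .map(|t| t.borrow().clone())
        .collect();
    assert_eq!(all_text, "Helloworld");
}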
+ for child in node.children() { + new_parent.append(child) + } + } + + #[inline] + fn mark_script_already_started(&mut self, _node: &NodeRef) { + // FIXME: Is this useful outside of a browser? + } + + #[inline] + fn get_template_contents(&mut self, target: &NodeRef) -> NodeRef { + target.as_element().unwrap().template_contents.clone().unwrap() + } +} diff --git a/src/vendor/kuchiki/src/select.rs b/src/vendor/kuchiki/src/select.rs new file mode 100644 index 0000000000..3a67c362a3 --- /dev/null +++ b/src/vendor/kuchiki/src/select.rs @@ -0,0 +1,302 @@ +use cssparser::{self, ToCss}; +use iter::{NodeIterator, Select}; +use node_data_ref::NodeDataRef; +use selectors::{self, matching}; +use selectors::attr::{AttrSelectorOperation, NamespaceConstraint}; +use selectors::parser::{SelectorImpl, Parser, SelectorList, Selector as GenericSelector}; +use std::ascii::AsciiExt; +use std::borrow::Cow; +use std::fmt; +use html5ever::{LocalName, Namespace}; +use tree::{NodeRef, NodeData, ElementData}; + +/// The definition of whitespace per CSS Selectors Level 3 § 4. +/// +/// Copied from rust-selectors. +static SELECTOR_WHITESPACE: &'static [char] = &[' ', '\t', '\n', '\r', '\x0C']; + +#[derive(Debug, Clone)] +pub struct KuchikiSelectors; + +impl SelectorImpl for KuchikiSelectors { + type AttrValue = String; + type Identifier = LocalName; + type ClassName = LocalName; + type LocalName = LocalName; + type NamespacePrefix = LocalName; + type NamespaceUrl = Namespace; + type BorrowedNamespaceUrl = Namespace; + type BorrowedLocalName = LocalName; + + type NonTSPseudoClass = PseudoClass; + type PseudoElement = PseudoElement; +} + +struct KuchikiParser; + +impl Parser for KuchikiParser { + type Impl = KuchikiSelectors; + + fn parse_non_ts_pseudo_class(&self, name: Cow) -> Result { + use self::PseudoClass::*; + if name.eq_ignore_ascii_case("any-link") { Ok(AnyLink) } + else if name.eq_ignore_ascii_case("link") { Ok(Link) } + else if name.eq_ignore_ascii_case("visited") { Ok(Visited) } + else if name.eq_ignore_ascii_case("active") { Ok(Active) } + else if name.eq_ignore_ascii_case("focus") { Ok(Focus) } + else if name.eq_ignore_ascii_case("hover") { Ok(Hover) } + else if name.eq_ignore_ascii_case("enabled") { Ok(Enabled) } + else if name.eq_ignore_ascii_case("disabled") { Ok(Disabled) } + else if name.eq_ignore_ascii_case("checked") { Ok(Checked) } + else if name.eq_ignore_ascii_case("indeterminate") { Ok(Indeterminate) } + else { Err(()) } + } +} + +#[derive(PartialEq, Eq, Clone, Debug, Hash)] +pub enum PseudoClass { + AnyLink, + Link, + Visited, + Active, + Focus, + Hover, + Enabled, + Disabled, + Checked, + Indeterminate, +} + +impl ToCss for PseudoClass { + fn to_css(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { + dest.write_str(match *self { + PseudoClass::AnyLink => ":any-link", + PseudoClass::Link => ":link", + PseudoClass::Visited => ":visited", + PseudoClass::Active => ":active", + PseudoClass::Focus => ":focus", + PseudoClass::Hover => ":hover", + PseudoClass::Enabled => ":enabled", + PseudoClass::Disabled => ":disabled", + PseudoClass::Checked => ":checked", + PseudoClass::Indeterminate => ":indeterminate", + }) + } +} + +impl selectors::parser::SelectorMethods for PseudoClass { + type Impl = KuchikiSelectors; + + fn visit(&self, _visitor: &mut V) -> bool + where V: selectors::visitor::SelectorVisitor { + true + } +} + +#[derive(PartialEq, Eq, Clone, Debug, Hash)] +pub enum PseudoElement {} + +impl ToCss for PseudoElement { + fn to_css(&self, _dest: &mut W) -> fmt::Result where W: fmt::Write 
{ + match *self { + } + } +} + +impl selectors::parser::PseudoElement for PseudoElement { + type Impl = KuchikiSelectors; +} + +impl selectors::Element for NodeDataRef { + type Impl = KuchikiSelectors; + + #[inline] + fn parent_element(&self) -> Option { + self.as_node().parent().and_then(NodeRef::into_element_ref) + } + #[inline] + fn first_child_element(&self) -> Option { + self.as_node().children().elements().next() + } + #[inline] + fn last_child_element(&self) -> Option { + self.as_node().children().rev().elements().next() + } + #[inline] + fn prev_sibling_element(&self) -> Option { + self.as_node().preceding_siblings().elements().next() + } + #[inline] + fn next_sibling_element(&self) -> Option { + self.as_node().following_siblings().elements().next() + } + #[inline] + fn is_empty(&self) -> bool { + self.as_node().children().all(|child| match *child.data() { + NodeData::Element(_) => false, + NodeData::Text(ref text) => text.borrow().is_empty(), + _ => true, + }) + } + #[inline] + fn is_root(&self) -> bool { + match self.as_node().parent() { + None => false, + Some(parent) => matches!(*parent.data(), NodeData::Document(_)) + } + } + + #[inline] + fn is_html_element_in_html_document(&self) -> bool { + // FIXME: Have a notion of HTML document v.s. XML document? + self.name.ns == ns!(html) + } + #[inline] fn get_local_name<'a>(&'a self) -> &'a LocalName { &self.name.local } + #[inline] fn get_namespace<'a>(&'a self) -> &'a Namespace { &self.name.ns } + #[inline] + fn get_id(&self) -> Option { + self.attributes.borrow().get(local_name!("id")).map(LocalName::from) + } + #[inline] + fn has_class(&self, name: &LocalName) -> bool { + !name.is_empty() && + if let Some(class_attr) = self.attributes.borrow().get(local_name!("class")) { + class_attr.split(SELECTOR_WHITESPACE) + .any(|class| &**name == class ) + } else { + false + } + } + + #[inline] + fn attr_matches(&self, + ns: &NamespaceConstraint<&Namespace>, + local_name: &LocalName, + operation: &AttrSelectorOperation<&String>) + -> bool { + self.attributes.borrow().map.iter().any(|(key, value)| { + !matches!(*ns, NamespaceConstraint::Specific(url) if *url != key.ns) && + key.local == *local_name && + operation.eval_str(value) + }) + } + + fn match_pseudo_element(&self, + pseudo: &PseudoElement, + _context: &mut matching::MatchingContext) + -> bool { + match *pseudo {} + } + + fn match_non_ts_pseudo_class(&self, + pseudo: &PseudoClass, + _context: &mut matching::MatchingContext, + _flags_setter: &mut F) -> bool + where F: FnMut(&Self, matching::ElementSelectorFlags) + { + use self::PseudoClass::*; + match *pseudo { + Active | Focus | Hover | Enabled | Disabled | Checked | Indeterminate | Visited => false, + AnyLink | Link => { + self.name.ns == ns!(html) && + matches!(self.name.local, local_name!("a") | local_name!("area") | local_name!("link")) && + self.attributes.borrow().contains(local_name!("href")) + } + } + } +} + +/// A pre-compiled list of CSS Selectors. +pub struct Selectors(pub Vec); + +/// A pre-compiled CSS Selector. +pub struct Selector(GenericSelector); + +/// The specificity of a selector. +/// +/// Opaque, but ordered. +/// +/// Determines precedence in the cascading algorithm. +/// When equal, a rule later in source order takes precedence. +#[derive(Copy, Clone, Hash, Eq, PartialEq, Ord, PartialOrd)] +pub struct Specificity(u32); + +impl Selectors { + /// Compile a list of selectors. This may fail on syntax errors or unsupported selectors. 
+ #[inline] + pub fn compile(s: &str) -> Result { + SelectorList::parse(&KuchikiParser, &mut cssparser::Parser::new(s)).map(|list| { + Selectors(list.0.into_iter().map(Selector).collect()) + }) + } + + /// Returns whether the given element matches this list of selectors. + #[inline] + pub fn matches(&self, element: &NodeDataRef) -> bool { + self.0.iter().any(|s| s.matches(element)) + } + + /// Filter an element iterator, yielding those matching this list of selectors. + #[inline] + pub fn filter(&self, iter: I) -> Select + where I: Iterator> { + Select { + iter: iter, + selectors: self, + } + } +} + +impl Selector { + /// Returns whether the given element matches this selector. + #[inline] + pub fn matches(&self, element: &NodeDataRef) -> bool { + let mut context = matching::MatchingContext::new(matching::MatchingMode::Normal, None); + matching::matches_selector(&self.0.inner, element, &mut context, &mut |_, _| {}) + } + + /// Return the specificity of this selector. + pub fn specificity(&self) -> Specificity { + Specificity(self.0.specificity()) + } +} + +impl ::std::str::FromStr for Selectors { + type Err = (); + #[inline] + fn from_str(s: &str) -> Result { + Selectors::compile(s) + } +} + +impl fmt::Display for Selector { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.to_css(f) + } +} + +impl fmt::Display for Selectors { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut iter = self.0.iter(); + let first = iter.next() + .expect("Empty Selectors, should contain at least one selector"); + try!(first.0.to_css(f)); + for selector in iter { + try!(f.write_str(", ")); + try!(selector.0.to_css(f)); + } + Ok(()) + } +} + +impl fmt::Debug for Selector { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl fmt::Debug for Selectors { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} diff --git a/src/vendor/kuchiki/src/serializer.rs b/src/vendor/kuchiki/src/serializer.rs new file mode 100644 index 0000000000..f89ce1be4d --- /dev/null +++ b/src/vendor/kuchiki/src/serializer.rs @@ -0,0 +1,79 @@ +use std::fs::File; +use std::io::{Write, Result}; +use std::path::Path; +use std::string::ToString; +use html5ever::serialize::{Serialize, Serializer, TraversalScope, serialize, SerializeOpts}; +use html5ever::serialize::TraversalScope::*; + +use tree::{NodeRef, NodeData}; + + +impl Serialize for NodeRef { + fn serialize(&self, serializer: &mut S, + traversal_scope: TraversalScope) -> Result<()> { + match (traversal_scope, self.data()) { + (_, &NodeData::Element(ref element)) => { + if traversal_scope == IncludeNode { + try!(serializer.start_elem( + element.name.clone(), + element.attributes.borrow().map.iter().map(|(name, value)| (name, &**value)))); + } + + for child in self.children() { + try!(Serialize::serialize(&child, serializer, IncludeNode)); + } + + if traversal_scope == IncludeNode { + try!(serializer.end_elem(element.name.clone())); + } + Ok(()) + } + + (_, &NodeData::DocumentFragment) | + (_, &NodeData::Document(_)) => { + for child in self.children() { + try!(Serialize::serialize(&child, serializer, IncludeNode)); + } + Ok(()) + } + + (ChildrenOnly, _) => Ok(()), + + (IncludeNode, &NodeData::Doctype(ref doctype)) => serializer.write_doctype(&doctype.name), + (IncludeNode, &NodeData::Text(ref text)) => serializer.write_text(&text.borrow()), + (IncludeNode, &NodeData::Comment(ref text)) => serializer.write_comment(&text.borrow()), + (IncludeNode, 
&NodeData::ProcessingInstruction(ref contents)) => { + let contents = contents.borrow(); + serializer.write_processing_instruction(&contents.0, &contents.1) + } + } + } +} + + +impl ToString for NodeRef { + #[inline] + fn to_string(&self) -> String { + let mut u8_vec = Vec::new(); + self.serialize(&mut u8_vec).unwrap(); + String::from_utf8(u8_vec).unwrap() + } +} + +impl NodeRef { + /// Serialize this node and its descendants in HTML syntax to the given stream. + #[inline] + pub fn serialize(&self, writer: &mut W) -> Result<()> { + serialize(writer, self, SerializeOpts { + traversal_scope: IncludeNode, + ..Default::default() + }) + } + + /// Serialize this node and its descendants in HTML syntax to a new file at the given path. + #[inline] + pub fn serialize_to_file>(&self, path: P) -> Result<()>{ + let mut file = try!(File::create(&path)); + self.serialize(&mut file) + } +} diff --git a/src/vendor/kuchiki/src/tests.rs b/src/vendor/kuchiki/src/tests.rs new file mode 100644 index 0000000000..b6fd0c51a1 --- /dev/null +++ b/src/vendor/kuchiki/src/tests.rs @@ -0,0 +1,119 @@ +use html5ever::tree_builder::QuirksMode; +use std::path::Path; + +use tempdir::TempDir; + +use parser::parse_html; +use select::*; +use traits::*; + +#[test] +fn text_nodes() { + let html = r" + +Test case +

Content contains Important data

"; + let document = parse_html().one(html); + let paragraph = document.select("p").unwrap().collect::>(); + assert_eq!(paragraph.len(), 1); + assert_eq!(paragraph[0].text_contents(), "Content contains Important data"); + let texts = paragraph[0].as_node().descendants().text_nodes().collect::>(); + assert_eq!(texts.len(), 3); + assert_eq!(&*texts[0].borrow(), "Content contains "); + assert_eq!(&*texts[1].borrow(), "Important"); + assert_eq!(&*texts[2].borrow(), " data"); + { + let mut x = texts[0].borrow_mut(); + &x.truncate(0); + &x.push_str("Content doesn't contain "); + } + assert_eq!(&*texts[0].borrow(), "Content doesn't contain "); +} + +#[test] +fn parse_and_serialize() { + let html = r" + +Test case +

Content"; + let document = parse_html().one(html); + assert_eq!(document.as_document().unwrap().quirks_mode(), QuirksMode::NoQuirks); + assert_eq!(document.to_string(), r"Test case +

Content

"); +} + +#[test] +fn parse_file() { + let mut path = Path::new(env!("CARGO_MANIFEST_DIR")).to_path_buf(); + path.push("test_data".to_string()); + path.push("foo.html"); + + let html = r" + Test case + + +

Foo

+ + +"; + let document = parse_html().from_utf8().from_file(&path).unwrap(); + assert_eq!(document.to_string(), html); +} + +#[test] +fn serialize_and_read_file() { + let tempdir = TempDir::new("test_rm_tempdir").unwrap(); + let mut path = tempdir.path().to_path_buf(); + path.push("temp.html"); + + let html = r"TitleBody"; + let document = parse_html().one(html); + let _ = document.serialize_to_file(path.clone()); + + let document2 = parse_html().from_utf8().from_file(&path).unwrap(); + assert_eq!(document.to_string(), document2.to_string()); +} + +#[test] +fn select() { + let html = r" +Test case +

Foo +

Bar +

Foo +"; + + let document = parse_html().one(html); + let matching = document.select("p.foo").unwrap().collect::>(); + assert_eq!(matching.len(), 2); + let child = matching[0].as_node().first_child().unwrap(); + assert_eq!(&**child.as_text().unwrap().borrow(), "Foo\n"); + assert_eq!(matching[0].attributes.borrow().get("class"), Some("foo")); + assert_eq!(matching[0].attributes.borrow().get(local_name!("class")), Some("foo")); +} + +#[test] +fn to_string() { + let html = r" + + + Test case + + +

Foo + +"; + + let document = parse_html().one(html); + assert_eq!(document.inclusive_descendants().nth(11).unwrap().to_string(), "

Foo\n \n

"); +} + +#[test] +fn specificity() { + let selectors = Selectors::compile(".example, :first-child, div").unwrap(); + let specificities = selectors.0.iter().map(|s| s.specificity()).collect::>(); + assert_eq!(specificities.len(), 3); + assert!(specificities[0] == specificities[1]); + assert!(specificities[0] > specificities[2]); + assert!(specificities[1] > specificities[2]); +} diff --git a/src/vendor/kuchiki/src/tree.rs b/src/vendor/kuchiki/src/tree.rs new file mode 100644 index 0000000000..cd0cdbb8d5 --- /dev/null +++ b/src/vendor/kuchiki/src/tree.rs @@ -0,0 +1,460 @@ +use move_cell::MoveCell; +use std::cell::{Cell, RefCell}; +use std::fmt; +use std::ops::Deref; +use std::rc::{Rc, Weak}; +use html5ever::tree_builder::QuirksMode; +use html5ever::{QualName, ExpandedName}; + +use attributes::Attributes; +use iter::NodeIterator; + + +/// Node data specific to the node type. +#[derive(Debug, PartialEq, Clone)] +pub enum NodeData { + /// Element node + Element(ElementData), + + /// Text node + Text(RefCell), + + /// Comment node + Comment(RefCell), + + /// Processing instruction node + ProcessingInstruction(RefCell<(String, String)>), + + /// Doctype node + Doctype(Doctype), + + /// Document node + Document(DocumentData), + + /// Document fragment node + DocumentFragment, +} + +/// Data specific to doctype nodes. +#[derive(Debug, PartialEq, Clone)] +pub struct Doctype { + /// The name of the doctype + pub name: String, + + /// The public ID of the doctype + pub public_id: String, + + /// The system ID of the doctype + pub system_id: String, +} + +/// Data specific to element nodes. +#[derive(Debug, PartialEq, Clone)] +pub struct ElementData { + /// The namespace and local name of the element, such as `ns!(html)` and `body`. + pub name: QualName, + + /// The attributes of the elements. + pub attributes: RefCell, + + /// If the element is an HTML `

(&mut self, p: P) -> &mut Build + where P: IntoIterator, + P::Item: AsRef { + for file in p.into_iter() { + self.file(file); + } + self + } + /// Set C++ support. /// /// The other `cpp_*` options will only become active if this is set to /// `true`. - pub fn cpp(&mut self, cpp: bool) -> &mut Config { + pub fn cpp(&mut self, cpp: bool) -> &mut Build { self.cpp = cpp; self } + /// Set warnings into errors flag. + /// + /// Disabled by default. + /// + /// Warning: turning warnings into errors only make sense + /// if you are a developer of the crate using cc-rs. + /// Some warnings only appear on some architecture or + /// specific version of the compiler. Any user of this crate, + /// or any other crate depending on it, could fail during + /// compile time. + /// + /// # Example + /// + /// ```no_run + /// cc::Build::new() + /// .file("src/foo.c") + /// .warnings_into_errors(true) + /// .compile("libfoo.a"); + /// ``` + pub fn warnings_into_errors(&mut self, warnings_into_errors: bool) -> &mut Build { + self.warnings_into_errors = warnings_into_errors; + self + } + + /// Set warnings flags. + /// + /// Adds some flags: + /// - "/Wall" for MSVC. + /// - "-Wall", "-Wextra" for GNU and Clang. + /// + /// Enabled by default. + /// + /// # Example + /// + /// ```no_run + /// cc::Build::new() + /// .file("src/foo.c") + /// .warnings(false) + /// .compile("libfoo.a"); + /// ``` + pub fn warnings(&mut self, warnings: bool) -> &mut Build { + self.warnings = warnings; + self + } + /// Set the standard library to link against when compiling with C++ /// support. /// @@ -274,8 +483,22 @@ impl Config { /// otherwise cargo will link against the specified library. /// /// The given library name must not contain the `lib` prefix. - pub fn cpp_link_stdlib(&mut self, cpp_link_stdlib: Option<&str>) -> &mut Config { - self.cpp_link_stdlib = Some(cpp_link_stdlib.map(|s| s.into())); + /// + /// Common values: + /// - `stdc++` for GNU + /// - `c++` for Clang + /// + /// # Example + /// + /// ```no_run + /// cc::Build::new() + /// .file("src/foo.c") + /// .shared_flag(true) + /// .cpp_link_stdlib("stdc++") + /// .compile("libfoo.so"); + /// ``` + pub fn cpp_link_stdlib<'a, V: Into>>(&mut self, cpp_link_stdlib: V) -> &mut Build { + self.cpp_link_stdlib = Some(cpp_link_stdlib.into().map(|s| s.into())); self } @@ -299,7 +522,21 @@ impl Config { /// be used, otherwise `-stdlib` is added to the compile invocation. /// /// The given library name must not contain the `lib` prefix. - pub fn cpp_set_stdlib(&mut self, cpp_set_stdlib: Option<&str>) -> &mut Config { + /// + /// Common values: + /// - `stdc++` for GNU + /// - `c++` for Clang + /// + /// # Example + /// + /// ```no_run + /// cc::Build::new() + /// .file("src/foo.c") + /// .cpp_set_stdlib("c++") + /// .compile("libfoo.a"); + /// ``` + pub fn cpp_set_stdlib<'a, V: Into>>(&mut self, cpp_set_stdlib: V) -> &mut Build { + let cpp_set_stdlib = cpp_set_stdlib.into(); self.cpp_set_stdlib = cpp_set_stdlib.map(|s| s.into()); self.cpp_link_stdlib(cpp_set_stdlib); self @@ -309,7 +546,16 @@ impl Config { /// /// This option is automatically scraped from the `TARGET` environment /// variable by build scripts, so it's not required to call this function. 
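The warning and C++ controls documented in this hunk all compose on a single `Build`; the following sketch (file and library names invented) shows a plausible combination in a build script, not code taken from the patch:

```rust
// build.rs -- illustrative only; src/foo.cpp is a placeholder path.
fn main() {
    cc::Build::new()
        .cpp(true)                   // compile as C++ and link a C++ runtime
        .cpp_link_stdlib("stdc++")   // GNU libstdc++; use "c++" for libc++
        .warnings(true)              // -Wall -Wextra (GNU/Clang) or /Wall (MSVC)
        .warnings_into_errors(true)  // promote those warnings to hard errors
        .file("src/foo.cpp")
        .compile("foo");
}
```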
- pub fn target(&mut self, target: &str) -> &mut Config { + /// + /// # Example + /// + /// ```no_run + /// cc::Build::new() + /// .file("src/foo.c") + /// .target("aarch64-linux-android") + /// .compile("foo"); + /// ``` + pub fn target(&mut self, target: &str) -> &mut Build { self.target = Some(target.to_string()); self } @@ -318,7 +564,16 @@ impl Config { /// /// This option is automatically scraped from the `HOST` environment /// variable by build scripts, so it's not required to call this function. - pub fn host(&mut self, host: &str) -> &mut Config { + /// + /// # Example + /// + /// ```no_run + /// cc::Build::new() + /// .file("src/foo.c") + /// .host("arm-linux-gnueabihf") + /// .compile("foo"); + /// ``` + pub fn host(&mut self, host: &str) -> &mut Build { self.host = Some(host.to_string()); self } @@ -327,7 +582,7 @@ impl Config { /// /// This option is automatically scraped from the `OPT_LEVEL` environment /// variable by build scripts, so it's not required to call this function. - pub fn opt_level(&mut self, opt_level: u32) -> &mut Config { + pub fn opt_level(&mut self, opt_level: u32) -> &mut Build { self.opt_level = Some(opt_level.to_string()); self } @@ -336,7 +591,7 @@ impl Config { /// /// This option is automatically scraped from the `OPT_LEVEL` environment /// variable by build scripts, so it's not required to call this function. - pub fn opt_level_str(&mut self, opt_level: &str) -> &mut Config { + pub fn opt_level_str(&mut self, opt_level: &str) -> &mut Build { self.opt_level = Some(opt_level.to_string()); self } @@ -347,7 +602,7 @@ impl Config { /// This option is automatically scraped from the `PROFILE` environment /// variable by build scripts (only enabled when the profile is "debug"), so /// it's not required to call this function. - pub fn debug(&mut self, debug: bool) -> &mut Config { + pub fn debug(&mut self, debug: bool) -> &mut Build { self.debug = Some(debug); self } @@ -357,7 +612,7 @@ impl Config { /// /// This option is automatically scraped from the `OUT_DIR` environment /// variable by build scripts, so it's not required to call this function. - pub fn out_dir>(&mut self, out_dir: P) -> &mut Config { + pub fn out_dir>(&mut self, out_dir: P) -> &mut Build { self.out_dir = Some(out_dir.as_ref().to_owned()); self } @@ -367,7 +622,7 @@ impl Config { /// This option is automatically determined from the target platform or a /// number of environment variables, so it's not required to call this /// function. - pub fn compiler>(&mut self, compiler: P) -> &mut Config { + pub fn compiler>(&mut self, compiler: P) -> &mut Build { self.compiler = Some(compiler.as_ref().to_owned()); self } @@ -377,13 +632,21 @@ impl Config { /// This option is automatically determined from the target platform or a /// number of environment variables, so it's not required to call this /// function. - pub fn archiver>(&mut self, archiver: P) -> &mut Config { + pub fn archiver>(&mut self, archiver: P) -> &mut Build { self.archiver = Some(archiver.as_ref().to_owned()); self } /// Define whether metadata should be emitted for cargo allowing it to /// automatically link the binary. Defaults to `true`. 
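The `cargo_metadata` hunk that follows spells out exactly which `cargo:` lines are printed. When a build script prefers to emit its own link directives, that output can be suppressed; a sketch under that assumption (the `shim` library name is invented):

```rust
// build.rs -- illustrative only.
use std::env;

fn main() {
    // Build the archive but skip the automatic rustc-link-lib /
    // rustc-link-search lines; this script prints its own below.
    cc::Build::new()
        .file("src/shim.c")
        .cargo_metadata(false)
        .compile("shim");

    let out_dir = env::var("OUT_DIR").unwrap();
    println!("cargo:rustc-link-search=native={}", out_dir);
    println!("cargo:rustc-link-lib=static=shim");
}
```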
- pub fn cargo_metadata(&mut self, cargo_metadata: bool) -> &mut Config { + /// + /// The emitted metadata is: + /// + /// - `rustc-link-lib=static=`*compiled lib* + /// - `rustc-link-search=native=`*target folder* + /// - When target is MSVC, the ATL-MFC libs are added via `rustc-link-search=native=` + /// - When C++ is enabled, the C++ stdlib is added via `rustc-link-lib` + /// + pub fn cargo_metadata(&mut self, cargo_metadata: bool) -> &mut Build { self.cargo_metadata = cargo_metadata; self } @@ -392,7 +655,7 @@ impl Config { /// /// This option defaults to `false` for `windows-gnu` targets and /// to `true` for all other targets. - pub fn pic(&mut self, pic: bool) -> &mut Config { + pub fn pic(&mut self, pic: bool) -> &mut Build { self.pic = Some(pic); self } @@ -400,14 +663,13 @@ impl Config { /// Configures whether the /MT flag or the /MD flag will be passed to msvc build tools. /// /// This option defaults to `false`, and affect only msvc targets. - pub fn static_crt(&mut self, static_crt: bool) -> &mut Config { + pub fn static_crt(&mut self, static_crt: bool) -> &mut Build { self.static_crt = Some(static_crt); self } - #[doc(hidden)] - pub fn __set_env(&mut self, a: A, b: B) -> &mut Config + pub fn __set_env(&mut self, a: A, b: B) -> &mut Build where A: AsRef, B: AsRef { @@ -417,31 +679,42 @@ impl Config { /// Run the compiler, generating the file `output` /// - /// The name `output` must begin with `lib` and end with `.a` - pub fn compile(&self, output: &str) { - assert!(output.starts_with("lib")); - assert!(output.ends_with(".a")); - let lib_name = &output[3..output.len() - 2]; - let dst = self.get_out_dir(); + /// This will return a result instead of panicing; see compile() for the complete description. + pub fn try_compile(&self, output: &str) -> Result<(), Error> { + let (lib_name, gnu_lib_name) = if output.starts_with("lib") && output.ends_with(".a") { + (&output[3..output.len() - 2], output.to_owned()) + } else { + let mut gnu = String::with_capacity(5 + output.len()); + gnu.push_str("lib"); + gnu.push_str(&output); + gnu.push_str(".a"); + (output, gnu) + }; + let dst = self.get_out_dir()?; let mut objects = Vec::new(); let mut src_dst = Vec::new(); for file in self.files.iter() { let obj = dst.join(file).with_extension("o"); let obj = if !obj.starts_with(&dst) { - dst.join(obj.file_name().unwrap()) + dst.join(obj.file_name().ok_or_else(|| Error::new(ErrorKind::IOError, "Getting object file details failed."))?) } else { obj }; - fs::create_dir_all(&obj.parent().unwrap()).unwrap(); + + match obj.parent() { + Some(s) => fs::create_dir_all(s)?, + None => return Err(Error::new(ErrorKind::IOError, "Getting object file details failed.")), + }; + src_dst.push((file.to_path_buf(), obj.clone())); objects.push(obj); } - self.compile_objects(&src_dst); - self.assemble(lib_name, &dst.join(output), &objects); + self.compile_objects(&src_dst)?; + self.assemble(lib_name, &dst.join(gnu_lib_name), &objects)?; - if self.get_target().contains("msvc") { - let compiler = self.get_base_compiler(); + if self.get_target()?.contains("msvc") { + let compiler = self.get_base_compiler()?; let atlmfc_lib = compiler.env() .iter() .find(|&&(ref var, _)| var.as_os_str() == OsStr::new("LIB")) @@ -457,20 +730,39 @@ impl Config { } } - self.print(&format!("cargo:rustc-link-lib=static={}", - &output[3..output.len() - 2])); + self.print(&format!("cargo:rustc-link-lib=static={}", lib_name)); self.print(&format!("cargo:rustc-link-search=native={}", dst.display())); // Add specific C++ libraries, if enabled. 
if self.cpp { - if let Some(stdlib) = self.get_cpp_link_stdlib() { + if let Some(stdlib) = self.get_cpp_link_stdlib()? { self.print(&format!("cargo:rustc-link-lib={}", stdlib)); } } + + Ok(()) + } + + /// Run the compiler, generating the file `output` + /// + /// The name `output` should be the name of the library. For backwards compatibility, + /// the `output` may start with `lib` and end with `.a`. The Rust compilier will create + /// the assembly with the lib prefix and .a extension. MSVC will create a file without prefix, + /// ending with `.lib`. + /// + /// # Panics + /// + /// Panics if `output` is not formatted correctly or if one of the underlying + /// compiler commands fails. It can also panic if it fails reading file names + /// or creating directories. + pub fn compile(&self, output: &str) { + if let Err(e) = self.try_compile(output) { + fail(&e.message); + } } #[cfg(feature = "parallel")] - fn compile_objects(&self, objs: &[(PathBuf, PathBuf)]) { + fn compile_objects(&self, objs: &[(PathBuf, PathBuf)]) -> Result<(), Error> { use self::rayon::prelude::*; let mut cfg = rayon::Configuration::new(); @@ -481,24 +773,36 @@ impl Config { } drop(rayon::initialize(cfg)); + let results: Mutex>> = Mutex::new(Vec::new()); + objs.par_iter().with_max_len(1) - .for_each(|&(ref src, ref dst)| self.compile_object(src, dst)); + .for_each(|&(ref src, ref dst)| results.lock().unwrap().push(self.compile_object(src, dst))); + + // Check for any errors and return the first one found. + for result in results.into_inner().unwrap().iter() { + if result.is_err() { + return result.clone(); + } + } + + Ok(()) } #[cfg(not(feature = "parallel"))] - fn compile_objects(&self, objs: &[(PathBuf, PathBuf)]) { + fn compile_objects(&self, objs: &[(PathBuf, PathBuf)]) -> Result<(), Error> { for &(ref src, ref dst) in objs { - self.compile_object(src, dst); + self.compile_object(src, dst)?; } + Ok(()) } - fn compile_object(&self, file: &Path, dst: &Path) { + fn compile_object(&self, file: &Path, dst: &Path) -> Result<(), Error> { let is_asm = file.extension().and_then(|s| s.to_str()) == Some("asm"); - let msvc = self.get_target().contains("msvc"); + let msvc = self.get_target()?.contains("msvc"); let (mut cmd, name) = if msvc && is_asm { - self.msvc_macro_assembler() + self.msvc_macro_assembler()? } else { - let compiler = self.get_compiler(); + let compiler = self.try_get_compiler()?; let mut cmd = compiler.to_command(); for &(ref a, ref b) in self.env.iter() { cmd.env(a, b); @@ -506,46 +810,60 @@ impl Config { (cmd, compiler.path .file_name() - .unwrap() + .ok_or_else(|| Error::new(ErrorKind::IOError, "Failed to get compiler path."))? .to_string_lossy() .into_owned()) }; - if msvc && is_asm { - cmd.arg("/Fo").arg(dst); - } else if msvc { - let mut s = OsString::from("/Fo"); - s.push(&dst); - cmd.arg(s); - } else { - cmd.arg("-o").arg(&dst); - } + command_add_output_file(&mut cmd, dst, msvc, is_asm); cmd.arg(if msvc { "/c" } else { "-c" }); cmd.arg(file); - run(&mut cmd, &name); + run(&mut cmd, &name)?; + Ok(()) } - /// Run the compiler, returning the macro-expanded version of the input files. - /// - /// This is only relevant for C and C++ files. - pub fn expand(&self) -> Vec { - let compiler = self.get_compiler(); + /// This will return a result instead of panicing; see expand() for the complete description. 
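Because `try_compile` (and `try_expand` below) return `Result` instead of panicking, a build script can recover from a failed native build; a hedged sketch where the fallback `cfg` name is invented:

```rust
// build.rs -- illustrative only.
fn main() {
    let mut build = cc::Build::new();
    build.file("src/fast.c");

    // Err(cc::Error) here means the compiler invocation failed; fall back
    // to a pure-Rust code path instead of aborting the whole build.
    match build.try_compile("fast") {
        Ok(()) => {}
        Err(_) => {
            println!("cargo:warning=C implementation unavailable, using Rust fallback");
            println!("cargo:rustc-cfg=use_rust_fallback");
        }
    }
}
```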
+ pub fn try_expand(&self) -> Result, Error> { + let compiler = self.try_get_compiler()?; let mut cmd = compiler.to_command(); for &(ref a, ref b) in self.env.iter() { cmd.env(a, b); } cmd.arg(compiler.family.expand_flag()); + + assert!(self.files.len() <= 1, + "Expand may only be called for a single file"); + for file in self.files.iter() { cmd.arg(file); } let name = compiler.path .file_name() - .unwrap() + .ok_or_else(|| Error::new(ErrorKind::IOError, "Failed to get compiler path."))? .to_string_lossy() .into_owned(); - run_output(&mut cmd, &name) + Ok(run_output(&mut cmd, &name)?) + } + + /// Run the compiler, returning the macro-expanded version of the input files. + /// + /// This is only relevant for C and C++ files. + /// + /// # Panics + /// Panics if more than one file is present in the config, or if compiler + /// path has an invalid file name. + /// + /// # Example + /// ```no_run + /// let out = cc::Build::new().file("src/foo.c").expand(); + /// ``` + pub fn expand(&self) -> Vec { + match self.try_expand() { + Err(e) => fail(&e.message), + Ok(v) => v, + } } /// Get the compiler that's in use for this configuration. @@ -562,11 +880,25 @@ impl Config { /// conventions for this path, e.g. looking at the explicitly set compiler, /// environment variables (a number of which are inspected here), and then /// falling back to the default configuration. + /// + /// # Panics + /// + /// Panics if an error occurred while determining the architecture. pub fn get_compiler(&self) -> Tool { - let opt_level = self.get_opt_level(); - let target = self.get_target(); + match self.try_get_compiler() { + Ok(tool) => tool, + Err(e) => fail(&e.message), + } + } - let mut cmd = self.get_base_compiler(); + /// Get the compiler that's in use for this configuration. + /// + /// This will return a result instead of panicing; see get_compiler() for the complete description. + pub fn try_get_compiler(&self) -> Result { + let opt_level = self.get_opt_level()?; + let target = self.get_target()?; + + let mut cmd = self.get_base_compiler()?; let nvcc = cmd.path.file_name() .and_then(|p| p.to_str()).map(|p| p.contains("nvcc")) .unwrap_or(false); @@ -602,7 +934,14 @@ impl Config { } ToolFamily::Gnu | ToolFamily::Clang => { - cmd.args.push(format!("-O{}", opt_level).into()); + // arm-linux-androideabi-gcc 4.8 shipped with Android NDK does + // not support '-Oz' + if opt_level == "z" && cmd.family != ToolFamily::Clang { + cmd.args.push("-Os".into()); + } else { + cmd.args.push(format!("-O{}", opt_level).into()); + } + if !nvcc { cmd.args.push("-ffunction-sections".into()); cmd.args.push("-fdata-sections".into()); @@ -717,7 +1056,7 @@ impl Config { if target.contains("-ios") { // FIXME: potential bug. iOS is always compiled with Clang, but Gcc compiler may be // detected instead. 
- self.ios_flags(&mut cmd); + self.ios_flags(&mut cmd)?; } if self.static_flag.unwrap_or(false) { @@ -750,6 +1089,12 @@ impl Config { cmd.args.push(flag.into()); } + for flag in self.flags_supported.iter() { + if self.is_flag_supported(flag).unwrap_or(false) { + cmd.args.push(flag.into()); + } + } + for &(ref key, ref value) in self.definitions.iter() { let lead = if let ToolFamily::Msvc = cmd.family {"/"} else {"-"}; if let Some(ref value) = *value { @@ -758,11 +1103,22 @@ impl Config { cmd.args.push(format!("{}D{}", lead, key).into()); } } - cmd + + if self.warnings { + for flag in cmd.family.warnings_flags().iter() { + cmd.args.push(flag.into()); + } + } + + if self.warnings_into_errors { + cmd.args.push(cmd.family.warnings_to_errors_flag().into()); + } + + Ok(cmd) } - fn msvc_macro_assembler(&self) -> (Command, String) { - let target = self.get_target(); + fn msvc_macro_assembler(&self) -> Result<(Command, String), Error> { + let target = self.get_target()?; let tool = if target.contains("x86_64") { "ml64.exe" } else { @@ -787,15 +1143,15 @@ impl Config { cmd.arg(flag); } - (cmd, tool.to_string()) + Ok((cmd, tool.to_string())) } - fn assemble(&self, lib_name: &str, dst: &Path, objects: &[PathBuf]) { + fn assemble(&self, lib_name: &str, dst: &Path, objects: &[PathBuf]) -> Result<(), Error> { // Delete the destination if it exists as the `ar` tool at least on Unix // appends to it, which we don't want. let _ = fs::remove_file(&dst); - let target = self.get_target(); + let target = self.get_target()?; if target.contains("msvc") { let mut cmd = match self.archiver { Some(ref s) => self.cmd(s), @@ -807,46 +1163,52 @@ impl Config { .arg("/nologo") .args(objects) .args(&self.objects), - "lib.exe"); + "lib.exe")?; // The Rust compiler will look for libfoo.a and foo.lib, but the // MSVC linker will also be passed foo.lib, so be sure that both // exist for now. let lib_dst = dst.with_file_name(format!("{}.lib", lib_name)); let _ = fs::remove_file(&lib_dst); - fs::hard_link(&dst, &lib_dst) + match fs::hard_link(&dst, &lib_dst) .or_else(|_| { // if hard-link fails, just copy (ignoring the number of bytes written) fs::copy(&dst, &lib_dst).map(|_| ()) - }) - .expect("Copying from {:?} to {:?} failed.");; + }) { + Ok(_) => (), + Err(_) => return Err(Error::new(ErrorKind::IOError, "Could not copy or create a hard-link to the generated lib file.")), + }; } else { - let ar = self.get_ar(); - let cmd = ar.file_name().unwrap().to_string_lossy(); + let ar = self.get_ar()?; + let cmd = ar.file_name() + .ok_or_else(|| Error::new(ErrorKind::IOError, "Failed to get archiver (ar) path."))? 
+ .to_string_lossy(); run(self.cmd(&ar) .arg("crs") .arg(dst) .args(objects) .args(&self.objects), - &cmd); + &cmd)?; } + + Ok(()) } - fn ios_flags(&self, cmd: &mut Tool) { + fn ios_flags(&self, cmd: &mut Tool) -> Result<(), Error> { enum ArchSpec { Device(&'static str), Simulator(&'static str), } - let target = self.get_target(); - let arch = target.split('-').nth(0).unwrap(); + let target = self.get_target()?; + let arch = target.split('-').nth(0).ok_or_else(|| Error::new(ErrorKind::ArchitectureInvalid, "Unknown architecture for iOS target."))?; let arch = match arch { "arm" | "armv7" | "thumbv7" => ArchSpec::Device("armv7"), "armv7s" | "thumbv7s" => ArchSpec::Device("armv7s"), "arm64" | "aarch64" => ArchSpec::Device("arm64"), "i386" | "i686" => ArchSpec::Simulator("-m32"), "x86_64" => ArchSpec::Simulator("-m64"), - _ => fail("Unknown arch for iOS target"), + _ => return Err(Error::new(ErrorKind::ArchitectureInvalid, "Unknown architecture for iOS target.")), }; let sdk = match arch { @@ -869,14 +1231,18 @@ impl Config { .arg("--sdk") .arg(sdk) .stderr(Stdio::inherit()) - .output() - .unwrap() + .output()? .stdout; - let sdk_path = String::from_utf8(sdk_path).unwrap(); + let sdk_path = match String::from_utf8(sdk_path) { + Ok(p) => p, + Err(_) => return Err(Error::new(ErrorKind::IOError, "Unable to determine iOS SDK path.")), + }; cmd.args.push("-isysroot".into()); cmd.args.push(sdk_path.trim().into()); + + Ok(()) } fn cmd>(&self, prog: P) -> Command { @@ -887,12 +1253,12 @@ impl Config { cmd } - fn get_base_compiler(&self) -> Tool { + fn get_base_compiler(&self) -> Result { if let Some(ref c) = self.compiler { - return Tool::new(c.clone()); + return Ok(Tool::new(c.clone())); } - let host = self.get_host(); - let target = self.get_target(); + let host = self.get_host()?; + let target = self.get_target()?; let (env, msvc, gnu) = if self.cpp { ("CXX", "cl.exe", "g++") } else { @@ -908,7 +1274,7 @@ impl Config { "cc" }; - self.env_tool(env) + let tool_opt: Option = self.env_tool(env) .map(|(tool, args)| { let mut t = Tool::new(PathBuf::from(tool)); for arg in args { @@ -918,28 +1284,29 @@ impl Config { }) .or_else(|| { if target.contains("emscripten") { - //Windows uses bat file so we have to be a bit more specific let tool = if self.cpp { - if cfg!(windows) { - "em++.bat" - } else { - "em++" - } + "em++" } else { - if cfg!(windows) { - "emcc.bat" - } else { - "emcc" - } + "emcc" }; - - Some(Tool::new(PathBuf::from(tool))) + // Windows uses bat file so we have to be a bit more specific + if cfg!(windows) { + let mut t = Tool::new(PathBuf::from("cmd")); + t.args.push("/c".into()); + t.args.push(format!("{}.bat", tool).into()); + Some(t) + } else { + Some(Tool::new(PathBuf::from(tool))) + } } else { None } }) - .or_else(|| windows_registry::find_tool(&target, "cl.exe")) - .unwrap_or_else(|| { + .or_else(|| windows_registry::find_tool(&target, "cl.exe")); + + let tool = match tool_opt { + Some(t) => t, + None => { let compiler = if host.contains("windows") && target.contains("windows") { if target.contains("msvc") { msvc.to_string() @@ -948,7 +1315,7 @@ impl Config { } } else if target.contains("android") { format!("{}-{}", target.replace("armv7", "arm"), gnu) - } else if self.get_host() != target { + } else if self.get_host()? 
!= target { // CROSS_COMPILE is of the form: "arm-linux-gnueabi-" let cc_env = self.getenv("CROSS_COMPILE"); let cross_compile = cc_env.as_ref().map(|s| s.trim_right_matches('-')); @@ -996,12 +1363,15 @@ impl Config { default.to_string() }; Tool::new(PathBuf::from(compiler)) - }) + } + }; + + Ok(tool) } - fn get_var(&self, var_base: &str) -> Result { - let target = self.get_target(); - let host = self.get_host(); + fn get_var(&self, var_base: &str) -> Result { + let target = self.get_target()?; + let host = self.get_host()?; let kind = if host == target { "HOST" } else { "TARGET" }; let target_u = target.replace("-", "_"); let res = self.getenv(&format!("{}_{}", var_base, target)) @@ -1011,7 +1381,7 @@ impl Config { match res { Some(res) => Ok(res), - None => Err("could not get environment variable".to_string()), + None => Err(Error::new(ErrorKind::EnvVarNotFound, &format!("Could not find environment variable {}.", var_base))), } } @@ -1026,7 +1396,7 @@ impl Config { fn env_tool(&self, name: &str) -> Option<(String, Vec)> { self.get_var(name).ok().map(|tool| { - let whitelist = ["ccache", "distcc"]; + let whitelist = ["ccache", "distcc", "sccache"]; for t in whitelist.iter() { if tool.starts_with(t) && tool[t.len()..].starts_with(' ') { return (t.to_string(), vec![tool[t.len()..].trim_left().to_string()]); @@ -1038,61 +1408,85 @@ impl Config { /// Returns the default C++ standard library for the current target: `libc++` /// for OS X and `libstdc++` for anything else. - fn get_cpp_link_stdlib(&self) -> Option { - self.cpp_link_stdlib.clone().unwrap_or_else(|| { - let target = self.get_target(); - if target.contains("msvc") { - None - } else if target.contains("darwin") { - Some("c++".to_string()) - } else if target.contains("freebsd") { - Some("c++".to_string()) - } else { - Some("stdc++".to_string()) - } - }) + fn get_cpp_link_stdlib(&self) -> Result, Error> { + match self.cpp_link_stdlib.clone() { + Some(s) => Ok(s), + None => { + let target = self.get_target()?; + if target.contains("msvc") { + Ok(None) + } else if target.contains("darwin") { + Ok(Some("c++".to_string())) + } else if target.contains("freebsd") { + Ok(Some("c++".to_string())) + } else { + Ok(Some("stdc++".to_string())) + } + }, + } } - fn get_ar(&self) -> PathBuf { - self.archiver + fn get_ar(&self) -> Result { + match self.archiver .clone() - .or_else(|| self.get_var("AR").map(PathBuf::from).ok()) - .unwrap_or_else(|| { - if self.get_target().contains("android") { - PathBuf::from(format!("{}-ar", self.get_target().replace("armv7", "arm"))) - } else if self.get_target().contains("emscripten") { - //Windows use bat files so we have to be a bit more specific - let tool = if cfg!(windows) { - "emar.bat" - } else { - "emar" - }; + .or_else(|| self.get_var("AR").map(PathBuf::from).ok()) { + Some(p) => Ok(p), + None => { + if self.get_target()?.contains("android") { + Ok(PathBuf::from(format!("{}-ar", self.get_target()?.replace("armv7", "arm")))) + } else if self.get_target()?.contains("emscripten") { + //Windows use bat files so we have to be a bit more specific + let tool = if cfg!(windows) { + "emar.bat" + } else { + "emar" + }; - PathBuf::from(tool) - } else { - PathBuf::from("ar") + Ok(PathBuf::from(tool)) + } else { + Ok(PathBuf::from("ar")) + } } - }) + } } - fn get_target(&self) -> String { - self.target.clone().unwrap_or_else(|| self.getenv_unwrap("TARGET")) + fn get_target(&self) -> Result { + match self.target.clone() { + Some(t) => Ok(t), + None => Ok(self.getenv_unwrap("TARGET")?), + } } - fn get_host(&self) -> 
String { - self.host.clone().unwrap_or_else(|| self.getenv_unwrap("HOST")) + fn get_host(&self) -> Result { + match self.host.clone() { + Some(h) => Ok(h), + None => Ok(self.getenv_unwrap("HOST")?), + } } - fn get_opt_level(&self) -> String { - self.opt_level.as_ref().cloned().unwrap_or_else(|| self.getenv_unwrap("OPT_LEVEL")) + fn get_opt_level(&self) -> Result { + match self.opt_level.as_ref().cloned() { + Some(ol) => Ok(ol), + None => Ok(self.getenv_unwrap("OPT_LEVEL")?), + } } fn get_debug(&self) -> bool { - self.debug.unwrap_or_else(|| self.getenv_unwrap("PROFILE") == "debug") + self.debug.unwrap_or_else(|| { + match self.getenv("DEBUG") { + Some(s) => s != "false", + None => false, + } + }) } - fn get_out_dir(&self) -> PathBuf { - self.out_dir.clone().unwrap_or_else(|| env::var_os("OUT_DIR").map(PathBuf::from).unwrap()) + fn get_out_dir(&self) -> Result { + match self.out_dir.clone() { + Some(p) => Ok(p), + None => Ok(env::var_os("OUT_DIR") + .map(PathBuf::from) + .ok_or_else(|| Error::new(ErrorKind::EnvVarNotFound, "Environment variable OUT_DIR not defined."))?), + } } fn getenv(&self, v: &str) -> Option { @@ -1101,10 +1495,10 @@ impl Config { r } - fn getenv_unwrap(&self, v: &str) -> String { + fn getenv_unwrap(&self, v: &str) -> Result { match self.getenv(v) { - Some(s) => s, - None => fail(&format!("environment variable `{}` not defined", v)), + Some(s) => Ok(s), + None => Err(Error::new(ErrorKind::EnvVarNotFound, &format!("Environment variable {} not defined.", v.to_string()))), } } @@ -1115,6 +1509,12 @@ impl Config { } } +impl Default for Build { + fn default() -> Build { + Build::new() + } +} + impl Tool { fn new(path: PathBuf) -> Tool { // Try to detect family of the tool from its name, falling back to Gnu. @@ -1174,31 +1574,42 @@ impl Tool { } } -fn run(cmd: &mut Command, program: &str) { - let (mut child, print) = spawn(cmd, program); - let status = child.wait().expect("failed to wait on child process"); +fn run(cmd: &mut Command, program: &str) -> Result<(), Error> { + let (mut child, print) = spawn(cmd, program)?; + let status = match child.wait() { + Ok(s) => s, + Err(_) => return Err(Error::new(ErrorKind::ToolExecError, &format!("Failed to wait on spawned child process, command {:?} with args {:?}.", cmd, program))), + }; print.join().unwrap(); println!("{}", status); - if !status.success() { - fail(&format!("command did not execute successfully, got: {}", status)); + + if status.success() { + Ok(()) + } else { + Err(Error::new(ErrorKind::ToolExecError, &format!("Command {:?} with args {:?} did not execute successfully (status code {}).", cmd, program, status))) } } -fn run_output(cmd: &mut Command, program: &str) -> Vec { +fn run_output(cmd: &mut Command, program: &str) -> Result, Error> { cmd.stdout(Stdio::piped()); - let (mut child, print) = spawn(cmd, program); + let (mut child, print) = spawn(cmd, program)?; let mut stdout = vec![]; child.stdout.take().unwrap().read_to_end(&mut stdout).unwrap(); - let status = child.wait().expect("failed to wait on child process"); + let status = match child.wait() { + Ok(s) => s, + Err(_) => return Err(Error::new(ErrorKind::ToolExecError, &format!("Failed to wait on spawned child process, command {:?} with args {:?}.", cmd, program))), + }; print.join().unwrap(); println!("{}", status); - if !status.success() { - fail(&format!("command did not execute successfully, got: {}", status)); + + if status.success() { + Ok(stdout) + } else { + Err(Error::new(ErrorKind::ToolExecError, &format!("Command {:?} with args {:?} did not execute 
successfully (status code {}).", cmd, program, status))) } - stdout } -fn spawn(cmd: &mut Command, program: &str) -> (Child, JoinHandle<()>) { +fn spawn(cmd: &mut Command, program: &str) -> Result<(Child, JoinHandle<()>), Error> { println!("running: {:?}", cmd); // Capture the standard error coming from these programs, and write it out @@ -1215,26 +1626,34 @@ fn spawn(cmd: &mut Command, program: &str) -> (Child, JoinHandle<()>) { println!(""); } }); - (child, print) + Ok((child, print)) } Err(ref e) if e.kind() == io::ErrorKind::NotFound => { let extra = if cfg!(windows) { - " (see https://github.com/alexcrichton/gcc-rs#compile-time-requirements \ + " (see https://github.com/alexcrichton/cc-rs#compile-time-requirements \ for help)" } else { "" }; - fail(&format!("failed to execute command: {}\nIs `{}` \ - not installed?{}", - e, - program, - extra)); + Err(Error::new(ErrorKind::ToolNotFound, &format!("Failed to find tool. Is `{}` installed?{}", program, extra))) } - Err(e) => fail(&format!("failed to execute command: {}", e)), + Err(_) => Err(Error::new(ErrorKind::ToolExecError, &format!("Command {:?} with args {:?} failed to start.", cmd, program))), } } fn fail(s: &str) -> ! { - println!("\n\n{}\n\n", s); - panic!() + panic!("\n\nInternal error occurred: {}\n\n", s) +} + + +fn command_add_output_file(cmd: &mut Command, dst: &Path, msvc: bool, is_asm: bool) { + if msvc && is_asm { + cmd.arg("/Fo").arg(dst); + } else if msvc { + let mut s = OsString::from("/Fo"); + s.push(&dst); + cmd.arg(s); + } else { + cmd.arg("-o").arg(&dst); + } } diff --git a/src/vendor/gcc/src/registry.rs b/src/vendor/cc/src/registry.rs similarity index 100% rename from src/vendor/gcc/src/registry.rs rename to src/vendor/cc/src/registry.rs diff --git a/src/vendor/gcc/src/setup_config.rs b/src/vendor/cc/src/setup_config.rs similarity index 100% rename from src/vendor/gcc/src/setup_config.rs rename to src/vendor/cc/src/setup_config.rs diff --git a/src/vendor/gcc/src/winapi.rs b/src/vendor/cc/src/winapi.rs similarity index 99% rename from src/vendor/gcc/src/winapi.rs rename to src/vendor/cc/src/winapi.rs index 010d165c70..3fb04087fd 100644 --- a/src/vendor/gcc/src/winapi.rs +++ b/src/vendor/cc/src/winapi.rs @@ -67,8 +67,8 @@ pub trait Interface { fn uuidof() -> GUID; } -#[link(name = "Ole32")] -#[link(name = "OleAut32")] +#[link(name = "ole32")] +#[link(name = "oleaut32")] extern { } extern "system" { diff --git a/src/vendor/gcc/src/windows_registry.rs b/src/vendor/cc/src/windows_registry.rs similarity index 99% rename from src/vendor/gcc/src/windows_registry.rs rename to src/vendor/cc/src/windows_registry.rs index 35758091db..9099e0f8d1 100644 --- a/src/vendor/gcc/src/windows_registry.rs +++ b/src/vendor/cc/src/windows_registry.rs @@ -87,6 +87,7 @@ pub fn find_tool(target: &str, tool: &str) -> Option { } /// A version of Visual Studio +#[derive(Debug, PartialEq, Eq, Copy, Clone)] pub enum VsVers { /// Visual Studio 12 (2013) Vs12, diff --git a/src/vendor/gcc/tests/cc_env.rs b/src/vendor/cc/tests/cc_env.rs similarity index 98% rename from src/vendor/gcc/tests/cc_env.rs rename to src/vendor/cc/tests/cc_env.rs index 559dbe8ad4..642200d07a 100644 --- a/src/vendor/gcc/tests/cc_env.rs +++ b/src/vendor/cc/tests/cc_env.rs @@ -1,5 +1,5 @@ extern crate tempdir; -extern crate gcc; +extern crate cc; use std::env; diff --git a/src/vendor/gcc/tests/support/mod.rs b/src/vendor/cc/tests/support/mod.rs similarity index 95% rename from src/vendor/gcc/tests/support/mod.rs rename to src/vendor/cc/tests/support/mod.rs index 
135a6635b5..428854625c 100644 --- a/src/vendor/gcc/tests/support/mod.rs +++ b/src/vendor/cc/tests/support/mod.rs @@ -6,7 +6,7 @@ use std::fs::{self, File}; use std::io::prelude::*; use std::path::PathBuf; -use gcc; +use cc; use tempdir::TempDir; pub struct Test { @@ -36,7 +36,7 @@ impl Test { pub fn gnu() -> Test { let t = Test::new(); - t.shim("cc").shim("ar"); + t.shim("cc").shim("c++").shim("ar"); t } @@ -55,8 +55,8 @@ impl Test { self } - pub fn gcc(&self) -> gcc::Config { - let mut cfg = gcc::Config::new(); + pub fn gcc(&self) -> cc::Build { + let mut cfg = cc::Build::new(); let mut path = env::split_paths(&env::var_os("PATH").unwrap()).collect::>(); path.insert(0, self.td.path().to_owned()); let target = if self.msvc { diff --git a/src/vendor/gcc/tests/test.rs b/src/vendor/cc/tests/test.rs similarity index 73% rename from src/vendor/gcc/tests/test.rs rename to src/vendor/cc/tests/test.rs index 2be5ecdbaf..b8f1abf1eb 100644 --- a/src/vendor/gcc/tests/test.rs +++ b/src/vendor/cc/tests/test.rs @@ -1,4 +1,4 @@ -extern crate gcc; +extern crate cc; extern crate tempdir; use support::Test; @@ -10,7 +10,7 @@ fn gnu_smoke() { let test = Test::gnu(); test.gcc() .file("foo.c") - .compile("libfoo.a"); + .compile("foo"); test.cmd(0) .must_have("-O2") @@ -28,7 +28,7 @@ fn gnu_opt_level_1() { test.gcc() .opt_level(1) .file("foo.c") - .compile("libfoo.a"); + .compile("foo"); test.cmd(0) .must_have("-O1") @@ -41,7 +41,7 @@ fn gnu_opt_level_s() { test.gcc() .opt_level_str("s") .file("foo.c") - .compile("libfoo.a"); + .compile("foo"); test.cmd(0) .must_have("-Os") @@ -57,10 +57,33 @@ fn gnu_debug() { test.gcc() .debug(true) .file("foo.c") - .compile("libfoo.a"); + .compile("foo"); test.cmd(0).must_have("-g"); } +#[test] +fn gnu_warnings_into_errors() { + let test = Test::gnu(); + test.gcc() + .warnings_into_errors(true) + .file("foo.c") + .compile("foo"); + + test.cmd(0).must_have("-Werror"); +} + +#[test] +fn gnu_warnings() { + let test = Test::gnu(); + test.gcc() + .warnings(true) + .file("foo.c") + .compile("foo"); + + test.cmd(0).must_have("-Wall") + .must_have("-Wextra"); +} + #[test] fn gnu_x86_64() { for vendor in &["unknown-linux-gnu", "apple-darwin"] { @@ -70,7 +93,7 @@ fn gnu_x86_64() { .target(&target) .host(&target) .file("foo.c") - .compile("libfoo.a"); + .compile("foo"); test.cmd(0) .must_have("-fPIC") @@ -88,7 +111,7 @@ fn gnu_x86_64_no_pic() { .target(&target) .host(&target) .file("foo.c") - .compile("libfoo.a"); + .compile("foo"); test.cmd(0).must_not_have("-fPIC"); } @@ -103,7 +126,7 @@ fn gnu_i686() { .target(&target) .host(&target) .file("foo.c") - .compile("libfoo.a"); + .compile("foo"); test.cmd(0) .must_have("-m32"); @@ -120,7 +143,7 @@ fn gnu_i686_pic() { .target(&target) .host(&target) .file("foo.c") - .compile("libfoo.a"); + .compile("foo"); test.cmd(0).must_have("-fPIC"); } @@ -132,7 +155,7 @@ fn gnu_set_stdlib() { test.gcc() .cpp_set_stdlib(Some("foo")) .file("foo.c") - .compile("libfoo.a"); + .compile("foo"); test.cmd(0).must_not_have("-stdlib=foo"); } @@ -143,7 +166,7 @@ fn gnu_include() { test.gcc() .include("foo/bar") .file("foo.c") - .compile("libfoo.a"); + .compile("foo"); test.cmd(0).must_have("-I").must_have("foo/bar"); } @@ -152,10 +175,10 @@ fn gnu_include() { fn gnu_define() { let test = Test::gnu(); test.gcc() - .define("FOO", Some("bar")) + .define("FOO", "bar") .define("BAR", None) .file("foo.c") - .compile("libfoo.a"); + .compile("foo"); test.cmd(0).must_have("-DFOO=bar").must_have("-DBAR"); } @@ -165,7 +188,7 @@ fn gnu_compile_assembly() { let test = 
Test::gnu(); test.gcc() .file("foo.S") - .compile("libfoo.a"); + .compile("foo"); test.cmd(0).must_have("foo.S"); } @@ -176,13 +199,48 @@ fn gnu_shared() { .file("foo.c") .shared_flag(true) .static_flag(false) - .compile("libfoo.a"); + .compile("foo"); test.cmd(0) .must_have("-shared") .must_not_have("-static"); } +#[test] +fn gnu_flag_if_supported() { + if cfg!(windows) { + return + } + let test = Test::gnu(); + test.gcc() + .file("foo.c") + .flag_if_supported("-Wall") + .flag_if_supported("-Wflag-does-not-exist") + .flag_if_supported("-std=c++11") + .compile("foo"); + + test.cmd(0) + .must_have("-Wall") + .must_not_have("-Wflag-does-not-exist") + .must_not_have("-std=c++11"); +} + +#[test] +fn gnu_flag_if_supported_cpp() { + if cfg!(windows) { + return + } + let test = Test::gnu(); + test.gcc() + .cpp(true) + .file("foo.cpp") + .flag_if_supported("-std=c++11") + .compile("foo"); + + test.cmd(0) + .must_have("-std=c++11"); +} + #[test] fn gnu_static() { let test = Test::gnu(); @@ -190,7 +248,7 @@ fn gnu_static() { .file("foo.c") .shared_flag(false) .static_flag(true) - .compile("libfoo.a"); + .compile("foo"); test.cmd(0) .must_have("-static") @@ -202,7 +260,7 @@ fn msvc_smoke() { let test = Test::msvc(); test.gcc() .file("foo.c") - .compile("libfoo.a"); + .compile("foo"); test.cmd(0) .must_have("/O2") @@ -219,7 +277,7 @@ fn msvc_opt_level_0() { test.gcc() .opt_level(0) .file("foo.c") - .compile("libfoo.a"); + .compile("foo"); test.cmd(0).must_not_have("/O2"); } @@ -230,7 +288,7 @@ fn msvc_debug() { test.gcc() .debug(true) .file("foo.c") - .compile("libfoo.a"); + .compile("foo"); test.cmd(0).must_have("/Z7"); } @@ -240,7 +298,7 @@ fn msvc_include() { test.gcc() .include("foo/bar") .file("foo.c") - .compile("libfoo.a"); + .compile("foo"); test.cmd(0).must_have("/I").must_have("foo/bar"); } @@ -249,10 +307,10 @@ fn msvc_include() { fn msvc_define() { let test = Test::msvc(); test.gcc() - .define("FOO", Some("bar")) + .define("FOO", "bar") .define("BAR", None) .file("foo.c") - .compile("libfoo.a"); + .compile("foo"); test.cmd(0).must_have("/DFOO=bar").must_have("/DBAR"); } @@ -263,7 +321,7 @@ fn msvc_static_crt() { test.gcc() .static_crt(true) .file("foo.c") - .compile("libfoo.a"); + .compile("foo"); test.cmd(0).must_have("/MT"); } @@ -274,7 +332,7 @@ fn msvc_no_static_crt() { test.gcc() .static_crt(false) .file("foo.c") - .compile("libfoo.a"); + .compile("foo"); test.cmd(0).must_have("/MD"); } diff --git a/src/vendor/clap/.cargo-checksum.json b/src/vendor/clap/.cargo-checksum.json index 5672b2cafb..05ea903683 100644 --- a/src/vendor/clap/.cargo-checksum.json +++ b/src/vendor/clap/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{".appveyor.yml":"38fb7e583271029caad727c9123a2b2679b7c59971de418f16dc5136dbebaeb5",".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".clog.toml":"f691701bd51b5f311931d0d8f05fa3d78c00dda8d60f3313e21011309c736ff1",".github/CONTRIBUTING.md":"f7eff737f3aa25294802fefb233e3758a64b248781dbbf3262532d693f340a87",".github/ISSUE_TEMPLATE.md":"181a07050efec74e52bb3890585eb02dc77259ac6396ff33fe0869208fa86cee",".gitignore":"57b1cc6deeaf68d35909201e4bb863c7dbec899ceaa17edde7b9fe64ece8c3e9",".mention-bot":"51790ab49f43ed86a4a7c3d2e468aa5fa526ca5e2ac6af20432a2cb5b2fdbe84",".travis.yml":"691bfd78c9b54845aa38a478ccd2ad8196598f8f486d6aeeef2090bb5fe341f5","CHANGELOG.md":"b4ece6d3c5d05f30fbbf080c5335a86e03013d9978b6c504c6f59e7e420d2fb1","CONTRIBUTORS.md":"5d7dbafaff6879bbfbb01b22cca299953ec163872d8d624bbf99e20851ca0165","Cargo.toml":"310f013f05c604ecf332ec6447c0be4d9c4c24cee2f64ea53f2ae3101465efbb","Cargo.toml.orig":"bb1e09d0c71122ab332d2230082a84fd0399acaa7730981644e24213639334d9","LICENSE-MIT":"6725d1437fc6c77301f2ff0e7d52914cf4f9509213e1078dc77d9356dbe6eac5","README.md":"37f3ed7b305737c08f6a4a6e287fc0c2e49fc89c25bf6e110b6d7c0e390f3ad3","clap-test.rs":"995a9d41ef372a814616113f4a58c1e580043678e54527afc2ebee7e8e1d3ef5","index.html":"36f9ce4465266f3af9a259444b01c4239200473cabfc848f789f75b322a3ea8f","justfile":"811b2dec57aec46e570aeeb9945018cf87fe65f6d5b27cdb9ffca79d906910f6","rustfmt.toml":"8fd2d63119df515fd5f44e530c709b19d66b09fbc2e22a640bf4b64c57e7d6b3","src/app/help.rs":"dae302ae9db036e480034d2ec4a8af59333d2ac4c42d9db76861fb6572b3289b","src/app/macros.rs":"77764555c2831180f4e976d5bcd95f3da4fdb609d77ae84c94f4ce602afd0c9b","src/app/meta.rs":"a56d28bb466a8ba68155b3f2883e85228b4b74cf25658f62fc050e07cff2dc85","src/app/mod.rs":"da44fb4d0390299763fe8975f171b800be80e8630cf4aa5fa08d32a667c18eca","src/app/parser.rs":"2e9f9b8bdab727a57d4c904aa701457a49ffd62fb7e8c2366fe6d24a06da6f1b","src/app/settings.rs":"c6b87d4da01891123edddee9e28b048cba0c19d8c1db34ad5ad3e85b657c6b97","src/app/usage.rs":"703cec975c53e7f01b14b4593de41c518910ab347bc4c54efe79367a704ffc4c","src/app/validator.rs":"3e41b683e6e60535cb8a52e5e4b08b4bb14a29780869f486110488c627124e9c","src/args/any_arg.rs":"b082385eeff2505ced7b747bd44d20a3fb6fd9d4bd14be9e99870699c43ea072","src/args/arg.rs":"673de3f1957eccb1b116255bac9638fe24c0da54ccb358d958446c8ed54c9621","src/args/arg_builder/base.rs":"8b99a9ab811df3e0bdcfba8c0994042b0bcd06d8ddf794ab559baaf9a490ba59","src/args/arg_builder/flag.rs":"3e2c28bb9e3a30ecf7d3b8502af70738893d450511719724a5836598bcfbdb06","src/args/arg_builder/mod.rs":"7a32c8fd85b48f7b60e5f2c13dc70fa9100aa65cd933ba419300d28d682bf722","src/args/arg_builder/option.rs":"f45429d97fafa630b2d74816eddddc4d9f19498b7619a5f623d6dfb9f50f6ed8","src/args/arg_builder/positional.rs":"e9fe32bad3744b75548e7fd3e9f5bccab19ba835ff9506f147b32c9fb5f46c7a","src/args/arg_builder/switched.rs":"61f5121b0ec746461215a47e1b7a4d699a37a3f181172820e0615f68d5f6f0ef","src/args/arg_builder/valued.rs":"19368a03e046d6b63451c3d04dff6e51d49f140ed45330f82879539c6d1b28dd","src/args/arg_matcher.rs":"27829739ae12ac7800a26109e751ce9f8c3d26e262d41de161a38baf5c421167","src/args/arg_matches.rs":"5fc622e847810ff6a956da00fa64f4a4cff3acd656abf742c4a52e64dfa61218","src/args/group.rs":"7fe5e2f0dd24faf1765410a9336d85976875e964d7f246e1fa216c4808d88dde","src/args/macros.rs":"57f248e2694f9413cbbaf9087813ed4f27064f5f8e29eaf4ec41ec2b274ae806","src/args/matched_arg.rs":"1ed8d338869ecc3b5fa426ef4cf42f4c9c3b1dd538cdea1fe0489169345536f7","src/args/mod.rs":"c155cd989fa4ca1f8d
e6a79115afbf5086f092adcb854ff9698b9100f45fc323","src/args/settings.rs":"2753ff50046def9ccb7f601b3d9f565348da1ef0253af24ccee94616a2e5c470","src/args/subcommand.rs":"e1ad9638c33785f1301675de1795b0a4f4b079452aa11f7526d263c2a1179432","src/completions/bash.rs":"116c6830ee2b6310f299a69924f5b1e39b05ebec2b5f7b0ffe3b6938b7fa5514","src/completions/fish.rs":"63975f8beea9af6bef66c7dd7938bfa61c6f871995a74dbc1545daa9fbc1f2d0","src/completions/macros.rs":"ebad5037e6e63401b1a54498e09d3bd93d1a3a06f045c2990902d47eb9a73774","src/completions/mod.rs":"5d4a734df6a21e6c1e0831a2f7be50a45d2e7bdaf7475589ea78b978643229cd","src/completions/powershell.rs":"866409e5d0a9b2551d739f86c0e4faf86911e9e7c656fb74b38e6960844233b5","src/completions/shell.rs":"c7995ca229fd0d8671761da0aca0513c4f740165f02d06cd97aa0ae881c22cd4","src/completions/zsh.rs":"8ac4576e1cb3b1403dbb35ce146159aa8b29864e1d8201776200d999052b422d","src/errors.rs":"3c46a4d79d9304ffb152a190528ec9db0cb6c05799bb5211e6df9f7d7abab814","src/fmt.rs":"f205f784268572544ff7e84a89f416c898255404275d4ab1f8fea7e89695daa9","src/lib.rs":"87b3ee49c6389cdbaa23e705732bcc68e7235bb16ff469321c92a89258c21beb","src/macros.rs":"9ae2fd66d40692edbe245b3cfe17b87401f00362368e49fcbb333f771b35a7df","src/osstringext.rs":"a87a5a0685dd8310f6329d5f8e8f54c0fac68eb75595a835aeb1c36208efd5f9","src/strext.rs":"d4418d396069e9c05804f92c042ba7192a4244e46059e2edc98670b45cd2daee","src/suggestions.rs":"ad1165a9896382a0f09f73c0f6bf468454c19da207f28c3973e02879f453ad68","src/usage_parser.rs":"a04143bba42a6506746091a3f898c38e2c7409bacefed21fa8194c90961ca390"},"package":"2267a8fdd4dce6956ba6649e130f62fb279026e5e84b92aa939ac8f85ce3f9f0"} \ No newline at end of file +{"files":{".appveyor.yml":"38fb7e583271029caad727c9123a2b2679b7c59971de418f16dc5136dbebaeb5",".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".clog.toml":"f691701bd51b5f311931d0d8f05fa3d78c00dda8d60f3313e21011309c736ff1",".github/CONTRIBUTING.md":"f7eff737f3aa25294802fefb233e3758a64b248781dbbf3262532d693f340a87",".github/ISSUE_TEMPLATE.md":"181a07050efec74e52bb3890585eb02dc77259ac6396ff33fe0869208fa86cee",".gitignore":"57b1cc6deeaf68d35909201e4bb863c7dbec899ceaa17edde7b9fe64ece8c3e9",".mention-bot":"51790ab49f43ed86a4a7c3d2e468aa5fa526ca5e2ac6af20432a2cb5b2fdbe84",".travis.yml":"691bfd78c9b54845aa38a478ccd2ad8196598f8f486d6aeeef2090bb5fe341f5","CHANGELOG.md":"b4291c23056a29a2a696d06c5a3799d6ad82c8a44c41bf2ba9ad5d7ab6cca1b4","CONTRIBUTORS.md":"6890277f02e66ec77120d3335240ac677abd126db5f7a9d9168f47f5cb4df977","Cargo.toml":"a6bbc2ce7e93d82ee2ea3eb882cad8c69cb68fd591ef994c3f2559b5a18092c5","Cargo.toml.orig":"ade062cbee6fc212a4f401a77c82b5865665521572ddbf8da60f30bd490bdf13","LICENSE-MIT":"6725d1437fc6c77301f2ff0e7d52914cf4f9509213e1078dc77d9356dbe6eac5","README.md":"6e17795ca075cb74371dac33e174b15a989211149e37df75bada240ddeb3a5c1","SPONSORS.md":"097c6b7a80feba1e1b9170fa641a2d7d1868e6115fce73a90ab26448ba36f843","clap-test.rs":"995a9d41ef372a814616113f4a58c1e580043678e54527afc2ebee7e8e1d3ef5","index.html":"36f9ce4465266f3af9a259444b01c4239200473cabfc848f789f75b322a3ea8f","justfile":"811b2dec57aec46e570aeeb9945018cf87fe65f6d5b27cdb9ffca79d906910f6","rustfmt.toml":"8fd2d63119df515fd5f44e530c709b19d66b09fbc2e22a640bf4b64c57e7d6b3","src/app/help.rs":"cf523f570f383a0da1ec21a8c75ecfc21d60c7cb55b08e5f8d78aface649e042","src/app/macros.rs":"77764555c2831180f4e976d5bcd95f3da4fdb609d77ae84c94f4ce602afd0c9b","src/app/meta.rs":"a56d28bb466a8ba68155b3f2883e85228b4b74cf25658f62fc050e07cff2dc85","src/app/mod.rs":"da44fb4d0390299763fe8975f171b800
be80e8630cf4aa5fa08d32a667c18eca","src/app/parser.rs":"7d4143418c4c0ef52c5169c722b82f99d8f109fb55be7af4725d1ac60fa27bf6","src/app/settings.rs":"c6b87d4da01891123edddee9e28b048cba0c19d8c1db34ad5ad3e85b657c6b97","src/app/usage.rs":"703cec975c53e7f01b14b4593de41c518910ab347bc4c54efe79367a704ffc4c","src/app/validator.rs":"cf9ea30b64efa50fe9bedfdaba493b9f46d62ec5834f5c89d052d564449d3a98","src/args/any_arg.rs":"b082385eeff2505ced7b747bd44d20a3fb6fd9d4bd14be9e99870699c43ea072","src/args/arg.rs":"c39c26ac9251fe20a3b330518edcc58e443fe1d5fcab754010b3f6bb54a66e15","src/args/arg_builder/base.rs":"8b99a9ab811df3e0bdcfba8c0994042b0bcd06d8ddf794ab559baaf9a490ba59","src/args/arg_builder/flag.rs":"3e2c28bb9e3a30ecf7d3b8502af70738893d450511719724a5836598bcfbdb06","src/args/arg_builder/mod.rs":"7a32c8fd85b48f7b60e5f2c13dc70fa9100aa65cd933ba419300d28d682bf722","src/args/arg_builder/option.rs":"f45429d97fafa630b2d74816eddddc4d9f19498b7619a5f623d6dfb9f50f6ed8","src/args/arg_builder/positional.rs":"e9fe32bad3744b75548e7fd3e9f5bccab19ba835ff9506f147b32c9fb5f46c7a","src/args/arg_builder/switched.rs":"61f5121b0ec746461215a47e1b7a4d699a37a3f181172820e0615f68d5f6f0ef","src/args/arg_builder/valued.rs":"19368a03e046d6b63451c3d04dff6e51d49f140ed45330f82879539c6d1b28dd","src/args/arg_matcher.rs":"27829739ae12ac7800a26109e751ce9f8c3d26e262d41de161a38baf5c421167","src/args/arg_matches.rs":"5fc622e847810ff6a956da00fa64f4a4cff3acd656abf742c4a52e64dfa61218","src/args/group.rs":"7fe5e2f0dd24faf1765410a9336d85976875e964d7f246e1fa216c4808d88dde","src/args/macros.rs":"57f248e2694f9413cbbaf9087813ed4f27064f5f8e29eaf4ec41ec2b274ae806","src/args/matched_arg.rs":"1ed8d338869ecc3b5fa426ef4cf42f4c9c3b1dd538cdea1fe0489169345536f7","src/args/mod.rs":"c155cd989fa4ca1f8de6a79115afbf5086f092adcb854ff9698b9100f45fc323","src/args/settings.rs":"2753ff50046def9ccb7f601b3d9f565348da1ef0253af24ccee94616a2e5c470","src/args/subcommand.rs":"e1ad9638c33785f1301675de1795b0a4f4b079452aa11f7526d263c2a1179432","src/completions/bash.rs":"116c6830ee2b6310f299a69924f5b1e39b05ebec2b5f7b0ffe3b6938b7fa5514","src/completions/fish.rs":"2db1ee7fa5b25d7f88883c9101dc7c7b208f315ee6d4a2dc4e69a2f24de48edd","src/completions/macros.rs":"ebad5037e6e63401b1a54498e09d3bd93d1a3a06f045c2990902d47eb9a73774","src/completions/mod.rs":"5d4a734df6a21e6c1e0831a2f7be50a45d2e7bdaf7475589ea78b978643229cd","src/completions/powershell.rs":"866409e5d0a9b2551d739f86c0e4faf86911e9e7c656fb74b38e6960844233b5","src/completions/shell.rs":"c7995ca229fd0d8671761da0aca0513c4f740165f02d06cd97aa0ae881c22cd4","src/completions/zsh.rs":"e98cc3676c0bfa67f0816947b932c92a14cbcf13b45656cf2f8683f9ab05fbca","src/errors.rs":"3c46a4d79d9304ffb152a190528ec9db0cb6c05799bb5211e6df9f7d7abab814","src/fmt.rs":"f205f784268572544ff7e84a89f416c898255404275d4ab1f8fea7e89695daa9","src/lib.rs":"c086305eb4e9f4fd80b3b2811e3ae28122be8cd0b192fd3474ee4ffc0fdf65bd","src/macros.rs":"9ae2fd66d40692edbe245b3cfe17b87401f00362368e49fcbb333f771b35a7df","src/osstringext.rs":"a87a5a0685dd8310f6329d5f8e8f54c0fac68eb75595a835aeb1c36208efd5f9","src/strext.rs":"d4418d396069e9c05804f92c042ba7192a4244e46059e2edc98670b45cd2daee","src/suggestions.rs":"c6a6deb31485277eefc53ba22e1872337cfdd1a94a626419d4345af5a1dfb1aa","src/usage_parser.rs":"a04143bba42a6506746091a3f898c38e2c7409bacefed21fa8194c90961ca390"},"package":"3451e409013178663435d6f15fdb212f14ee4424a3d74f979d081d0a66b6f1f2"} \ No newline at end of file diff --git a/src/vendor/clap/CHANGELOG.md b/src/vendor/clap/CHANGELOG.md index b5d2b8ba76..ecdc381c9c 100644 --- 
a/src/vendor/clap/CHANGELOG.md +++ b/src/vendor/clap/CHANGELOG.md @@ -1,3 +1,35 @@ + +### v2.26.2 (2017-09-14) + + +#### Improvements + +* if all subcommands are hidden, the subcommands section of the help message is no longer displayed ([4ae7b046](https://github.com/kbknapp/clap-rs/commit/4ae7b0464750bc07ec80ece38e43f003fdd1b8ae), closes [#1046](https://github.com/kbknapp/clap-rs/issues/1046)) + +#### Bug Fixes + +* fixes a bug where default values are not applied if the option supports zero values ([9c248cbf](https://github.com/kbknapp/clap-rs/commit/9c248cbf7d8a825119bc387c23e9a1d1989682b0), closes [#1047](https://github.com/kbknapp/clap-rs/issues/1047)) + + + + +### v2.26.1 (2017-09-14) + + +#### Bug Fixes + +* fixes using require_equals(true) and min_values(0) together ([10ae208f](https://github.com/kbknapp/clap-rs/commit/10ae208f68518eff6e98166724065745f4083174), closes [#1044](https://github.com/kbknapp/clap-rs/issues/1044)) +* escape special characters in zsh and fish completions ([87e019fc](https://github.com/kbknapp/clap-rs/commit/87e019fc84ba6193a8c4ddc26c61eb99efffcd25)) +* avoid panic generating default help msg if term width set to 0 due to bug in textwrap 0.7.0 ([b3eadb0d](https://github.com/kbknapp/clap-rs/commit/b3eadb0de516106db4e08f078ad32e8f6d6e7a57)) +* Change `who's` -> `whose` ([53c1ffe8](https://github.com/kbknapp/clap-rs/commit/53c1ffe87f38b05d8804a0f7832412a952845349)) +* adds a debug assertion to ensure all args added to groups actually exist ([7ad123e2](https://github.com/kbknapp/clap-rs/commit/7ad123e2c02577e3ca30f7e205181e896b157d11), closes [#917](https://github.com/kbknapp/clap-rs/issues/917)) +* fixes a bug where args that allow values to start with a hyphen couldnt contain a double hyphen -- as a value ([ab2f4c9e](https://github.com/kbknapp/clap-rs/commit/ab2f4c9e563e36ec739a4b55d5a5b76fdb9e9fa4), closes [#960](https://github.com/kbknapp/clap-rs/issues/960)) +* fixes a bug where positional argument help text is misaligned ([54c16836](https://github.com/kbknapp/clap-rs/commit/54c16836dea4651806a2cfad53146a83fa3abf21)) +* **Help Message:** fixes long_about not being usable ([a8257ea0](https://github.com/kbknapp/clap-rs/commit/a8257ea0ffb812e552aca256c4a3d2aebfd8065b), closes [#1043](https://github.com/kbknapp/clap-rs/issues/1043)) +* **Suggestions:** output for flag after subcommand ([434ea5ba](https://github.com/kbknapp/clap-rs/commit/434ea5ba71395d8c1afcf88e69f0b0d8339b01a1)) + + + ## v2.26.0 (2017-07-29) diff --git a/src/vendor/clap/CONTRIBUTORS.md b/src/vendor/clap/CONTRIBUTORS.md index 0a0d93f21e..4f86b45573 100644 --- a/src/vendor/clap/CONTRIBUTORS.md +++ b/src/vendor/clap/CONTRIBUTORS.md @@ -1,65 +1,77 @@ the following is a list of contributors: -[kbknapp](https://github.com/kbknapp) |[homu](https://github.com/homu) |[Vinatorul](https://github.com/Vinatorul) |[tormol](https://github.com/tormol) |[sru](https://github.com/sru) |[nabijaczleweli](https://github.com/nabijaczleweli) | +[kbknapp](https://github.com/kbknapp) |[homu](https://github.com/homu) |[Vinatorul](https://github.com/Vinatorul) |[tormol](https://github.com/tormol) |[little-dude](https://github.com/little-dude) |[sru](https://github.com/sru) | :---: |:---: |:---: |:---: |:---: |:---: | -[kbknapp](https://github.com/kbknapp) |[homu](https://github.com/homu) |[Vinatorul](https://github.com/Vinatorul) |[tormol](https://github.com/tormol) |[sru](https://github.com/sru) |[nabijaczleweli](https://github.com/nabijaczleweli) | +[kbknapp](https://github.com/kbknapp) 
|[homu](https://github.com/homu) |[Vinatorul](https://github.com/Vinatorul) |[tormol](https://github.com/tormol) |[little-dude](https://github.com/little-dude) |[sru](https://github.com/sru) | -[Byron](https://github.com/Byron) |[hgrecco](https://github.com/hgrecco) |[james-darkfox](https://github.com/james-darkfox) |[rtaycher](https://github.com/rtaycher) |[glowing-chemist](https://github.com/glowing-chemist) |[Arnavion](https://github.com/Arnavion) | +[nabijaczleweli](https://github.com/nabijaczleweli) |[mgeisler](https://github.com/mgeisler) |[Byron](https://github.com/Byron) |[hgrecco](https://github.com/hgrecco) |[nateozem](https://github.com/nateozem) |[james-darkfox](https://github.com/james-darkfox) | :---: |:---: |:---: |:---: |:---: |:---: | -[Byron](https://github.com/Byron) |[hgrecco](https://github.com/hgrecco) |[james-darkfox](https://github.com/james-darkfox) |[rtaycher](https://github.com/rtaycher) |[glowing-chemist](https://github.com/glowing-chemist) |[Arnavion](https://github.com/Arnavion) | +[nabijaczleweli](https://github.com/nabijaczleweli) |[mgeisler](https://github.com/mgeisler) |[Byron](https://github.com/Byron) |[hgrecco](https://github.com/hgrecco) |[nateozem](https://github.com/nateozem) |[james-darkfox](https://github.com/james-darkfox) | -[mgeisler](https://github.com/mgeisler) |[afiune](https://github.com/afiune) |[crazymerlyn](https://github.com/crazymerlyn) |[SuperFluffy](https://github.com/SuperFluffy) |[untitaker](https://github.com/untitaker) |[japaric](https://github.com/japaric) | +[Arnavion](https://github.com/Arnavion) |[rtaycher](https://github.com/rtaycher) |[glowing-chemist](https://github.com/glowing-chemist) |[untitaker](https://github.com/untitaker) |[afiune](https://github.com/afiune) |[crazymerlyn](https://github.com/crazymerlyn) | :---: |:---: |:---: |:---: |:---: |:---: | -[mgeisler](https://github.com/mgeisler) |[afiune](https://github.com/afiune) |[crazymerlyn](https://github.com/crazymerlyn) |[SuperFluffy](https://github.com/SuperFluffy) |[untitaker](https://github.com/untitaker) |[japaric](https://github.com/japaric) | +[Arnavion](https://github.com/Arnavion) |[rtaycher](https://github.com/rtaycher) |[glowing-chemist](https://github.com/glowing-chemist) |[untitaker](https://github.com/untitaker) |[afiune](https://github.com/afiune) |[crazymerlyn](https://github.com/crazymerlyn) | -[matthiasbeyer](https://github.com/matthiasbeyer) |[SShrike](https://github.com/SShrike) |[gohyda](https://github.com/gohyda) |[jimmycuadra](https://github.com/jimmycuadra) |[Nemo157](https://github.com/Nemo157) |[tshepang](https://github.com/tshepang) | +[SuperFluffy](https://github.com/SuperFluffy) |[japaric](https://github.com/japaric) |[matthiasbeyer](https://github.com/matthiasbeyer) |[SShrike](https://github.com/SShrike) |[tshepang](https://github.com/tshepang) |[gohyda](https://github.com/gohyda) | :---: |:---: |:---: |:---: |:---: |:---: | -[matthiasbeyer](https://github.com/matthiasbeyer) |[SShrike](https://github.com/SShrike) |[gohyda](https://github.com/gohyda) |[jimmycuadra](https://github.com/jimmycuadra) |[Nemo157](https://github.com/Nemo157) |[tshepang](https://github.com/tshepang) | +[SuperFluffy](https://github.com/SuperFluffy) |[japaric](https://github.com/japaric) |[matthiasbeyer](https://github.com/matthiasbeyer) |[SShrike](https://github.com/SShrike) |[tshepang](https://github.com/tshepang) |[gohyda](https://github.com/gohyda) | -[porglezomp](https://github.com/porglezomp) |[wdv4758h](https://github.com/wdv4758h) 
|[frewsxcv](https://github.com/frewsxcv) |[hoodie](https://github.com/hoodie) |[huonw](https://github.com/huonw) |[GrappigPanda](https://github.com/GrappigPanda) | +[jimmycuadra](https://github.com/jimmycuadra) |[Nemo157](https://github.com/Nemo157) |[golem131](https://github.com/golem131) |[porglezomp](https://github.com/porglezomp) |[wdv4758h](https://github.com/wdv4758h) |[frewsxcv](https://github.com/frewsxcv) | :---: |:---: |:---: |:---: |:---: |:---: | -[porglezomp](https://github.com/porglezomp) |[wdv4758h](https://github.com/wdv4758h) |[frewsxcv](https://github.com/frewsxcv) |[hoodie](https://github.com/hoodie) |[huonw](https://github.com/huonw) |[GrappigPanda](https://github.com/GrappigPanda) | +[jimmycuadra](https://github.com/jimmycuadra) |[Nemo157](https://github.com/Nemo157) |[golem131](https://github.com/golem131) |[porglezomp](https://github.com/porglezomp) |[wdv4758h](https://github.com/wdv4758h) |[frewsxcv](https://github.com/frewsxcv) | -[ignatenkobrain](https://github.com/ignatenkobrain) |[cstorey](https://github.com/cstorey) |[musoke](https://github.com/musoke) |[nelsonjchen](https://github.com/nelsonjchen) |[pkgw](https://github.com/pkgw) |[Deedasmi](https://github.com/Deedasmi) | +[hoodie](https://github.com/hoodie) |[huonw](https://github.com/huonw) |[GrappigPanda](https://github.com/GrappigPanda) |[ignatenkobrain](https://github.com/ignatenkobrain) |[shepmaster](https://github.com/shepmaster) |[cstorey](https://github.com/cstorey) | :---: |:---: |:---: |:---: |:---: |:---: | -[ignatenkobrain](https://github.com/ignatenkobrain) |[cstorey](https://github.com/cstorey) |[musoke](https://github.com/musoke) |[nelsonjchen](https://github.com/nelsonjchen) |[pkgw](https://github.com/pkgw) |[Deedasmi](https://github.com/Deedasmi) | +[hoodie](https://github.com/hoodie) |[huonw](https://github.com/huonw) |[GrappigPanda](https://github.com/GrappigPanda) |[ignatenkobrain](https://github.com/ignatenkobrain) |[shepmaster](https://github.com/shepmaster) |[cstorey](https://github.com/cstorey) | -[N-006](https://github.com/N-006) |[Keats](https://github.com/Keats) |[starkat99](https://github.com/starkat99) |[alex-gulyas](https://github.com/alex-gulyas) |[cite-reader](https://github.com/cite-reader) |[alexbool](https://github.com/alexbool) | +[kieraneglin](https://github.com/kieraneglin) |[musoke](https://github.com/musoke) |[nelsonjchen](https://github.com/nelsonjchen) |[pkgw](https://github.com/pkgw) |[Deedasmi](https://github.com/Deedasmi) |[vmchale](https://github.com/vmchale) | :---: |:---: |:---: |:---: |:---: |:---: | -[N-006](https://github.com/N-006) |[Keats](https://github.com/Keats) |[starkat99](https://github.com/starkat99) |[alex-gulyas](https://github.com/alex-gulyas) |[cite-reader](https://github.com/cite-reader) |[alexbool](https://github.com/alexbool) | +[kieraneglin](https://github.com/kieraneglin) |[musoke](https://github.com/musoke) |[nelsonjchen](https://github.com/nelsonjchen) |[pkgw](https://github.com/pkgw) |[Deedasmi](https://github.com/Deedasmi) |[vmchale](https://github.com/vmchale) | -[AluisioASG](https://github.com/AluisioASG) |[BurntSushi](https://github.com/BurntSushi) |[nox](https://github.com/nox) |[pixelistik](https://github.com/pixelistik) |[brennie](https://github.com/brennie) |[ogham](https://github.com/ogham) | +[messense](https://github.com/messense) |[Keats](https://github.com/Keats) |[starkat99](https://github.com/starkat99) |[alex-gulyas](https://github.com/alex-gulyas) |[cite-reader](https://github.com/cite-reader) 
|[alexbool](https://github.com/alexbool) | :---: |:---: |:---: |:---: |:---: |:---: | -[AluisioASG](https://github.com/AluisioASG) |[BurntSushi](https://github.com/BurntSushi) |[nox](https://github.com/nox) |[pixelistik](https://github.com/pixelistik) |[brennie](https://github.com/brennie) |[ogham](https://github.com/ogham) | +[messense](https://github.com/messense) |[Keats](https://github.com/Keats) |[starkat99](https://github.com/starkat99) |[alex-gulyas](https://github.com/alex-gulyas) |[cite-reader](https://github.com/cite-reader) |[alexbool](https://github.com/alexbool) | -[Bilalh](https://github.com/Bilalh) |[dotdash](https://github.com/dotdash) |[bradurani](https://github.com/bradurani) |[Seeker14491](https://github.com/Seeker14491) |[brianp](https://github.com/brianp) |[casey](https://github.com/casey) | +[AluisioASG](https://github.com/AluisioASG) |[BurntSushi](https://github.com/BurntSushi) |[nox](https://github.com/nox) |[mitsuhiko](https://github.com/mitsuhiko) |[brennie](https://github.com/brennie) |[pixelistik](https://github.com/pixelistik) | +:---: |:---: |:---: |:---: |:---: |:---: | +[AluisioASG](https://github.com/AluisioASG) |[BurntSushi](https://github.com/BurntSushi) |[nox](https://github.com/nox) |[mitsuhiko](https://github.com/mitsuhiko) |[brennie](https://github.com/brennie) |[pixelistik](https://github.com/pixelistik) | + +[Bilalh](https://github.com/Bilalh) |[dotdash](https://github.com/dotdash) |[bradurani](https://github.com/bradurani) |[Seeker14491](https://github.com/Seeker14491) |[brianp](https://github.com/brianp) |[casey](https://github.com/casey) | :---: |:---: |:---: |:---: |:---: |:---: | [Bilalh](https://github.com/Bilalh) |[dotdash](https://github.com/dotdash) |[bradurani](https://github.com/bradurani) |[Seeker14491](https://github.com/Seeker14491) |[brianp](https://github.com/brianp) |[casey](https://github.com/casey) | -[volks73](https://github.com/volks73) |[daboross](https://github.com/daboross) |[mernen](https://github.com/mernen) |[dguo](https://github.com/dguo) |[davidszotten](https://github.com/davidszotten) |[eddyb](https://github.com/eddyb) | +[volks73](https://github.com/volks73) |[daboross](https://github.com/daboross) |[mernen](https://github.com/mernen) |[dguo](https://github.com/dguo) |[davidszotten](https://github.com/davidszotten) |[drusellers](https://github.com/drusellers) | :---: |:---: |:---: |:---: |:---: |:---: | -[volks73](https://github.com/volks73) |[daboross](https://github.com/daboross) |[mernen](https://github.com/mernen) |[dguo](https://github.com/dguo) |[davidszotten](https://github.com/davidszotten) |[eddyb](https://github.com/eddyb) | +[volks73](https://github.com/volks73) |[daboross](https://github.com/daboross) |[mernen](https://github.com/mernen) |[dguo](https://github.com/dguo) |[davidszotten](https://github.com/davidszotten) |[drusellers](https://github.com/drusellers) | -[birkenfeld](https://github.com/birkenfeld) |[tanakh](https://github.com/tanakh) |[SirVer](https://github.com/SirVer) |[idmit](https://github.com/idmit) |[archer884](https://github.com/archer884) |[shepmaster](https://github.com/shepmaster) | +[eddyb](https://github.com/eddyb) |[birkenfeld](https://github.com/birkenfeld) |[guanqun](https://github.com/guanqun) |[tanakh](https://github.com/tanakh) |[SirVer](https://github.com/SirVer) |[idmit](https://github.com/idmit) | :---: |:---: |:---: |:---: |:---: |:---: | -[birkenfeld](https://github.com/birkenfeld) |[tanakh](https://github.com/tanakh) |[SirVer](https://github.com/SirVer) 
|[idmit](https://github.com/idmit) |[archer884](https://github.com/archer884) |[shepmaster](https://github.com/shepmaster) | +[eddyb](https://github.com/eddyb) |[birkenfeld](https://github.com/birkenfeld) |[guanqun](https://github.com/guanqun) |[tanakh](https://github.com/tanakh) |[SirVer](https://github.com/SirVer) |[idmit](https://github.com/idmit) | -[jespino](https://github.com/jespino) |[jtdowney](https://github.com/jtdowney) |[andete](https://github.com/andete) |[joshtriplett](https://github.com/joshtriplett) |[malbarbo](https://github.com/malbarbo) |[iliekturtles](https://github.com/iliekturtles) | +[archer884](https://github.com/archer884) |[jacobmischka](https://github.com/jacobmischka) |[jespino](https://github.com/jespino) |[jtdowney](https://github.com/jtdowney) |[andete](https://github.com/andete) |[jdanford](https://github.com/jdanford) | :---: |:---: |:---: |:---: |:---: |:---: | -[jespino](https://github.com/jespino) |[jtdowney](https://github.com/jtdowney) |[andete](https://github.com/andete) |[joshtriplett](https://github.com/joshtriplett) |[malbarbo](https://github.com/malbarbo) |[iliekturtles](https://github.com/iliekturtles) | +[archer884](https://github.com/archer884) |[jacobmischka](https://github.com/jacobmischka) |[jespino](https://github.com/jespino) |[jtdowney](https://github.com/jtdowney) |[andete](https://github.com/andete) |[jdanford](https://github.com/jdanford) | -[nicompte](https://github.com/nicompte) |[NickeZ](https://github.com/NickeZ) |[nvzqz](https://github.com/nvzqz) |[Geogi](https://github.com/Geogi) |[flying-sheep](https://github.com/flying-sheep) |[peppsac](https://github.com/peppsac) | +[joshtriplett](https://github.com/joshtriplett) |[Kalwyn](https://github.com/Kalwyn) |[manuel-rhdt](https://github.com/manuel-rhdt) |[malbarbo](https://github.com/malbarbo) |[Marwes](https://github.com/Marwes) |[mdaffin](https://github.com/mdaffin) | :---: |:---: |:---: |:---: |:---: |:---: | -[nicompte](https://github.com/nicompte) |[NickeZ](https://github.com/NickeZ) |[nvzqz](https://github.com/nvzqz) |[Geogi](https://github.com/Geogi) |[flying-sheep](https://github.com/flying-sheep) |[peppsac](https://github.com/peppsac) | +[joshtriplett](https://github.com/joshtriplett) |[Kalwyn](https://github.com/Kalwyn) |[manuel-rhdt](https://github.com/manuel-rhdt) |[malbarbo](https://github.com/malbarbo) |[Marwes](https://github.com/Marwes) |[mdaffin](https://github.com/mdaffin) | -[hexjelly](https://github.com/hexjelly) |[rnelson](https://github.com/rnelson) |[swatteau](https://github.com/swatteau) |[tspiteri](https://github.com/tspiteri) |[vks](https://github.com/vks) |[th4t](https://github.com/th4t) | +[iliekturtles](https://github.com/iliekturtles) |[nicompte](https://github.com/nicompte) |[NickeZ](https://github.com/NickeZ) |[nvzqz](https://github.com/nvzqz) |[nuew](https://github.com/nuew) |[Geogi](https://github.com/Geogi) | :---: |:---: |:---: |:---: |:---: |:---: | -[hexjelly](https://github.com/hexjelly) |[rnelson](https://github.com/rnelson) |[swatteau](https://github.com/swatteau) |[tspiteri](https://github.com/tspiteri) |[vks](https://github.com/vks) |[th4t](https://github.com/th4t) | +[iliekturtles](https://github.com/iliekturtles) |[nicompte](https://github.com/nicompte) |[NickeZ](https://github.com/NickeZ) |[nvzqz](https://github.com/nvzqz) |[nuew](https://github.com/nuew) |[Geogi](https://github.com/Geogi) | -[mineo](https://github.com/mineo) |[grossws](https://github.com/grossws) |[messense](https://github.com/messense) 
|[mvaude](https://github.com/mvaude) |[panicbit](https://github.com/panicbit) |[mitsuhiko](https://github.com/mitsuhiko) | +[flying-sheep](https://github.com/flying-sheep) |[Phlosioneer](https://github.com/Phlosioneer) |[peppsac](https://github.com/peppsac) |[golddranks](https://github.com/golddranks) |[hexjelly](https://github.com/hexjelly) |[rnelson](https://github.com/rnelson) | :---: |:---: |:---: |:---: |:---: |:---: | -[mineo](https://github.com/mineo) |[grossws](https://github.com/grossws) |[messense](https://github.com/messense) |[mvaude](https://github.com/mvaude) |[panicbit](https://github.com/panicbit) |[mitsuhiko](https://github.com/mitsuhiko) | +[flying-sheep](https://github.com/flying-sheep) |[Phlosioneer](https://github.com/Phlosioneer) |[peppsac](https://github.com/peppsac) |[golddranks](https://github.com/golddranks) |[hexjelly](https://github.com/hexjelly) |[rnelson](https://github.com/rnelson) | + +[swatteau](https://github.com/swatteau) |[tspiteri](https://github.com/tspiteri) |[vks](https://github.com/vks) |[th4t](https://github.com/th4t) |[mineo](https://github.com/mineo) |[wabain](https://github.com/wabain) | +:---: |:---: |:---: |:---: |:---: |:---: | +[swatteau](https://github.com/swatteau) |[tspiteri](https://github.com/tspiteri) |[vks](https://github.com/vks) |[th4t](https://github.com/th4t) |[mineo](https://github.com/mineo) |[wabain](https://github.com/wabain) | + +[grossws](https://github.com/grossws) |[kennytm](https://github.com/kennytm) |[mvaude](https://github.com/mvaude) |[panicbit](https://github.com/panicbit) |[ogham](https://github.com/ogham) | +:---: |:---: |:---: |:---: |:---: | +[grossws](https://github.com/grossws) |[kennytm](https://github.com/kennytm) |[mvaude](https://github.com/mvaude) |[panicbit](https://github.com/panicbit) |[ogham](https://github.com/ogham) | diff --git a/src/vendor/clap/Cargo.toml b/src/vendor/clap/Cargo.toml index 049c59cdb6..dc1ccf6cb1 100644 --- a/src/vendor/clap/Cargo.toml +++ b/src/vendor/clap/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "clap" -version = "2.26.0" +version = "2.26.2" authors = ["Kevin K. 
"] exclude = ["examples/*", "clap-test/*", "tests/*", "benches/*", "*.png", "clap-perf/*", "*.dot"] description = "A simple to use, efficient, and full featured Command Line Argument Parser\n" @@ -62,44 +62,41 @@ lto = true debug = false debug-assertions = false rpath = false +[dependencies.clippy] +version = "~0.0.131" +optional = true + +[dependencies.vec_map] +version = "0.8" + +[dependencies.textwrap] +version = "0.8.0" + [dependencies.atty] version = "0.2.2" optional = true -[dependencies.unicode-width] -version = "0.1.4" - -[dependencies.clippy] -version = "~0.0.131" +[dependencies.yaml-rust] +version = "0.3.5" optional = true -[dependencies.unicode-segmentation] -version = "1.2.0" +[dependencies.unicode-width] +version = "0.1.4" [dependencies.strsim] version = "0.6.0" optional = true -[dependencies.vec_map] -version = "0.8" - [dependencies.ansi_term] version = "0.9.0" optional = true -[dependencies.bitflags] -version = "0.9" - [dependencies.term_size] version = "0.3.0" optional = true -[dependencies.yaml-rust] -version = "0.3.5" -optional = true - -[dependencies.textwrap] -version = "0.7.0" +[dependencies.bitflags] +version = "0.9" [dev-dependencies.lazy_static] version = "0.2" @@ -107,17 +104,17 @@ version = "0.2" version = "0.2" [features] +default = ["suggestions", "color", "wrap_help"] +doc = ["yaml"] +yaml = ["yaml-rust"] +no_cargo = [] nightly = [] -lints = ["clippy"] unstable = [] -debug = [] -yaml = ["yaml-rust"] -wrap_help = ["term_size"] suggestions = ["strsim"] -doc = ["yaml"] -no_cargo = [] -default = ["suggestions", "color", "wrap_help"] +lints = ["clippy"] +wrap_help = ["term_size"] color = ["ansi_term", "atty"] +debug = [] [badges.appveyor] repository = "kbknapp/clap-rs" diff --git a/src/vendor/clap/Cargo.toml.orig b/src/vendor/clap/Cargo.toml.orig index b71f25c54a..bcb684bb1f 100644 --- a/src/vendor/clap/Cargo.toml.orig +++ b/src/vendor/clap/Cargo.toml.orig @@ -1,7 +1,7 @@ [package] name = "clap" -version = "2.26.0" +version = "2.26.2" authors = ["Kevin K. 
"] exclude = ["examples/*", "clap-test/*", "tests/*", "benches/*", "*.png", "clap-perf/*", "*.dot"] repository = "https://github.com/kbknapp/clap-rs.git" @@ -23,8 +23,7 @@ appveyor = { repository = "kbknapp/clap-rs" } bitflags = "0.9" vec_map = "0.8" unicode-width = "0.1.4" -unicode-segmentation = "1.2.0" -textwrap = "0.7.0" +textwrap = "0.8.0" strsim = { version = "0.6.0", optional = true } ansi_term = { version = "0.9.0", optional = true } term_size = { version = "0.3.0", optional = true } diff --git a/src/vendor/clap/README.md b/src/vendor/clap/README.md index e1ec668cdf..6a42faee0b 100644 --- a/src/vendor/clap/README.md +++ b/src/vendor/clap/README.md @@ -45,15 +45,23 @@ Created by [gh-md-toc](https://github.com/ekalinin/github-markdown-toc) ## What's New -Here's the highlights for v2.26.0 +Here's what's new in 2.26.2: -* **The minimum required version of Rust is now 1.13.0 (Stable)** -* bumps unicode-segmentation to v1.2 -* update textwrap to version 0.7.0 which increases the performance of writing help strings +* if all subcommands are hidden, the subcommands section of the help message is no longer displayed +* fixes a bug where default values are not applied if the option supports zero values +* fixes using require_equals(true) and min_values(0) together +* escape special characters in zsh and fish completions +* avoid panic generating default help msg if term width set to 0 due to bug in textwrap 0.7.0 +* Change `who's` -> `whose` in documentation +* **Help Message:** fixes `App::long_about` not being displayed +* **Suggestions:** output for flag after subcommand -Here's the highlights for v2.21.0 to v2.25.1 +Here's the highlights for v2.21.0 to v2.26.0 +* **The minimum required version of Rust is now 1.13.0 (Stable)** +* bumps unicode-segmentation to v1.2 +* update textwrap to version 0.7.0 which increases the performance of writing help strings * impl Default for Values + OsValues for any lifetime. * use textwrap crate for wrapping help texts * suggests to use flag after subcommand when applicable @@ -144,7 +152,7 @@ I first want to say I'm a big a fan of BurntSushi's work, the creator of `Docopt `docopt` is also excellent at translating arguments into Rust types automatically. There is even a syntax extension which will do all this for you, if you're willing to use a nightly compiler (use of a stable compiler requires you to somewhat manually translate from arguments to Rust types). To use BurntSushi's words, `docopt` is also a sort of black box. You get what you get, and it's hard to tweak implementation or customize the experience for your use case. -Because `docopt` is doing a ton of work to parse your help messages and determine what you were trying to communicate as valid arguments, it's also one of the more heavy weight parsers performance-wise. For most applications this isn't a concern and this isn't to say `docopt` is slow, in fact from it. This is just something to keep in mind while comparing. +Because `docopt` is doing a ton of work to parse your help messages and determine what you were trying to communicate as valid arguments, it's also one of the more heavy weight parsers performance-wise. For most applications this isn't a concern and this isn't to say `docopt` is slow, in fact far from it. This is just something to keep in mind while comparing. #### All else being equal, what are some reasons to use `clap`? 
diff --git a/src/vendor/clap/SPONSORS.md b/src/vendor/clap/SPONSORS.md new file mode 100644 index 0000000000..397e005a68 --- /dev/null +++ b/src/vendor/clap/SPONSORS.md @@ -0,0 +1,7 @@ +The following is a list of [sponsors](https://www.clap.rs/sponsor/) for the clap-rs project: + +[Noelia Seva-Gonzalez](http://vsgrealestategroup.com.realproserver.com/About) +Noelia Seva-Gonzalez + +[Rob Tsuk](https://github.com/rtsuk) +Rob Tsuk \ No newline at end of file diff --git a/src/vendor/clap/src/app/help.rs b/src/vendor/clap/src/app/help.rs index db34ceabd0..6c2941b544 100644 --- a/src/vendor/clap/src/app/help.rs +++ b/src/vendor/clap/src/app/help.rs @@ -521,7 +521,7 @@ impl<'a> Help<'a> { .filter(|arg| !arg.is_set(ArgSettings::Hidden)) .count() > 0; let opts = parser.has_opts(); - let subcmds = parser.has_subcommands(); + let subcmds = parser.has_visible_subcommands(); let unified_help = parser.is_set(AppSettings::UnifiedHelpMessage); @@ -657,8 +657,22 @@ impl<'a> Help<'a> { if let Some(author) = parser.meta.author { write_thing!(author) } - if let Some(about) = parser.meta.about { - write_thing!(about) + if self.use_long { + if let Some(about) = parser.meta.long_about { + debugln!("Help::write_default_help: writing long about"); + write_thing!(about) + } else if let Some(about) = parser.meta.about { + debugln!("Help::write_default_help: writing about"); + write_thing!(about) + } + } else { + if let Some(about) = parser.meta.about { + debugln!("Help::write_default_help: writing about"); + write_thing!(about) + } else if let Some(about) = parser.meta.long_about { + debugln!("Help::write_default_help: writing long about"); + write_thing!(about) + } } color!(self, "\nUSAGE:", warning)?; @@ -861,6 +875,11 @@ impl<'a> Help<'a> { "{}", parser.meta.about.unwrap_or("unknown about"))?; } + b"long-about" => { + write!(self.writer, + "{}", + parser.meta.long_about.unwrap_or("unknown about"))?; + } b"usage" => { write!(self.writer, "{}", usage::create_usage_no_title(parser, &[]))?; } diff --git a/src/vendor/clap/src/app/parser.rs b/src/vendor/clap/src/app/parser.rs index c20ab0b655..6ec72034d2 100644 --- a/src/vendor/clap/src/app/parser.rs +++ b/src/vendor/clap/src/app/parser.rs @@ -475,10 +475,7 @@ impl<'a, 'b> Parser<'a, 'b> #[inline] pub fn has_visible_subcommands(&self) -> bool { - if self.subcommands.is_empty() { - return false; - } - self.subcommands.iter().any(|s| !s.p.is_set(AS::Hidden)) + self.has_subcommands() && self.subcommands.iter().filter(|sc| sc.p.meta.name != "help").any(|sc| !sc.p.is_set(AS::Hidden)) } #[inline] @@ -1339,7 +1336,8 @@ impl<'a, 'b> Parser<'a, 'b> #[cfg_attr(feature = "cargo-clippy", allow(let_and_return))] fn use_long_help(&self) -> bool { - let ul = self.flags.iter().any(|f| f.b.long_help.is_some()) || + let ul = self.meta.long_about.is_some() || + self.flags.iter().any(|f| f.b.long_help.is_some()) || self.opts.iter().any(|o| o.b.long_help.is_some()) || self.positionals.values().any(|p| p.b.long_help.is_some()) || self.subcommands @@ -1529,13 +1527,17 @@ impl<'a, 'b> Parser<'a, 'b> debugln!("Parser::parse_opt; opt={}, val={:?}", opt.b.name, val); debugln!("Parser::parse_opt; opt.settings={:?}", opt.b.settings); let mut has_eq = false; + let no_val = val.is_none(); + let empty_vals = opt.is_set(ArgSettings::EmptyValues); + let min_vals_zero = opt.v.min_vals.unwrap_or(1) == 0; + let needs_eq = opt.is_set(ArgSettings::RequireEquals); debug!("Parser::parse_opt; Checking for val..."); if let Some(fv) = val { has_eq = fv.starts_with(&[b'=']) || had_eq; let v = 
fv.trim_left_matches(b'='); - if !opt.is_set(ArgSettings::EmptyValues) && - (v.len_() == 0 || (opt.is_set(ArgSettings::RequireEquals) && !has_eq)) { + if !empty_vals && + (v.len_() == 0 || (needs_eq && !has_eq)) { sdebugln!("Found Empty - Error"); return Err(Error::empty_value(opt, &*usage::create_error_usage(self, matcher, None), @@ -1546,7 +1548,7 @@ impl<'a, 'b> Parser<'a, 'b> fv, fv.starts_with(&[b'='])); self.add_val_to_arg(opt, v, matcher)?; - } else if opt.is_set(ArgSettings::RequireEquals) && !opt.is_set(ArgSettings::EmptyValues) { + } else if needs_eq && !(empty_vals || min_vals_zero) { sdebugln!("None, but requires equals...Error"); return Err(Error::empty_value(opt, &*usage::create_error_usage(self, matcher, None), @@ -1561,10 +1563,12 @@ impl<'a, 'b> Parser<'a, 'b> self.groups_for_arg(opt.b.name) .and_then(|vec| Some(matcher.inc_occurrences_of(&*vec))); - if val.is_none() || - !has_eq && - (opt.is_set(ArgSettings::Multiple) && !opt.is_set(ArgSettings::RequireDelimiter) && - matcher.needs_more_vals(opt)) { + let needs_delim = opt.is_set(ArgSettings::RequireDelimiter); + let mult = opt.is_set(ArgSettings::Multiple); + if no_val && min_vals_zero && !has_eq && needs_eq { + debugln!("Parser::parse_opt: More arg vals not required..."); + return Ok(ParseResult::ValuesDone); + } else if no_val || (mult && !needs_delim) && !has_eq && matcher.needs_more_vals(opt) { debugln!("Parser::parse_opt: More arg vals required..."); return Ok(ParseResult::Opt(opt.b.name)); } @@ -1727,7 +1731,14 @@ impl<'a, 'b> Parser<'a, 'b> macro_rules! add_val { (@default $_self:ident, $a:ident, $m:ident) => { if let Some(ref val) = $a.v.default_val { - if $m.get($a.b.name).is_none() { + if $m.get($a.b.name).map(|ma| ma.vals.len()).map(|len| len == 0).unwrap_or(false) { + $_self.add_val_to_arg($a, OsStr::new(val), $m)?; + + if $_self.cache.map_or(true, |name| name != $a.name()) { + arg_post_processing!($_self, $a, $m); + $_self.cache = Some($a.name()); + } + } else { $_self.add_val_to_arg($a, OsStr::new(val), $m)?; if $_self.cache.map_or(true, |name| name != $a.name()) { diff --git a/src/vendor/clap/src/app/validator.rs b/src/vendor/clap/src/app/validator.rs index 33b98c014e..6227c060cf 100644 --- a/src/vendor/clap/src/app/validator.rs +++ b/src/vendor/clap/src/app/validator.rs @@ -294,9 +294,9 @@ impl<'a, 'b, 'z> Validator<'a, 'b, 'z> { self.0.color())); } } - if let Some(num) = a.min_vals() { + let min_vals_zero = if let Some(num) = a.min_vals() { debugln!("Validator::validate_arg_num_vals: min_vals set: {}", num); - if (ma.vals.len() as u64) < num { + if (ma.vals.len() as u64) < num && num != 0 { debugln!("Validator::validate_arg_num_vals: Sending error TooFewValues"); return Err(Error::too_few_values(a, num, @@ -306,9 +306,10 @@ impl<'a, 'b, 'z> Validator<'a, 'b, 'z> { None), self.0.color())); } - } + num == 0 + } else { false }; // Issue 665 (https://github.com/kbknapp/clap-rs/issues/665) - if a.takes_value() && !a.is_set(ArgSettings::EmptyValues) && ma.vals.is_empty() { + if a.takes_value() && !(a.is_set(ArgSettings::EmptyValues) || min_vals_zero) && ma.vals.is_empty() { return Err(Error::empty_value(a, &*usage::create_error_usage(self.0, matcher, None), self.0.color())); diff --git a/src/vendor/clap/src/args/arg.rs b/src/vendor/clap/src/args/arg.rs index 9e81911fab..77771da87a 100644 --- a/src/vendor/clap/src/args/arg.rs +++ b/src/vendor/clap/src/args/arg.rs @@ -1507,7 +1507,7 @@ impl<'a, 'b> Arg<'a, 'b> { /// assert!(res.is_ok()); // We didn't use --other=special, so "cfg" wasn't required /// ``` /// - 
/// Setting [`Arg::required_if(arg, val)`] and having `arg` used with a vaue of `val` but *not* + /// Setting [`Arg::required_if(arg, val)`] and having `arg` used with a value of `val` but *not* /// using this arg is an error. /// /// ```rust @@ -1591,7 +1591,7 @@ impl<'a, 'b> Arg<'a, 'b> { /// ``` /// /// Setting [`Arg::required_ifs(&[(arg, val)])`] and having any of the `arg`s used with it's - /// vaue of `val` but *not* using this arg is an error. + /// value of `val` but *not* using this arg is an error. /// /// ```rust /// # use clap::{App, Arg, ErrorKind}; @@ -2054,7 +2054,7 @@ impl<'a, 'b> Arg<'a, 'b> { /// **NOTE:** This setting only applies to [options] and [positional arguments] /// /// **NOTE:** When the terminator is passed in on the command line, it is **not** stored as one - /// of the vaues + /// of the values /// /// # Examples /// diff --git a/src/vendor/clap/src/completions/fish.rs b/src/vendor/clap/src/completions/fish.rs index 2e0f36ea44..894f89baa5 100644 --- a/src/vendor/clap/src/completions/fish.rs +++ b/src/vendor/clap/src/completions/fish.rs @@ -40,6 +40,11 @@ end } } +// Escape string inside single quotes +fn escape_string(string: &str) -> String { + string.replace("\\", "\\\\").replace("'", "\\'") +} + fn gen_fish_inner(root_command: &str, comp_gen: &FishGen, parent_cmds: &str, buffer: &mut String) { debugln!("FishGen::gen_fish_inner;"); // example : @@ -67,7 +72,7 @@ fn gen_fish_inner(root_command: &str, comp_gen: &FishGen, parent_cmds: &str, buf template.push_str(format!(" -l {}", data).as_str()); } if let Some(data) = option.b.help { - template.push_str(format!(" -d \"{}\"", data).as_str()); + template.push_str(format!(" -d '{}'", escape_string(data)).as_str()); } if let Some(ref data) = option.v.possible_vals { template.push_str(format!(" -r -f -a \"{}\"", data.join(" ")).as_str()); @@ -85,7 +90,7 @@ fn gen_fish_inner(root_command: &str, comp_gen: &FishGen, parent_cmds: &str, buf template.push_str(format!(" -l {}", data).as_str()); } if let Some(data) = flag.b.help { - template.push_str(format!(" -d \"{}\"", data).as_str()); + template.push_str(format!(" -d '{}'", escape_string(data)).as_str()); } buffer.push_str(template.as_str()); buffer.push_str("\n"); @@ -96,7 +101,7 @@ fn gen_fish_inner(root_command: &str, comp_gen: &FishGen, parent_cmds: &str, buf template.push_str(" -f"); template.push_str(format!(" -a \"{}\"", &subcommand.p.meta.name).as_str()); if let Some(data) = subcommand.p.meta.about { - template.push_str(format!(" -d \"{}\"", &data).as_str()) + template.push_str(format!(" -d '{}'", escape_string(&data)).as_str()) } buffer.push_str(template.as_str()); buffer.push_str("\n"); diff --git a/src/vendor/clap/src/completions/zsh.rs b/src/vendor/clap/src/completions/zsh.rs index fa84342f8c..298b5cadde 100644 --- a/src/vendor/clap/src/completions/zsh.rs +++ b/src/vendor/clap/src/completions/zsh.rs @@ -292,12 +292,20 @@ fn get_args_of(p: &Parser) -> String { ret.join("\n") } +// Escape string inside single quotes and brackets +fn escape_string(string: &str) -> String { + string.replace("\\", "\\\\") + .replace("'", "'\\''") + .replace("[", "\\[") + .replace("]", "\\]") +} + fn write_opts_of(p: &Parser) -> String { debugln!("write_opts_of;"); let mut ret = vec![]; for o in p.opts() { debugln!("write_opts_of:iter: o={}", o.name()); - let help = o.help().unwrap_or("").replace("[", "\\[").replace("]", "\\]"); + let help = o.help().map_or(String::new(), escape_string); let mut conflicts = get_zsh_arg_conflicts!(p, o, INTERNAL_ERROR_MSG); conflicts = if 
conflicts.is_empty() { String::new() @@ -316,7 +324,7 @@ fn write_opts_of(p: &Parser) -> String { String::new() }; if let Some(short) = o.short() { - let s = format!("\"{conflicts}{multiple}-{arg}+[{help}]{possible_values}\" \\", + let s = format!("'{conflicts}{multiple}-{arg}+[{help}]{possible_values}' \\", conflicts = conflicts, multiple = multiple, arg = short, @@ -327,7 +335,7 @@ fn write_opts_of(p: &Parser) -> String { ret.push(s); } if let Some(long) = o.long() { - let l = format!("\"{conflicts}{multiple}--{arg}+[{help}]{possible_values}\" \\", + let l = format!("'{conflicts}{multiple}--{arg}+[{help}]{possible_values}' \\", conflicts = conflicts, multiple = multiple, arg = long, @@ -347,7 +355,7 @@ fn write_flags_of(p: &Parser) -> String { let mut ret = vec![]; for f in p.flags() { debugln!("write_flags_of:iter: f={}", f.name()); - let help = f.help().unwrap_or("").replace("[", "\\[").replace("]", "\\]"); + let help = f.help().map_or(String::new(), escape_string); let mut conflicts = get_zsh_arg_conflicts!(p, f, INTERNAL_ERROR_MSG); conflicts = if conflicts.is_empty() { String::new() @@ -361,7 +369,7 @@ fn write_flags_of(p: &Parser) -> String { "" }; if let Some(short) = f.short() { - let s = format!("\"{conflicts}{multiple}-{arg}[{help}]\" \\", + let s = format!("'{conflicts}{multiple}-{arg}[{help}]' \\", multiple = multiple, conflicts = conflicts, arg = short, @@ -372,7 +380,7 @@ fn write_flags_of(p: &Parser) -> String { } if let Some(long) = f.long() { - let l = format!("\"{conflicts}{multiple}--{arg}[{help}]\" \\", + let l = format!("'{conflicts}{multiple}--{arg}[{help}]' \\", conflicts = conflicts, multiple = multiple, arg = long, diff --git a/src/vendor/clap/src/lib.rs b/src/vendor/clap/src/lib.rs index 73e74f1127..468dd5967e 100644 --- a/src/vendor/clap/src/lib.rs +++ b/src/vendor/clap/src/lib.rs @@ -542,7 +542,6 @@ extern crate vec_map; #[cfg(feature = "wrap_help")] extern crate term_size; extern crate textwrap; -extern crate unicode_segmentation; #[cfg(feature = "color")] extern crate atty; diff --git a/src/vendor/clap/src/suggestions.rs b/src/vendor/clap/src/suggestions.rs index bf586def96..cd18d4a2b8 100644 --- a/src/vendor/clap/src/suggestions.rs +++ b/src/vendor/clap/src/suggestions.rs @@ -58,9 +58,10 @@ pub fn did_you_mean_flag_suffix<'z, T, I>(arg: &str, longs: I, subcommands: &'z if let Some(candidate) = did_you_mean(arg, opts) { let suffix = format!( - "\n\tDid you mean to put '--{}' after the subcommand '{}'?", - Format::Good(arg), - Format::Good(candidate)); + "\n\tDid you mean to put '{}{}' after the subcommand '{}'?", + Format::Good("--"), + Format::Good(candidate), + Format::Good(subcommand.get_name())); return (suffix, Some(candidate)); } } diff --git a/src/vendor/cmake/.cargo-checksum.json b/src/vendor/cmake/.cargo-checksum.json index a7cc62dfbb..3128d7ae02 100644 --- a/src/vendor/cmake/.cargo-checksum.json +++ b/src/vendor/cmake/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"c1e953ee360e77de57f7b02f1b7880bd6a3dc22d1a69e953c2ac2c52cc52d247",".travis.yml":"c5565ac6e1981bf3a88d132c16e381411a239a1c25ec140ee13cf2d50f1f97d0","Cargo.toml":"b00470e46ebb5985066646479d788b7ceb4ec7155c5d0bc99d78230e323c5745","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"8ca528d20639506546044c676ff9069e3e850937b02bff4194dcf9e5c3c50d64","src/lib.rs":"414fe90372fa7c7f67853af871f2ca30c117abe406b405719e615b581a9f55ff"},"package":"b8ebbb35d3dc9cd09497168f33de1acb79b265d350ab0ac34133b98f8509af1f"} \ No newline at end of file +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"c1e953ee360e77de57f7b02f1b7880bd6a3dc22d1a69e953c2ac2c52cc52d247",".travis.yml":"c5565ac6e1981bf3a88d132c16e381411a239a1c25ec140ee13cf2d50f1f97d0","Cargo.toml":"83f8699952ffbc3b778dded5a07baf4c70e3c79f278ad1eca205f91eb2ea6602","Cargo.toml.orig":"ec6adec6b0a1c45f8f0cc2377add25d218f2f9618d3050c6d4e8e46f47e2338e","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"8ca528d20639506546044c676ff9069e3e850937b02bff4194dcf9e5c3c50d64","src/lib.rs":"49bd86e5c32d24a776c24595fa8ab6b29317ebd7f82cc64fa6585b9807d4fa72"},"package":"357c07e7a1fc95732793c1edb5901e1a1f305cfcf63a90eb12dbd22bdb6b789d"} \ No newline at end of file diff --git a/src/vendor/cmake/Cargo.toml b/src/vendor/cmake/Cargo.toml index 611606d50d..bf60a8bb92 100644 --- a/src/vendor/cmake/Cargo.toml +++ b/src/vendor/cmake/Cargo.toml @@ -1,17 +1,25 @@ -[package] +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) +[package] name = "cmake" -version = "0.1.24" +version = "0.1.26" authors = ["Alex Crichton "] -license = "MIT/Apache-2.0" +description = "A build dependency for running `cmake` to build a native library\n" +homepage = "https://github.com/alexcrichton/cmake-rs" +documentation = "http://alexcrichton.com/cmake-rs" readme = "README.md" keywords = ["build-dependencies"] +license = "MIT/Apache-2.0" repository = "https://github.com/alexcrichton/cmake-rs" -homepage = "https://github.com/alexcrichton/cmake-rs" -documentation = "http://alexcrichton.com/cmake-rs" -description = """ -A build dependency for running `cmake` to build a native library -""" - -[dependencies] -gcc = "0.3.48" +[dependencies.cc] +version = "1.0" diff --git a/src/vendor/cmake/Cargo.toml.orig b/src/vendor/cmake/Cargo.toml.orig new file mode 100644 index 0000000000..9b9d026c17 --- /dev/null +++ b/src/vendor/cmake/Cargo.toml.orig @@ -0,0 +1,16 @@ +[package] +name = "cmake" +version = "0.1.26" +authors = ["Alex Crichton "] +license = "MIT/Apache-2.0" +readme = "README.md" +keywords = ["build-dependencies"] +repository = "https://github.com/alexcrichton/cmake-rs" +homepage = "https://github.com/alexcrichton/cmake-rs" +documentation = "http://alexcrichton.com/cmake-rs" +description = """ +A build dependency for running `cmake` to build a native library +""" + +[dependencies] +cc = "1.0" diff --git a/src/vendor/cmake/src/lib.rs b/src/vendor/cmake/src/lib.rs index 557d654986..36b0c657d8 100644 --- a/src/vendor/cmake/src/lib.rs +++ b/src/vendor/cmake/src/lib.rs @@ -44,7 +44,7 @@ #![deny(missing_docs)] -extern crate gcc; +extern crate cc; use std::env; use std::ffi::{OsString, OsStr}; @@ -234,18 +234,20 @@ impl Config { getenv_unwrap("HOST") }); let msvc = target.contains("msvc"); - let mut c_cfg = gcc::Config::new(); + let mut c_cfg = cc::Build::new(); c_cfg.cargo_metadata(false) .opt_level(0) .debug(false) .target(&target) + .warnings(false) .host(&host); - let mut cxx_cfg = gcc::Config::new(); + let mut cxx_cfg = cc::Build::new(); cxx_cfg.cargo_metadata(false) .cpp(true) .opt_level(0) .debug(false) .target(&target) + .warnings(false) .host(&host); if let Some(static_crt) = self.static_crt { c_cfg.static_crt(static_crt); @@ -314,6 +316,10 @@ impl Config { if self.generator.is_none() { cmd.arg("-G").arg(self.visual_studio_generator(&target)); } + } else if target.contains("redox") { + if !self.defined("CMAKE_SYSTEM_NAME") { + cmd.arg("-DCMAKE_SYSTEM_NAME=Generic"); + } } let mut is_ninja = false; if let Some(ref generator) = self.generator { @@ -358,7 +364,7 @@ impl Config { } }; let mut set_compiler = |kind: &str, - compiler: &gcc::Tool, + compiler: &cc::Tool, extra: &OsString| { let flag_var = format!("CMAKE_{}_FLAGS", kind); let tool_var = format!("CMAKE_{}_COMPILER", kind); @@ -490,7 +496,7 @@ impl Config { } fn visual_studio_generator(&self, target: &str) -> String { - use gcc::windows_registry::{find_vs_version, VsVers}; + use cc::windows_registry::{find_vs_version, VsVers}; let base = match find_vs_version() { Ok(VsVers::Vs15) => "Visual Studio 15 2017", diff --git a/src/vendor/conv/.cargo-checksum.json b/src/vendor/conv/.cargo-checksum.json new file mode 100644 index 0000000000..337818297e --- /dev/null +++ b/src/vendor/conv/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"ed17d8ac9d834ecaaba9736aba1f48340e129f5dcc8bb42423f81b92466b1957","Cargo.toml":"5a5ebfcd3c64dd9c621b2322b9a4f26078afcc6e0898737f6b1c316f22f24d91","LICENSE":"b2507383f9c3de7b2067c5af0fbe8ab5f24bb5fc0f606525201f2890d51c3400","README.md":"961818d81b7596a3575148a8ab54b66b40e7fa5e7942e6f78770635a675de031","src/errors.rs":"60cb67d97d4dcd8cb37c47a107ca064f101e24cb5dc9ff3e56939b87e6ea6372","src/impls.rs":"6620dca1ac72b1b2c1ff46b1ec1a637cbe3ecbde2a2e4471f9e25ca4da706d59","src/lib.rs":"b2573e4eb1e75abac4a959a1f870752fc6d9592e682c34b68d52b31fbf95ccc3","src/macros.rs":"8f5d4cf40c127e1eda1b4c13c1e81a45aed11af8bb6db88e3672a267b79b4daa","src/misc.rs":"9e4f8012588b81a0e9445d4072dc55a530591b67d3149dc5cc7688409d5e8f69","tests/conv_utils.rs":"e6704c5cda52cb7adafddbc6be4f4a6267cd4caf7eb44864cf8d9809dba3d079","tests/derive_try_from.rs":"f0a5a5f084e01666049f766373327520ff33f18ab845d22e01d111cdaacc36a4","tests/lang_char.rs":"8d410c9c9efcc1d1ba2ff3c1454a5c3b3d3913596ba59ffd34eb973ad87e56eb","tests/lang_floats.rs":"398ce7cae3388f707d0850ddde6ae46ed26d3cabfe8eb1417cc5093fe49e6bcb","tests/lang_ints.rs":"fd8179a8d1279f5e42a28622e38a6edbd5206aa6abaea6f7bd40886e6fabb058","tests/unwraps.rs":"0edd2cfd1fcf3ed1804bf1bee68c56ae7edaa2efc6a6599f8d669cd8726ea3e3","tests/use_in_generics.rs":"965d9debc0982fe96103a1a08144c5a9d4b8af07510ea84fc997ead568b49563","tests/util/mod.rs":"fdd9bb3500bd060f70288f79f7dd083eb25a09d8fe6a02f12951208a7ae999cf"},"package":"78ff10625fd0ac447827aa30ea8b861fead473bb60aeb73af6c1c58caf0d1299"} \ No newline at end of file diff --git a/src/vendor/conv/.cargo-ok b/src/vendor/conv/.cargo-ok new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/vendor/conv/.gitignore b/src/vendor/conv/.gitignore new file mode 100644 index 0000000000..a9d37c560c --- /dev/null +++ b/src/vendor/conv/.gitignore @@ -0,0 +1,2 @@ +target +Cargo.lock diff --git a/src/vendor/conv/.travis.yml b/src/vendor/conv/.travis.yml new file mode 100644 index 0000000000..067ec09c7a --- /dev/null +++ b/src/vendor/conv/.travis.yml @@ -0,0 +1,17 @@ +language: rust +script: cargo build --verbose && cargo test --verbose +rust: + - 1.2.0 + - 1.3.0 + - 1.4.0 + - 1.5.0 + - 1.6.0 + - stable + - beta + - nightly +matrix: + allow_failures: + - rust: nightly +branches: + except: + - /^issue-.*$/ diff --git a/src/vendor/conv/Cargo.toml b/src/vendor/conv/Cargo.toml new file mode 100644 index 0000000000..42f42aa0dd --- /dev/null +++ b/src/vendor/conv/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "conv" +version = "0.3.3" +authors = ["Daniel Keep "] + +description = "This crate provides a number of conversion traits with more specific semantics than those provided by 'as' or 'From'/'Into'." 
+repository = "https://github.com/DanielKeep/rust-conv" +documentation = "https://danielkeep.github.io/rust-conv/doc/conv/index.html" +readme = "README.md" +license = "MIT" +keywords = ["from", "into", "conversion", "approximation"] + +exclude = [ + "scripts/*", + "update-docs.py", +] + +[dependencies] +custom_derive = "0.1.2" + +[dev-dependencies] +quickcheck = "0.2.21, < 0.2.25" diff --git a/src/vendor/conv/LICENSE b/src/vendor/conv/LICENSE new file mode 100644 index 0000000000..a2af29bc7d --- /dev/null +++ b/src/vendor/conv/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2015 Daniel Keep + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/src/vendor/conv/README.md b/src/vendor/conv/README.md new file mode 100644 index 0000000000..85a1a287c9 --- /dev/null +++ b/src/vendor/conv/README.md @@ -0,0 +1,129 @@ + +# `conv` + +This crate provides a number of conversion traits with more specific semantics than those provided by `as` or `From`/`Into`. + +The goal with the traits provided here is to be more specific about what generic code can rely on, as well as provide reasonably self-describing alternatives to the standard `From`/`Into` traits. For example, the although `T: From` might be satisfied in generic code, this says nothing about what *kind* of conversion that represents. + +In addition, `From`/`Into` provide no facility for a conversion failing, meaning that implementations may need to choose between conversions that may not be valid, or panicking; neither option is appealing in general. + +**Links** + +* [Latest Release](https://crates.io/crates/scan-rules/) +* [Latest Docs](https://danielkeep.github.io/rust-scan-rules/doc/scan_rules/index.html) +* [Repository](https://github.com/DanielKeep/rust-scan-rules) + +## Compatibility + +`conv` is compatible with Rust 1.2 and higher. + +## Examples + +```rust +# extern crate conv; +# use conv::*; +# fn main() { +// This *cannot* fail, so we can use `unwrap_ok` to discard the `Result`. +assert_eq!(u8::value_from(0u8).unwrap_ok(), 0u8); + +// This *can* fail. Specifically, it can overflow toward negative infinity. +assert_eq!(u8::value_from(0i8), Ok(0u8)); +assert_eq!(u8::value_from(-1i8), Err(NegOverflow(-1))); + +// This can overflow in *either* direction; hence the change to `RangeError`. +assert_eq!(u8::value_from(-1i16), Err(RangeError::NegOverflow(-1))); +assert_eq!(u8::value_from(0i16), Ok(0u8)); +assert_eq!(u8::value_from(256i16), Err(RangeError::PosOverflow(256))); + +// We can use the extension traits to simplify this a little. 
+assert_eq!(u8::value_from(-1i16).unwrap_or_saturate(), 0u8); +assert_eq!(u8::value_from(0i16).unwrap_or_saturate(), 0u8); +assert_eq!(u8::value_from(256i16).unwrap_or_saturate(), 255u8); + +// Obviously, all integers can be "approximated" using the default scheme (it +// doesn't *do* anything), but they can *also* be approximated with the +// `Wrapping` scheme. +assert_eq!( + >::approx_from(400u16), + Err(PosOverflow(400))); +assert_eq!( + >::approx_from(400u16), + Ok(144u8)); + +// This is rather inconvenient; as such, there are a number of convenience +// extension methods available via `ConvUtil` and `ConvAsUtil`. +assert_eq!(400u16.approx(), Err::(PosOverflow(400))); +assert_eq!(400u16.approx_by::(), Ok::(144u8)); +assert_eq!(400u16.approx_as::(), Err(PosOverflow(400))); +assert_eq!(400u16.approx_as_by::(), Ok(144)); + +// Integer -> float conversions *can* fail due to limited precision. +// Once the continuous range of exactly representable integers is exceeded, the +// provided implementations fail with overflow errors. +assert_eq!(f32::value_from(16_777_216i32), Ok(16_777_216.0f32)); +assert_eq!(f32::value_from(16_777_217i32), Err(RangeError::PosOverflow(16_777_217))); + +// Float -> integer conversions have to be done using approximations. Although +// exact conversions are *possible*, "advertising" this with an implementation +// is misleading. +// +// Note that `DefaultApprox` for float -> integer uses whatever rounding +// mode is currently active (*i.e.* whatever `as` would do). +assert_eq!(41.0f32.approx(), Ok(41u8)); +assert_eq!(41.3f32.approx(), Ok(41u8)); +assert_eq!(41.5f32.approx(), Ok(41u8)); +assert_eq!(41.8f32.approx(), Ok(41u8)); +assert_eq!(42.0f32.approx(), Ok(42u8)); + +assert_eq!(255.0f32.approx(), Ok(255u8)); +assert_eq!(256.0f32.approx(), Err::(FloatError::PosOverflow(256.0))); + +// Sometimes, it can be useful to saturate the conversion from float to +// integer directly, then account for NaN as input separately. The `Saturate` +// extension trait exists for this reason. +assert_eq!((-23.0f32).approx_as::().saturate(), Ok(0)); +assert_eq!(302.0f32.approx_as::().saturate(), Ok(255u8)); +assert!(std::f32::NAN.approx_as::().saturate().is_err()); + +// If you really don't care about the specific kind of error, you can just rely +// on automatic conversion to `GeneralErrorKind`. +fn too_many_errors() -> Result<(), GeneralErrorKind> { + assert_eq!({let r: u8 = try!(0u8.value_into()); r}, 0u8); + assert_eq!({let r: u8 = try!(0i8.value_into()); r}, 0u8); + assert_eq!({let r: u8 = try!(0i16.value_into()); r}, 0u8); + assert_eq!({let r: u8 = try!(0.0f32.approx()); r}, 0u8); + Ok(()) +} +# let _ = too_many_errors(); +# } +``` + +## Change Log + +### v0.3.2 + +- Added integer ↔ `char` conversions. +- Added missing `isize`/`usize` → `f32`/`f64` conversions. +- Fixed the error type of `i64` → `usize` for 64-bit targets. + +### v0.3.1 + +- Change to `unwrap_ok` for better codegen (thanks bluss). +- Fix for Rust breaking change (code in question was dodgy anyway; thanks m4rw3r). + +### v0.3.0 + +- Added an `Error` constraint to all `Err` associated types. This will break any user-defined conversions where the `Err` type does not implement `Error`. +- Renamed the `Overflow` and `Underflow` errors to `PosOverflow` and `NegOverflow` respectively. In the context of floating point conversions, "underflow" usually means the value was too close to zero to correctly represent. + +### v0.2.1 + +- Added `ConvUtil::into_as` as a shortcut for `Into::::into`. 
+- Added `#[inline]` attributes. +- Added `Saturate::saturate`, which can saturate `Result`s arising from over/underflow. + +### v0.2.0 + +- Changed all error types to include the original input as payload. This breaks pretty much *everything*. Sorry about that. On the bright side, there's now no downside to using the conversion traits for non-`Copy` types. +- Added the normal rounding modes for float → int approximations: `RoundToNearest`, `RoundToNegInf`, `RoundToPosInf`, and `RoundToZero`. +- `ApproxWith` is now subsumed by a pair of extension traits (`ConvUtil` and `ConvAsUtil`), that also have shortcuts for `TryInto` and `ValueInto` so that you can specify the destination type on the method. diff --git a/src/vendor/conv/src/errors.rs b/src/vendor/conv/src/errors.rs new file mode 100644 index 0000000000..9e005b044b --- /dev/null +++ b/src/vendor/conv/src/errors.rs @@ -0,0 +1,606 @@ +/*! +This module defines the various error types that can be produced by a failed conversion. + +In addition, it also defines some extension traits to make working with failable conversions more ergonomic (see the `Unwrap*` traits). +*/ + +use std::any::Any; +use std::error::Error; +use std::fmt::{self, Debug, Display}; +use misc::{Saturated, InvalidSentinel, SignedInfinity}; + +macro_rules! Desc { + ( + ($desc:expr) + pub struct $name:ident<$t:ident> $_body:tt; + ) => { + impl<$t> Display for $name<$t> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!(fmt, $desc) + } + } + + impl<$t> Error for $name<$t> where $t: Any { + fn description(&self) -> &str { + $desc + } + } + }; +} + +macro_rules! DummyDebug { + ( + () pub enum $name:ident<$t:ident> { + $(#[doc=$_doc:tt] $vname:ident($_vpay:ident),)+ + } + ) => { + impl<$t> Debug for $name<$t> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { + let msg = match *self { + $($name::$vname(_) => stringify!($vname),)+ + }; + write!(fmt, concat!(stringify!($name), "::{}(..)"), msg) + } + } + }; + + ( + () pub struct $name:ident<$t:ident>(pub $_pay:ident); + ) => { + impl<$t> Debug for $name<$t> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!(fmt, concat!(stringify!($name), "(..)")) + } + } + }; +} + +macro_rules! EnumDesc { + ( + ($($vname:ident => $vdesc:expr,)+) + pub enum $name:ident $_body:tt + ) => { + impl Display for $name { + fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!(fmt, "{}", + match *self { $($name::$vname => $vdesc,)+ }) + } + } + + impl Error for $name { + fn description(&self) -> &str { + match *self { $($name::$vname => $vdesc,)+ } + } + } + }; + + ( + ($($vname:ident => $vdesc:expr,)+) + pub enum $name:ident<$t:ident> $_body:tt + ) => { + impl<$t> Display for $name<$t> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!(fmt, "{}", + match *self { $($name::$vname(..) => $vdesc,)+ }) + } + } + + impl<$t> Error for $name<$t> where $t: Any { + fn description(&self) -> &str { + match *self { $($name::$vname(..) => $vdesc,)+ } + } + } + }; +} + +macro_rules! FromName { + ( + ($fname:ident) + pub enum $name:ident<$t:ident> $_body:tt + ) => { + impl<$t> From<$fname<$t>> for $name<$t> { + #[inline] + fn from(e: $fname<$t>) -> Self { + $name::$fname(e.into_inner()) + } + } + }; + + ( + ($fname:ident<$t:ident>) + pub enum $name:ident $_body:tt + ) => { + impl<$t> From<$fname<$t>> for $name { + #[inline] + fn from(_: $fname<$t>) -> Self { + $name::$fname + } + } + }; +} + +macro_rules! 
FromNoError { + ( + () pub enum $name:ident $_body:tt + ) => { + impl From for $name { + #[inline] + fn from(_: NoError) -> Self { + panic!(concat!("cannot convert NoError into ", stringify!($name))) + } + } + }; + + ( + () pub enum $name:ident<$t:ident> $_body:tt + ) => { + impl<$t> From for $name<$t> { + fn from(_: NoError) -> Self { + panic!(concat!("cannot convert NoError into ", stringify!($name))) + } + } + }; + + ( + () pub struct $name:ident<$t:ident> $_body:tt; + ) => { + impl<$t> From for $name<$t> { + fn from(_: NoError) -> Self { + panic!(concat!("cannot convert NoError into ", stringify!($name))) + } + } + }; +} + +macro_rules! FromRemap { + ( + ($from:ident($($vname:ident),+)) + pub enum $name:ident $_body:tt + ) => { + impl From<$from> for $name { + #[inline] + fn from(e: $from) -> Self { + match e { + $($from::$vname => $name::$vname,)+ + } + } + } + }; + + ( + ($from:ident<$t:ident>($($vname:ident),+)) + pub enum $name:ident $_body:tt + ) => { + impl<$t> From<$from<$t>> for $name { + #[inline] + fn from(e: $from<$t>) -> Self { + match e { + $($from::$vname(..) => $name::$vname,)+ + } + } + } + }; + + ( + ($from:ident($($vname:ident),+)) + pub enum $name:ident<$t:ident> $_body:tt + ) => { + impl<$t> From<$from<$t>> for $name<$t> { + #[inline] + fn from(e: $from<$t>) -> Self { + match e { + $($from::$vname(v) => $name::$vname(v),)+ + } + } + } + }; +} + +macro_rules! IntoInner { + ( + () pub enum $name:ident<$t:ident> { + $(#[doc=$_doc:tt] $vname:ident($_vpay:ident),)+ + } + ) => { + impl<$t> $name<$t> { + /// Returns the value stored in this error. + #[inline] + pub fn into_inner(self) -> $t { + match self { $($name::$vname(v))|+ => v } + } + } + }; + + ( + () pub struct $name:ident<$t:ident>(pub $_pay:ident); + ) => { + impl<$t> $name<$t> { + /// Returns the value stored in this error. + #[inline] + pub fn into_inner(self) -> $t { + self.0 + } + } + }; +} + +custom_derive!{ + /** + A general error enumeration that subsumes all other conversion errors. + + This exists primarily as a "catch-all" for reliably unifying various different kinds of conversion errors. + */ + #[derive( + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, + IntoInner, DummyDebug, FromNoError, + EnumDesc( + NegOverflow => "conversion resulted in negative overflow", + PosOverflow => "conversion resulted in positive overflow", + Unrepresentable => "could not convert unrepresentable value", + ), + FromName(Unrepresentable), + FromName(NegOverflow), + FromName(PosOverflow), + FromRemap(RangeError(NegOverflow, PosOverflow)) + )] + pub enum GeneralError { + /// Input was too negative for the target type. + NegOverflow(T), + + /// Input was too positive for the target type. + PosOverflow(T), + + /// Input was not representable in the target type. + Unrepresentable(T), + } +} + +impl From> for GeneralError { + #[inline] + fn from(e: FloatError) -> GeneralError { + use self::FloatError as F; + use self::GeneralError as G; + match e { + F::NegOverflow(v) => G::NegOverflow(v), + F::PosOverflow(v) => G::PosOverflow(v), + F::NotANumber(v) => G::Unrepresentable(v), + } + } +} + +custom_derive! { + /** + A general error enumeration that subsumes all other conversion errors, but discards all input payloads the errors may be carrying. + + This exists primarily as a "catch-all" for reliably unifying various different kinds of conversion errors, and between different input types. 
+ */ + #[derive( + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug, + FromNoError, + EnumDesc( + NegOverflow => "conversion resulted in negative overflow", + PosOverflow => "conversion resulted in positive overflow", + Unrepresentable => "could not convert unrepresentable value", + ), + FromName(Unrepresentable), + FromName(NegOverflow), + FromName(PosOverflow), + FromRemap(RangeErrorKind(NegOverflow, PosOverflow)), + FromRemap(RangeError(NegOverflow, PosOverflow)), + FromRemap(GeneralError(NegOverflow, PosOverflow, Unrepresentable)) + )] + pub enum GeneralErrorKind { + /// Input was too negative for the target type. + NegOverflow, + + /// Input was too positive for the target type. + PosOverflow, + + /// Input was not representable in the target type. + Unrepresentable, + } +} + +impl From> for GeneralErrorKind { + #[inline] + fn from(e: FloatError) -> GeneralErrorKind { + use self::FloatError as F; + use self::GeneralErrorKind as G; + match e { + F::NegOverflow(..) => G::NegOverflow, + F::PosOverflow(..) => G::PosOverflow, + F::NotANumber(..) => G::Unrepresentable, + } + } +} + +/** +Indicates that it is not possible for the conversion to fail. + +You can use the [`UnwrapOk::unwrap_ok`](./trait.UnwrapOk.html#tymethod.unwrap_ok) method to discard the (statically impossible) `Err` case from a `Result<_, NoError>`, without using `Result::unwrap` (which is typically viewed as a "code smell"). +*/ +#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)] +pub enum NoError {} + +impl Display for NoError { + fn fmt(&self, _: &mut fmt::Formatter) -> Result<(), fmt::Error> { + unreachable!() + } +} + +impl Error for NoError { + fn description(&self) -> &str { + unreachable!() + } +} + +custom_derive! { + /// Indicates that the conversion failed because the value was not representable. + #[derive( + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, + IntoInner, DummyDebug, FromNoError, + Desc("could not convert unrepresentable value") + )] + pub struct Unrepresentable(pub T); +} + +custom_derive! { + /// Indicates that the conversion failed due to a negative overflow. + #[derive( + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, + IntoInner, DummyDebug, FromNoError, + Desc("conversion resulted in negative overflow") + )] + pub struct NegOverflow(pub T); +} + +custom_derive! { + /// Indicates that the conversion failed due to a positive overflow. + #[derive( + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, + IntoInner, DummyDebug, FromNoError, + Desc("conversion resulted in positive overflow") + )] + pub struct PosOverflow(pub T); +} + +custom_derive! { + /** + Indicates that a conversion from a floating point type failed. + */ + #[derive( + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, + IntoInner, DummyDebug, FromNoError, + EnumDesc( + NegOverflow => "conversion resulted in negative overflow", + PosOverflow => "conversion resulted in positive overflow", + NotANumber => "conversion target does not support not-a-number", + ), + FromName(NegOverflow), + FromName(PosOverflow), + FromRemap(RangeError(NegOverflow, PosOverflow)) + )] + pub enum FloatError { + /// Input was too negative for the target type. + NegOverflow(T), + + /// Input was too positive for the target type. + PosOverflow(T), + + /// Input was not-a-number, which the target type could not represent. + NotANumber(T), + } +} + +custom_derive! { + /** + Indicates that a conversion failed due to a range error. 
+ */ + #[derive( + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, + IntoInner, DummyDebug, FromNoError, + EnumDesc( + NegOverflow => "conversion resulted in negative overflow", + PosOverflow => "conversion resulted in positive overflow", + ), + FromName(NegOverflow), + FromName(PosOverflow) + )] + pub enum RangeError { + /// Input was too negative for the target type. + NegOverflow(T), + + /// Input was too positive the target type. + PosOverflow(T), + } +} + +custom_derive! { + /** + Indicates that a conversion failed due to a range error. + + This is a variant of `RangeError` that does not retain the input value which caused the error. It exists to help unify some utility methods and should not generally be used directly, unless you are targeting the `Unwrap*` traits. + */ + #[derive( + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug, + FromNoError, + EnumDesc( + NegOverflow => "conversion resulted in negative overflow", + PosOverflow => "conversion resulted in positive overflow", + ), + FromName(NegOverflow), + FromName(PosOverflow), + FromRemap(RangeError(NegOverflow, PosOverflow)) + )] + pub enum RangeErrorKind { + /// Input was too negative for the target type. + NegOverflow, + + /// Input was too positive for the target type. + PosOverflow, + } +} + +/** +Saturates a `Result`. +*/ +pub trait Saturate { + /// The result of saturating. + type Output; + + /** + Replaces an overflow error with a saturated value. + + Unlike `unwrap_or_saturate`, this method can be used in cases where the `Result` error type can encode failures *other* than overflow and underflow. For example, you cannot saturate a float-to-integer conversion using `unwrap_or_saturate` as the error might be `NotANumber`, which doesn't have a meaningful saturation "direction". + + The output of this method will be a `Result` where the error type *does not* contain overflow conditions. What conditions remain must still be dealt with in some fashion. + */ + fn saturate(self) -> Self::Output; +} + +impl Saturate for Result> +where T: Saturated { + type Output = Result>; + + #[inline] + fn saturate(self) -> Self::Output { + use self::FloatError::*; + match self { + Ok(v) => Ok(v), + Err(NegOverflow(_)) => Ok(T::saturated_min()), + Err(PosOverflow(_)) => Ok(T::saturated_max()), + Err(NotANumber(v)) => Err(Unrepresentable(v)) + } + } +} + +impl Saturate for Result> +where T: Saturated { + type Output = Result; + + #[inline] + fn saturate(self) -> Self::Output { + use self::RangeError::*; + match self { + Ok(v) => Ok(v), + Err(NegOverflow(_)) => Ok(T::saturated_min()), + Err(PosOverflow(_)) => Ok(T::saturated_max()) + } + } +} + +impl Saturate for Result +where T: Saturated { + type Output = Result; + + #[inline] + fn saturate(self) -> Self::Output { + use self::RangeErrorKind::*; + match self { + Ok(v) => Ok(v), + Err(NegOverflow) => Ok(T::saturated_min()), + Err(PosOverflow) => Ok(T::saturated_max()) + } + } +} + +/** +Safely unwrap a `Result` that cannot contain an error. +*/ +pub trait UnwrapOk { + /** + Unwraps a `Result` without possibility of failing. + + Technically, this is not necessary; it's provided simply to make user code a little clearer. + */ + fn unwrap_ok(self) -> T; +} + +impl UnwrapOk for Result { + #[inline] + fn unwrap_ok(self) -> T { + match self { + Ok(v) => v, + Err(no_error) => match no_error {}, + } + } +} + +/** +Unwrap a conversion by saturating to infinity. +*/ +pub trait UnwrapOrInf { + /// The result of unwrapping. 
+ type Output; + + /** + Either unwraps the successfully converted value, or saturates to infinity in the "direction" of overflow. + */ + fn unwrap_or_inf(self) -> Self::Output; +} + +/** +Unwrap a conversion by replacing a failure with an invalid sentinel value. +*/ +pub trait UnwrapOrInvalid { + /// The result of unwrapping. + type Output; + + /** + Either unwraps the successfully converted value, or returns the output type's invalid sentinel value. + */ + fn unwrap_or_invalid(self) -> Self::Output; +} + +/** +Unwrap a conversion by saturating. +*/ +pub trait UnwrapOrSaturate { + /// The result of unwrapping. + type Output; + + /** + Either unwraps the successfully converted value, or saturates in the "direction" of overflow. + */ + fn unwrap_or_saturate(self) -> Self::Output; +} + +impl UnwrapOrInf for Result +where T: SignedInfinity, E: Into { + type Output = T; + #[inline] + fn unwrap_or_inf(self) -> T { + use self::RangeErrorKind::*; + match self.map_err(Into::into) { + Ok(v) => v, + Err(NegOverflow) => T::neg_infinity(), + Err(PosOverflow) => T::pos_infinity(), + } + } +} + +impl UnwrapOrInvalid for Result +where T: InvalidSentinel { + type Output = T; + #[inline] + fn unwrap_or_invalid(self) -> T { + match self { + Ok(v) => v, + Err(..) => T::invalid_sentinel(), + } + } +} + +impl UnwrapOrSaturate for Result +where T: Saturated, E: Into { + type Output = T; + #[inline] + fn unwrap_or_saturate(self) -> T { + use self::RangeErrorKind::*; + match self.map_err(Into::into) { + Ok(v) => v, + Err(NegOverflow) => T::saturated_min(), + Err(PosOverflow) => T::saturated_max(), + } + } +} diff --git a/src/vendor/conv/src/impls.rs b/src/vendor/conv/src/impls.rs new file mode 100644 index 0000000000..424ee8338f --- /dev/null +++ b/src/vendor/conv/src/impls.rs @@ -0,0 +1,591 @@ +macro_rules! max_of { + ($name:ident) => { ::std::$name::MAX }; +} + +macro_rules! min_of { + ($name:ident) => { ::std::$name::MIN }; +} + +macro_rules! approx_blind { + (($($attrs:tt)*), $src:ty, $dst:ty, $scheme:ty) => { + as_item! { + $($attrs)* + impl ::ApproxFrom<$src, $scheme> for $dst { + type Err = ::errors::NoError; + #[inline] + fn approx_from(src: $src) -> Result<$dst, Self::Err> { + Ok(src as $dst) + } + } + } + }; +} + +macro_rules! approx_z_to_dmax { + (($($attrs:tt)*), $src:ty, $dst:ident, $scheme:ty) => { + as_item! { + $($attrs)* + impl ::ApproxFrom<$src, $scheme> for $dst { + type Err = ::errors::RangeError<$src>; + #[inline] + fn approx_from(src: $src) -> Result<$dst, Self::Err> { + if !(0 <= src) { + return Err(::errors::RangeError::NegOverflow(src)); + } + if !(src <= max_of!($dst) as $src) { + return Err(::errors::RangeError::PosOverflow(src)); + } + Ok(src as $dst) + } + } + } + }; +} + +macro_rules! approx_to_dmax { + (($($attrs:tt)*), $src:ty, $dst:ident, $scheme:ty) => { + as_item! { + $($attrs)* + impl ::ApproxFrom<$src, $scheme> for $dst { + type Err = ::errors::PosOverflow<$src>; + #[inline] + fn approx_from(src: $src) -> Result<$dst, Self::Err> { + if !(src <= max_of!($dst) as $src) { + return Err(::errors::PosOverflow(src)); + } + Ok(src as $dst) + } + } + } + }; +} + +macro_rules! approx_dmin_to_dmax { + (($($attrs:tt)*), $src:ty, $dst:ident, $scheme:ty) => { + as_item! 
{ + $($attrs)* + impl ::ApproxFrom<$src, $scheme> for $dst { + type Err = ::errors::RangeError<$src>; + #[inline] + fn approx_from(src: $src) -> Result<$dst, Self::Err> { + if !(min_of!($dst) as $src <= src) { + return Err(::errors::RangeError::NegOverflow(src)); + } + if !(src <= max_of!($dst) as $src) { + return Err(::errors::RangeError::PosOverflow(src)); + } + Ok(src as $dst) + } + } + } + } +} + +macro_rules! approx_z_up { + (($($attrs:tt)*), $src:ty, $dst:ident, $scheme:ty) => { + as_item! { + $($attrs)* + impl ::ApproxFrom<$src, $scheme> for $dst { + type Err = ::errors::NegOverflow<$src>; + #[inline] + fn approx_from(src: $src) -> Result<$dst, Self::Err> { + if !(0 <= src) { + return Err(::errors::NegOverflow(src)); + } + Ok(src as $dst) + } + } + } + }; +} + +macro_rules! approx_dmin_to_dmax_no_nan { + (($($attrs:tt)*), $src:ty, $dst:ident, $scheme:ty) => { + approx_dmin_to_dmax_no_nan! { ($($attrs)*), $src, $dst, $scheme, approx: |s| s } + }; + + (($($attrs:tt)*), $src:ty, $dst:ident, $scheme:ty, approx: |$src_name:ident| $conv:expr) => { + approx_range_no_nan! { + ($($attrs)*), $src, + $dst, [min_of!($dst) as $src, max_of!($dst) as $src], + $scheme, approx: |$src_name| $conv + } + }; +} + +macro_rules! approx_range_no_nan { + (($($attrs:tt)*), $src:ty, $dst:ident, [$min:expr, $max:expr], $scheme:ty) => { + approx_range_no_nan! { ($($attrs)*), $src, $dst, [$min, $max], $scheme, approx: |s| s } + }; + + (($($attrs:tt)*), $src:ty, $dst:ident, [$min:expr, $max:expr], $scheme:ty, approx: |$src_name:ident| $conv:expr) => { + as_item! { + $($attrs)* + impl ::ApproxFrom<$src, $scheme> for $dst { + type Err = ::errors::FloatError<$src>; + #[inline] + fn approx_from(src: $src) -> Result<$dst, Self::Err> { + if src.is_nan() { + return Err(::errors::FloatError::NotANumber(src)); + } + let approx = { let $src_name = src; $conv }; + if !($min <= approx) { + return Err(::errors::FloatError::NegOverflow(src)); + } + if !(approx <= $max) { + return Err(::errors::FloatError::PosOverflow(src)); + } + Ok(approx as $dst) + } + } + } + }; +} + +macro_rules! num_conv { + (@ $src:ty=> $(,)*) => {}; + + (@ $src:ty=> #[32] $($tail:tt)*) => { + num_conv! { @ $src=> (#[cfg(target_pointer_width="32")]) $($tail)* } + }; + + (@ $src:ty=> #[64] $($tail:tt)*) => { + num_conv! { @ $src=> (#[cfg(target_pointer_width="64")]) $($tail)* } + }; + + (@ $src:ty=> e $($tail:tt)*) => { num_conv! { @ $src=> () e $($tail)* } }; + (@ $src:ty=> n+ $($tail:tt)*) => { num_conv! { @ $src=> () n+ $($tail)* } }; + (@ $src:ty=> n $($tail:tt)*) => { num_conv! { @ $src=> () n $($tail)* } }; + (@ $src:ty=> w+ $($tail:tt)*) => { num_conv! { @ $src=> () w+ $($tail)* } }; + (@ $src:ty=> w $($tail:tt)*) => { num_conv! { @ $src=> () w $($tail)* } }; + (@ $src:ty=> aW $($tail:tt)*) => { num_conv! { @ $src=> () aW $($tail)* } }; + (@ $src:ty=> nf $($tail:tt)*) => { num_conv! { @ $src=> () nf $($tail)* } }; + (@ $src:ty=> fan $($tail:tt)*) => { num_conv! { @ $src=> () fan $($tail)* } }; + + // Exact conversion + (@ $src:ty=> ($($attrs:tt)*) e $dst:ty, $($tail:tt)*) => { + as_item! { + approx_blind! { ($($attrs)*), $src, $dst, ::DefaultApprox } + approx_blind! { ($($attrs)*), $src, $dst, ::Wrapping } + + $($attrs)* + impl ::ValueFrom<$src> for $dst { + type Err = ::errors::NoError; + #[inline] + fn value_from(src: $src) -> Result<$dst, Self::Err> { + Ok(src as $dst) + } + } + } + num_conv! 
{ @ $src=> $($tail)* } + }; + + // Narrowing a signed type *into* an unsigned type where the destination type's maximum value is representable by the source type. + (@ $src:ty=> ($($attrs:tt)*) n+ $dst:ident, $($tail:tt)*) => { + as_item! { + approx_z_to_dmax! { ($($attrs)*), $src, $dst, ::DefaultApprox } + approx_blind! { ($($attrs)*), $src, $dst, ::Wrapping } + + $($attrs)* + impl ::ValueFrom<$src> for $dst { + type Err = ::errors::RangeError<$src>; + #[inline] + fn value_from(src: $src) -> Result<$dst, Self::Err> { + if !(0 <= src) { + return Err(::errors::RangeError::NegOverflow(src)); + } + if !(src <= max_of!($dst) as $src) { + return Err(::errors::RangeError::PosOverflow(src)); + } + Ok(src as $dst) + } + } + } + num_conv! { @ $src=> $($tail)* } + }; + + // Narrowing an unsigned type *into* a type where the destination type's maximum value is representable by the source type. + (@ $src:ty=> ($($attrs:tt)*) n- $dst:ident, $($tail:tt)*) => { + as_item! { + approx_to_dmax! { ($($attrs)*), $src, $dst, ::DefaultApprox } + approx_blind! { ($($attrs)*), $src, $dst, ::Wrapping } + + $($attrs)* + impl ::ValueFrom<$src> for $dst { + type Err = ::errors::PosOverflow<$src>; + #[inline] + fn value_from(src: $src) -> Result<$dst, Self::Err> { + if !(src <= max_of!($dst) as $src) { + return Err(::errors::PosOverflow(src)); + } + Ok(src as $dst) + } + } + } + num_conv! { @ $src=> $($tail)* } + }; + + // Narrowing where the destination type's bounds are representable by the source type. + (@ $src:ty=> ($($attrs:tt)*) n $dst:ident, $($tail:tt)*) => { + as_item! { + approx_dmin_to_dmax! { ($($attrs)*), $src, $dst, ::DefaultApprox } + approx_blind! { ($($attrs)*), $src, $dst, ::Wrapping } + + $($attrs)* + impl ::ValueFrom<$src> for $dst { + type Err = ::errors::RangeError<$src>; + #[inline] + fn value_from(src: $src) -> Result<$dst, Self::Err> { + if !(min_of!($dst) as $src <= src) { + return Err(::errors::RangeError::NegOverflow(src)); + } + if !(src <= max_of!($dst) as $src) { + return Err(::errors::RangeError::PosOverflow(src)); + } + Ok(src as $dst) + } + } + } + num_conv! { @ $src=> $($tail)* } + }; + + // Widening a signed type *into* an unsigned type. + (@ $src:ty=> ($($attrs:tt)*) w+ $dst:ident, $($tail:tt)*) => { + as_item! { + approx_z_up! { ($($attrs)*), $src, $dst, ::DefaultApprox } + approx_blind! { ($($attrs)*), $src, $dst, ::Wrapping } + + $($attrs)* + impl ::ValueFrom<$src> for $dst { + type Err = ::errors::NegOverflow<$src>; + #[inline] + fn value_from(src: $src) -> Result<$dst, Self::Err> { + if !(0 <= src) { + return Err(::errors::NegOverflow(src)); + } + Ok(src as $dst) + } + } + } + num_conv! { @ $src=> $($tail)* } + }; + + // Widening. + (@ $src:ty=> ($($attrs:tt)*) w $dst:ident, $($tail:tt)*) => { + as_item! { + approx_blind! { ($($attrs)*), $src, $dst, ::DefaultApprox } + approx_blind! { ($($attrs)*), $src, $dst, ::Wrapping } + + $($attrs)* + impl ::ValueFrom<$src> for $dst { + type Err = ::errors::NoError; + #[inline] + fn value_from(src: $src) -> Result<$dst, Self::Err> { + Ok(src as $dst) + } + } + } + num_conv! { @ $src=> $($tail)* } + }; + + // Narrowing *into* a floating-point type where the conversion is only exact within a given range. + (@ $src:ty=> ($($attrs:tt)*) nf [+- $bound:expr] $dst:ident, $($tail:tt)*) => { + as_item! { + approx_blind! 
{ ($($attrs)*), $src, $dst, ::DefaultApprox } + + $($attrs)* + impl ::ValueFrom<$src> for $dst { + type Err = ::errors::RangeError<$src>; + #[inline] + fn value_from(src: $src) -> Result<$dst, Self::Err> { + if !(-$bound <= src) { + return Err(::errors::RangeError::NegOverflow(src)); + } + if !(src <= $bound) { + return Err(::errors::RangeError::PosOverflow(src)); + } + Ok(src as $dst) + } + } + } + num_conv! { @ $src=> $($tail)* } + }; + + (@ $src:ty=> ($($attrs:tt)*) nf [, $max:expr] $dst:ident, $($tail:tt)*) => { + as_item! { + approx_blind! { ($($attrs)*), $src, $dst, ::DefaultApprox } + + $($attrs)* + impl ::ValueFrom<$src> for $dst { + type Err = ::errors::PosOverflow<$src>; + #[inline] + fn value_from(src: $src) -> Result<$dst, Self::Err> { + if !(src <= $max) { + return Err(::errors::PosOverflow(src)); + } + Ok(src as $dst) + } + } + } + num_conv! { @ $src=> $($tail)* } + }; + + // Approximately narrowing a floating point value *into* a type where the source value is constrained by the given range of values. + (@ $src:ty=> ($($attrs:tt)*) fan [$min:expr, $max:expr] $dst:ident, $($tail:tt)*) => { + as_item! { + approx_range_no_nan! { ($($attrs)*), $src, $dst, [$min, $max], + ::DefaultApprox } + approx_range_no_nan! { ($($attrs)*), $src, $dst, [$min, $max], + ::RoundToNearest, approx: |s| s.round() } + approx_range_no_nan! { ($($attrs)*), $src, $dst, [$min, $max], + ::RoundToNegInf, approx: |s| s.floor() } + approx_range_no_nan! { ($($attrs)*), $src, $dst, [$min, $max], + ::RoundToPosInf, approx: |s| s.ceil() } + approx_range_no_nan! { ($($attrs)*), $src, $dst, [$min, $max], + ::RoundToZero, approx: |s| s.trunc() } + } + num_conv! { @ $src=> $($tail)* } + }; + + (@ $src:ty=> ($($attrs:tt)*) fan $dst:ident, $($tail:tt)*) => { + as_item! { + approx_dmin_to_dmax_no_nan! { ($($attrs)*), $src, $dst, ::DefaultApprox } + approx_dmin_to_dmax_no_nan! { ($($attrs)*), $src, $dst, ::RoundToNearest, + approx: |s| s.round() } + approx_dmin_to_dmax_no_nan! { ($($attrs)*), $src, $dst, ::RoundToNegInf, + approx: |s| s.floor() } + approx_dmin_to_dmax_no_nan! { ($($attrs)*), $src, $dst, ::RoundToPosInf, + approx: |s| s.ceil() } + approx_dmin_to_dmax_no_nan! { ($($attrs)*), $src, $dst, ::RoundToZero, + approx: |s| s.trunc() } + } + num_conv! { @ $src=> $($tail)* } + }; + + ($src:ty=> $($tail:tt)*) => { + num_conv! { @ $src=> $($tail)*, } + }; +} + +mod lang_ints { + num_conv! { i8=> w i16, w i32, w i64, w+u8, w+u16, w+u32, w+u64, w isize, w+usize } + num_conv! { i16=> n i8, w i32, w i64, n+u8, w+u16, w+u32, w+u64, w isize, w+usize } + num_conv! { i32=> n i8, n i16, w i64, n+u8, n+u16, w+u32, w+u64 } + num_conv! { i64=> n i8, n i16, n i32, n+u8, n+u16, n+u32, w+u64 } + num_conv! { i32=> #[32] e isize, #[64] w isize, w+usize } + num_conv! { i64=> #[32] n isize, #[64] e isize, #[32] n+usize, #[64] w+usize } + + num_conv! { u8=> n-i8, w i16, w i32, w i64, w u16, w u32, w u64, w isize, w usize } + num_conv! { u16=> n-i8, n-i16, w i32, w i64, n-u8, w u32, w u64, w isize, w usize } + num_conv! { u32=> n-i8, n-i16, n-i32, w i64, n-u8, n-u16, w u64 } + num_conv! { u64=> n-i8, n-i16, n-i32, n-i64, n-u8, n-u16, n-u32 } + num_conv! { u32=> #[32] n-isize, #[64] w isize, #[32] e usize, #[64] w usize } + num_conv! { u64=> n-isize, #[32] n-usize, #[64] e usize } + + num_conv! { isize=> n i8, n i16, #[32] e i32, #[32] w i64, #[64] n i32, #[64] e i64 } + num_conv! { isize=> n+u8, n+u16, #[32] w+u32, #[32] w+u64, #[64] n+u32, #[64] w+u64 } + num_conv! { isize=> w+usize } + + num_conv! 
{ usize=> n-i8, n-i16, #[32] n-i32, #[32] w i64, #[64] n-i32, #[64] n-i64 }
+    num_conv! { usize=> n-u8, n-u16, #[32] e u32, #[32] w u64, #[64] n-u32, #[64] e u64 }
+    num_conv! { usize=> n-isize }
+}
+
+mod lang_floats {
+    use {ApproxFrom, ApproxScheme};
+    use ValueFrom;
+    use errors::{NoError, RangeError};
+
+    // f32 -> f64: strictly widening
+    impl<Scheme> ApproxFrom<f32, Scheme> for f64
+    where Scheme: ApproxScheme {
+        type Err = NoError;
+        #[inline]
+        fn approx_from(src: f32) -> Result<f64, Self::Err> {
+            Ok(src as f64)
+        }
+    }
+
+    impl ValueFrom<f32> for f64 {
+        type Err = NoError;
+        #[inline]
+        fn value_from(src: f32) -> Result<f64, Self::Err> {
+            Ok(src as f64)
+        }
+    }
+
+    // f64 -> f32: narrowing, approximate
+    impl ApproxFrom<f64> for f32 {
+        type Err = RangeError<f64>;
+        #[inline]
+        fn approx_from(src: f64) -> Result<f32, Self::Err> {
+            if !src.is_finite() {
+                return Ok(src as f32);
+            }
+            if !(::std::f32::MIN as f64 <= src) {
+                return Err(RangeError::NegOverflow(src));
+            }
+            if !(src <= ::std::f32::MAX as f64) {
+                return Err(RangeError::PosOverflow(src));
+            }
+            Ok(src as f32)
+        }
+    }
+}
+
+mod lang_int_to_float {
+    num_conv! { i8=> w f32, w f64 }
+    num_conv! { i16=> w f32, w f64 }
+    num_conv! { i32=> nf [+- 16_777_216] f32, w f64 }
+    num_conv! { i64=> nf [+- 16_777_216] f32, nf [+- 9_007_199_254_740_992] f64 }
+
+    num_conv! { u8=> w f32, w f64 }
+    num_conv! { u16=> w f32, w f64 }
+    num_conv! { u32=> nf [, 16_777_216] f32, w f64 }
+    num_conv! { u64=> nf [, 16_777_216] f32, nf [, 9_007_199_254_740_992] f64 }
+
+    num_conv! { isize=> nf [+- 16_777_216] f32,
+        #[32] w f64, #[64] nf [+- 9_007_199_254_740_992] f64 }
+    num_conv! { usize=> nf [, 16_777_216] f32,
+        #[32] w f64, #[64] nf [, 9_007_199_254_740_992] f64 }
+}
+
+mod lang_float_to_int {
+    /*
+    We use explicit ranges on narrowing float-to-int conversions because it *turns out* that just because you can cast an integer to a float, this *does not* mean you can cast it back and get the original input. The non-explicit-range implementation of `fan` *depends* on this, so it was kinda *totally broken* for narrowing conversions.
+
+    *Yeah.* That's floating point for you!
+    */
+    num_conv! { f32=> fan i8, fan i16,
+        fan [-2.1474836e9, 2.1474835e9] i32,
+        fan [-9.223372e18, 9.2233715e18] i64 }
+    num_conv! { f32=> fan u8, fan u16,
+        fan [0.0, 4.294967e9] u32,
+        fan [0.0, 1.8446743e19] u64 }
+    num_conv! { f32=>
+        #[32] fan [-2.1474836e9, 2.1474835e9] isize,
+        #[32] fan [0.0, 4.294967e9] usize,
+        #[64] fan [-9.223372e18, 9.2233715e18] isize,
+        #[64] fan [0.0, 1.8446743e19] usize }
+
+    num_conv! { f64=> fan i8, fan i16, fan i32,
+        fan [-9.223372036854776e18, 9.223372036854775e18] i64 }
+    num_conv! { f64=> fan u8, fan u16, fan u32,
+        fan [0.0, 1.844674407370955e19] u64 }
+    num_conv! { f64=>
+        #[32] fan isize, #[32] fan usize,
+        #[64] fan [-9.223372036854776e18, 9.223372036854775e18] isize,
+        #[64] fan [0.0, 1.844674407370955e19] usize }
+}
+
+mod lang_char_to_int {
+    use TryFrom;
+    use ValueFrom;
+    use errors::{NoError, PosOverflow};
+
+    impl TryFrom<char> for u32 {
+        type Err = NoError;
+        #[inline]
+        fn try_from(src: char) -> Result<u32, Self::Err> {
+            Ok(src as u32)
+        }
+    }
+
+    impl TryFrom<char> for usize {
+        type Err = NoError;
+        #[inline]
+        fn try_from(src: char) -> Result<usize, Self::Err> {
+            Ok(src as usize)
+        }
+    }
+
+    impl TryFrom<char> for isize {
+        type Err = NoError;
+        #[inline]
+        fn try_from(src: char) -> Result<isize, Self::Err> {
+            Ok(src as isize)
+        }
+    }
+
+    macro_rules! conv_char_to_int {
+        ($($ts:ty),* $(,)*) => {
+            $(
+                impl TryFrom<char> for $ts {
+                    type Err = PosOverflow<char>;
+                    #[inline]
+                    fn try_from(src: char) -> Result<$ts, Self::Err> {
+                        <$ts as ValueFrom<_>>::value_from(src as u32)
+                            .map_err(|_| PosOverflow(src))
+                    }
+                }
+            )*
+        };
+    }
+
+    macro_rules! conv_char_to_int_wide {
+        ($($ts:ty),* $(,)*) => {
+            $(
+                impl TryFrom<char> for $ts {
+                    type Err = NoError;
+                    #[inline]
+                    fn try_from(src: char) -> Result<$ts, Self::Err> {
+                        <$ts as ValueFrom<_>>::value_from(src as u32)
+                    }
+                }
+            )*
+        };
+    }
+
+    conv_char_to_int! { i8, i16, i32, u8, u16 }
+    conv_char_to_int_wide! { i64, u64 }
+}
+
+mod lang_int_to_char {
+    use TryFrom;
+    use ValueFrom;
+    use errors::{NoError, Unrepresentable, UnwrapOk};
+
+    impl TryFrom<u8> for char {
+        type Err = NoError;
+        #[inline]
+        fn try_from(src: u8) -> Result<char, Self::Err> {
+            Ok(src as char)
+        }
+    }
+    impl TryFrom<u16> for char {
+        type Err = Unrepresentable<u16>;
+        #[inline]
+        fn try_from(src: u16) -> Result<char, Self::Err> {
+            TryFrom::try_from(
+                <u32 as ValueFrom<_>>::value_from(src).unwrap_ok()
+            ).map_err(|_| Unrepresentable(src))
+        }
+    }
+
+    impl TryFrom<u32> for char {
+        type Err = Unrepresentable<u32>;
+        #[inline]
+        fn try_from(src: u32) -> Result<char, Self::Err> {
+            ::std::char::from_u32(src).ok_or_else(|| Unrepresentable(src))
+        }
+    }
+
+    macro_rules! conv_int_to_char {
+        ($($ts:ty),* $(,)*) => {
+            $(
+                impl TryFrom<$ts> for char {
+                    type Err = Unrepresentable<$ts>;
+                    #[inline]
+                    fn try_from(src: $ts) -> Result<char, Self::Err> {
+                        <u32 as ValueFrom<_>>::value_from(src)
+                            .map_err(|_| Unrepresentable(src))
+                            .and_then(|usv| TryFrom::try_from(usv)
+                                .map_err(|_| Unrepresentable(src)))
+                    }
+                }
+            )*
+        };
+    }
+
+    conv_int_to_char! { i8, i16, i32, i64, isize, u64, usize }
+}
diff --git a/src/vendor/conv/src/lib.rs b/src/vendor/conv/src/lib.rs
new file mode 100644
index 0000000000..195b8d10ff
--- /dev/null
+++ b/src/vendor/conv/src/lib.rs
@@ -0,0 +1,525 @@
+/*!
+This crate provides a number of conversion traits with more specific semantics than those provided by `as` or `From`/`Into`.
+
+The goal with the traits provided here is to be more specific about what generic code can rely on, as well as provide reasonably self-describing alternatives to the standard `From`/`Into` traits. For example, although `T: From<U>` might be satisfied, it imposes no restrictions on the *kind* of conversion being implemented. As such, the traits in this crate try to be very specific about what conversions are allowed. This makes them less generally applicable, but more useful where they *do* apply.
+
+In addition, `From`/`Into` requires all conversions to succeed or panic. All conversion traits in this crate define an associated error type, allowing code to react to failed conversions as appropriate.
+
+
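A minimal usage sketch of the traits introduced in this vendored crate (illustrative only, not part of the patched file; it assumes the `use conv::*;` glob import shown in the crate's README example above):

```rust
// Exercises `ValueFrom`, the `ConvUtil`/`ConvAsUtil` helpers, `RangeError`,
// and the `Saturate`/`UnwrapOrSaturate` extensions defined in this patch.
extern crate conv;
use conv::*;

fn main() {
    // Lossless widening cannot fail, so the error type is `NoError`.
    assert_eq!(u16::value_from(42u8).unwrap_ok(), 42u16);

    // Narrowing reports the direction of overflow along with the input...
    assert_eq!(u8::value_from(300i32), Err(RangeError::PosOverflow(300)));

    // ...which callers can inspect, or simply saturate away.
    assert_eq!(u8::value_from(300i32).unwrap_or_saturate(), 255u8);

    // Float -> integer goes through an approximation scheme; overflow can be
    // saturated while NaN still surfaces as an error.
    assert_eq!(301.5f32.approx_as::<u8>().saturate(), Ok(255u8));
}
```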